code stringlengths 2-1.05M | repo_name stringlengths 5-104 | path stringlengths 4-251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2-1.05M
---|---|---|---|---|---|
import ssl
from pyOpenSSL import SSL
ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv2)
SSL.Context(method=SSL.SSLv2_METHOD)
SSL.Context(method=SSL.SSLv23_METHOD)
herp_derp(ssl_version=ssl.PROTOCOL_SSLv2)
herp_derp(method=SSL.SSLv2_METHOD)
herp_derp(method=SSL.SSLv23_METHOD)
# strict tests
ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv3)
ssl.wrap_socket(ssl_version=ssl.PROTOCOL_TLSv1)
SSL.Context(method=SSL.SSLv3_METHOD)
SSL.Context(method=SSL.TLSv1_METHOD)
herp_derp(ssl_version=ssl.PROTOCOL_SSLv3)
herp_derp(ssl_version=ssl.PROTOCOL_TLSv1)
herp_derp(method=SSL.SSLv3_METHOD)
herp_derp(method=SSL.TLSv1_METHOD)
ssl.wrap_socket()
def open_ssl_socket(version=ssl.PROTOCOL_SSLv2):
pass
def open_ssl_socket(version=SSL.SSLv2_METHOD):
pass
def open_ssl_socket(version=SSL.SSLv23_METHOD):
pass
# this one will pass ok
def open_ssl_socket(version=SSL.TLSv1_1_METHOD):
pass
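# For contrast, a secure construction on modern Python looks roughly like the
# sketch below (not exercised by the checks above; 'plain_socket' is a
# placeholder for an already-connected socket object):
# context = ssl.create_default_context()
# secure_sock = context.wrap_socket(plain_socket, server_hostname='example.com')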
| chair6/bandit | examples/ssl-insecure-version.py | Python | apache-2.0 | 892 |
# Copyright (c) 2003-2013 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id$
#
# Description:
# EAP packets
#
# Author:
# Aureliano Calvo
from impacket.helper import ProtocolPacket, Byte, Word, Long, ThreeBytesBigEndian
DOT1X_AUTHENTICATION = 0x888E
class EAPExpanded(ProtocolPacket):
"""EAP expanded data according to RFC 3748, section 5.7"""
WFA_SMI = 0x00372a
SIMPLE_CONFIG = 0x00000001
header_size = 7
tail_size = 0
vendor_id = ThreeBytesBigEndian(0)
vendor_type = Long(3, ">")
class EAPR(ProtocolPacket):
"""It represents a request or a response in EAP (codes 1 and 2)"""
IDENTITY = 0x01
EXPANDED = 0xfe
header_size = 1
tail_size = 0
type = Byte(0)
class EAP(ProtocolPacket):
REQUEST = 0x01
RESPONSE = 0x02
SUCCESS = 0x03
FAILURE = 0x04
header_size = 4
tail_size = 0
code = Byte(0)
identifier = Byte(1)
length = Word(2, ">")
class EAPOL(ProtocolPacket):
EAP_PACKET = 0x00
EAPOL_START = 0x01
EAPOL_LOGOFF = 0x02
EAPOL_KEY = 0x03
EAPOL_ENCAPSULATED_ASF_ALERT = 0x04
DOT1X_VERSION = 0x01
header_size = 4
tail_size = 0
version = Byte(0)
packet_type = Byte(1)
body_length = Word(2, ">")
| hecchi777/S3-SlaacSecuritySolution | impacket-0.9.11/impacket/eap.py | Python | apache-2.0 | 1,437 |
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import random
import time
from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo import messaging
from oslo.utils import timeutils
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from sqlalchemy import sql
from neutron.common import constants
from neutron.common import utils as n_utils
from neutron import context as n_ctx
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import model_base
from neutron.extensions import l3agentscheduler
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
L3_AGENTS_SCHEDULER_OPTS = [
cfg.StrOpt('router_scheduler_driver',
default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler',
help=_('Driver to use for scheduling '
'router to a default L3 agent')),
cfg.BoolOpt('router_auto_schedule', default=True,
help=_('Allow auto scheduling of routers to L3 agent.')),
cfg.BoolOpt('allow_automatic_l3agent_failover', default=False,
help=_('Automatically reschedule routers from offline L3 '
'agents to online L3 agents.')),
]
cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS)
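# The options registered above map onto neutron.conf entries along these lines
# (sketch; the values shown are just the defaults declared above):
#
#     [DEFAULT]
#     router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
#     router_auto_schedule = True
#     allow_automatic_l3agent_failover = False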
class RouterL3AgentBinding(model_base.BASEV2):
"""Represents binding between neutron routers and L3 agents."""
router_id = sa.Column(sa.String(36),
sa.ForeignKey("routers.id", ondelete='CASCADE'),
primary_key=True)
l3_agent = orm.relation(agents_db.Agent)
l3_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id", ondelete='CASCADE'),
primary_key=True)
class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
agentschedulers_db.AgentSchedulerDbMixin):
"""Mixin class to add l3 agent scheduler extension to plugins
using the l3 agent for routing.
"""
router_scheduler = None
def start_periodic_agent_status_check(self):
if not cfg.CONF.allow_automatic_l3agent_failover:
LOG.info(_LI("Skipping period L3 agent status check because "
"automatic router rescheduling is disabled."))
return
self.periodic_agent_loop = loopingcall.FixedIntervalLoopingCall(
self.reschedule_routers_from_down_agents)
interval = max(cfg.CONF.agent_down_time / 2, 1)
# add random initial delay to allow agents to check in after the
# neutron server first starts. random to offset multiple servers
self.periodic_agent_loop.start(interval=interval,
initial_delay=random.randint(interval, interval * 2))
def reschedule_routers_from_down_agents(self):
"""Reschedule routers from down l3 agents if admin state is up."""
# give agents extra time to handle transient failures
agent_dead_limit = cfg.CONF.agent_down_time * 2
# check for an abrupt clock change since last check. if a change is
# detected, sleep for a while to let the agents check in.
tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
timeutils.utcnow())
if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
LOG.warn(_LW("Time since last L3 agent reschedule check has "
"exceeded the interval between checks. Waiting "
"before check to allow agents to send a heartbeat "
"in case there was a clock adjustment."))
time.sleep(agent_dead_limit)
self._clock_jump_canary = timeutils.utcnow()
context = n_ctx.get_admin_context()
cutoff = timeutils.utcnow() - datetime.timedelta(
seconds=agent_dead_limit)
down_bindings = (
context.session.query(RouterL3AgentBinding).
join(agents_db.Agent).
filter(agents_db.Agent.heartbeat_timestamp < cutoff,
agents_db.Agent.admin_state_up).
outerjoin(l3_attrs_db.RouterExtraAttributes,
l3_attrs_db.RouterExtraAttributes.router_id ==
RouterL3AgentBinding.router_id).
filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
try:
for binding in down_bindings:
LOG.warn(_LW(
"Rescheduling router %(router)s from agent %(agent)s "
"because the agent did not report to the server in "
"the last %(dead_time)s seconds."),
{'router': binding.router_id,
'agent': binding.l3_agent_id,
'dead_time': agent_dead_limit})
try:
self.reschedule_router(context, binding.router_id)
except (l3agentscheduler.RouterReschedulingFailed,
messaging.RemoteError):
# Catch individual router rescheduling errors here
# so one broken one doesn't stop the iteration.
LOG.exception(_LE("Failed to reschedule router %s"),
binding.router_id)
except db_exc.DBError:
# Catch DB errors here so a transient DB connectivity issue
# doesn't stop the loopingcall.
LOG.exception(_LE("Exception encountered during router "
"rescheduling."))
def validate_agent_router_combination(self, context, agent, router):
"""Validate if the router can be correctly assigned to the agent.
:raises: RouterL3AgentMismatch if attempting to assign DVR router
to legacy agent, or centralized router to compute's L3 agents.
:raises: InvalidL3Agent if attempting to assign router to an
unsuitable agent (disabled, type != L3, incompatible configuration)
:raises: DVRL3CannotAssignToDvrAgent if attempting to assign DVR
router from one DVR Agent to another.
"""
is_distributed = router.get('distributed')
agent_conf = self.get_configuration_dict(agent)
agent_mode = agent_conf.get('agent_mode', 'legacy')
router_type = ('distributed' if is_distributed else 'centralized')
is_agent_router_types_incompatible = (
agent_mode == 'dvr' and not is_distributed
or agent_mode == 'legacy' and is_distributed
)
if is_agent_router_types_incompatible:
raise l3agentscheduler.RouterL3AgentMismatch(
router_type=router_type, router_id=router['id'],
agent_mode=agent_mode, agent_id=agent['id'])
if agent_mode == 'dvr' and is_distributed:
raise l3agentscheduler.DVRL3CannotAssignToDvrAgent(
router_type=router_type, router_id=router['id'],
agent_id=agent['id'])
is_wrong_type_or_unsuitable_agent = (
agent['agent_type'] != constants.AGENT_TYPE_L3 or
not agent['admin_state_up'] or
not self.get_l3_agent_candidates(context, router, [agent])
)
if is_wrong_type_or_unsuitable_agent:
raise l3agentscheduler.InvalidL3Agent(id=agent['id'])
def check_agent_router_scheduling_needed(self, context, agent, router):
"""Check if the router scheduling is needed.
:raises: RouterHostedByL3Agent if router is already assigned
to a different agent.
:returns: True if scheduling is needed, otherwise False
"""
router_id = router['id']
agent_id = agent['id']
query = context.session.query(RouterL3AgentBinding)
bindings = query.filter_by(router_id=router_id).all()
if not bindings:
return True
for binding in bindings:
if binding.l3_agent_id == agent_id:
# router already bound to the agent we need
return False
if router.get('distributed'):
return False
# non-dvr case: centralized router is already bound to some agent
raise l3agentscheduler.RouterHostedByL3Agent(
router_id=router_id,
agent_id=bindings[0].l3_agent_id)
def create_router_to_agent_binding(self, context, agent, router):
"""Create router to agent binding."""
router_id = router['id']
agent_id = agent['id']
if self.router_scheduler:
try:
self.router_scheduler.bind_router(context, router_id, agent)
except db_exc.DBError:
raise l3agentscheduler.RouterSchedulingFailed(
router_id=router_id, agent_id=agent_id)
def add_router_to_l3_agent(self, context, agent_id, router_id):
"""Add a l3 agent to host a router."""
with context.session.begin(subtransactions=True):
router = self.get_router(context, router_id)
agent = self._get_agent(context, agent_id)
self.validate_agent_router_combination(context, agent, router)
if self.check_agent_router_scheduling_needed(
context, agent, router):
self.create_router_to_agent_binding(context, agent, router)
else:
return
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
l3_notifier.router_added_to_agent(
context, [router_id], agent.host)
def remove_router_from_l3_agent(self, context, agent_id, router_id):
"""Remove the router from l3 agent.
After removal, the router will be non-hosted until there is update
which leads to re-schedule or be added to another agent manually.
"""
agent = self._get_agent(context, agent_id)
self._unbind_router(context, router_id, agent_id)
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
l3_notifier.router_removed_from_agent(
context, router_id, agent.host)
def _unbind_router(self, context, router_id, agent_id):
with context.session.begin(subtransactions=True):
query = context.session.query(RouterL3AgentBinding)
query = query.filter(
RouterL3AgentBinding.router_id == router_id,
RouterL3AgentBinding.l3_agent_id == agent_id)
try:
binding = query.one()
except exc.NoResultFound:
raise l3agentscheduler.RouterNotHostedByL3Agent(
router_id=router_id, agent_id=agent_id)
context.session.delete(binding)
def reschedule_router(self, context, router_id, candidates=None):
"""Reschedule router to a new l3 agent
Remove the router from the agent(s) currently hosting it and
schedule it again
"""
cur_agents = self.list_l3_agents_hosting_router(
context, router_id)['agents']
with context.session.begin(subtransactions=True):
for agent in cur_agents:
self._unbind_router(context, router_id, agent['id'])
new_agent = self.schedule_router(context, router_id,
candidates=candidates)
if not new_agent:
raise l3agentscheduler.RouterReschedulingFailed(
router_id=router_id)
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
for agent in cur_agents:
l3_notifier.router_removed_from_agent(
context, router_id, agent['host'])
l3_notifier.router_added_to_agent(
context, [router_id], new_agent.host)
def list_routers_on_l3_agent(self, context, agent_id):
query = context.session.query(RouterL3AgentBinding.router_id)
query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id)
router_ids = [item[0] for item in query]
if router_ids:
return {'routers':
self.get_routers(context, filters={'id': router_ids})}
else:
return {'routers': []}
def list_active_sync_routers_on_active_l3_agent(
self, context, host, router_ids):
agent = self._get_agent_by_type_and_host(
context, constants.AGENT_TYPE_L3, host)
if not agent.admin_state_up:
return []
query = context.session.query(RouterL3AgentBinding.router_id)
query = query.filter(
RouterL3AgentBinding.l3_agent_id == agent.id)
if router_ids:
query = query.filter(
RouterL3AgentBinding.router_id.in_(router_ids))
router_ids = [item[0] for item in query]
if router_ids:
if n_utils.is_extension_supported(self,
constants.L3_HA_MODE_EXT_ALIAS):
return self.get_ha_sync_data_for_host(context, host,
router_ids=router_ids,
active=True)
else:
return self.get_sync_data(context, router_ids=router_ids,
active=True)
else:
return []
def get_l3_agents_hosting_routers(self, context, router_ids,
admin_state_up=None,
active=None):
if not router_ids:
return []
query = context.session.query(RouterL3AgentBinding)
if len(router_ids) > 1:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id.in_(router_ids))
else:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id == router_ids[0])
if admin_state_up is not None:
query = (query.filter(agents_db.Agent.admin_state_up ==
admin_state_up))
l3_agents = [binding.l3_agent for binding in query]
if active is not None:
l3_agents = [l3_agent for l3_agent in
l3_agents if not
agents_db.AgentDbMixin.is_agent_down(
l3_agent['heartbeat_timestamp'])]
return l3_agents
def _get_l3_bindings_hosting_routers(self, context, router_ids):
if not router_ids:
return []
query = context.session.query(RouterL3AgentBinding)
if len(router_ids) > 1:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id.in_(router_ids))
else:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id == router_ids[0])
return query.all()
def list_l3_agents_hosting_router(self, context, router_id):
with context.session.begin(subtransactions=True):
bindings = self._get_l3_bindings_hosting_routers(
context, [router_id])
results = []
for binding in bindings:
l3_agent_dict = self._make_agent_dict(binding.l3_agent)
results.append(l3_agent_dict)
if results:
return {'agents': results}
else:
return {'agents': []}
def get_l3_agents(self, context, active=None, filters=None):
query = context.session.query(agents_db.Agent)
query = query.filter(
agents_db.Agent.agent_type == constants.AGENT_TYPE_L3)
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
if filters:
for key, value in filters.iteritems():
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
agent_modes = filters.get('agent_modes', [])
if agent_modes:
agent_mode_key = '\"agent_mode\": \"'
configuration_filter = (
[agents_db.Agent.configurations.contains('%s%s\"' %
(agent_mode_key, agent_mode))
for agent_mode in agent_modes])
query = query.filter(or_(*configuration_filter))
return [l3_agent
for l3_agent in query
if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
active, l3_agent)]
def check_ports_exist_on_l3agent(self, context, l3_agent, router_id):
"""
        This function checks for the existence of DVR-serviceable
        ports on the host running the given L3 agent.
"""
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
core_plugin = manager.NeutronManager.get_plugin()
filter = {'fixed_ips': {'subnet_id': subnet_ids}}
ports = core_plugin.get_ports(context, filters=filter)
for port in ports:
if (n_utils.is_dvr_serviced(port['device_owner']) and
l3_agent['host'] == port['binding:host_id']):
return True
return False
def get_snat_candidates(self, sync_router, l3_agents):
"""Get the valid snat enabled l3 agents for the distributed router."""
candidates = []
is_router_distributed = sync_router.get('distributed', False)
if not is_router_distributed:
return candidates
for l3_agent in l3_agents:
if not l3_agent.admin_state_up:
continue
agent_conf = self.get_configuration_dict(l3_agent)
agent_mode = agent_conf.get('agent_mode', 'legacy')
if agent_mode != 'dvr_snat':
continue
router_id = agent_conf.get('router_id', None)
use_namespaces = agent_conf.get('use_namespaces', True)
if not use_namespaces and router_id != sync_router['id']:
continue
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
candidates.append(l3_agent)
return candidates
def get_l3_agent_candidates(self, context, sync_router, l3_agents):
"""Get the valid l3 agents for the router from a list of l3_agents."""
candidates = []
for l3_agent in l3_agents:
if not l3_agent.admin_state_up:
continue
agent_conf = self.get_configuration_dict(l3_agent)
router_id = agent_conf.get('router_id', None)
use_namespaces = agent_conf.get('use_namespaces', True)
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
agent_mode = agent_conf.get('agent_mode', 'legacy')
if not use_namespaces and router_id != sync_router['id']:
continue
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
is_router_distributed = sync_router.get('distributed', False)
if agent_mode in ('legacy', 'dvr_snat') and (
not is_router_distributed):
candidates.append(l3_agent)
elif is_router_distributed and agent_mode.startswith('dvr') and (
self.check_ports_exist_on_l3agent(
context, l3_agent, sync_router['id'])):
candidates.append(l3_agent)
return candidates
def auto_schedule_routers(self, context, host, router_ids):
if self.router_scheduler:
return self.router_scheduler.auto_schedule_routers(
self, context, host, router_ids)
def schedule_router(self, context, router, candidates=None):
if self.router_scheduler:
return self.router_scheduler.schedule(
self, context, router, candidates=candidates)
def schedule_routers(self, context, routers):
"""Schedule the routers to l3 agents."""
for router in routers:
self.schedule_router(context, router, candidates=None)
def get_l3_agent_with_min_routers(self, context, agent_ids):
"""Return l3 agent with the least number of routers."""
query = context.session.query(
agents_db.Agent,
func.count(
RouterL3AgentBinding.router_id
).label('count')).outerjoin(RouterL3AgentBinding).group_by(
RouterL3AgentBinding.l3_agent_id).order_by('count')
res = query.filter(agents_db.Agent.id.in_(agent_ids)).first()
return res[0]
| projectcalico/calico-neutron | neutron/db/l3_agentschedulers_db.py | Python | apache-2.0 | 22,584 |
def check_resource_count(expected_count):
test.assertEqual(expected_count, len(reality.all_resources()))
example_template = Template({
'A': RsrcDef({}, []),
'B': RsrcDef({'a': '4alpha'}, ['A']),
'C': RsrcDef({'a': 'foo'}, ['B']),
'D': RsrcDef({'a': 'bar'}, ['C']),
})
engine.create_stack('foo', example_template)
engine.noop(1)
example_template2 = Template({
'A': RsrcDef({}, []),
'B': RsrcDef({'a': '4alpha'}, ['A']),
'C': RsrcDef({'a': 'blarg'}, ['B']),
'D': RsrcDef({'a': 'wibble'}, ['C']),
})
engine.update_stack('foo', example_template2)
engine.call(check_resource_count, 2)
engine.noop(11)
engine.call(verify, example_template2)
| zaneb/heat-convergence-prototype | scenarios/update_interrupt_create.py | Python | apache-2.0 | 673 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import argparse
import kudu
from kudu.client import Partitioning
# Parse arguments
parser = argparse.ArgumentParser(description='Basic Example for Kudu Python.')
parser.add_argument('--masters', '-m', nargs='+', default='localhost',
help='The master address(es) to connect to Kudu.')
parser.add_argument('--ports', '-p', nargs='+', default='7051',
help='The master server port(s) to connect to Kudu.')
args = parser.parse_args()
# Connect to Kudu master server(s).
client = kudu.connect(host=args.masters, port=args.ports)
# Define a schema for a new table.
builder = kudu.schema_builder()
builder.add_column('key').type(kudu.int64).nullable(False).primary_key()
builder.add_column('ts_val', type_=kudu.unixtime_micros, nullable=False, compression='lz4')
schema = builder.build()
# Define the partitioning schema.
partitioning = Partitioning().add_hash_partitions(column_names=['key'], num_buckets=3)
# Delete table if it already exists.
if client.table_exists('python-example'):
client.delete_table('python-example')
# Create a new table.
client.create_table('python-example', schema, partitioning)
# Open a table.
table = client.table('python-example')
# Create a new session so that we can apply write operations.
session = client.new_session()
# Insert a row.
op = table.new_insert({'key': 1, 'ts_val': datetime.utcnow()})
session.apply(op)
# Upsert a row.
op = table.new_upsert({'key': 2, 'ts_val': "2016-01-01T00:00:00.000000"})
session.apply(op)
# Update a row.
op = table.new_update({'key': 1, 'ts_val': ("2017-01-01", "%Y-%m-%d")})
session.apply(op)
# Delete a row.
op = table.new_delete({'key': 2})
session.apply(op)
# Flush write operations, if failures occur, print them.
try:
session.flush()
except kudu.KuduBadStatus:
print(session.get_pending_errors())
# Create a scanner and add a predicate.
scanner = table.scanner()
scanner.add_predicate(table['ts_val'] == datetime(2017, 1, 1))
# Open scanner and print all tuples.
# Note: This doesn't scale for large scans
# The expected output: [(1, datetime.datetime(2017, 1, 1, 0, 0, tzinfo=<UTC>))]
print(scanner.open().read_all_tuples())
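# For larger results, a batched scan avoids materializing every row at once.
# This is a sketch that assumes the kudu-python scanner exposes
# has_more_rows()/next_batch() in the installed client version.
batched_scanner = table.scanner()
batched_scanner.add_predicate(table['ts_val'] == datetime(2017, 1, 1))
batched_scanner.open()
while batched_scanner.has_more_rows():
    # Each batch is converted to tuples and printed incrementally.
    for row in batched_scanner.next_batch().as_tuples():
        print(row)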
| helifu/kudu | examples/python/basic-python-example/basic_example.py | Python | apache-2.0 | 2,977 |
from Node import error
SYNTAX_NODE_SERIALIZATION_CODES = {
# 0 is 'Token'. Needs to be defined manually
# 1 is 'Unknown'. Needs to be defined manually
'UnknownDecl': 2,
'TypealiasDecl': 3,
'AssociatedtypeDecl': 4,
'IfConfigDecl': 5,
'PoundErrorDecl': 6,
'PoundWarningDecl': 7,
'PoundSourceLocation': 8,
'ClassDecl': 9,
'StructDecl': 10,
'ProtocolDecl': 11,
'ExtensionDecl': 12,
'FunctionDecl': 13,
'InitializerDecl': 14,
'DeinitializerDecl': 15,
'SubscriptDecl': 16,
'ImportDecl': 17,
'AccessorDecl': 18,
'VariableDecl': 19,
'EnumCaseDecl': 20,
'EnumDecl': 21,
'OperatorDecl': 22,
'PrecedenceGroupDecl': 23,
'UnknownExpr': 24,
'InOutExpr': 25,
'PoundColumnExpr': 26,
'TryExpr': 27,
'IdentifierExpr': 28,
'SuperRefExpr': 29,
'NilLiteralExpr': 30,
'DiscardAssignmentExpr': 31,
'AssignmentExpr': 32,
'SequenceExpr': 33,
'PoundLineExpr': 34,
'PoundFileExpr': 35,
'PoundFunctionExpr': 36,
'PoundDsohandleExpr': 37,
'SymbolicReferenceExpr': 38,
'PrefixOperatorExpr': 39,
'BinaryOperatorExpr': 40,
'ArrowExpr': 41,
'FloatLiteralExpr': 42,
'TupleExpr': 43,
'ArrayExpr': 44,
'DictionaryExpr': 45,
'ImplicitMemberExpr': 46,
'IntegerLiteralExpr': 47,
'StringLiteralExpr': 48,
'BooleanLiteralExpr': 49,
'TernaryExpr': 50,
'MemberAccessExpr': 51,
'DotSelfExpr': 52,
'IsExpr': 53,
'AsExpr': 54,
'TypeExpr': 55,
'ClosureExpr': 56,
'UnresolvedPatternExpr': 57,
'FunctionCallExpr': 58,
'SubscriptExpr': 59,
'OptionalChainingExpr': 60,
'ForcedValueExpr': 61,
'PostfixUnaryExpr': 62,
'SpecializeExpr': 63,
'StringInterpolationExpr': 64,
'KeyPathExpr': 65,
'KeyPathBaseExpr': 66,
'ObjcKeyPathExpr': 67,
'ObjcSelectorExpr': 68,
'EditorPlaceholderExpr': 69,
'ObjectLiteralExpr': 70,
'UnknownStmt': 71,
'ContinueStmt': 72,
'WhileStmt': 73,
'DeferStmt': 74,
'ExpressionStmt': 75,
'RepeatWhileStmt': 76,
'GuardStmt': 77,
'ForInStmt': 78,
'SwitchStmt': 79,
'DoStmt': 80,
'ReturnStmt': 81,
'FallthroughStmt': 82,
'BreakStmt': 83,
'DeclarationStmt': 84,
'ThrowStmt': 85,
'IfStmt': 86,
'Decl': 87,
'Expr': 88,
'Stmt': 89,
'Type': 90,
'Pattern': 91,
'CodeBlockItem': 92,
'CodeBlock': 93,
'DeclNameArgument': 94,
'DeclNameArguments': 95,
'FunctionCallArgument': 96,
'TupleElement': 97,
'ArrayElement': 98,
'DictionaryElement': 99,
'ClosureCaptureItem': 100,
'ClosureCaptureSignature': 101,
'ClosureParam': 102,
'ClosureSignature': 103,
'StringSegment': 104,
'ExpressionSegment': 105,
'ObjcNamePiece': 106,
'TypeInitializerClause': 107,
'ParameterClause': 108,
'ReturnClause': 109,
'FunctionSignature': 110,
'IfConfigClause': 111,
'PoundSourceLocationArgs': 112,
'DeclModifier': 113,
'InheritedType': 114,
'TypeInheritanceClause': 115,
'MemberDeclBlock': 116,
'MemberDeclListItem': 117,
'SourceFile': 118,
'InitializerClause': 119,
'FunctionParameter': 120,
'AccessLevelModifier': 121,
'AccessPathComponent': 122,
'AccessorParameter': 123,
'AccessorBlock': 124,
'PatternBinding': 125,
'EnumCaseElement': 126,
'OperatorPrecedenceAndTypes': 127,
'PrecedenceGroupRelation': 128,
'PrecedenceGroupNameElement': 129,
'PrecedenceGroupAssignment': 130,
'PrecedenceGroupAssociativity': 131,
'Attribute': 132,
'LabeledSpecializeEntry': 133,
'ImplementsAttributeArguments': 134,
'ObjCSelectorPiece': 135,
'WhereClause': 136,
'ConditionElement': 137,
'AvailabilityCondition': 138,
'MatchingPatternCondition': 139,
'OptionalBindingCondition': 140,
'ElseIfContinuation': 141,
'ElseBlock': 142,
'SwitchCase': 143,
'SwitchDefaultLabel': 144,
'CaseItem': 145,
'SwitchCaseLabel': 146,
'CatchClause': 147,
'GenericWhereClause': 148,
'SameTypeRequirement': 149,
'GenericParameter': 150,
'GenericParameterClause': 151,
'ConformanceRequirement': 152,
'CompositionTypeElement': 153,
'TupleTypeElement': 154,
'GenericArgument': 155,
'GenericArgumentClause': 156,
'TypeAnnotation': 157,
'TuplePatternElement': 158,
'AvailabilityArgument': 159,
'AvailabilityLabeledArgument': 160,
'AvailabilityVersionRestriction': 161,
'VersionTuple': 162,
'CodeBlockItemList': 163,
'FunctionCallArgumentList': 164,
'TupleElementList': 165,
'ArrayElementList': 166,
'DictionaryElementList': 167,
'StringInterpolationSegments': 168,
'DeclNameArgumentList': 169,
'ExprList': 170,
'ClosureCaptureItemList': 171,
'ClosureParamList': 172,
'ObjcName': 173,
'FunctionParameterList': 174,
'IfConfigClauseList': 175,
'InheritedTypeList': 176,
'MemberDeclList': 177,
'ModifierList': 178,
'AccessPath': 179,
'AccessorList': 180,
'PatternBindingList': 181,
'EnumCaseElementList': 182,
'PrecedenceGroupAttributeList': 183,
'PrecedenceGroupNameList': 184,
'TokenList': 185,
'NonEmptyTokenList': 186,
'AttributeList': 187,
'SpecializeAttributeSpecList': 188,
'ObjCSelector': 189,
'SwitchCaseList': 190,
'CatchClauseList': 191,
'CaseItemList': 192,
'ConditionElementList': 193,
'GenericRequirementList': 194,
'GenericParameterList': 195,
'CompositionTypeElementList': 196,
'TupleTypeElementList': 197,
'GenericArgumentList': 198,
'TuplePatternElementList': 199,
'AvailabilitySpecList': 200,
'UnknownPattern': 201,
'EnumCasePattern': 202,
'IsTypePattern': 203,
'OptionalPattern': 204,
'IdentifierPattern': 205,
'AsTypePattern': 206,
'TuplePattern': 207,
'WildcardPattern': 208,
'ExpressionPattern': 209,
'ValueBindingPattern': 210,
'UnknownType': 211,
'SimpleTypeIdentifier': 212,
'MemberTypeIdentifier': 213,
'ClassRestrictionType': 214,
'ArrayType': 215,
'DictionaryType': 216,
'MetatypeType': 217,
'OptionalType': 218,
'ImplicitlyUnwrappedOptionalType': 219,
'CompositionType': 220,
'TupleType': 221,
'FunctionType': 222,
'AttributedType': 223,
'YieldStmt': 224,
'YieldList': 225,
'IdentifierList': 226,
'NamedAttributeStringArgument': 227,
'DeclName': 228,
'PoundAssertStmt': 229,
}
def verify_syntax_node_serialization_codes(nodes, serialization_codes):
# Verify that all nodes have serialization codes
for node in nodes:
if not node.is_base() and node.syntax_kind not in serialization_codes:
error('Node %s has no serialization code' % node.syntax_kind)
# Verify that no serialization code is used twice
used_codes = set()
for serialization_code in serialization_codes.values():
if serialization_code in used_codes:
error("Serialization code %d used twice" % serialization_code)
used_codes.add(serialization_code)
def get_serialization_code(syntax_kind):
return SYNTAX_NODE_SERIALIZATION_CODES[syntax_kind]
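# Example lookup against the table above (the value comes straight from the map):
#     get_serialization_code('ClassDecl')   # -> 9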
| amraboelela/swift | utils/gyb_syntax_support/NodeSerializationCodes.py | Python | apache-2.0 | 7,221 |
#!/usr/bin/env python3
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
""" Restore record file by replacing its video frames with image frames. """
import datetime
import errno
import glob
import os
import shutil
import time
from absl import app
from absl import flags
from absl import logging
import cv2
from cyber.python.cyber_py3.record import RecordReader, RecordWriter
from modules.drivers.proto.sensor_image_pb2 import CompressedImage
flags.DEFINE_string('from_record', None, 'The source record file that needs to be restored.')
flags.DEFINE_string('to_record', None, 'The restored record file.')
# The compressed channels that have videos we need to decode
IMAGE_FRONT_6MM_CHANNEL = '/apollo/sensor/camera/front_6mm/image/compressed'
IMAGE_FRONT_12MM_CHANNEL = '/apollo/sensor/camera/front_12mm/image/compressed'
IMAGE_REAR_6MM_CHANNEL = '/apollo/sensor/camera/rear_6mm/image/compressed'
IMAGE_LEFT_FISHEYE_CHANNEL = '/apollo/sensor/camera/left_fisheye/image/compressed'
IMAGE_RIGHT_FISHEYE_CHANNEL = '/apollo/sensor/camera/right_fisheye/image/compressed'
VIDEO_FRONT_6MM_CHANNEL = '/apollo/sensor/camera/front_6mm/video/compressed'
VIDEO_FRONT_12MM_CHANNEL = '/apollo/sensor/camera/front_12mm/video/compressed'
VIDEO_REAR_6MM_CHANNEL = '/apollo/sensor/camera/rear_6mm/video/compressed'
VIDEO_LEFT_FISHEYE_CHANNEL = '/apollo/sensor/camera/left_fisheye/video/compressed'
VIDEO_RIGHT_FISHEYE_CHANNEL = '/apollo/sensor/camera/right_fisheye/video/compressed'
VIDEO_CHANNELS = [
IMAGE_FRONT_6MM_CHANNEL,
IMAGE_FRONT_12MM_CHANNEL,
IMAGE_REAR_6MM_CHANNEL,
IMAGE_LEFT_FISHEYE_CHANNEL,
IMAGE_RIGHT_FISHEYE_CHANNEL,
VIDEO_FRONT_6MM_CHANNEL,
VIDEO_FRONT_12MM_CHANNEL,
VIDEO_REAR_6MM_CHANNEL,
VIDEO_LEFT_FISHEYE_CHANNEL,
VIDEO_RIGHT_FISHEYE_CHANNEL,
]
VIDEO_IMAGE_MAP = {
IMAGE_FRONT_6MM_CHANNEL: IMAGE_FRONT_6MM_CHANNEL,
IMAGE_FRONT_12MM_CHANNEL: IMAGE_FRONT_12MM_CHANNEL,
IMAGE_REAR_6MM_CHANNEL: IMAGE_REAR_6MM_CHANNEL,
IMAGE_LEFT_FISHEYE_CHANNEL: IMAGE_LEFT_FISHEYE_CHANNEL,
IMAGE_RIGHT_FISHEYE_CHANNEL: IMAGE_RIGHT_FISHEYE_CHANNEL,
VIDEO_FRONT_6MM_CHANNEL: IMAGE_FRONT_6MM_CHANNEL,
VIDEO_FRONT_12MM_CHANNEL: IMAGE_FRONT_12MM_CHANNEL,
VIDEO_REAR_6MM_CHANNEL: IMAGE_REAR_6MM_CHANNEL,
VIDEO_LEFT_FISHEYE_CHANNEL: IMAGE_LEFT_FISHEYE_CHANNEL,
VIDEO_RIGHT_FISHEYE_CHANNEL: IMAGE_RIGHT_FISHEYE_CHANNEL,
}
class VideoConverter(object):
"""Convert video into images."""
def __init__(self, work_dir, topic):
        # Initial type of video frames, as defined in the Apollo video driver proto.
        # The initial frame carries metadata shared by the tens of frames that follow it.
self.initial_frame_type = 1
self.image_ids = []
self.first_initial_found = False
video_dir = os.path.join(work_dir, 'videos')
self.video_file = os.path.join(video_dir, '{}.h265'.format(topic))
self.image_dir = '{}_images'.format(self.video_file)
makedirs(video_dir)
makedirs(self.image_dir)
self.frame_writer = open(self.video_file, 'wb+')
def close_writer(self):
"""Close the video frames writer"""
self.frame_writer.close()
def write_frame(self, py_message):
"""Write video frames into binary format file"""
if not self.first_initial_found:
proto = image_message_to_proto(py_message)
if proto.frame_type != self.initial_frame_type:
return
self.first_initial_found = True
self.frame_writer.write(py_message.message)
self.image_ids.append(get_message_id(py_message.timestamp, py_message.topic))
def decode(self):
"""Decode video file into images"""
video_decoder_exe = '/apollo/bazel-bin/modules/drivers/video/tools/decode_video/video2jpg'
return_code = os.system('{} --input_video={} --output_dir={}'.format(
video_decoder_exe, self.video_file, self.image_dir))
if return_code != 0:
logging.error('Failed to execute video2jpg for video {}'.format(self.video_file))
return False
generated_images = sorted(glob.glob('{}/*.jpg'.format(self.image_dir)))
if len(generated_images) != len(self.image_ids):
logging.error('Mismatch between original {} and generated frames {}'.format(
len(self.image_ids), len(generated_images)))
return False
for idx in range(len(generated_images)):
os.rename(generated_images[idx], os.path.join(self.image_dir, self.image_ids[idx]))
return True
def move_images(self, overall_image_dir):
"""Move self's images to overall image dir"""
for image_file in os.listdir(self.image_dir):
shutil.move(os.path.join(self.image_dir, image_file),
os.path.join(overall_image_dir, image_file))
def restore_record(input_record, output_record):
"""Entrance of processing."""
# Define working dirs that store intermediate results in the middle of processing
work_dir = 'restore_video_work_dir_{}'.format(
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S'))
# Decode videos
converters = {}
for topic in VIDEO_CHANNELS:
converters[topic] = VideoConverter(work_dir, topic)
reader = RecordReader(input_record)
for message in reader.read_messages():
if message.topic in VIDEO_CHANNELS:
converters[message.topic].write_frame(message)
image_dir = os.path.join(work_dir, 'images')
makedirs(image_dir)
for topic in VIDEO_CHANNELS:
converters[topic].close_writer()
converters[topic].decode()
converters[topic].move_images(image_dir)
# Restore target record file
writer = RecordWriter(0, 0)
writer.open(output_record)
topic_descs = {}
counter = 0
reader = RecordReader(input_record)
for message in reader.read_messages():
message_content = message.message
message_topic = message.topic
if message.topic in VIDEO_CHANNELS:
message_content = retrieve_image(image_dir, message)
message_topic = VIDEO_IMAGE_MAP[message.topic]
if not message_content:
continue
counter += 1
if counter % 1000 == 0:
logging.info('rewriting {} th message to record {}'.format(counter, output_record))
writer.write_message(message_topic, message_content, message.timestamp)
if message_topic not in topic_descs:
topic_descs[message_topic] = reader.get_protodesc(message_topic)
writer.write_channel(message_topic, message.data_type, topic_descs[message_topic])
writer.close()
logging.info('All Done, converted record: {}'.format(output_record))
def retrieve_image(image_dir, message):
"""Actually change the content of message from video bytes to image bytes"""
message_id = get_message_id(message.timestamp, message.topic)
message_path = os.path.join(image_dir, message_id)
if not os.path.exists(message_path):
logging.error('message {} not found in image dir'.format(message_id))
return None
img_bin = cv2.imread(message_path)
    # Compare against None explicitly to avoid ambiguity
if img_bin is None:
logging.error('failed to read original message: {}'.format(message_path))
return None
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 95]
result, encode_img = cv2.imencode('.jpg', img_bin, encode_param)
if not result:
logging.error('failed to encode message {}'.format(message_id))
return None
message_proto = image_message_to_proto(message)
message_proto.format = '; jpeg compressed bgr8'
message_proto.data = message_proto.data.replace(message_proto.data[:], bytearray(encode_img))
return message_proto.SerializeToString()
def get_message_id(timestamp, topic):
"""Unify the way to get a unique identifier for the given message"""
return '{}{}'.format(timestamp, topic.replace('/', '_'))
def image_message_to_proto(py_message):
"""Message to prototype"""
message_proto = CompressedImage()
message_proto.ParseFromString(py_message.message)
return message_proto
def makedirs(dir_path):
"""Make directories recursively."""
if os.path.exists(dir_path):
return
try:
os.makedirs(dir_path)
except OSError as error:
if error.errno != errno.EEXIST:
logging.error('Failed to makedir ' + dir_path)
raise
def main(argv):
"""Main process."""
if not flags.FLAGS.from_record or not os.path.exists(flags.FLAGS.from_record):
logging.error('Please provide valid source record file.')
return
to_record = flags.FLAGS.to_record
if not to_record:
to_record = '{}_restored'.format(flags.FLAGS.from_record)
logging.warn('The default restored record file is set as {}'.format(to_record))
restore_record(flags.FLAGS.from_record, to_record)
if __name__ == '__main__':
app.run(main)
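# A typical invocation looks roughly like this (sketch; record paths are
# placeholders, and --to_record may be omitted to use the default suffix):
#     python3 restore_video_record.py \
#         --from_record=/path/to/source.record \
#         --to_record=/path/to/source.record_restored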
| xiaoxq/apollo | modules/tools/restore_video_record/restore_video_record.py | Python | apache-2.0 | 9,788 |
'''
Datastore via remote webdav connection
'''
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
import os
import tarfile
import logging
from fs.contrib.davfs import DAVFS
from urllib.parse import urlparse
from contextlib import closing
from sumatra.core import component
from .archivingfs import ArchivingFileSystemDataStore, ArchivedDataFile, TIMESTAMP_FORMAT
class DavFsDataItem(ArchivedDataFile):
"""Base class for data item classes, that may represent files or database records."""
def __init__(self, path, store):
        # needs to be set first because _get_info is called in the base __init__
self.store = store
super(DavFsDataItem, self).__init__(path, store)
def get_content(self, max_length=None):
obj = self.store.dav_fs.open(self.tarfile_path, 'rb')
with closing(tarfile.open(fileobj=obj)) as data_archive:
f = data_archive.extractfile(self.path)
if max_length:
content = f.read(max_length)
else:
content = f.read()
f.close()
return content
# mandatory repeat
content = property(fget=get_content)
def _get_info(self):
obj = self.store.dav_fs.open(self.tarfile_path, 'rb')
with closing(tarfile.open(fileobj=obj)) as data_archive:
return data_archive.getmember(self.path)
return tarfile.TarInfo()
@component
class DavFsDataStore(ArchivingFileSystemDataStore):
"""ArchivingFileSystemDataStore that archives to webdav storage"""
data_item_class = DavFsDataItem
def __init__(self, root, dav_url, dav_user=None, dav_pw=None):
super(DavFsDataStore, self).__init__(root)
parsed = urlparse(dav_url)
self.dav_user = dav_user or parsed.username
self.dav_pw = dav_pw or parsed.password
self.dav_url = parsed.geturl()
self.dav_fs = DAVFS(url=self.dav_url, credentials={'username': self.dav_user, 'password': self.dav_pw})
def __getstate__(self):
return {'root': self.root, 'dav_url': self.dav_url, 'dav_user': self.dav_user, 'dav_pw': self.dav_pw}
def find_new_data(self, timestamp):
"""Finds newly created/changed data items"""
new_files = self._find_new_data_files(timestamp)
label = timestamp.strftime(TIMESTAMP_FORMAT)
archive_paths = self._archive(label, new_files)
return [DavFsDataItem(path, self).generate_key()
for path in archive_paths]
def _archive(self, label, files, delete_originals=True):
"""
Archives files and, by default, deletes the originals.
"""
fs = self.dav_fs
if not fs.isdir(self.archive_store):
fs.makedir(self.archive_store, recursive=True)
tf_obj = fs.open(os.path.join(self.archive_store, label + ".tar.gz"), mode='wb')
with tarfile.open(fileobj=tf_obj, mode='w:gz') as tf:
logging.info("Archiving data to file %s" % tf.name)
# Add data files
archive_paths = []
for file_path in files:
archive_path = os.path.join(label, file_path)
tf.add(os.path.join(self.root, file_path), archive_path)
archive_paths.append(archive_path)
tf.close()
tf_obj.close()
# Delete original files.
if delete_originals:
for file_path in files:
os.remove(os.path.join(self.root, file_path))
self._last_label = label # useful for testing
return archive_paths
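# Minimal usage sketch (assumed values; a reachable WebDAV server and valid
# credentials are required before DAVFS will connect):
#
#     store = DavFsDataStore('/path/to/project/root',
#                            'https://dav.example.org/sumatra',
#                            dav_user='alice', dav_pw='secret')
#     keys = store.find_new_data(timestamp)  # timestamp: a datetime.datetime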
| open-research/sumatra | sumatra/datastore/davfs.py | Python | bsd-2-clause | 3,593 |
import numpy as np
import pandas as pd
class DSF_SIC_Map(object):
"""docstring for SIC_Map"""
def __init__(self, dsffile = 'crsp/dsf.csv', sicfile = 'sic_codes.txt'):
        # read the configured dsffile path rather than a hardcoded filename
        self.dsf = pd.read_csv(dsffile, dtype={'CUSIP': str, 'PRC': np.float64}, na_values={'PRC': '-'})
self.sic = pd.read_table(sicfile, header = 1)
self.sic.columns = ['HSICCD', 'SICNAME']
def process(self, day = 20100101, columns = ['PERMNO', 'DATE', 'PRC', 'VOL', 'SHROUT', 'RET', 'HSICCD']):
self.dsf_startdate(date = day)
self.dsf_subset(to_keep = columns)
self.sic_merge()
def dsf_startdate(self, date = 20100101):
self.dsf = self.dsf[self.dsf.DATE >= date]
def dsf_subset(self, to_keep = ['PERMNO', 'DATE', 'PRC', 'VOL', 'SHROUT', 'RET', 'HSICCD']):
self.dsf = self.dsf[to_keep]
def sic_merge(self):
        self.clean_dsf = self.dsf.merge(self.sic, how = "left")
| dlab-projects/python-taq | marketflow/dsf_with_sic.py | Python | bsd-2-clause | 887 |
from bravado_core.spec import Spec
import mock
from pyramid.config import Configurator
from pyramid.registry import Registry
import pytest
from swagger_spec_validator.common import SwaggerValidationError
import pyramid_swagger
from pyramid_swagger.model import SwaggerSchema
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
@mock.patch('pyramid_swagger.get_swagger_schema')
@mock.patch('pyramid_swagger.get_swagger_spec')
def test_disable_api_doc_views(_1, _2, mock_register):
settings = {
'pyramid_swagger.enable_api_doc_views': False,
'pyramid_swagger.enable_swagger_spec_validation': False,
}
mock_config = mock.Mock(
spec=Configurator,
registry=mock.Mock(spec=Registry, settings=settings))
pyramid_swagger.includeme(mock_config)
assert not mock_register.called
def test_bad_schema_validated_on_include():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/',
'pyramid_swagger.enable_swagger_spec_validation': True,
}
mock_config = mock.Mock(registry=mock.Mock(settings=settings))
with pytest.raises(SwaggerValidationError):
pyramid_swagger.includeme(mock_config)
# TODO: Figure out why this assertion fails on travis
# assert "'info' is a required property" in str(excinfo.value)
@mock.patch('pyramid_swagger.get_swagger_spec')
def test_bad_schema_not_validated_if_spec_validation_is_disabled(_):
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/',
'pyramid_swagger.enable_swagger_spec_validation': False,
}
mock_config = mock.Mock(
spec=Configurator, registry=mock.Mock(settings=settings))
pyramid_swagger.includeme(mock_config)
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_12_only(mock_register):
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.swagger_versions': ['1.2']
}
mock_config = mock.Mock(registry=mock.Mock(settings=settings))
pyramid_swagger.includeme(mock_config)
assert isinstance(settings['pyramid_swagger.schema12'], SwaggerSchema)
assert mock_register.call_count == 1
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_20_only(mock_register):
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.swagger_versions': ['2.0']
}
mock_config = mock.Mock(registry=mock.Mock(settings=settings))
pyramid_swagger.includeme(mock_config)
assert isinstance(settings['pyramid_swagger.schema20'], Spec)
assert not settings['pyramid_swagger.schema12']
assert mock_register.call_count == 1
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_12_and_20(mock_register):
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.swagger_versions': ['1.2', '2.0']
}
mock_config = mock.Mock(registry=mock.Mock(settings=settings))
pyramid_swagger.includeme(mock_config)
assert isinstance(settings['pyramid_swagger.schema20'], Spec)
assert isinstance(settings['pyramid_swagger.schema12'], SwaggerSchema)
assert mock_register.call_count == 2
| analogue/pyramid_swagger | tests/includeme_test.py | Python | bsd-3-clause | 3,308 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hhlregistrations', '0004_auto_20150411_1935'),
]
operations = [
migrations.AddField(
model_name='event',
name='payment_due',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='event',
name='require_registration',
field=models.BooleanField(default=False),
),
]
| hacklab-fi/hhlevents | hhlevents/apps/hhlregistrations/migrations/0005_auto_20150412_1806.py | Python | bsd-3-clause | 592 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from tracing.mre import job as job_module
class Failure(object):
def __init__(self, job, function_handle_string, trace_canonical_url,
failure_type_name, description, stack):
assert isinstance(job, job_module.Job)
self.job = job
self.function_handle_string = function_handle_string
self.trace_canonical_url = trace_canonical_url
self.failure_type_name = failure_type_name
self.description = description
self.stack = stack
def __str__(self):
return (
'Failure for job %s with function handle %s and trace handle %s:\n'
'of type "%s" with description "%s". Stack:\n\n%s' % (
self.job.guid, self.function_handle_string,
self.trace_canonical_url, self.failure_type_name,
self.description, self.stack))
def AsDict(self):
return {
'job_guid': str(self.job.guid),
'function_handle_string': self.function_handle_string,
'trace_canonical_url': self.trace_canonical_url,
'type': self.failure_type_name,
'description': self.description,
'stack': self.stack
}
@staticmethod
def FromDict(failure_dict, job, failure_names_to_constructors=None):
if failure_names_to_constructors is None:
failure_names_to_constructors = {}
failure_type_name = failure_dict['type']
if failure_type_name in failure_names_to_constructors:
cls = failure_names_to_constructors[failure_type_name]
else:
cls = Failure
return cls(job,
failure_dict['function_handle_string'],
failure_dict['trace_canonical_url'],
failure_type_name, failure_dict['description'],
failure_dict['stack'])
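# Round-trip sketch (not part of the module's API): AsDict() output is accepted
# by FromDict(); a mock stands in for a real tracing.mre Job instance here.
#
#     from unittest import mock
#     job = mock.Mock(spec=job_module.Job, guid='1b2c')
#     failure = Failure(job, 'handle', 'file://trace.html', 'Error', 'desc', '...')
#     assert Failure.FromDict(failure.AsDict(), job).description == 'desc'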
| catapult-project/catapult | tracing/tracing/mre/failure.py | Python | bsd-3-clause | 1,876 |
from django import forms
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.contenttypes.admin import GenericStackedInline
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.core import checks
from django.test import SimpleTestCase, override_settings
from .models import (
Album, Author, Book, City, Influence, Song, State, TwoAlbumFKAndAnE,
)
class SongForm(forms.ModelForm):
pass
class ValidFields(admin.ModelAdmin):
form = SongForm
fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
def get_form(self, request, obj=None, **kwargs):
class ExtraFieldForm(SongForm):
name = forms.CharField(max_length=50)
return ExtraFieldForm
fieldsets = (
(None, {
'fields': ('name',),
}),
)
class MyAdmin(admin.ModelAdmin):
def check(self, **kwargs):
return ['error!']
class AuthenticationMiddlewareSubclass(AuthenticationMiddleware):
pass
class MessageMiddlewareSubclass(MessageMiddleware):
pass
class ModelBackendSubclass(ModelBackend):
pass
class SessionMiddlewareSubclass(SessionMiddleware):
pass
@override_settings(
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
INSTALLED_APPS=[
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'admin_checks',
],
)
class SystemChecksTestCase(SimpleTestCase):
def test_checks_are_performed(self):
admin.site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
admin.site.unregister(Song)
@override_settings(INSTALLED_APPS=['django.contrib.admin'])
def test_apps_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.contenttypes' must be in "
"INSTALLED_APPS in order to use the admin application.",
id="admin.E401",
),
checks.Error(
"'django.contrib.auth' must be in INSTALLED_APPS in order "
"to use the admin application.",
id='admin.E405',
),
checks.Error(
"'django.contrib.messages' must be in INSTALLED_APPS in order "
"to use the admin application.",
id='admin.E406',
),
]
self.assertEqual(errors, expected)
@override_settings(TEMPLATES=[])
def test_no_template_engines(self):
self.assertEqual(admin.checks.check_dependencies(), [
checks.Error(
"A 'django.template.backends.django.DjangoTemplates' "
"instance must be configured in TEMPLATES in order to use "
"the admin application.",
id='admin.E403',
)
])
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [],
},
}],
)
def test_context_processor_dependencies(self):
expected = [
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id='admin.E402',
),
checks.Error(
"'django.contrib.messages.context_processors.messages' must "
"be enabled in DjangoTemplates (TEMPLATES) in order to use "
"the admin application.",
id='admin.E404',
),
checks.Warning(
"'django.template.context_processors.request' must be enabled "
"in DjangoTemplates (TEMPLATES) in order to use the admin "
"navigation sidebar.",
id='admin.W411',
)
]
self.assertEqual(admin.checks.check_dependencies(), expected)
# The first error doesn't happen if
# 'django.contrib.auth.backends.ModelBackend' isn't in
# AUTHENTICATION_BACKENDS.
with self.settings(AUTHENTICATION_BACKENDS=[]):
self.assertEqual(admin.checks.check_dependencies(), expected[1:])
@override_settings(
AUTHENTICATION_BACKENDS=['admin_checks.tests.ModelBackendSubclass'],
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
},
}],
)
def test_context_processor_dependencies_model_backend_subclass(self):
self.assertEqual(admin.checks.check_dependencies(), [
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id='admin.E402',
),
])
@override_settings(
TEMPLATES=[
{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
'DIRS': [],
'APP_DIRS': True,
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
],
)
def test_several_templates_backends(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(MIDDLEWARE=[])
def test_middleware_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.auth.middleware.AuthenticationMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id='admin.E408',
),
checks.Error(
"'django.contrib.messages.middleware.MessageMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id='admin.E409',
),
checks.Error(
"'django.contrib.sessions.middleware.SessionMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
hint=(
"Insert "
"'django.contrib.sessions.middleware.SessionMiddleware' "
"before "
"'django.contrib.auth.middleware.AuthenticationMiddleware'."
),
id='admin.E410',
),
]
self.assertEqual(errors, expected)
@override_settings(MIDDLEWARE=[
'admin_checks.tests.AuthenticationMiddlewareSubclass',
'admin_checks.tests.MessageMiddlewareSubclass',
'admin_checks.tests.SessionMiddlewareSubclass',
])
def test_middleware_subclasses(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(MIDDLEWARE=[
'django.contrib.does.not.Exist',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
])
def test_admin_check_ignores_import_error_in_middleware(self):
self.assertEqual(admin.checks.check_dependencies(), [])
def test_custom_adminsite(self):
class CustomAdminSite(admin.AdminSite):
pass
custom_site = CustomAdminSite()
custom_site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
custom_site.unregister(Song)
def test_allows_checks_relying_on_other_modeladmins(self):
class MyBookAdmin(admin.ModelAdmin):
def check(self, **kwargs):
errors = super().check(**kwargs)
author_admin = self.admin_site._registry.get(Author)
if author_admin is None:
errors.append('AuthorAdmin missing!')
return errors
class MyAuthorAdmin(admin.ModelAdmin):
pass
admin.site.register(Book, MyBookAdmin)
admin.site.register(Author, MyAuthorAdmin)
try:
self.assertEqual(admin.site.check(None), [])
finally:
admin.site.unregister(Book)
admin.site.unregister(Author)
def test_field_name_not_in_list_display(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ["original_release"]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not contained in 'list_display'.",
obj=SongAdmin,
id='admin.E122',
)
]
self.assertEqual(errors, expected)
def test_list_editable_not_a_list_or_tuple(self):
class SongAdmin(admin.ModelAdmin):
list_editable = 'test'
self.assertEqual(SongAdmin(Song, AdminSite()).check(), [
checks.Error(
"The value of 'list_editable' must be a list or tuple.",
obj=SongAdmin,
id='admin.E120',
)
])
def test_list_editable_missing_field(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ('test',)
self.assertEqual(SongAdmin(Song, AdminSite()).check(), [
checks.Error(
"The value of 'list_editable[0]' refers to 'test', which is "
"not an attribute of 'admin_checks.Song'.",
obj=SongAdmin,
id='admin.E121',
)
])
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin.",
obj=SongAdmin,
id='admin.E125',
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
        Regression test for #8027: custom ModelForms with fields/fieldsets.
"""
errors = ValidFields(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
The fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
"""
errors = ValidFormFieldsets(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_fieldsets_fields_non_tuple(self):
"""
The first fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": "title" # not a tuple
}),
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[0][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id='admin.E008',
)
]
self.assertEqual(errors, expected)
def test_nonfirst_fieldset(self):
"""
The second fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
"fields": ("title",)
}),
('foo', {
"fields": "author" # not a tuple
}),
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id='admin.E008',
)
]
self.assertEqual(errors, expected)
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = 'foo'
errors = ExcludedFields1(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFields1,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
errors = ExcludedFields2(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
obj=ExcludedFields2,
id='admin.E015',
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = 'foo'
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFieldsInline,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
Regression test for #9932 - exclude in InlineModelAdmin should not
contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'.",
obj=SongInline,
id='admin.E201',
)
]
self.assertEqual(errors, expected)
def test_valid_generic_inline_model_admin(self):
"""
Regression test for #22034 - check that generic inlines don't look for
normal ForeignKey relations.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_generic_inline_model_admin_non_generic_model(self):
"""
A model without a GenericForeignKey raises problems if it's included
in a GenericInlineModelAdmin definition.
"""
class BookInline(GenericStackedInline):
model = Book
class SongAdmin(admin.ModelAdmin):
inlines = [BookInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Book' has no GenericForeignKey.",
obj=BookInline,
id='admin.E301',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_ct_field(self):
"""
A GenericInlineModelAdmin errors if the ct_field points to a
nonexistent field.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = 'nonexistent'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
obj=InfluenceInline,
id='admin.E302',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_fk_field(self):
"""
A GenericInlineModelAdmin errors if the ct_fk_field points to a
nonexistent field.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = 'nonexistent'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_fk_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
obj=InfluenceInline,
id='admin.E303',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_ct_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_field points to a
field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = 'name'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'name' and object ID field 'object_id'.",
obj=InfluenceInline,
id='admin.E304',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_fk_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_fk_field points to
a field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = 'name'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'content_type' and object ID field 'name'.",
obj=InfluenceInline,
id='admin.E304',
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
class RawIdNonexistentAdmin(admin.ModelAdmin):
raw_id_fields = ('nonexistent',)
errors = RawIdNonexistentAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'raw_id_fields[0]' refers to 'nonexistent', "
"which is not an attribute of 'admin_checks.Album'.",
obj=RawIdNonexistentAdmin,
id='admin.E002',
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
"""
        Regression test for #11709 - when an inline uses 'exclude', fk_name
        must still be honored, otherwise things blow up when there is more
        than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey "
"to 'admin_checks.Album'. You must specify a 'fk_name' "
"attribute.",
obj=TwoAlbumFKAndAnEInline,
id='admin.E202',
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_inlines_property(self):
class CitiesInline(admin.TabularInline):
model = City
class StateAdmin(admin.ModelAdmin):
@property
def inlines(self):
return [CitiesInline]
errors = StateAdmin(State, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_method(self):
@admin.display
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
@admin.display
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_dynamic_attribute_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("dynamic_method",)
def __getattr__(self, item):
if item == "dynamic_method":
@admin.display
def method(obj):
pass
return method
raise AttributeError
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_nonexistent_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistent")
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'readonly_fields[1]' is not a callable, an attribute "
"of 'SongAdmin', or an attribute of 'admin_checks.Song'.",
obj=SongAdmin,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_nonexistent_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields = ['i_dont_exist'] # Missing attribute
errors = CityInline(State, AdminSite()).check()
expected = [
checks.Error(
"The value of 'readonly_fields[0]' is not a callable, an attribute "
"of 'CityInline', or an attribute of 'admin_checks.City'.",
obj=CityInline,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_readonly_fields_not_list_or_tuple(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = 'test'
self.assertEqual(SongAdmin(Song, AdminSite()).check(), [
checks.Error(
"The value of 'readonly_fields' must be a list or tuple.",
obj=SongAdmin,
id='admin.E034',
)
])
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
@admin.display
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
specifies the 'through' option is included in the 'fields' or the 'fieldsets'
ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ['authors']
errors = BookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model.",
obj=BookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
('Header 1', {'fields': ('name',)}),
('Header 2', {'fields': ('authors',)}),
)
errors = FieldsetBookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
"'authors', because that field manually specifies a relationship model.",
obj=FieldsetBookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ('price', ('name', 'subtitle'))
errors = NestedFieldsAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (
('Main', {'fields': ('price', ('name', 'subtitle'))}),
)
errors = NestedFieldsetAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
        is specified as a string, the admin should still be able to use
Model.m2m_field.through
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_fields(self):
"""
        Regression test ensuring ModelAdmin.fields can contain non-model
        fields (this broke with r11737).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['title', 'extra_data']
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_first_field(self):
"""
        Regression test ensuring ModelAdmin.fields can handle the first element
        being a non-model field (fix for the UnboundLocalError introduced with
        r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = '__all__'
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['extra_data', 'title']
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ['state', ['state']]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
obj=MyModelAdmin,
id='admin.E006'
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
'fields': ['title', 'album', ('title', 'album')]
}),
]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
obj=MyModelAdmin,
id='admin.E012'
)
]
self.assertEqual(errors, expected)
def test_list_filter_works_on_through_field_even_when_apps_not_ready(self):
"""
Ensure list_filter can access reverse fields even when the app registry
is not ready; refs #24146.
"""
class BookAdminWithListFilter(admin.ModelAdmin):
list_filter = ['authorsbooks__featured']
# Temporarily pretending apps are not ready yet. This issue can happen
# if the value of 'list_filter' refers to a 'through__field'.
Book._meta.apps.ready = False
try:
errors = BookAdminWithListFilter(Book, AdminSite()).check()
self.assertEqual(errors, [])
finally:
Book._meta.apps.ready = True
| wkschwartz/django | tests/admin_checks/tests.py | Python | bsd-3-clause | 32,171 |
##########################################################################
#
# Copyright (c) 2013-2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferOSL
import imath
import functools
_channelNamesOptions = {
"RGB" : IECore.Color3fData( imath.Color3f( 1 ) ),
"RGBA" : IECore.Color4fData( imath.Color4f( 1 ) ),
"R" : IECore.FloatData( 1 ),
"G" : IECore.FloatData( 1 ),
"B" : IECore.FloatData( 1 ),
"A" : IECore.FloatData( 1 ),
"customChannel" : IECore.FloatData( 1 ),
"customLayer" : IECore.Color3fData( imath.Color3f( 1 ) ),
"customLayerRGBA" : IECore.Color4fData( imath.Color4f( 1 ) ),
"closure" : None,
}
##########################################################################
# _ChannelsFooter
##########################################################################
class _ChannelsFooter( GafferUI.PlugValueWidget ) :
def __init__( self, plug ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
GafferUI.PlugValueWidget.__init__( self, row, plug )
with row :
GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) )
menuButton = GafferUI.MenuButton(
image = "plus.png",
hasFrame = False,
menu = GafferUI.Menu(
Gaffer.WeakMethod( self.__menuDefinition ),
title = "Add Input"
),
toolTip = "Add Input"
)
menuButton.setEnabled( not Gaffer.MetadataAlgo.readOnly( plug ) )
GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } )
def _updateFromPlug( self ) :
self.setEnabled( self._editable() )
def __menuDefinition( self ) :
result = IECore.MenuDefinition()
usedNames = set()
for p in self.getPlug().children():
# TODO - this method for checking if a plug variesWithContext should probably live in PlugAlgo
# ( it's based on Switch::variesWithContext )
sourcePlug = p["name"].source()
			variesWithContext = sourcePlug.direction() == Gaffer.Plug.Direction.Out and isinstance( sourcePlug.node(), Gaffer.ComputeNode )
if not variesWithContext:
usedNames.add( p["name"].getValue() )
# Use a fixed order for some standard options that we want to list in a specific order
sortedOptions = []
for label in ["RGB", "RGBA", "R", "G", "B", "A" ]:
sortedOptions.append( (label, _channelNamesOptions[label] ) )
for label, defaultData in sorted( _channelNamesOptions.items() ):
if not label in [ i[0] for i in sortedOptions ]:
sortedOptions.append( (label, defaultData) )
categories = { "Standard" : [], "Custom" : [], "Advanced" : [] }
for label, defaultData in sortedOptions:
if label == "closure":
categories["Advanced"].append( ( label, label, defaultData ) )
else:
bareLabel = label.replace( "RGBA", "" ).replace( "RGB", "" )
channelName = bareLabel
if label.startswith( "custom" ):
if channelName in usedNames:
suffix = 2
while True:
channelName = bareLabel + str( suffix )
if not channelName in usedNames:
break
suffix += 1
categories["Custom"].append( ( label, channelName, defaultData ) )
else:
if channelName in usedNames:
continue
categories["Standard"].append( ( label, channelName, defaultData ) )
for category in [ "Standard", "Custom", "Advanced" ]:
for ( menuLabel, channelName, defaultData ) in categories[category]:
result.append(
"/" + category + "/" + menuLabel,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), channelName, defaultData ),
}
)
return result
def __addPlug( self, name, defaultData ) :
alphaValue = None
if isinstance( defaultData, IECore.Color4fData ):
alphaValue = Gaffer.FloatPlug( "value", Gaffer.Plug.Direction.In, defaultData.value.a )
defaultData = IECore.Color3fData( imath.Color3f( defaultData.value.r, defaultData.value.g, defaultData.value.b ) )
		if defaultData is None:
plugName = "closure"
name = ""
valuePlug = GafferOSL.ClosurePlug( "value" )
else:
plugName = "channel"
valuePlug = Gaffer.PlugAlgo.createPlugFromData( "value", Gaffer.Plug.Direction.In, Gaffer.Plug.Flags.Default, defaultData )
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.getPlug().addChild( Gaffer.NameValuePlug( name, valuePlug, True, plugName, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
if alphaValue:
self.getPlug().addChild(
Gaffer.NameValuePlug( name + ".A" if name else "A", alphaValue, True, plugName, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
)
def __channelLabelFromPlug( plug ):
if plug.typeId() == GafferOSL.ClosurePlug.staticTypeId():
return plug.parent().getName()
elif plug.typeId() == Gaffer.Color3fPlug.staticTypeId() and plug.parent()["name"].getValue() == "":
return "[RGB]"
else:
return plug.parent()["name"].getValue()
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
GafferOSL.OSLImage,
"description",
"""
Executes OSL shaders to perform image processing. Use the shaders from
the OSL/ImageProcessing menu to read values from the input image and
then write values back to it.
""",
"plugAdderOptions", IECore.CompoundData( _channelNamesOptions ),
"layout:activator:defaultFormatActive", lambda node : not node["in"].getInput(),
plugs = {
"defaultFormat" : [
"description",
"""
The resolution and aspect ratio to output when there is no input image provided.
""",
"layout:activator", "defaultFormatActive",
],
"channels" : [
"description",
"""
Define image channels to output by adding child plugs and connecting
corresponding OSL shaders. You can drive RGB layers with a color,
or connect individual channels to a float.
If you want to add multiple channels at once, you can also add a closure plug,
which can accept a connection from an OSLCode with a combined output closure.
""",
"layout:customWidget:footer:widgetType", "GafferOSLUI.OSLImageUI._ChannelsFooter",
"layout:customWidget:footer:index", -1,
"nodule:type", "GafferUI::CompoundNodule",
"noduleLayout:section", "left",
"noduleLayout:spacing", 0.2,
"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
# Add + button for showing and hiding parameters in the GraphEditor
"noduleLayout:customGadget:addButton:gadgetType", "GafferOSLUI.OSLImageUI.PlugAdder",
],
"channels.*" : [
# Although the parameters plug is positioned
# as we want above, we must also register
# appropriate values for each individual parameter,
# for the case where they get promoted to a box
# individually.
"noduleLayout:section", "left",
"nodule:type", "GafferUI::CompoundNodule",
"nameValuePlugPlugValueWidget:ignoreNamePlug", lambda plug : isinstance( plug["value"], GafferOSL.ClosurePlug ),
],
"channels.*.name" : [
"nodule:type", "",
"stringPlugValueWidget:placeholderText", lambda plug : "[RGB]" if isinstance( plug.parent()["value"], Gaffer.Color3fPlug ) else None,
],
"channels.*.enabled" : [
"nodule:type", "",
],
"channels.*.value" : [
# Although the parameters plug is positioned
# as we want above, we must also register
# appropriate values for each individual parameter,
# for the case where they get promoted to a box
# individually.
"noduleLayout:section", "left",
"nodule:type", "GafferUI::StandardNodule",
"noduleLayout:label", __channelLabelFromPlug,
"ui:visibleDimensions", lambda plug : 2 if hasattr( plug, "interpretation" ) and plug.interpretation() == IECore.GeometricData.Interpretation.UV else None,
],
}
)
| lucienfostier/gaffer | python/GafferOSLUI/OSLImageUI.py | Python | bsd-3-clause | 9,411 |
import django.db.models.deletion
import oauthlib.common
from django.db import migrations, models
def move_existing_token(apps, schema_editor):
ServiceAccount = apps.get_model("account", "ServiceAccount")
for service_account in ServiceAccount.objects.iterator():
service_account.tokens.create(
name="Default", auth_token=service_account.auth_token
)
class Migration(migrations.Migration):
dependencies = [("account", "0033_serviceaccount")]
operations = [
migrations.CreateModel(
name="ServiceAccountToken",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(blank=True, default="", max_length=128)),
(
"auth_token",
models.CharField(
default=oauthlib.common.generate_token,
max_length=30,
unique=True,
),
),
(
"service_account",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="tokens",
to="account.ServiceAccount",
),
),
],
),
migrations.RunPython(move_existing_token),
migrations.RemoveField(model_name="serviceaccount", name="auth_token"),
]
| maferelo/saleor | saleor/account/migrations/0034_service_account_token.py | Python | bsd-3-clause | 1,717 |
import six
from django import template
from oscar.core.loading import get_model
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import resolve, Resolver404
from oscar.apps.customer import history
from oscar.core.compat import urlparse
Site = get_model('sites', 'Site')
register = template.Library()
@register.inclusion_tag('customer/history/recently_viewed_products.html',
takes_context=True)
def recently_viewed_products(context):
"""
Inclusion tag listing the most recently viewed products
"""
request = context['request']
products = history.get(request)
return {'products': products,
'request': request}
@register.assignment_tag(takes_context=True)
def get_back_button(context):
"""
    Show a back button, with a custom title for certain URLs (for example
    'Back to search results'). No back button is shown if the user came from
    another site.
"""
request = context.get('request', None)
if not request:
raise Exception('Cannot get request from context')
referrer = request.META.get('HTTP_REFERER', None)
if not referrer:
return None
try:
url = urlparse.urlparse(referrer)
    except Exception:
return None
if request.get_host() != url.netloc:
try:
Site.objects.get(domain=url.netloc)
except Site.DoesNotExist:
# Came from somewhere else, don't show back button:
return None
try:
match = resolve(url.path)
except Resolver404:
return None
# This dict can be extended to link back to other browsing pages
titles = {
'search:search': _('Back to search results'),
}
title = titles.get(match.view_name, None)
if title is None:
return None
return {'url': referrer, 'title': six.text_type(title), 'match': match}
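# Hedged usage sketch (not part of the original module): with the tag library
# loaded, the assignment tag above is typically used from a template like the
# following; the surrounding markup is illustrative only.
#
#   {% load history_tags %}
#   {% get_back_button as back_button %}
#   {% if back_button %}
#     <a href="{{ back_button.url }}">{{ back_button.title }}</a>
#   {% endif %}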
| MrReN/django-oscar | oscar/templatetags/history_tags.py | Python | bsd-3-clause | 1,884 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import config_util # pylint: disable=import-error
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=no-init
class DevToolsFrontend(config_util.Config):
"""Basic Config class for DevTools frontend."""
@staticmethod
def fetch_spec(props):
url = 'https://chromium.googlesource.com/devtools/devtools-frontend.git'
solution = {
'name' : 'devtools-frontend',
'url' : url,
'deps_file' : 'DEPS',
'managed' : False,
'custom_deps' : {},
}
spec = {
'solutions': [solution],
'with_branch_heads': True,
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'devtools-frontend'
def main(argv=None):
return DevToolsFrontend().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
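# Hedged usage note (not part of the original config): with depot_tools on
# PATH, this config is selected by file name, e.g.
#
#   fetch devtools-frontend
#   gclient sync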
| endlessm/chromium-browser | third_party/depot_tools/fetch_configs/devtools-frontend.py | Python | bsd-3-clause | 1,092 |
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
    @param root: The root of the binary tree.
    @return: True if this binary tree is balanced, or False.
"""
def isBalanced(self, root):
        # Compute balance and height together in a single bottom-up pass.
isbalanced, h = self.isBalancedandHeight(root)
return isbalanced
def isBalancedandHeight(self, root):
if root is None:
return True, 0
l, r = root.left, root.right
l_balanced, l_h = self.isBalancedandHeight(l)
if not l_balanced:
return False, 0
r_balanced, r_h = self.isBalancedandHeight(r)
if not r_balanced:
return False, 0
if abs(l_h - r_h) < 2:
return True, max(l_h, r_h) + 1
return False, 0
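# Hedged usage sketch (not part of the original solution). TreeNode below is a
# minimal stand-in matching the definition quoted in the docstring above.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, val):
            self.val = val
            self.left, self.right = None, None
    # Tree:    1
    #         / \
    #        2   3
    #       /
    #      4
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left = TreeNode(4)
    # Subtree heights differ by at most one at every node, so this is balanced.
    print(Solution().isBalanced(root))  # expected: True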
| Chasego/codirit | jiuzhang/Nine Chapters/3 Binary Tree & Divide Conquer/py/BalancedBinaryTree_rec.py | Python | mit | 873 |
# -*- coding: utf-8 -*-
# -- ==alkane_monolayer== --
import mbuild as mb
from mbuild.lib.surfaces import Betacristobalite
from mbuild.lib.atoms import H
from mbuild.examples.alkane_monolayer.alkylsilane import AlkylSilane
class AlkaneMonolayer(mb.Monolayer):
"""An akylsilane monolayer on beta-cristobalite. """
def __init__(self, pattern, tile_x=1, tile_y=1, chain_length=10):
"""Create an alkylsilane monolayer on beta-cristobalite.
Parameters
----------
pattern : np.ndarray, shape=(n, 3), optional, default=None
An array of planar binding locations. If not provided, the entire
surface will be filled with `chain`.
tile_x : int, optional, default=1
Number of times to replicate substrate in x-direction.
tile_y : int, optional, default=1
Number of times to replicate substrate in y-direction.
chain_length : int, optional, default=10
Number of carbon atoms per chain.
"""
surface = Betacristobalite()
alkylsilane = AlkylSilane(chain_length)
hydrogen = H()
super(AlkaneMonolayer, self).__init__(surface, alkylsilane, backfill=hydrogen,
pattern=pattern, tile_x=tile_x,
tile_y=tile_y)
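# Hedged usage sketch (not part of the original example); assumes
# mb.Grid2DPattern is available, as in mBuild's bundled examples.
if __name__ == "__main__":
    pattern = mb.Grid2DPattern(3, 3)  # nine evenly spaced attachment sites
    monolayer = AlkaneMonolayer(pattern, chain_length=12)
    print(monolayer)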
# -- ==alkane_monolayer== -- | ctk3b/mbuild | mbuild/examples/alkane_monolayer/alkane_monolayer.py | Python | mit | 1,378 |
from app import celery
from flask import current_app as app
from datetime import timedelta
from celery.decorators import periodic_task
from flask import jsonify, request, abort
import requests
import json
@periodic_task(run_every=(timedelta(seconds=1)))
def ping():
print "ping!"
headers = {'content-type': 'application/json'}
response = requests.post("http://localhost:9000" + "/send_ping", headers=headers, data=json.dumps({}))
r = json.loads(response.text)
if r['success'] is True:
print r['server_views']
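# Hedged illustration (not part of this module): the POST above expects the
# server at localhost:9000 to answer with JSON shaped like
# {"success": true, "server_views": ...}. A minimal compatible endpoint could
# look like the following (everything except /send_ping and the two keys used
# above is hypothetical):
#
# @app.route('/send_ping', methods=['POST'])
# def send_ping():
#     return jsonify({'success': True, 'server_views': []})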
| darksigma/traceless | slave/app/async/routes.py | Python | mit | 537 |
from django.conf.urls import patterns, url
urlpatterns = patterns(
'',
url(r'^$', 'whatify.views.index'),
url(r'^search/(.+)$', 'whatify.views.search'),
url(r'^torrent_groups/(\d+)$', 'whatify.views.get_torrent_group'),
url(r'^torrent_groups/(\d+)/download$', 'whatify.views.download_torrent_group'),
url(r'^torrent_groups/random$', 'whatify.views.random_torrent_groups'),
url(r'^torrent_groups/top10$', 'whatify.views.top10_torrent_groups'),
url(r'^artists/(\d+)$', 'whatify.views.get_artist'),
)
| grandmasterchef/WhatManager2 | whatify/urls.py | Python | mit | 531 |
import asposebarcodecloud
from asposebarcodecloud.BarcodeApi import BarcodeApi
from asposebarcodecloud.BarcodeApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(r'../../data/config.properties'))
apiKey = config.get('AppConfig', 'api_key')
appSid = config.get('AppConfig', 'app_sid')
out_folder = config.get('AppConfig', 'out_folder')
data_folder = "../../data/" #resouece data folder
#ExStart:1
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Barcode API SDK
api_client = asposebarcodecloud.ApiClient.ApiClient(apiKey, appSid, True)
barcodeApi = BarcodeApi(api_client)
#Set the barcode file name created on server
name = "sample-barcode"
#Set Text to encode inside barcode.
text = "Aspose.BarCode"
#Set Barcode Symbology
type = "Code128"
#Set Generated Barcode Image Format
format = "PNG"
#Set height, Width and quality of the image
imageHeight = 1.0
imageWidth = 1.0
imageQuality = "default"
try:
#invoke Aspose.BarCode Cloud SDK API to generate image with specific height, width, and quality along with auto size option
response = barcodeApi.PutBarcodeGenerateFile(name, file= None, text=text, type=type, format=format, imageHeight=imageHeight, imageWidth=imageWidth, imageQuality=imageQuality)
if response.Status == "OK":
#download generated barcode from cloud storage
response = storageApi.GetDownload(Path=name)
outfilename = out_folder + name + "." + format
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
#ExEnd:1 | farooqsheikhpk/Aspose.BarCode-for-Cloud | Examples/Python/generating-saving/cloud-storage/set-barcode-image-height-width-quality-settings.py | Python | mit | 2,140 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines Entry classes for containing experimental data.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jun 27, 2012"
from pymatgen.analysis.phase_diagram import PDEntry
from pymatgen.core.composition import Composition
from monty.json import MSONable
from pymatgen.analysis.thermochemistry import ThermoData
class ExpEntry(PDEntry, MSONable):
"""
    A lightweight ExpEntry object containing experimental data for a
composition for many purposes. Extends a PDEntry so that it can be used for
phase diagram generation and reaction calculation.
Current version works only with solid phases and at 298K. Further
extensions for temperature dependence are planned.
"""
def __init__(self, composition, thermodata, temperature=298):
"""
Args:
composition: Composition of the entry. For flexibility, this can take
the form of all the typical input taken by a Composition, including
a {symbol: amt} dict, a string formula, and others.
thermodata: A sequence of ThermoData associated with the entry.
temperature: A temperature for the entry in Kelvin. Defaults to 298K.
"""
comp = Composition(composition)
self._thermodata = thermodata
found = False
enthalpy = float("inf")
for data in self._thermodata:
if data.type == "fH" and data.value < enthalpy and \
(data.phaseinfo != "gas" and data.phaseinfo != "liquid"):
enthalpy = data.value
found = True
if not found:
raise ValueError("List of Thermodata does not contain enthalpy "
"values.")
self.temperature = temperature
super().__init__(comp, enthalpy)
def __repr__(self):
return "ExpEntry {}, Energy = {:.4f}".format(self.composition.formula,
self.energy)
def __str__(self):
return self.__repr__()
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: ExpEntry
"""
thermodata = [ThermoData.from_dict(td) for td in d["thermodata"]]
return cls(d["composition"], thermodata, d["temperature"])
def as_dict(self):
"""
:return: MSONable dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"thermodata": [td.as_dict() for td in self._thermodata],
"composition": self.composition.as_dict(),
"temperature": self.temperature}
| mbkumar/pymatgen | pymatgen/entries/exp_entries.py | Python | mit | 2,923 |
#!/usr/bin/env python3
# Copyright (C) 2010-2011 Marcin Kościelnicki <[email protected]>
# Copyright (C) 2010 Luca Barbieri <[email protected]>
# Copyright (C) 2010 Marcin Slusarz <[email protected]>
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import rnn
import sys
startcol = 64
fouts = {}
def printdef(name, val, file):
fout = fouts[file]
fout.write("#define {}{} {}\n".format(name, " " * (startcol - len(name)), val))
def printvalue(val, shift):
if val.varinfo.dead:
return
if val.value is not None:
printdef(val.fullname, hex(val.value << shift), val.file)
def printtypeinfo(ti, prefix, shift, file):
if isinstance(ti, rnn.TypeHex) or isinstance(ti, rnn.TypeInt):
if ti.shr:
printdef (prefix + "__SHR", str(ti.shr), file)
if ti.min is not None:
printdef (prefix + "__MIN", hex(ti.min), file)
if ti.max is not None:
printdef (prefix + "__MAX", hex(ti.max), file)
if ti.align is not None:
printdef (prefix + "__ALIGN", hex(ti.align), file)
if isinstance(ti, rnn.TypeFixed):
if ti.min is not None:
printdef (prefix + "__MIN", hex(ti.min), file)
if ti.max is not None:
printdef (prefix + "__MAX", hex(ti.max), file)
if ti.radix is not None:
printdef (prefix + "__RADIX", str(ti.radix), file)
if isinstance(ti, rnn.Enum) and ti.inline:
for val in ti.vals:
printvalue(val, shift)
if isinstance(ti, rnn.Bitset) and ti.inline:
for bitfield in ti.bitfields:
printbitfield(bitfield, shift)
def printbitfield(bf, shift):
if bf.varinfo.dead:
return
if isinstance(bf.typeinfo, rnn.TypeBoolean):
printdef(bf.fullname, hex(bf.mask << shift), bf.file)
else:
printdef(bf.fullname + "__MASK", hex(bf.mask << shift), bf.file)
printdef(bf.fullname + "__SHIFT", str(bf.low + shift), bf.file)
printtypeinfo(bf.typeinfo, bf.fullname, bf.low + shift, bf.file)
def printdelem(elem, offset, strides):
if elem.varinfo.dead:
return
if elem.length != 1:
strides = strides + [elem.stride]
offset = offset + elem.offset
if elem.name:
if strides:
name = elem.fullname + '(' + ", ".join("i{}".format(i) for i in range(len(strides))) + ')'
val = '(' + hex(offset) + "".join(" + {:x} * i{}".format(stride, i) for i, stride in enumerate(strides)) + ')'
printdef(name, val, elem.file)
else:
printdef(elem.fullname, hex(offset), elem.file)
if elem.stride:
printdef(elem.fullname +"__ESIZE", hex(elem.stride), elem.file)
if elem.length != 1:
printdef(elem.fullname + "__LEN", hex(elem.length), elem.file)
if isinstance(elem, rnn.Reg):
printtypeinfo(elem.typeinfo, elem.fullname, 0, elem.file)
fouts[elem.file].write("\n")
if isinstance(elem, rnn.Stripe):
for subelem in elem.elems:
printdelem(subelem, offset, strides)
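# Illustration (not part of the original tool): for an array element, the
# function above emits macros of the form
#
#   #define NV_FOO(i0)      (0x1000 + 0x100 * i0)
#   #define NV_FOO__ESIZE   0x100
#   #define NV_FOO__LEN     0x8
#
# with the name column padded to `startcol` characters; the names and offsets
# shown here are hypothetical.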
def print_file_info(fout, file):
#struct stat sb;
#struct tm tm;
#stat(file, &sb);
#gmtime_r(&sb.st_mtime, &tm);
#char timestr[64];
#strftime(timestr, sizeof(timestr), "%Y-%m-%d %H:%M:%S", tm);
#fprintf(dst, "(%7Lu bytes, from %s)\n", (unsigned long long)sb->st_size, timestr);
fout.write("\n")
def printhead(file, db):
fout = fouts[file]
fout.write("#ifndef {}\n".format(guard(file)))
fout.write("#define {}\n".format(guard(file)))
fout.write("\n")
fout.write(
"/* Autogenerated file, DO NOT EDIT manually!\n"
"\n"
"This file was generated by the rules-ng-ng headergen tool in this git repository:\n"
"http://github.com/envytools/envytools/\n"
"git clone https://github.com/envytools/envytools.git\n"
"\n"
"The rules-ng-ng source files this header was generated from are:\n")
#struct stat sb;
#struct tm tm;
#stat(f.name, &sb);
#gmtime_r(&sb.st_mtime, &tm);
maxlen = max(len(file) for file in db.files)
for file in db.files:
fout.write("- {} ".format(file + " " * (maxlen - len(file))))
print_file_info(fout, file)
fout.write(
"\n"
"Copyright (C) ")
#if(db->copyright.firstyear && db->copyright.firstyear < (1900 + tm.tm_year))
# fout.write("%u-", db->copyright.firstyear);
#fout.write("%u", 1900 + tm.tm_year);
if db.copyright.authors:
fout.write(" by the following authors:")
for author in db.copyright.authors:
fout.write("\n- ")
if author.name:
fout.write(author.name)
if author.email:
fout.write(" <{}>".format(author.email))
if author.nicknames:
fout.write(" ({})".format(", ".join(author.nicknames)))
fout.write("\n")
if db.copyright.license:
fout.write("\n{}\n".format(db.copyright.license))
fout.write("*/\n\n\n")
def guard(file):
return ''.join(c.upper() if c.isalnum() else '_' for c in file)
def process(mainfile):
db = rnn.Database()
rnn.parsefile(db, mainfile)
db.prep()
for file in db.files:
fouts[file] = open(file.replace('/', '_') + '.h', 'w')
printhead(file, db)
for enum in db.enums:
if not enum.inline:
for val in enum.vals:
printvalue(val, 0)
for bitset in db.bitsets:
if not bitset.inline:
for bitfield in bitset.bitfields:
printbitfield(bitfield, 0)
for domain in db.domains:
if domain.size:
printdef(domain.fullname + "__SIZE", hex(domain.size), domain.file)
for elem in domain.elems:
printdelem(elem, 0, [])
for file in fouts:
fouts[file].write("\n#endif /* {} */\n".format(guard(file)))
fouts[file].close()
return db.estatus
if len(sys.argv) < 2:
sys.stdout.write ("Usage:\n"
"\theadergen file.xml\n"
)
sys.exit(2)
sys.exit(process(sys.argv[1]))
| hakzsam/envytools | rnn/headergen.py | Python | mit | 7,156 |
id_mappings = {
"EX1_097": "Abomination",
"CS2_188": "Abusive Sergeant",
"EX1_007": "Acolyte of Pain",
"NEW1_010": "Al'Akir the Windlord",
"EX1_006": "Alarm-o-Bot",
"EX1_382": "Aldor Peacekeeper",
"EX1_561": "Alexstrasza",
"EX1_393": "Amani Berserker",
"CS2_038": "Ancestral Spirit",
"EX1_057": "Ancient Brewmaster",
"EX1_584": "Ancient Mage",
"NEW1_008b": "Ancient Secrets",
"NEW1_008a": "Ancient Teachings",
"EX1_045": "Ancient Watcher",
"NEW1_008": "Ancient of Lore",
"EX1_178": "Ancient of War",
"EX1_009": "Angry Chicken",
"EX1_398": "Arathi Weaponsmith",
"EX1_089": "Arcane Golem",
"EX1_559": "Archmage Antonidas",
"EX1_067": "Argent Commander",
"EX1_362": "Argent Protector",
"EX1_008": "Argent Squire",
"EX1_402": "Armorsmith",
"EX1_383t": "Ashbringer",
"EX1_591": "Auchenai Soulpriest",
"EX1_384": "Avenging Wrath",
"EX1_284": "Azure Drake",
"EX1_110t": "Baine Bloodhoof",
"EX1_014t": "Bananas",
"EX1_320": "Bane of Doom",
"EX1_249": "Baron Geddon",
"EX1_398t": "Battle Axe",
"EX1_392": "Battle Rage",
"EX1_165b": "Bear Form",
"EX1_549": "Bestial Wrath",
"EX1_126": "Betrayal",
"EX1_005": "Big Game Hunter",
"EX1_570": "Bite",
"CS2_233": "Blade Flurry",
"EX1_355": "Blessed Champion",
"EX1_363": "Blessing of Wisdom",
"CS2_028": "Blizzard",
"EX1_323w": "Blood Fury",
"CS2_059": "Blood Imp",
"EX1_590": "Blood Knight",
"EX1_012": "Bloodmage Thalnos",
"NEW1_025": "Bloodsail Corsair",
"NEW1_018": "Bloodsail Raider",
"EX1_407": "Brawl",
"EX1_091": "Cabal Shadow Priest",
"EX1_110": "Cairne Bloodhoof",
"NEW1_024": "Captain Greenskin",
"EX1_165a": "Cat Form",
"EX1_573": "Cenarius",
"EX1_621": "Circle of Healing",
"CS2_073": "Cold Blood",
"EX1_050": "Coldlight Oracle",
"EX1_103": "Coldlight Seer",
"NEW1_036": "Commanding Shout",
"EX1_128": "Conceal",
"EX1_275": "Cone of Cold",
"EX1_287": "Counterspell",
"EX1_059": "Crazed Alchemist",
"EX1_603": "Cruel Taskmaster",
"EX1_595": "Cult Master",
"skele21": "Damaged Golem",
"EX1_046": "Dark Iron Dwarf",
"EX1_617": "Deadly Shot",
"NEW1_030": "Deathwing",
"EX1_130a": "Defender",
"EX1_093": "Defender of Argus",
"EX1_131t": "Defias Bandit",
"EX1_131": "Defias Ringleader",
"EX1_573a": "Demigod's Favor",
"EX1_102": "Demolisher",
"EX1_596": "Demonfire",
"EX1_tk29": "Devilsaur",
"EX1_162": "Dire Wolf Alpha",
"EX1_166b": "Dispel",
"EX1_349": "Divine Favor",
"EX1_310": "Doomguard",
"EX1_567": "Doomhammer",
"NEW1_021": "Doomsayer",
"NEW1_022": "Dread Corsair",
"DREAM_04": "Dream",
"EX1_165t2": "Druid of the Claw (bear)",
"EX1_165": "Druid of the Claw",
"EX1_165t1": "Druid of the Claw (cat)",
"EX1_243": "Dust Devil",
"EX1_536": "Eaglehorn Bow",
"EX1_250": "Earth Elemental",
"EX1_245": "Earth Shock",
"CS2_117": "Earthen Ring Farseer",
"EX1_613": "Edwin VanCleef",
"DREAM_03": "Emerald Drake",
"EX1_170": "Emperor Cobra",
"EX1_619": "Equality",
"EX1_274": "Ethereal Arcanist",
"EX1_124": "Eviscerate",
"EX1_537": "Explosive Shot",
"EX1_610": "Explosive Trap",
"EX1_132": "Eye for an Eye",
"EX1_564": "Faceless Manipulator",
"NEW1_023": "Faerie Dragon",
"CS2_053": "Far Sight",
"EX1_301": "Felguard",
"CS1_069": "Fen Creeper",
"EX1_248": "Feral Spirit",
"EX1_finkle": "Finkle Einhorn",
"EX1_319": "Flame Imp",
"EX1_614t": "Flame of Azzinoth",
"EX1_544": "Flare",
"tt_004": "Flesheating Ghoul",
"EX1_571": "Force of Nature",
"EX1_251": "Forked Lightning",
"EX1_611": "Freezing Trap",
"EX1_283": "Frost Elemental",
"EX1_604": "Frothing Berserker",
"EX1_095": "Gadgetzan Auctioneer",
"DS1_188": "Gladiator's Longbow",
"NEW1_040t": "Gnoll",
"EX1_411": "Gorehowl",
"EX1_414": "Grommash Hellscream",
"NEW1_038": "Gruul",
"EX1_558": "Harrison Jones",
"EX1_556": "Harvest Golem",
"EX1_137": "Headcrack",
"EX1_409t": "Heavy Axe",
"NEW1_040": "Hogger",
"EX1_624": "Holy Fire",
"EX1_365": "Holy Wrath",
"EX1_538t": "Hound",
"NEW1_017": "Hungry Crab",
"EX1_534t": "Hyena",
"EX1_289": "Ice Barrier",
"EX1_295": "Ice Block",
"CS2_031": "Ice Lance",
"EX1_614": "Illidan Stormrage",
"EX1_598": "Imp",
"EX1_597": "Imp Master",
"EX1_tk34": "Infernal",
"CS2_181": "Injured Blademaster",
"CS1_129": "Inner Fire",
"EX1_607": "Inner Rage",
"CS2_203": "Ironbeak Owl",
"EX1_017": "Jungle Panther",
"EX1_166": "Keeper of the Grove",
"NEW1_005": "Kidnapper",
"EX1_543": "King Krush",
"EX1_014": "King Mukla",
"EX1_612": "Kirin Tor Mage",
"NEW1_019": "Knife Juggler",
"DREAM_01": "Laughing Sister",
"EX1_241": "Lava Burst",
"EX1_354": "Lay on Hands",
"EX1_160b": "Leader of the Pack",
"EX1_116": "Leeroy Jenkins",
"EX1_029": "Leper Gnome",
"EX1_238": "Lightning Bolt",
"EX1_259": "Lightning Storm",
"EX1_335": "Lightspawn",
"EX1_001": "Lightwarden",
"EX1_341": "Lightwell",
"EX1_096": "Loot Hoarder",
"EX1_323": "Lord Jaraxxus",
"EX1_100": "Lorewalker Cho",
"EX1_082": "Mad Bomber",
"EX1_563": "Malygos",
"EX1_055": "Mana Addict",
"EX1_575": "Mana Tide Totem",
"EX1_616": "Mana Wraith",
"NEW1_012": "Mana Wyrm",
"EX1_155": "Mark of Nature",
"EX1_155b": "Mark of Nature",
"EX1_155a": "Mark of Nature",
"EX1_626": "Mass Dispel",
"NEW1_037": "Master Swordsmith",
"NEW1_014": "Master of Disguise",
"NEW1_029": "Millhouse Manastorm",
"EX1_085": "Mind Control Tech",
"EX1_345": "Mindgames",
"EX1_294": "Mirror Entity",
"EX1_533": "Misdirection",
"EX1_396": "Mogu'shan Warden",
"EX1_620": "Molten Giant",
"EX1_166a": "Moonfire",
"EX1_408": "Mortal Strike",
"EX1_105": "Mountain Giant",
"EX1_509": "Murloc Tidecaller",
"EX1_507": "Murloc Warleader",
"EX1_557": "Nat Pagle",
"EX1_161": "Naturalize",
"DREAM_05": "Nightmare",
"EX1_130": "Noble Sacrifice",
"EX1_164b": "Nourish",
"EX1_164a": "Nourish",
"EX1_164": "Nourish",
"EX1_560": "Nozdormu",
"EX1_562": "Onyxia",
"EX1_160t": "Panther",
"EX1_522": "Patient Assassin",
"EX1_133": "Perdition's Blade",
"EX1_076": "Pint-Sized Summoner",
"EX1_313": "Pit Lord",
"EX1_316": "Power Overwhelming",
"EX1_160": "Power of the Wild",
"EX1_145": "Preparation",
"EX1_583": "Priestess of Elune",
"EX1_350": "Prophet Velen",
"EX1_279": "Pyroblast",
"EX1_044": "Questing Adventurer",
"EX1_412": "Raging Worgen",
"EX1_298": "Ragnaros the Firelord",
"CS2_104": "Rampage",
"CS2_161": "Ravenholdt Assassin",
"EX1_136": "Redemption",
"EX1_379": "Repentance",
"EX1_178a": "Rooted",
"EX1_134": "SI:7 Agent",
"EX1_578": "Savagery",
"EX1_534": "Savannah Highmane",
"EX1_020": "Scarlet Crusader",
"EX1_531": "Scavenging Hyena",
"EX1_586": "Sea Giant",
"EX1_080": "Secretkeeper",
"EX1_317": "Sense Demons",
"EX1_334": "Shadow Madness",
"EX1_345t": "Shadow of Nothing",
"EX1_303": "Shadowflame",
"EX1_625": "Shadowform",
"EX1_144": "Shadowstep",
"EX1_573b": "Shan'do's Lesson",
"EX1_410": "Shield Slam",
"EX1_405": "Shieldbearer",
"EX1_332": "Silence",
"CS2_151": "Silver Hand Knight",
"EX1_023": "Silvermoon Guardian",
"EX1_309": "Siphon Soul",
"EX1_391": "Slam",
"EX1_554t": "Snake",
"EX1_554": "Snake Trap",
"EX1_609": "Snipe",
"EX1_608": "Sorcerer's Apprentice",
"EX1_158": "Soul of the Forest",
"NEW1_027": "Southsea Captain",
"CS2_146": "Southsea Deckhand",
"tt_010a": "Spellbender (minion)",
"tt_010": "Spellbender",
"EX1_048": "Spellbreaker",
"EX1_tk11": "Spirit Wolf",
"CS2_221": "Spiteful Smith",
"CS2_152": "Squire",
"EX1_tk28": "Squirrel",
"NEW1_041": "Stampeding Kodo",
"NEW1_007a": "Starfall",
"NEW1_007b": "Starfall",
"NEW1_007": "Starfall",
"EX1_247": "Stormforged Axe",
"EX1_028": "Stranglethorn Tiger",
"EX1_160a": "Summon a Panther",
"EX1_315": "Summoning Portal",
"EX1_058": "Sunfury Protector",
"EX1_032": "Sunwalker",
"EX1_366": "Sword of Justice",
"EX1_016": "Sylvanas Windrunner",
"EX1_390": "Tauren Warrior",
"EX1_623": "Temple Enforcer",
"EX1_577": "The Beast",
"EX1_002": "The Black Knight",
"EX1_339": "Thoughtsteal",
"EX1_021": "Thrallmar Farseer",
"EX1_083": "Tinkmaster Overspark",
"EX1_383": "Tirion Fordring",
"EX1_tk9": "Treant (charge)",
"EX1_573t": "Treant (taunt)",
"EX1_158t": "Treant",
"EX1_043": "Twilight Drake",
"EX1_312": "Twisting Nether",
"EX1_258": "Unbound Elemental",
"EX1_538": "Unleash the Hounds",
"EX1_409": "Upgrade!",
"EX1_178b": "Uproot",
"EX1_594": "Vaporize",
"CS2_227": "Venture Co. Mercenary",
"NEW1_026t": "Violet Apprentice",
"NEW1_026": "Violet Teacher",
"EX1_304": "Void Terror",
"ds1_whelptoken": "Whelp",
"EX1_116t": "Whelp",
"NEW1_020": "Wild Pyromancer",
"EX1_033": "Windfury Harpy",
"CS2_231": "Wisp",
"EX1_010": "Worgen Infiltrator",
"EX1_317t": "Worthless Imp",
"EX1_154b": "Wrath",
"EX1_154a": "Wrath",
"EX1_154": "Wrath",
"CS2_169": "Young Dragonhawk",
"EX1_004": "Young Priestess",
"EX1_049": "Youthful Brewmaster",
"EX1_572": "Ysera",
"DREAM_02": "Ysera Awakens",
"EX1_066": "Acidic Swamp Ooze",
"CS2_041": "Ancestral Healing",
"NEW1_031": "Animal Companion",
"CS2_025": "Arcane Explosion",
"CS2_023": "Arcane Intellect",
"EX1_277": "Arcane Missiles",
"DS1_185": "Arcane Shot",
"CS2_112": "Arcanite Reaper",
"CS2_155": "Archmage",
"CS2_080": "Assassin's Blade",
"CS2_076": "Assassinate",
"GAME_002": "Avatar of the Coin",
"CS2_072": "Backstab",
"CS2_092": "Blessing of Kings",
"CS2_087": "Blessing of Might",
"CS2_172": "Bloodfen Raptor",
"CS2_046": "Bloodlust",
"CS2_173": "Bluegill Warrior",
"CS2_boar": "Boar",
"CS2_187": "Booty Bay Bodyguard",
"CS2_200": "Boulderfist Ogre",
"CS2_103": "Charge",
"CS2_182": "Chillwind Yeti",
"CS2_005": "Claw",
"CS2_114": "Cleave",
"CS2_093": "Consecration",
"CS2_201": "Core Hound",
"CS2_063": "Corruption",
"EX1_582": "Dalaran Mage",
"DS1_055": "Darkscale Healer",
"CS2_074": "Deadly Poison",
"CS2_236": "Divine Spirit",
"EX1_025": "Dragonling Mechanic",
"CS2_061": "Drain Life",
"CS2_064": "Dread Infernal",
"CS2_189": "Elven Archer",
"CS2_013t": "Excess Mana",
"CS2_108": "Execute",
"EX1_129": "Fan of Knives",
"CS2_106": "Fiery War Axe",
"CS2_042": "Fire Elemental",
"CS2_029": "Fireball",
"CS2_032": "Flamestrike",
"EX1_565": "Flametongue Totem",
"hexfrog": "Frog",
"CS2_026": "Frost Nova",
"CS2_037": "Frost Shock",
"CS2_024": "Frostbolt",
"CS2_121": "Frostwolf Grunt",
"CS2_226": "Frostwolf Warlord",
"CS2_147": "Gnomish Inventor",
"CS1_042": "Goldshire Footman",
"EX1_508": "Grimscale Oracle",
"CS2_088": "Guardian of Kings",
"EX1_399": "Gurubashi Berserker",
"CS2_094": "Hammer of Wrath",
"EX1_371": "Hand of Protection",
"NEW1_009": "Healing Totem",
"CS2_007": "Healing Touch",
"CS2_062": "Hellfire",
"CS2_105": "Heroic Strike",
"EX1_246": "Hex",
"CS2_089": "Holy Light",
"CS1_112": "Holy Nova",
"CS1_130": "Holy Smite",
"DS1_070": "Houndmaster",
"NEW1_034": "Huffer",
"EX1_360": "Humility",
"CS2_084": "Hunter's Mark",
"EX1_169": "Innervate",
"CS2_232": "Ironbark Protector",
"CS2_141": "Ironforge Rifleman",
"CS2_125": "Ironfur Grizzly",
"EX1_539": "Kill Command",
"CS2_142": "Kobold Geomancer",
"NEW1_011": "Kor'kron Elite",
"NEW1_033": "Leokk",
"CS2_091": "Light's Justice",
"CS2_162": "Lord of the Arena",
"CS2_118": "Magma Rager",
"CS2_009": "Mark of the Wild",
"EX1_025t": "Mechanical Dragonling",
"DS1_233": "Mind Blast",
"CS1_113": "Mind Control",
"CS2_003": "Mind Vision",
"CS2_mirror": "Mirror Image (minion)",
"CS2_027": "Mirror Image",
"NEW1_032": "Misha",
"CS2_008": "Moonfire",
"EX1_302": "Mortal Coil",
"DS1_183": "Multi-Shot",
"CS2_168": "Murloc Raider",
"EX1_506a": "Murloc Scout",
"EX1_506": "Murloc Tidehunter",
"GAME_006": "NOOOOOOOOOOOO",
"EX1_593": "Nightblade",
"CS2_235": "Northshire Cleric",
"EX1_015": "Novice Engineer",
"CS2_119": "Oasis Snapjaw",
"CS2_197": "Ogre Magi",
"CS2_022": "Polymorph",
"CS2_004": "Power Word: Shield",
"CS2_122": "Raid Leader",
"CS2_196": "Razorfen Hunter",
"CS2_213": "Reckless Rocketeer",
"CS2_120": "River Crocolisk",
"CS2_045": "Rockbiter Weapon",
"NEW1_003": "Sacrificial Pact",
"EX1_581": "Sap",
"CS2_011": "Savage Roar",
"CS2_050": "Searing Totem",
"CS2_179": "Sen'jin Shieldmasta",
"CS2_057": "Shadow Bolt",
"EX1_622": "Shadow Word: Death",
"CS2_234": "Shadow Word: Pain",
"EX1_019": "Shattered Sun Cleric",
"CS2_tk1": "Sheep",
"EX1_606": "Shield Block",
"EX1_278": "Shiv",
"CS2_101t": "Silver Hand Recruit",
"CS2_127": "Silverback Patriarch",
"CS2_075": "Sinister Strike",
"skele11": "Skeleton",
"EX1_308": "Soulfire",
"CS2_077": "Sprint",
"EX1_173": "Starfire",
"CS2_237": "Starving Buzzard",
"CS2_051": "Stoneclaw Totem",
"CS2_171": "Stonetusk Boar",
"CS2_150": "Stormpike Commando",
"CS2_222": "Stormwind Champion",
"CS2_131": "Stormwind Knight",
"EX1_306": "Succubus",
"CS2_012": "Swipe",
"GAME_005": "The Coin",
"DS1_175": "Timber Wolf",
"EX1_244": "Totemic Might",
"DS1_184": "Tracking",
"CS2_097": "Truesilver Champion",
"DS1_178": "Tundra Rhino",
"NEW1_004": "Vanish",
"CS2_065": "Voidwalker",
"EX1_011": "Voodoo Doctor",
"CS2_186": "War Golem",
"EX1_084": "Warsong Commander",
"CS2_033": "Water Elemental",
"EX1_400": "Whirlwind",
"CS2_082": "Wicked Knife",
"CS2_013": "Wild Growth",
"CS2_039": "Windfury",
"EX1_587": "Windspeaker",
"CS2_124": "Wolfrider",
"CS2_052": "Wrath of Air Totem",
"FP1_026": "Anub'ar Ambusher",
"FP1_020": "Avenge",
"FP1_031": "Baron Rivendare",
"FP1_029": "Dancing Swords",
"FP1_023": "Dark Cultist",
"FP1_021": "Death's Bite",
"NAX6_03": "Deathbloom",
"FP1_006": "Deathcharger",
"FP1_009": "Deathlord",
"FP1_018": "Duplicate",
"FP1_003": "Echoing Ooze",
"NAX12_04": "Enrage",
"NAX11_03": "Fallout Slime",
"NAX13_04H": "Feugen",
"FP1_015": "Feugen",
"NAX14_03": "Frozen Champion",
"NAX15_03t": "Guardian of Icecrown",
"NAX15_03n": "Guardian of Icecrown",
"FP1_002": "Haunted Creeper",
"NAX10_02": "Hook",
"NAX10_02H": "Hook",
"NAX12_03": "Jaws",
"NAX12_03H": "Jaws",
"FP1_013": "Kel'Thuzad",
"NAX9_02H": "Lady Blaumeux",
"NAX9_02": "Lady Blaumeux",
"FP1_030": "Loatheb",
"NAX1_05": "Locust Swarm",
"FP1_004": "Mad Scientist",
"FP1_010": "Maexxna",
"NAX9_07": "Mark of the Horsemen",
"NAX7_04H": "Massive Runeblade",
"NAX7_04": "Massive Runeblade",
"NAX7_05": "Mind Control Crystal",
"NAX5_03": "Mindpocalypse",
"NAX15_05": "Mr. Bigglesworth",
"NAX11_04": "Mutating Injection",
"NAXM_001": "Necroknight",
"NAX3_03": "Necrotic Poison",
"FP1_017": "Nerub'ar Weblord",
"NAX1h_03": "Nerubian (normal)",
"NAX1_03": "Nerubian (heroic)",
"FP1_007t": "Nerubian",
"FP1_007": "Nerubian Egg",
"NAX4_05": "Plague",
"FP1_019": "Poison Seeds",
"NAX14_04": "Pure Cold",
"FP1_025": "Reincarnate",
"NAX9_05H": "Runeblade",
"NAX9_05": "Runeblade",
"FP1_005": "Shade of Naxxramas",
"NAX9_04": "Sir Zeliek",
"NAX9_04H": "Sir Zeliek",
"NAXM_002": "Skeletal Smith",
"NAX4_03H": "Skeleton",
"NAX4_03": "Skeleton",
"FP1_012t": "Slime",
"FP1_012": "Sludge Belcher",
"FP1_008": "Spectral Knight",
"NAX8_05t": "Spectral Rider",
"FP1_002t": "Spectral Spider",
"NAX8_03t": "Spectral Trainee",
"NAX8_04t": "Spectral Warrior",
"NAX6_03t": "Spore",
"NAX6_04": "Sporeburst",
"NAX13_05H": "Stalagg",
"FP1_014": "Stalagg",
"FP1_027": "Stoneskin Gargoyle",
"NAX13_03": "Supercharge",
"FP1_014t": "Thaddius",
"NAX9_03H": "Thane Korth'azz",
"NAX9_03": "Thane Korth'azz",
"FP1_019t": "Treant (poison seeds)",
"NAX7_02": "Understudy",
"FP1_028": "Undertaker",
"NAX8_05": "Unrelenting Rider",
"NAX8_03": "Unrelenting Trainee",
"NAX8_04": "Unrelenting Warrior",
"FP1_024": "Unstable Ghoul",
"FP1_022": "Voidcaller",
"FP1_016": "Wailing Soul",
"FP1_011": "Webspinner",
"NAX2_05": "Worshipper",
"NAX2_05H": "Worshipper",
"FP1_001": "Zombie Chow",
"GVG_029": "Ancestor's Call",
"GVG_077": "Anima Golem",
"GVG_085": "Annoy-o-Tron",
"GVG_030": "Anodized Robo Cub",
"GVG_069": "Antique Healbot",
"GVG_091": "Arcane Nullifier X-21",
"PART_001": "Armor Plating",
"GVG_030a": "Attack Mode",
"GVG_119": "Blingtron 3000",
"GVG_063": "Bolvar Fordragon",
"GVG_099": "Bomb Lobber",
"GVG_110t": "Boom Bot",
"GVG_050": "Bouncing Blade",
"GVG_068": "Burly Rockjaw Trogg",
"GVG_056t": "Burrowing Mine",
"GVG_017": "Call Pet",
"GVG_092t": "Chicken (Gnomish Experimenter)",
"GVG_121": "Clockwork Giant",
"GVG_082": "Clockwork Gnome",
"GVG_062": "Cobalt Guardian",
"GVG_073": "Cobra Shot",
"GVG_059": "Coghammer",
"GVG_013": "Cogmaster",
"GVG_024": "Cogmaster's Wrench",
"GVG_038": "Crackle",
"GVG_052": "Crush",
"GVG_041": "Dark Wispers",
"GVG_041b": "Dark Wispers",
"GVG_041a": "Dark Wispers",
"GVG_015": "Darkbomb",
"GVG_019": "Demonheart",
"GVG_110": "Dr. Boom",
"GVG_080t": "Druid of the Fang (cobra)",
"GVG_080": "Druid of the Fang",
"GVG_066": "Dunemaul Shaman",
"GVG_005": "Echo of Medivh",
"PART_005": "Emergency Coolant",
"GVG_107": "Enhance-o Mechano",
"GVG_076": "Explosive Sheep",
"GVG_026": "Feign Death",
"GVG_020": "Fel Cannon",
"GVG_016": "Fel Reaver",
"PART_004": "Finicky Cloakfield",
"GVG_007": "Flame Leviathan",
"GVG_001": "Flamecannon",
"GVG_100": "Floating Watcher",
"GVG_084": "Flying Machine",
"GVG_113": "Foe Reaper 4000",
"GVG_079": "Force-Tank MAX",
"GVG_049": "Gahz'rilla",
"GVG_028t": "Gallywix's Coin",
"GVG_117": "Gazlowe",
"GVG_032b": "Gift of Cards",
"GVG_032a": "Gift of Mana",
"GVG_081": "Gilblin Stalker",
"GVG_043": "Glaivezooka",
"GVG_098": "Gnomeregan Infantry",
"GVG_092": "Gnomish Experimenter",
"GVG_023": "Goblin Auto-Barber",
"GVG_004": "Goblin Blastmage",
"GVG_095": "Goblin Sapper",
"GVG_032": "Grove Tender",
"GVG_120": "Hemet Nesingwary",
"GVG_104": "Hobgoblin",
"GVG_089": "Illuminator",
"GVG_045t": "Imp (warlock)",
"GVG_045": "Imp-losion",
"GVG_056": "Iron Juggernaut",
"GVG_027": "Iron Sensei",
"GVG_094": "Jeeves",
"GVG_106": "Junkbot",
"GVG_074": "Kezan Mystic",
"GVG_046": "King of Beasts",
"GVG_012": "Light of the Naaru",
"GVG_008": "Lightbomb",
"GVG_097": "Lil' Exorcist",
"GVG_071": "Lost Tallstrider",
"GVG_090": "Madder Bomber",
"GVG_021": "Mal'Ganis",
"GVG_035": "Malorne",
"GVG_034": "Mech-Bear-Cat",
"GVG_078": "Mechanical Yeti",
"GVG_006": "Mechwarper",
"GVG_116": "Mekgineer Thermaplugg",
"GVG_048": "Metaltooth Leaper",
"GVG_103": "Micro Machine",
"GVG_111": "Mimiron's Head",
"GVG_109": "Mini-Mage",
"GVG_018": "Mistress of Pain",
"GVG_112": "Mogor the Ogre",
"GVG_061": "Muster for Battle",
"GVG_042": "Neptulon",
"GVG_065": "Ogre Brute",
"GVG_088": "Ogre Ninja",
"GVG_054": "Ogre Warmaul",
"GVG_025": "One-eyed Cheat",
"GVG_096": "Piloted Shredder",
"GVG_105": "Piloted Sky Golem",
"GVG_036": "Powermace",
"GVG_064": "Puddlestomper",
"GVG_060": "Quartermaster",
"GVG_108": "Recombobulator",
"GVG_031": "Recycle",
"PART_006": "Reversing Switch",
"PART_003": "Rusty Horn",
"GVG_047": "Sabotage",
"GVG_070": "Salty Dog",
"GVG_101": "Scarlet Purifier",
"GVG_055": "Screwjank Clunker",
"GVG_057": "Seal of Light",
"GVG_009": "Shadowbomber",
"GVG_072": "Shadowboxer",
"GVG_058": "Shielded Minibot",
"GVG_053": "Shieldmaiden",
"GVG_075": "Ship's Cannon",
"GVG_011": "Shrinkmeister",
"GVG_086": "Siege Engine",
"GVG_040": "Siltfin Spiritwalker",
"GVG_114": "Sneed's Old Shredder",
"GVG_002": "Snowchugger",
"GVG_123": "Soot Spewer",
"GVG_044": "Spider Tank",
"GVG_087": "Steamwheedle Sniper",
"GVG_067": "Stonesplinter Trogg",
"GVG_030b": "Tank Mode",
"GVG_093": "Target Dummy",
"PART_002": "Time Rewinder",
"GVG_022": "Tinker's Sharpsword Oil",
"GVG_102": "Tinkertown Technician",
"GVG_115": "Toshley",
"GVG_028": "Trade Prince Gallywix",
"GVG_033": "Tree of Life",
"GVG_118": "Troggzor the Earthinator",
"GVG_003": "Unstable Portal",
"GVG_083": "Upgraded Repair Bot",
"GVG_111t": "V-07-TR-0N",
"GVG_010": "Velen's Chosen",
"GVG_039": "Vitality Totem",
"GVG_014": "Vol'jin",
"GVG_051": "Warbot",
"GVG_122": "Wee Spellstopper",
"PART_007": "Whirling Blades",
"GVG_037": "Whirling Zap-o-matic",
"NEW1_016": "Captain's Parrot",
"EX1_062": "Old Murk-Eye",
"Mekka4t": "Chicken",
"PRO_001": "Elite Tauren Chieftain",
"Mekka3": "Emboldener 3000",
"EX1_112": "Gelbin Mekkatorque",
"Mekka1": "Homing Chicken",
"PRO_001a": "I Am Murloc",
"PRO_001at": "Murloc",
"Mekka4": "Poultryizer",
"PRO_001c": "Power of the Horde",
"Mekka2": "Repair Bot",
"PRO_001b": "Rogues Do It...",
"BRM_016": "Axe Flinger",
"BRM_034": "Blackwing Corruptor",
"BRM_033": "Blackwing Technician",
"BRM_031": "Chromaggus",
"BRM_014": "Core Rager",
"BRM_008": "Dark Iron Skulker",
"BRM_005": "Demonwrath",
"BRM_018": "Dragon Consort",
"BRM_022": "Dragon Egg",
"BRM_003": "Dragon's Breath",
"BRM_020": "Dragonkin Sorcerer",
"BRM_024": "Drakonid Crusher",
"BRM_010": "Druid of the Flame",
"BRM_028": "Emperor Thaurissan",
"BRM_012": "Fireguard Destroyer",
"BRM_002": "Flamewaker",
"BRM_007": "Gang Up",
"BRM_019": "Grim Patron",
"BRM_026": "Hungry Dragon",
"BRM_006": "Imp Gang Boss",
"BRM_011": "Lava Shock",
"BRM_027": "Majordomo Executus",
"BRM_030": "Nefarian",
"BRM_013": "Quick Shot",
"BRM_029": "Rend Blackhand",
"BRM_017": "Resurrect",
"BRM_015": "Revenge",
"BRM_001": "Solemn Vigil",
"BRM_004": "Twilight Whelp",
"BRM_025": "Volcanic Drake",
"BRM_009": "Volcanic Lumberer",
}
| slaymaker1907/hearthbreaker | tests/card_tests/id_mapping.py | Python | mit | 23,324 |
# -*- coding: utf-8 -*-
## $Id: webmessage_templates.py,v 1.32 2008/03/26 23:26:23 tibor Exp $
##
## handles rendering of webmessage module
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Templates for field exporter plugin """
__revision__ = "$Id: webmessage_templates.py,v 1.32 2008/03/26 23:26:23 tibor Exp $"
import cgi
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL
from invenio.base.i18n import gettext_set_language
from invenio.utils.date import convert_datestruct_to_datetext, convert_datetext_to_dategui, convert_datestruct_to_dategui
from invenio.legacy.bibexport.fieldexporter_dblayer import Job, JobResult
class Template:
"""Templates for field exporter plugin"""
_JOBS_URL = "%s/exporter/jobs" % (CFG_SITE_URL, )
_EDIT_JOB_URL = "%s/exporter/edit_job" % (CFG_SITE_URL, )
_EDIT_QUERY_URL = "%s/exporter/edit_query" % (CFG_SITE_URL, )
_JOB_RESULTS_URL = "%s/exporter/job_results" % (CFG_SITE_URL, )
_DISPLAY_JOB_RESULT_URL = "%s/exporter/display_job_result" % (CFG_SITE_URL, )
_DOWNLOAD_JOB_RESULT_URL = "%s/exporter/download_job_result" % (CFG_SITE_URL, )
_JOB_HISTORY_URL = "%s/exporter/history" % (CFG_SITE_URL, )
def tmpl_styles(self):
"""Defines the local CSS styles used in the plugin"""
styles = """
<style type="text/css">
.label{
white-space: nowrap;
padding-right: 15px;
}
.textentry{
width: 350px;
}
table.spacedcells td{
padding-right: 20px;
white-space: nowrap;
}
table.spacedcells th{
padding-right: 20px;
text-align: left;
}
</style>
<script type="text/javascript">
<!--
function SetAllCheckBoxes(FormName, FieldName, CheckValue)
{
if(!document.forms[FormName])
return;
var objCheckBoxes = document.forms[FormName].elements[FieldName];
if(!objCheckBoxes)
return;
var countCheckBoxes = objCheckBoxes.length;
if(!countCheckBoxes)
objCheckBoxes.checked = CheckValue;
else
// set the check value for all check boxes
for(var i = 0; i < countCheckBoxes; i++)
objCheckBoxes[i].checked = CheckValue;
}
// -->
</script>
"""
return styles
def tmpl_navigation_menu(self, language = CFG_SITE_LANG):
"""Returns HTML representing navigation menu for field exporter."""
_ = gettext_set_language(language)
navigation_menu = """
<table class="headermodulebox">
<tbody><tr>
<td class="headermoduleboxbody">
<a class="header" href="%(job_verview_url)s?ln=%(language)s">%(label_job_overview)s</a>
</td>
<td class="headermoduleboxbody">
<a class="header" href="%(edit_job_url)s?ln=%(language)s">%(label_new_job)s</a>
</td>
<td class="headermoduleboxbody">
<a class="header" href="%(job_history_url)s?ln=%(language)s">%(label_job_history)s</a>
</td>
</tr></tbody></table>
""" % {"edit_job_url" : self._EDIT_JOB_URL,
"job_verview_url" : self._JOBS_URL,
"job_history_url" : self._JOB_HISTORY_URL,
"language" : language,
"label_job_overview" : _("Export Job Overview"),
"label_new_job" : _("New Export Job"),
"label_job_history" : _("Export Job History")
}
return navigation_menu
def tmpl_display_jobs(self, jobs, language = CFG_SITE_LANG):
"""
        Creates the page displaying all of the jobs.
@param jobs: list of the jobs that have to be displayed
@param language: language of the page
"""
_ = gettext_set_language(language)
table_rows = ""
for current_job in jobs:
# convert last run date into text proper to be shown to the user
datetext = convert_datestruct_to_datetext(current_job.get_last_run())
last_run = convert_datetext_to_dategui(datetext, language)
# obtain text corresponding to the frequency of execution
frequency = current_job.get_frequency()
frequency_text = self._get_frequency_text(frequency)
row = """<tr>
<td><input type="checkbox" name="selected_jobs" value="%(job_id)s"></input></td>
<td><a href="%(edit_job_url)s?id=%(job_id)s&ln=%(language)s">%(name)s</a></td>
<td>%(frequency)s</td>
<td>%(last_run)s</td>
</tr>""" % self._html_escape_dictionary({
"edit_job_url" : self._EDIT_JOB_URL,
"job_id" : current_job.get_id(),
"name" : current_job.get_name(),
"frequency" : frequency_text,
"language" : language,
"last_run" : last_run
})
table_rows += row
select_all_none_row = """
<tr><td colspan="4">
<small>%s</small><br><br>
</td></tr>""" \
%(self._get_select_all_none_html("jobsForm",
"selected_jobs",
language))
table_rows += select_all_none_row
buttons_row = """<tr>
<td colspan="3">
<input type="Submit" name="run_button" value="%(label_run)s" class="formbutton">
<input type="Submit" name="delete_button" value="%(label_delete)s" class="formbutton">
</td>
<td align="right">
<input type="Submit" name="new_button" value="%(label_new)s" class="formbutton">
</td>
</tr>""" % {
"label_run" : _("Run"),
"label_delete" : _("Delete"),
"label_new" : _("New")
}
table_rows += buttons_row
body = """
<form method="post" name="jobsForm">
<table class="spacedcells">
<th></th>
<th>%(label_name)s</th>
<th>%(label_frequency)s</th>
<th>%(label_last_run)s</th>
%(table_rows)s
</table>
</form>
""" % {
"table_rows" : table_rows,
"label_name" : _("Name"),
"label_frequency" : _("Run"),
"label_last_run" : _("Last run")
}
return body
def tmpl_edit_job(self, job, language = CFG_SITE_LANG):
"""
        Creates the page for editing a job.
@param job: The job that will be edited
@param language: language of the page
"""
_ = gettext_set_language(language)
job_frequency = job.get_frequency()
frequency_select_box_html = self._create_frequency_select_box("job_frequency", job_frequency, language)
output_format_select_box_html = self._create_output_format_select_box(selected_value = job.get_output_format())
body = """
<form method="post">
<input type="Hidden" name="id" value="%(job_id)s">
<table>
<tr>
<td class = "label">%(name_label)s</td>
<td colspan="2"><input type="text" name="job_name" class="textentry" value="%(job_name)s"></td>
</tr>
<tr>
<td class = "label">%(frequency_label)s</td>
<td colspan="2">%(frequency_select_box)s</td>
</tr>
<tr>
<td class = "label">%(output_format_label)s</td>
<td colspan="2">%(output_format_select_box)s</td>
</tr>
<tr>
<td class = "label">%(start_label)s</td>
<td colspan="2"><input type="text" name="last_run" class="textentry" value="%(job_last_run)s"></td>
</tr>
<tr>
<td class = "label">%(output_directory_label)s</td>
<td colspan="2"><input type="text" name="output_directory" class="textentry" value="%(output_directory)s"></td>
</tr>
<tr>
<td></td>
<td>
<input type="Submit" name="save_button" value="%(save_label)s" class="formbutton">
<input type="Submit" name="cancel_button" value="%(cancel_label)s" class="formbutton">
</td>
<td align="right">
<input type="Submit" name="edit_queries_button" value="%(edit_queries_label)s" class="formbutton">
</td>
</tr>
</table>
</form>
""" % {
"name_label" : _("Name"),
"frequency_label" : _("Frequency"),
"output_format_label" : _("Output Format"),
"start_label" : _("Start"),
"output_directory_label" : _("Output Directory"),
"save_label" : _("Save"),
"cancel_label" : _("Cancel"),
"edit_queries_label" : _("Edit Queries"),
"job_id" : self._html_escape_content(job.get_id()),
"job_name" : self._html_escape_content(job.get_name()),
"frequency_select_box" : frequency_select_box_html,
"output_format_select_box" : output_format_select_box_html,
"job_last_run" : convert_datestruct_to_datetext(job.get_last_run()),
"output_directory" : self._html_escape_content(job.get_output_directory())
}
return body
def tmpl_display_job_queries(self, job_queries, job_id, language = CFG_SITE_LANG):
"""
        Creates the page displaying the queries of a given job.
        @param job_queries: list of JobQuery objects that have to be displayed
        @param job_id: identifier of the job that owns the queries
@param language: language of the page
"""
_ = gettext_set_language(language)
table_rows = ""
for current_query in job_queries:
output_fields = ", ".join(current_query.get_output_fields())
row = """<tr>
<td><input type="checkbox" name="selected_queries" value="%(query_id)s"></input></td>
<td><a href="%(edit_query_url)s?id=%(query_id)s&job_id=%(job_id)s&ln=%(language)s">%(name)s</a></td>
<td><input type="text" value="%(search_criteria)s" readonly style="border: none; width: 130px"></td>
<td><input type="text" value="%(output_fields)s" readonly style="border: none; width: 130px"></td>
<td><input type="text" value="%(comment)s" readonly style="border: none; width: 130px"></td>
</tr>""" % self._html_escape_dictionary({
"edit_query_url" : self._EDIT_QUERY_URL,
"language" : language,
"query_id" : current_query.get_id(),
"search_criteria" : current_query.get_search_criteria(),
"name" : current_query.get_name(),
"comment" : current_query.get_comment(),
"output_fields" : output_fields,
"job_id" : job_id
})
table_rows += row
select_all_none_row = """
<tr><td colspan="4">
<small>%s</small><br><br>
</td></tr>""" \
% (self._get_select_all_none_html("queriesForm",
"selected_queries",
language))
table_rows += select_all_none_row
buttons_row = """<tr>
<td colspan="4">
<input type="Submit" name="run_button" value="%(label_run)s" class="formbutton">
<input type="Submit" name="delete_button" value="%(label_delete)s" class="formbutton">
</td>
<td align="right">
<input type="Submit" name="new_button" value="%(label_new)s" class="formbutton">
</td>
</tr>""" % {
"label_run" : _("Run"),
"label_delete" : _("Delete"),
"label_new" : _("New")
}
table_rows += buttons_row
body = """
<form method="post" name="queriesForm">
<input type="Hidden" name="job_id" value="%(job_id)s">
<table class="spacedcells">
<th></th>
<th>%(label_name)s</th>
<th>%(label_search_criteria)s</th>
<th>%(label_output_fields)s</th>
<th>%(label_comment)s</th>
%(table_rows)s
</table>
</form>
""" % {
"table_rows" : table_rows,
"label_name" : _("Name"),
"label_search_criteria" : _("Query"),
"label_comment" : _("Comment"),
"label_output_fields" : _("Output Fields"),
"job_id" : self._html_escape_content(job_id)
}
return body
def tmpl_edit_query(self, query, job_id, language = CFG_SITE_LANG):
"""
        Creates the page for editing a query.
@param query: the query that will be edited
@param language: language of the page
@return: The HTML content of the page
"""
_ = gettext_set_language(language)
body = """
<form method="post">
<input type="Hidden" name="id" value="%(id)s">
<input type="Hidden" name="job_id" value="%(job_id)s">
<table >
<tr>
<td class = "label">%(name_label)s</td>
<td><input type="text" name="name" class="textentry" value="%(name)s"></td>
</tr>
<tr>
<td class = "label">%(query_label)s</td>
<td><input type="text" name="search_criteria" class="textentry" value="%(search_criteria)s"></td>
</tr>
<tr>
<td class = "label">%(output_fields_label)s</td>
<td><input type="text" name="output_fields" class="textentry" value="%(output_fields)s"></td>
</tr>
<tr>
<td class = "label">%(comment_label)s</td>
<td><textarea name="comment" rows="6" class="textentry">%(comment)s</textarea></td>
</tr>
<tr>
<td></td>
<td>
<input type="Submit" name="save_button" value="%(save_label)s" class="formbutton">
<input type="Submit" name="cancel_button" value="%(cancel_label)s" class="formbutton">
</td>
</tr>
</table>
</form>
""" % self._html_escape_dictionary({
"name_label" : _("Name"),
"query_label" : _("Query"),
"output_fields_label" : _("Output fields"),
"comment_label" : _("Comment"),
"save_label" : _("Save"),
"cancel_label" : _("Cancel"),
"job_id" : job_id,
"id" : query.get_id(),
"name" : query.get_name(),
"search_criteria" : query.get_search_criteria(),
"output_fields" : ", ".join(query.get_output_fields()),
"comment" : query.get_comment(),
})
return body
def tmpl_display_queries_results(self, job_result, language = CFG_SITE_LANG):
"""Creates a page displaying results from execution of multiple queries.
@param job_result: JobResult object containing the job results
that will be displayed
@param language: language of the page
@return: The HTML content of the page
"""
_ = gettext_set_language(language)
queries_results = job_result.get_query_results()
output_format = job_result.get_job().get_output_format()
job_result_id = job_result.get_id()
body = ""
if job_result_id != JobResult.ID_MISSING:
download_and_format_html = """
<a href="%(download_job_results_url)s?result_id=%(job_result_id)s&ln=%(language)s"><input type="button" value="%(label_download)s" class="formbutton"></a>
<strong>%(label_view_as)s</strong>
<a href="%(display_job_result_url)s?result_id=%(job_result_id)s&output_format=%(output_format_marcxml)s&ln=%(language)s">MARCXML</a>
<a href="%(display_job_result_url)s?result_id=%(job_result_id)s&output_format=%(output_format_marc)s&ln=%(language)s">MARC</a>
""" % self._html_escape_dictionary({
"label_download" : _("Download"),
"label_view_as" : _("View as: "),
"output_format_marcxml" : Job.OUTPUT_FORMAT_MARCXML,
"output_format_marc" : Job.OUTPUT_FORMAT_MARC,
"download_job_results_url" : self._DOWNLOAD_JOB_RESULT_URL,
"language" : language,
"display_job_result_url" : self._DISPLAY_JOB_RESULT_URL,
"job_result_id" : job_result_id
})
body += download_and_format_html
for query_result in queries_results:
query = query_result.get_query()
results = query_result.get_result(output_format)
html = """
<h2>%(name)s</h2>
<strong>%(query_label)s: </strong>%(search_criteria)s<br>
<strong>%(output_fields_label)s: </strong>%(output_fields)s<br>
<textarea rows="10" style="width: 100%%" wrap="off" readonly>%(results)s</textarea></td>
""" % self._html_escape_dictionary({
"query_label" : _("Query"),
"output_fields_label" : _("Output fields"),
"name" : query.get_name(),
"search_criteria" : query.get_search_criteria(),
"output_fields" : ",".join(query.get_output_fields()),
"results" : results
})
body += html
return body
def tmpl_display_job_history(self, job_results, language = CFG_SITE_LANG):
"""Creates a page displaying information about
the job results given as a parameter.
@param job_results: List of JobResult objects containing
information about the job results that have to be displayed
@param language: language of the page
@return: The HTML content of the page
"""
_ = gettext_set_language(language)
table_rows = ""
for current_job_result in job_results:
current_job = current_job_result.get_job()
# convert execution date into text proper to be shown to the user
execution_date_time = current_job_result.get_execution_date_time()
date = convert_datestruct_to_dategui(execution_date_time)
# obtain text corresponding to the frequency of execution
frequency = current_job.get_frequency()
frequency_text = self._get_frequency_text(frequency, language)
# set the status text
if current_job_result.STATUS_CODE_OK == current_job_result.get_status():
status = _("OK")
else:
status = _("Error")
records_found = current_job_result.get_number_of_records_found()
row = """<tr>
<td><a href="%(job_results_url)s?result_id=%(job_result_id)s&ln=%(language)s">%(job_name)s</a></td>
<td>%(job_frequency)s</td>
<td>%(execution_date)s</td>
<td><b>%(status)s</b>
<a href="%(display_job_result_url)s?result_id=%(job_result_id)s&ln=%(language)s">
<small>%(number_of_records_found)s %(label_records_found)s</small>
</a>
</td>
</tr>""" % self._html_escape_dictionary({
"job_name" : current_job.get_name(),
"job_frequency" : frequency_text,
"execution_date" : date,
"status" : status,
"number_of_records_found" : records_found,
"label_records_found" : _("records found"),
"job_results_url" : self._JOB_RESULTS_URL,
"display_job_result_url" : self._DISPLAY_JOB_RESULT_URL,
"language" : language,
"job_result_id" : current_job_result.get_id()
})
table_rows += row
body = """
<table class="spacedcells">
<th>%(label_job_name)s</th>
<th>%(label_job_frequency)s</th>
<th>%(label_execution_date)s</th>
<th>%(label_status)s</th>
%(table_rows)s
</table>
""" % {
"table_rows" : table_rows,
"label_job_name" : _("Job"),
"label_job_frequency" : _("Run"),
"label_execution_date" : _("Date"),
"label_status" : _("Status")
}
return body
def tmpl_display_job_result_information(self, job_result, language = CFG_SITE_LANG):
"""Creates a page with information about a given job result
        @param job_result: JobResult object containing the job result
@param language: language of the page
@return: The HTML content of the page
"""
_ = gettext_set_language(language)
table_rows = ""
for current_query_result in job_result.get_query_results():
current_query_name = current_query_result.get_query().get_name()
# set the status text
if current_query_result.STATUS_CODE_OK == current_query_result.get_status():
status = _("OK")
else:
status = _("Error")
records_found = current_query_result.get_number_of_records_found()
row = """<tr>
<td>%(query_name)s</td>
<td><b>%(status)s</b></td>
<td><small>%(number_of_records_found)s %(label_records_found)s</small></td>
</tr>""" % self._html_escape_dictionary({
"query_name" : current_query_name,
"status" : status,
"number_of_records_found" : records_found,
"label_records_found" : _("records found")
})
table_rows += row
number_of_all_records_found = job_result.get_number_of_records_found()
job_result_id = job_result.get_id()
final_row = """
<tr>
<td></td>
<td><b>%(label_total)s</b></td>
<td>
<a href="%(display_job_results_url)s?result_id=%(job_result_id)s&ln=%(language)s">
<b>%(number_of_all_records_found)s %(label_records_found)s</b>
</a>
</td>
</tr>""" % self._html_escape_dictionary({
"label_total" : _("Total"),
"number_of_all_records_found" : number_of_all_records_found,
"label_records_found" : _("records found"),
"display_job_results_url" : self._DISPLAY_JOB_RESULT_URL,
"language" : language,
"job_result_id" : job_result_id
})
table_rows += final_row
download_row = """
<tr>
<td></td><td></td><td>
<a href="%(download_job_results_url)s?result_id=%(job_result_id)s&ln=%(language)s">
<input type="button" value="%(label_download)s" class="formbutton">
</a>
</td>
</tr>""" % self._html_escape_dictionary({
"label_download" : _("Download"),
"download_job_results_url" : self._DOWNLOAD_JOB_RESULT_URL,
"language" : language,
"job_result_id" : job_result_id
})
table_rows += download_row
job_name = self._html_escape_content(job_result.get_job().get_name())
if(job_result.get_status() == job_result.STATUS_CODE_ERROR):
status_messasge = job_result.get_status_message()
else:
status_messasge = ""
status_messasge = self._html_escape_content(status_messasge)
body = """
<h2>%(job_name)s</h2>
<table class="spacedcells">
<th>%(label_query)s</th>
<th>%(label_status)s</th>
<th></th>
%(table_rows)s
</table>
<br>
<pre style="color: Red;">%(status_message)s</pre>
""" % {
"table_rows" : table_rows,
"label_query" : _("Query"),
"label_status" : _("Status"),
"job_name" : job_name,
"status_message" : status_messasge
}
return body
def _get_select_all_none_html(self, form_name, field_name, language = CFG_SITE_LANG):
"""Returns HTML providing Select All|None links
@param form_name: the name of the form containing the checkboxes
@param field_name: the name of the checkbox fields that will be affected
@param language: language for output
"""
_ = gettext_set_language(language)
output_html = """
%(label_select)s: <a href="javascript:SetAllCheckBoxes('%(form_name)s', '%(field_name)s', true);">%(label_all)s</a>, <a href="javascript:SetAllCheckBoxes('%(form_name)s', '%(field_name)s', false);">%(label_none)s</a>
"""% {
"label_select" : _("Select"),
"label_all" : _("All"),
"label_none" : _("None"),
"form_name" : form_name,
"field_name" : field_name
}
return output_html
def _get_frequency_text(self, frequency, language = CFG_SITE_LANG):
"""
Returns text representation of the frequency: Manually, Daily, Weekly, Monthly
        @param frequency: integer containing the number of hours between consecutive executions.
@param language: language for output
"""
_ = gettext_set_language(language)
if 0 == frequency:
frequency_text = _("Manually")
elif 24 == frequency:
frequency_text = _("Daily")
elif 168 == frequency:
frequency_text = _("Weekly")
elif 720 == frequency:
frequency_text = _("Monthly")
else:
frequency_text = "Every %s hours" % (frequency,)
return frequency_text
def _create_output_format_select_box(self, selected_value = 0):
"""
        Creates a select box for the output format of a job.
        @param selected_value: value selected in the control
        @return: HTML string representing an HTML select control.
"""
items = [("MARCXML", Job.OUTPUT_FORMAT_MARCXML),
("MARC", Job.OUTPUT_FORMAT_MARC)]
html_output = self._create_select_box("output_format", items, selected_value)
return html_output
def _create_frequency_select_box(self, name, selected_value = 0, language = CFG_SITE_LANG):
"""
Creates a select box for frequency of an action/task.
@param name: name of the control
@param language: language of the menu
@param selected_value: value selected in the control
@return: HTML string representing HTML select control.
"""
items = [(self._get_frequency_text(0, language), 0),
(self._get_frequency_text(24, language), 24),
(self._get_frequency_text(168, language), 168),
(self._get_frequency_text(720, language), 720)]
html_output = self._create_select_box(name, items, selected_value)
return html_output
def _create_select_box(self, name, items, selected_value = None):
""" Returns the HTML code for a select box.
@param name: the name of the control
@param items: list of (text, value) tuples where text is the text to be displayed
and value is the value corresponding to the text in the select box
e.g. [("first", 1), ("second", 2), ("third", 3)]
@param selected_value: the value that will be selected
in the select box.
"""
html_output = """<select name="%s">""" % name
for text, value in items:
if selected_value == value:
selected = 'selected="selected"'
else:
selected = ""
current_option = """<option value="%(value)s" %(selected)s>%(text)s</option>""" % self._html_escape_dictionary({
"value" : value,
"text" : text,
"selected" :selected
})
html_output += current_option
html_output += """</select>"""
return html_output
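    # Illustrative example (added for clarity; not part of the original file): with the
    # helper above, _create_select_box("format", [("MARC", 1), ("MARCXML", 2)], 2) returns
    # (wrapped here for readability):
    #   '<select name="format"><option value="1" >MARC</option>
    #    <option value="2" selected="selected">MARCXML</option></select>'
    # The stray space after value="1" comes from the empty %(selected)s slot.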
    def _html_escape_dictionary(self, dictionary_to_escape):
        """Escapes all the values in the dictionary and transforms
        them into strings that are safe to display in an HTML page.
        HTML special symbols are replaced with their safe equivalents.
        @param dictionary_to_escape: dictionary containing values
        that have to be escaped.
        @return: returns a dictionary with the same keys where the
        values are escaped strings"""
        for key in dictionary_to_escape:
            value = "%s" % dictionary_to_escape[key]
            dictionary_to_escape[key] = cgi.escape(value)
        return dictionary_to_escape
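    # Illustrative example (added): _html_escape_dictionary({'name': '<b>Job</b> & more'})
    # returns {'name': '&lt;b&gt;Job&lt;/b&gt; &amp; more'}; cgi.escape() only rewrites
    # the &, < and > characters, quotes are left untouched by default.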
def _html_escape_content(self, content_to_escape):
"""Escapes the value given as parameter and
trasforms it to a string that is safe for display in HTML page.
@param content_to_escape: contains the content that have to be escaped.
@return: string containing the escaped content
"""
text_content = "%s" % content_to_escape
escaped_content = cgi.escape(text_content)
return escaped_content
| MSusik/invenio | invenio/legacy/bibexport/templates.py | Python | gpl-2.0 | 29,937 |
from autotest.client.shared import error
from virttest import qemu_monitor
def run(test, params, env):
"""
QMP Specification test-suite: this checks if the *basic* protocol conforms
to its specification, which is file QMP/qmp-spec.txt in QEMU's source tree.
IMPORTANT NOTES:
o Most tests depend heavily on QMP's error information (eg. classes),
this might have bad implications as the error interface is going to
change in QMP
o Command testing is *not* covered in this suite. Each command has its
own specification and should be tested separately
o We use the same terminology as used by the QMP specification,
specially with regard to JSON types (eg. a Python dict is called
a json-object)
o This is divided in sub test-suites, please check the bottom of this
file to check the order in which they are run
TODO:
o Finding which test failed is not as easy as it should be
o Are all those check_*() functions really needed? Wouldn't a
specialized class (eg. a Response class) do better?
"""
def fail_no_key(qmp_dict, key):
if not isinstance(qmp_dict, dict):
raise error.TestFail("qmp_dict is not a dict (it's '%s')" %
type(qmp_dict))
if key not in qmp_dict:
raise error.TestFail("'%s' key doesn't exist in dict ('%s')" %
(key, str(qmp_dict)))
def check_dict_key(qmp_dict, key, keytype):
"""
Performs the following checks on a QMP dict key:
1. qmp_dict is a dict
2. key exists in qmp_dict
3. key is of type keytype
If any of these checks fails, error.TestFail is raised.
"""
fail_no_key(qmp_dict, key)
if not isinstance(qmp_dict[key], keytype):
raise error.TestFail("'%s' key is not of type '%s', it's '%s'" %
(key, keytype, type(qmp_dict[key])))
def check_key_is_dict(qmp_dict, key):
check_dict_key(qmp_dict, key, dict)
def check_key_is_list(qmp_dict, key):
check_dict_key(qmp_dict, key, list)
def check_key_is_str(qmp_dict, key):
check_dict_key(qmp_dict, key, unicode)
def check_str_key(qmp_dict, keyname, value=None):
check_dict_key(qmp_dict, keyname, unicode)
if value and value != qmp_dict[keyname]:
raise error.TestFail("'%s' key value '%s' should be '%s'" %
(keyname, str(qmp_dict[keyname]), str(value)))
def check_key_is_int(qmp_dict, key):
fail_no_key(qmp_dict, key)
try:
int(qmp_dict[key])
except Exception:
raise error.TestFail("'%s' key is not of type int, it's '%s'" %
(key, type(qmp_dict[key])))
def check_bool_key(qmp_dict, keyname, value=None):
check_dict_key(qmp_dict, keyname, bool)
if value and value != qmp_dict[keyname]:
raise error.TestFail("'%s' key value '%s' should be '%s'" %
(keyname, str(qmp_dict[keyname]), str(value)))
def check_success_resp(resp, empty=False):
"""
Check QMP OK response.
:param resp: QMP response
:param empty: if True, response should not contain data to return
"""
check_key_is_dict(resp, "return")
if empty and len(resp["return"]) > 0:
raise error.TestFail("success response is not empty ('%s')" %
str(resp))
def check_error_resp(resp, classname=None, datadict=None):
"""
Check QMP error response.
:param resp: QMP response
:param classname: Expected error class name
:param datadict: Expected error data dictionary
"""
check_key_is_dict(resp, "error")
check_key_is_str(resp["error"], "class")
if classname and resp["error"]["class"] != classname:
raise error.TestFail("got error class '%s' expected '%s'" %
(resp["error"]["class"], classname))
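    # For reference (illustrative shape, not asserted verbatim by this test): a QMP
    # error response looks like
    #   {"error": {"class": "GenericError", "desc": "human readable message"}}
    # optionally echoing back an "id" member when the command supplied one.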
def test_version(version):
"""
Check the QMP greeting message version key which, according to QMP's
documentation, should be:
{ "qemu": { "major": json-int, "minor": json-int, "micro": json-int }
"package": json-string }
"""
check_key_is_dict(version, "qemu")
for key in ("major", "minor", "micro"):
check_key_is_int(version["qemu"], key)
check_key_is_str(version, "package")
def test_greeting(greeting):
check_key_is_dict(greeting, "QMP")
check_key_is_dict(greeting["QMP"], "version")
check_key_is_list(greeting["QMP"], "capabilities")
def greeting_suite(monitor):
"""
Check the greeting message format, as described in the QMP
        specification section '2.2 Server Greeting'.
{ "QMP": { "version": json-object, "capabilities": json-array } }
"""
greeting = monitor.get_greeting()
test_greeting(greeting)
test_version(greeting["QMP"]["version"])
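    # For reference (illustrative values only): a greeting satisfying the checks
    # above looks like
    #   {"QMP": {"version": {"qemu": {"major": 1, "minor": 5, "micro": 0},
    #                        "package": ""},
    #            "capabilities": []}}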
def json_parsing_errors_suite(monitor):
"""
Check that QMP's parser is able to recover from parsing errors, please
check the JSON spec for more info on the JSON syntax (RFC 4627).
"""
# We're quite simple right now and the focus is on parsing errors that
        # have already bitten us in the past.
#
# TODO: The following test-cases are missing:
#
# - JSON numbers, strings and arrays
# - More invalid characters or malformed structures
# - Valid, but not obvious syntax, like zillion of spaces or
# strings with unicode chars (different suite maybe?)
bad_json = []
# A JSON value MUST be an object, array, number, string, true, false,
# or null
#
# NOTE: QMP seems to ignore a number of chars, like: | and ?
bad_json.append(":")
bad_json.append(",")
# Malformed json-objects
#
# NOTE: sending only "}" seems to break QMP
# NOTE: Duplicate keys are accepted (should it?)
bad_json.append("{ \"execute\" }")
bad_json.append("{ \"execute\": \"query-version\", }")
bad_json.append("{ 1: \"query-version\" }")
bad_json.append("{ true: \"query-version\" }")
bad_json.append("{ []: \"query-version\" }")
bad_json.append("{ {}: \"query-version\" }")
for cmd in bad_json:
resp = monitor.cmd_raw(cmd)
check_error_resp(resp, "GenericError")
def test_id_key(monitor):
"""
Check that QMP's "id" key is correctly handled.
"""
# The "id" key must be echoed back in error responses
id_key = "virt-test"
resp = monitor.cmd_qmp("eject", {"foobar": True}, q_id=id_key)
check_error_resp(resp)
check_str_key(resp, "id", id_key)
# The "id" key must be echoed back in success responses
resp = monitor.cmd_qmp("query-status", q_id=id_key)
check_success_resp(resp)
check_str_key(resp, "id", id_key)
# The "id" key can be any json-object
for id_key in (True, 1234, "string again!", [1, [], {}, True, "foo"],
{"key": {}}):
resp = monitor.cmd_qmp("query-status", q_id=id_key)
check_success_resp(resp)
if resp["id"] != id_key:
raise error.TestFail("expected id '%s' but got '%s'" %
(str(id_key), str(resp["id"])))
def test_invalid_arg_key(monitor):
"""
Currently, the only supported keys in the input object are: "execute",
"arguments" and "id". Although expansion is supported, invalid key
names must be detected.
"""
resp = monitor.cmd_obj({"execute": "eject", "foobar": True})
check_error_resp(resp, "GenericError", {"member": "foobar"})
def test_bad_arguments_key_type(monitor):
"""
The "arguments" key must be an json-object.
We use the eject command to perform the tests, but that's a random
choice, any command that accepts arguments will do, as the command
doesn't get called.
"""
for item in (True, [], 1, "foo"):
resp = monitor.cmd_obj({"execute": "eject", "arguments": item})
check_error_resp(resp, "GenericError",
{"member": "arguments", "expected": "object"})
def test_bad_execute_key_type(monitor):
"""
The "execute" key must be a json-string.
"""
for item in (False, 1, {}, []):
resp = monitor.cmd_obj({"execute": item})
check_error_resp(resp, "GenericError",
{"member": "execute", "expected": "string"})
def test_no_execute_key(monitor):
"""
The "execute" key must exist, we also test for some stupid parsing
errors.
"""
for cmd in ({}, {"execut": "qmp_capabilities"},
{"executee": "qmp_capabilities"}, {"foo": "bar"}):
resp = monitor.cmd_obj(cmd)
check_error_resp(resp) # XXX: check class and data dict?
def test_bad_input_obj_type(monitor):
"""
        The input object must be... a json-object.
"""
for cmd in ("foo", [], True, 1):
resp = monitor.cmd_obj(cmd)
check_error_resp(resp, "GenericError", {"expected": "object"})
def test_good_input_obj(monitor):
"""
Basic success tests for issuing QMP commands.
"""
# NOTE: We don't use the cmd_qmp() method here because the command
# object is in a 'random' order
resp = monitor.cmd_obj({"execute": "query-version"})
check_success_resp(resp)
resp = monitor.cmd_obj({"arguments": {}, "execute": "query-version"})
check_success_resp(resp)
idd = "1234foo"
resp = monitor.cmd_obj({"id": idd, "execute": "query-version",
"arguments": {}})
check_success_resp(resp)
check_str_key(resp, "id", idd)
# TODO: would be good to test simple argument usage, but we don't have
# a read-only command that accepts arguments.
def input_object_suite(monitor):
"""
        Check the input object format, as described in the QMP specification
section '2.3 Issuing Commands'.
{ "execute": json-string, "arguments": json-object, "id": json-value }
"""
test_good_input_obj(monitor)
test_bad_input_obj_type(monitor)
test_no_execute_key(monitor)
test_bad_execute_key_type(monitor)
test_bad_arguments_key_type(monitor)
test_id_key(monitor)
test_invalid_arg_key(monitor)
def argument_checker_suite(monitor):
"""
Check that QMP's argument checker is detecting all possible errors.
We use a number of different commands to perform the checks, but the
command used doesn't matter much as QMP performs argument checking
_before_ calling the command.
"""
# stop doesn't take arguments
resp = monitor.cmd_qmp("stop", {"foo": 1})
check_error_resp(resp, "GenericError", {"name": "foo"})
# required argument omitted
resp = monitor.cmd_qmp("screendump")
check_error_resp(resp, "GenericError", {"name": "filename"})
# 'bar' is not a valid argument
resp = monitor.cmd_qmp("screendump", {"filename": "outfile",
"bar": "bar"})
check_error_resp(resp, "GenericError", {"name": "bar"})
# test optional argument: 'force' is omitted, but it's optional, so
# the handler has to be called. Test this happens by checking an
# error that is generated by the handler itself.
resp = monitor.cmd_qmp("eject", {"device": "foobar"})
check_error_resp(resp, "DeviceNotFound")
# filename argument must be a json-string
for arg in ({}, [], 1, True):
resp = monitor.cmd_qmp("screendump", {"filename": arg})
check_error_resp(resp, "GenericError",
{"name": "filename", "expected": "string"})
# force argument must be a json-bool
for arg in ({}, [], 1, "foo"):
resp = monitor.cmd_qmp("eject", {"force": arg, "device": "foo"})
check_error_resp(resp, "GenericError",
{"name": "force", "expected": "bool"})
# val argument must be a json-int
for arg in ({}, [], True, "foo"):
resp = monitor.cmd_qmp("memsave", {"val": arg, "filename": "foo",
"size": 10})
check_error_resp(resp, "GenericError",
{"name": "val", "expected": "int"})
# value argument must be a json-number
for arg in ({}, [], True, "foo"):
resp = monitor.cmd_qmp("migrate_set_speed", {"value": arg})
check_error_resp(resp, "GenericError",
{"name": "value", "expected": "number"})
# qdev-type commands have their own argument checker, all QMP does
# is to skip its checking and pass arguments through. Check this
# works by providing invalid options to device_add and expecting
# an error message from qdev
resp = monitor.cmd_qmp("device_add", {"driver": "e1000", "foo": "bar"})
check_error_resp(resp, "GenericError",
{"device": "e1000", "property": "foo"})
def unknown_commands_suite(monitor):
"""
Check that QMP handles unknown commands correctly.
"""
        # We also call an HMP-only command, to be sure it will fail as expected
for cmd in ("bar", "query-", "query-foo", "q", "help"):
resp = monitor.cmd_qmp(cmd)
check_error_resp(resp, "CommandNotFound", {"name": cmd})
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
# Look for the first qmp monitor available, otherwise, fail the test
qmp_monitor = vm.get_monitors_by_type("qmp")
if qmp_monitor:
qmp_monitor = qmp_monitor[0]
else:
raise error.TestError('Could not find a QMP monitor, aborting test')
# Run all suites
greeting_suite(qmp_monitor)
input_object_suite(qmp_monitor)
argument_checker_suite(qmp_monitor)
unknown_commands_suite(qmp_monitor)
json_parsing_errors_suite(qmp_monitor)
# check if QMP is still alive
if not qmp_monitor.is_responsive():
raise error.TestFail('QMP monitor is not responsive after testing')
| uni-peter-zheng/tp-qemu | qemu/tests/qmp_basic.py | Python | gpl-2.0 | 14,858 |
'''OpenGL extension NV.transform_feedback
This module customises the behaviour of the
OpenGL.raw.GL.NV.transform_feedback to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a new mode to the GL, called transform feedback,
which records vertex attributes of the primitives processed by the GL.
The selected attributes are written into buffer objects, and can be
written with each attribute in a separate buffer object or with all
attributes interleaved into a single buffer object. If a geometry program
or shader is active, the primitives recorded are those emitted by the
geometry program. Otherwise, transform feedback captures primitives whose
vertex are transformed by a vertex program or shader, or by fixed-function
vertex processing. In either case, the primitives captured are those
generated prior to clipping. Transform feedback mode is capable of
capturing transformed vertex data generated by fixed-function vertex
processing, outputs from assembly vertex or geometry programs, or varying
variables emitted from GLSL vertex or geometry shaders.
The vertex data recorded in transform feedback mode is stored into buffer
objects as an array of vertex attributes. The regular representation and
the use of buffer objects allows the recorded data to be processed
directly by the GL without requiring CPU intervention to copy data. In
particular, transform feedback data can be used for vertex arrays (via
vertex buffer objects), as the source for pixel data (via pixel buffer
objects), as program constant data (via the NV_parameter_buffer_object or
EXT_bindable_uniform extension), or via any other extension that makes use
of buffer objects.
This extension introduces new query object support to allow transform
feedback mode to operate asynchronously. Query objects allow applications
to determine when transform feedback results are complete, as well as the
number of primitives processed and written back to buffer objects while in
transform feedback mode. This extension also provides a new rasterizer
discard enable, which allows applications to use transform feedback to
capture vertex attributes without rendering anything.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/transform_feedback.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.transform_feedback import *
### END AUTOGENERATED SECTION | D4wN/brickv | src/build_data/windows/OpenGL/GL/NV/transform_feedback.py | Python | gpl-2.0 | 2,570 |
#!/usr/bin/env python
print(CurrentScript().arguments)
| dthain/cctools | weaver/src/examples/arguments.py | Python | gpl-2.0 | 56 |
from __future__ import absolute_import
class DummyException(Exception):
pass
def import_global(
name, modules=None, exceptions=DummyException, locals_=None,
globals_=None, level=-1):
'''Import the requested items into the global scope
WARNING! this method _will_ overwrite your global scope
If you have a variable named "path" and you call import_global('sys')
it will be overwritten with sys.path
Args:
name (str): the name of the module to import, e.g. sys
modules (str): the modules to import, use None for everything
exception (Exception): the exception to catch, e.g. ImportError
`locals_`: the `locals()` method (in case you need a different scope)
`globals_`: the `globals()` method (in case you need a different scope)
level (int): the level to import from, this can be used for
relative imports
'''
frame = None
try:
# If locals_ or globals_ are not given, autodetect them by inspecting
# the current stack
if locals_ is None or globals_ is None:
import inspect
frame = inspect.stack()[1][0]
if locals_ is None:
locals_ = frame.f_locals
if globals_ is None:
globals_ = frame.f_globals
try:
name = name.split('.')
# Relative imports are supported (from .spam import eggs)
if not name[0]:
name = name[1:]
level = 1
# raise IOError((name, level))
module = __import__(
name=name[0] or '.',
globals=globals_,
locals=locals_,
fromlist=name[1:],
level=max(level, 0),
)
# Make sure we get the right part of a dotted import (i.e.
# spam.eggs should return eggs, not spam)
try:
for attr in name[1:]:
module = getattr(module, attr)
except AttributeError:
raise ImportError('No module named ' + '.'.join(name))
# If no list of modules is given, autodetect from either __all__
# or a dir() of the module
if not modules:
modules = getattr(module, '__all__', dir(module))
else:
modules = set(modules).intersection(dir(module))
# Add all items in modules to the global scope
for k in set(dir(module)).intersection(modules):
if k and k[0] != '_':
globals_[k] = getattr(module, k)
except exceptions as e:
return e
finally:
# Clean up, just to be sure
del name, modules, exceptions, locals_, globals_, frame
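# Illustrative usage sketch (added; not part of the original module). Names pulled
# in by import_global() land directly in the calling scope, so after the call below
# the bare names `join` and `dirname` refer to os.path.join and os.path.dirname.
if __name__ == '__main__':
    import_global('os.path', modules=['join', 'dirname'])
    print(join('spam', 'eggs'))  # noqa: F821 -- `join` was injected by import_global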
| samowitsch/bCNC | bCNC/lib/python_utils/import_.py | Python | gpl-2.0 | 2,797 |
# -*- coding: iso-8859-1 -*-
""" crypto.cipher.rijndael
Rijndael encryption algorithm
This byte oriented implementation is intended to closely
match FIPS specification for readability. It is not implemented
for performance.
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-06-01
"""
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding
class Rijndael(BlockCipher):
""" Rijndael encryption algorithm """
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
self.name = 'RIJNDAEL'
self.keySize = keySize
self.strength = keySize*8
self.blockSize = blockSize # blockSize is in bytes
self.padding = padding # change default to noPadding() to get normal ECB behavior
        assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,28 or 32 bytes'
        assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,28 or 32 bytes'
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
self.Nk = keySize/4 # Nk is the key length in 32-bit words
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
# the block (Nb) and key (Nk) sizes.
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set a key and generate the expanded key """
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
self.__expandedKey = keyExpansion(self, key)
self.reset() # BlockCipher.reset()
def encryptBlock(self, plainTextBlock):
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
self.state = self._toBlock(plainTextBlock)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
        for round in range(1,self.Nr):          # for round = 1 step 1 to Nr-1
SubBytes(self)
ShiftRows(self)
MixColumns(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
SubBytes(self)
ShiftRows(self)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
return self._toBString(self.state)
def decryptBlock(self, encryptedBlock):
""" decrypt a block (array of bytes) """
self.state = self._toBlock(encryptedBlock)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
for round in range(self.Nr-1,0,-1):
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
InvMixColumns(self)
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
return self._toBString(self.state)
def _toBlock(self, bs):
""" Convert binary string to array of bytes, state[col][row]"""
        assert ( len(bs) == 4*self.Nb ), 'Rijndael blocks must be of size blockSize'
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
def _toBString(self, block):
""" Convert block (array of bytes) to binary string """
l = []
for col in block:
for rowElement in col:
l.append(chr(rowElement))
return ''.join(l)
#-------------------------------------
""" Number of rounds Nr = NrTable[Nb][Nk]
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
------------------------------------- """
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
5: {4:11, 5:11, 6:12, 7:13, 8:14},
6: {4:12, 5:12, 6:12, 7:13, 8:14},
7: {4:13, 5:13, 6:13, 7:13, 8:14},
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
#-------------------------------------
def keyExpansion(algInstance, keyString):
""" Expand a string of size keySize into a larger array """
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
key = [ord(byte) for byte in keyString] # convert string to list
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
for i in range(Nk,Nb*(Nr+1)):
temp = w[i-1] # a four byte column
if (i%Nk) == 0 :
temp = temp[1:]+[temp[0]] # RotWord(temp)
temp = [ Sbox[byte] for byte in temp ]
temp[0] ^= Rcon[i/Nk]
elif Nk > 6 and i%Nk == 4 :
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
return w
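# For the default AES-128 parameters (Nb=4, Nk=4, Nr=10) keyExpansion() returns
# Nb*(Nr+1) = 44 four-byte words, i.e. 176 bytes of round-key material.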
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
#-------------------------------------
def AddRoundKey(algInstance, keyBlock):
""" XOR the algorithm state with a block of key material """
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] ^= keyBlock[column][row]
#-------------------------------------
def SubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
def InvSubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
#-------------------------------------
""" For each block size (Nb), the ShiftRow operation shifts row i
by the amount Ci. Note that row 0 is not shifted.
Nb C1 C2 C3
------------------- """
shiftOffset = { 4 : ( 0, 1, 2, 3),
5 : ( 0, 1, 2, 3),
6 : ( 0, 1, 2, 3),
7 : ( 0, 1, 2, 4),
8 : ( 0, 1, 3, 4) }
def ShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
    for r in range(1,4):                # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
def InvShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
    for r in range(1,4):                # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
#-------------------------------------
def MixColumns(a):
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
def InvMixColumns(a):
""" Mix the four bytes of every column in a linear way
        This is the opposite operation of MixColumn """
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
#-------------------------------------
def mul(a, b):
""" Multiply two elements of GF(2^m)
needed for MixColumn and InvMixColumn """
if (a !=0 and b!=0):
return Alogtable[(Logtable[a] + Logtable[b])%255]
else:
return 0
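# Worked check (illustrative): mul(0x57, 0x83) == 0xc1, matching the GF(2^8)
# multiplication example in FIPS-197 section 4.2, since Logtable[0x57] = 98,
# Logtable[0x83] = 80 and Alogtable[(98 + 80) % 255] = 0xc1.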
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
| felipenaselva/felipe.repository | script.module.cryptopy/lib/crypto/cipher/rijndael.py | Python | gpl-2.0 | 14,723 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial.distreporter}.
"""
from cStringIO import StringIO
from twisted.trial._dist.distreporter import DistReporter
from twisted.trial.unittest import TestCase
from twisted.trial.reporter import TreeReporter
class DistReporterTestCase(TestCase):
"""
Tests for L{DistReporter}.
"""
def setUp(self):
self.stream = StringIO()
self.distReporter = DistReporter(TreeReporter(self.stream))
self.test = TestCase()
def test_startSuccessStop(self):
"""
Success output only gets sent to the stream after the test has stopped.
"""
self.distReporter.startTest(self.test)
self.assertEqual(self.stream.getvalue(), "")
self.distReporter.addSuccess(self.test)
self.assertEqual(self.stream.getvalue(), "")
self.distReporter.stopTest(self.test)
self.assertNotEqual(self.stream.getvalue(), "")
def test_startErrorStop(self):
"""
Error output only gets sent to the stream after the test has stopped.
"""
self.distReporter.startTest(self.test)
self.assertEqual(self.stream.getvalue(), "")
self.distReporter.addError(self.test, "error")
self.assertEqual(self.stream.getvalue(), "")
self.distReporter.stopTest(self.test)
self.assertNotEqual(self.stream.getvalue(), "")
def test_forwardedMethods(self):
"""
Calling methods of L{DistReporter} add calls to the running queue of
the test.
"""
self.distReporter.startTest(self.test)
self.distReporter.addFailure(self.test, "foo")
self.distReporter.addError(self.test, "bar")
self.distReporter.addSkip(self.test, "egg")
self.distReporter.addUnexpectedSuccess(self.test, "spam")
self.distReporter.addExpectedFailure(self.test, "err", "foo")
self.assertEqual(len(self.distReporter.running[self.test.id()]), 6)
| geodrinx/gearthview | ext-libs/twisted/trial/_dist/test/test_distreporter.py | Python | gpl-3.0 | 2,019 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <[email protected]>
# (c) 2019, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_ssh_key_info
short_description: Get info about the Vultr SSH keys available.
description:
  - Get info about SSH keys available.
version_added: "2.9"
author:
- "Yanis Guenane (@Spredzy)"
- "René Moser (@resmo)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Get Vultr SSH key info
  vultr_ssh_key_info:
  register: result
- name: Print the info
debug:
var: result.vultr_ssh_key_info
'''
RETURN = r'''
---
vultr_api:
  description: Response from Vultr API with a few additions/modifications
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_ssh_key_info:
description: Response from Vultr API as list
returned: success
type: complex
contains:
id:
description: ID of the ssh key
returned: success
type: str
sample: 5904bc6ed9234
name:
description: Name of the ssh key
returned: success
type: str
sample: my ssh key
date_created:
description: Date the ssh key was created
returned: success
type: str
sample: "2017-08-26 12:47:48"
ssh_key:
description: SSH public key
returned: success
type: str
sample: "ssh-rsa AA... [email protected]"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrSSHKeyInfo(Vultr):
def __init__(self, module):
super(AnsibleVultrSSHKeyInfo, self).__init__(module, "vultr_ssh_key_info")
self.returns = {
'SSHKEYID': dict(key='id'),
'name': dict(),
'ssh_key': dict(),
'date_created': dict(),
}
def get_sshkeys(self):
return self.api_query(path="/v1/sshkey/list")
def parse_keys_list(keys_list):
if not keys_list:
return []
return [key for id, key in keys_list.items()]
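# Illustrative note: the Vultr v1 API returns the SSH keys as a mapping keyed
# by SSH key ID, e.g. (hypothetical data):
#   {'5904bc6ed9234': {'SSHKEYID': '5904bc6ed9234', 'name': 'my ssh key', ...}}
# parse_keys_list() flattens that mapping into a plain list of the value dicts
# so it can be returned as vultr_ssh_key_info.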
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
sshkey_info = AnsibleVultrSSHKeyInfo(module)
result = sshkey_info.get_result(parse_keys_list(sshkey_info.get_sshkeys()))
module.exit_json(**result)
if __name__ == '__main__':
main()
| aperigault/ansible | lib/ansible/modules/cloud/vultr/vultr_ssh_key_info.py | Python | gpl-3.0 | 3,294 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Uwe Hermann <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
#
# Chip specific properties:
#
# - vendor: chip manufacturer
# - model: chip model
# - size: total EEPROM size (in number of bytes)
# - page_size: page size (in number of bytes)
# - page_wraparound: Whether writes wrap around at page boundaries
# - addr_bytes: number of EEPROM address bytes used
# - addr_pins: number of address pins (A0/A1/A2) on this chip
# - max_speed: max. supported I²C speed (in kHz)
#
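# For example, a decoder option value of 'microchip_24lc64' resolves to that
# chip's layout parameters with a plain dict lookup (illustrative):
#
#   chips['microchip_24lc64']['addr_bytes']   # -> 2
#   chips['microchip_24lc64']['page_size']    # -> 32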
chips = {
# Generic chip (128 bytes, 8 bytes page size)
'generic': {
'vendor': '',
'model': 'Generic',
'size': 128,
'page_size': 8,
'page_wraparound': True,
'addr_bytes': 1,
'addr_pins': 3,
'max_speed': 400,
},
# Microchip
'microchip_24aa65': {
'vendor': 'Microchip',
'model': '24AA65',
'size': 8 * 1024,
'page_size': 64, # Actually 8, but there are 8 pages of "input cache"
'page_wraparound': True,
'addr_bytes': 2,
'addr_pins': 3,
'max_speed': 400,
},
'microchip_24lc65': {
'vendor': 'Microchip',
'model': '24LC65',
'size': 8 * 1024,
'page_size': 64, # Actually 8, but there are 8 pages of "input cache"
'page_wraparound': True,
'addr_bytes': 2,
'addr_pins': 3,
'max_speed': 400,
},
'microchip_24c65': {
'vendor': 'Microchip',
'model': '24C65',
'size': 8 * 1024,
'page_size': 64, # Actually 8, but there are 8 pages of "input cache"
'page_wraparound': True,
'addr_bytes': 2,
'addr_pins': 3,
'max_speed': 400,
},
'microchip_24aa64': {
'vendor': 'Microchip',
'model': '24AA64',
'size': 8 * 1024,
'page_size': 32,
'page_wraparound': True,
'addr_bytes': 2,
'addr_pins': 3,
'max_speed': 400, # 100 for VCC < 2.5V
},
'microchip_24lc64': {
'vendor': 'Microchip',
'model': '24LC64',
'size': 8 * 1024,
'page_size': 32,
'page_wraparound': True,
'addr_bytes': 2,
'addr_pins': 3,
'max_speed': 400,
},
'microchip_24aa02uid': {
'vendor': 'Microchip',
'model': '24AA02UID',
'size': 256,
'page_size': 8,
'page_wraparound': True,
'addr_bytes': 1,
'addr_pins': 0, # Pins A0, A1, A2 not used
'max_speed': 400,
},
'microchip_24aa025uid': {
'vendor': 'Microchip',
'model': '24AA025UID',
'size': 256,
'page_size': 16,
'page_wraparound': True,
'addr_bytes': 1,
'addr_pins': 3,
'max_speed': 400,
},
'microchip_24aa025uid_sot23': {
'vendor': 'Microchip',
'model': '24AA025UID (SOT-23)',
'size': 256,
'page_size': 16,
'page_wraparound': True,
'addr_bytes': 1,
'addr_pins': 2, # SOT-23 package: A2 not available
'max_speed': 400,
},
# ON Semiconductor
'onsemi_cat24c256': {
'vendor': 'ON Semiconductor',
'model': 'CAT24C256',
'size': 32 * 1024,
'page_size': 64,
'page_wraparound': True,
'addr_bytes': 2,
'addr_pins': 3,
'max_speed': 1000,
},
'onsemi_cat24m01': {
'vendor': 'ON Semiconductor',
'model': 'CAT24M01',
'size': 128 * 1024,
'page_size': 256,
'page_wraparound': True,
'addr_bytes': 2,
'addr_pins': 2, # Pin A0 not connected
'max_speed': 1000,
},
# Siemens
'siemens_slx_24c01': {
'vendor': 'Siemens',
'model': 'SLx 24C01',
'size': 128,
'page_size': 8,
'page_wraparound': True,
'addr_bytes': 1,
'addr_pins': 0, # Pins A0, A1, A2 are not connected (NC)
'max_speed': 400,
},
'siemens_slx_24c02': {
'vendor': 'Siemens',
'model': 'SLx 24C02',
'size': 256,
'page_size': 8,
'page_wraparound': True,
'addr_bytes': 1,
'addr_pins': 0, # Pins A0, A1, A2 are not connected (NC)
'max_speed': 400,
},
# ST
'st_m24c01': {
'vendor': 'ST',
'model': 'M24C01',
'size': 128,
'page_size': 16,
'page_wraparound': True,
'addr_bytes': 1,
'addr_pins': 3, # Called E0, E1, E2 on this chip.
'max_speed': 400,
},
'st_m24c02': {
'vendor': 'ST',
'model': 'M24C02',
'size': 256,
'page_size': 16,
'page_wraparound': True,
'addr_bytes': 1,
'addr_pins': 3, # Called E0, E1, E2 on this chip.
'max_speed': 400,
},
'st_m24c32': {
'vendor': 'ST',
'model': 'M24C32',
'size': 4 * 1024,
'page_size': 32,
'page_wraparound': True,
'addr_bytes': 2,
'addr_pins': 3, # Called E0, E1, E2 on this chip.
'max_speed': 1000,
},
# Xicor
'xicor_x24c02': {
'vendor': 'Xicor',
'model': 'X24C02',
'size': 256,
'page_size': 4,
'page_wraparound': True,
'addr_bytes': 1,
'addr_pins': 3,
'max_speed': 100,
},
}
| sigrokproject/libsigrokdecode | decoders/eeprom24xx/lists.py | Python | gpl-3.0 | 5,965 |
#!/usr/bin/env /usr/bin/python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from gnuradio import gr, filter
from . import dtv_python as dtv
# FIXME move these into separate constants module
ATSC_CHANNEL_BW = 6.0e6
ATSC_SYMBOL_RATE = 4.5e6/286*684 # ~10.76 Mbaud
ATSC_RRC_SYMS = 8 # filter kernel extends over 2N+1 symbols
class atsc_rx_filter(gr.hier_block2):
def __init__(self, input_rate, sps):
gr.hier_block2.__init__(self, "atsc_rx_filter",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
# Create matched RX filter with RRC response for fractional
# interpolator.
nfilts = 16
output_rate = ATSC_SYMBOL_RATE*sps # Desired oversampled sample rate
filter_rate = input_rate*nfilts
symbol_rate = ATSC_SYMBOL_RATE / 2.0 # One-sided bandwidth of sideband
excess_bw = 0.1152 #1.0-(0.5*ATSC_SYMBOL_RATE/ATSC_CHANNEL_BW) # ~10.3%
ntaps = int((2*ATSC_RRC_SYMS+1)*sps*nfilts)
interp = output_rate / input_rate
gain = nfilts*symbol_rate/filter_rate
rrc_taps = filter.firdes.root_raised_cosine(gain, # Filter gain
filter_rate, # PFB filter prototype rate
symbol_rate, # ATSC symbol rate
excess_bw, # ATSC RRC excess bandwidth
ntaps) # Length of filter
pfb = filter.pfb_arb_resampler_ccf(interp, rrc_taps, nfilts)
# Connect pipeline
self.connect(self, pfb, self)
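# Usage sketch (the input rate here is illustrative, not prescriptive): for a
# source running at 6.4 MS/s and 2 samples per ATSC symbol, one would write
#   atsc_rx_filter(input_rate=6.4e6, sps=2)
# which applies the RRC matched filter and resamples to sps * ATSC_SYMBOL_RATE.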
| mrjacobagilbert/gnuradio | gr-dtv/python/dtv/atsc_rx_filter.py | Python | gpl-3.0 | 2,537 |
##
## This file is part of the libsigrok project.
##
## Copyright (C) 2014 Martin Ling <[email protected]>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from __future__ import print_function
from xml.etree import ElementTree
import sys, os
language, input_file = sys.argv[1:3]
if len(sys.argv) == 4:
mode = sys.argv[3]
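# Invocation sketch (arguments inferred from the sys.argv handling above):
#   python doc.py python <doxygen-xml-dir>/index.xml [start|end]
#   python doc.py java   <doxygen-xml-dir>/index.xml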
input_dir = os.path.dirname(input_file)
index = ElementTree.parse(input_file)
def get_text(node):
paras = node.findall('para')
return str.join('\n\n', [p.text.rstrip() for p in paras if p.text])
for compound in index.findall('compound'):
if compound.attrib['kind'] != 'class':
continue
class_name = compound.find('name').text
if not class_name.startswith('sigrok::'):
continue
trimmed_name = class_name.split('::')[1]
doc = ElementTree.parse("%s/%s.xml" % (input_dir, compound.attrib['refid']))
cls = doc.find('compounddef')
brief = get_text(cls.find('briefdescription'))
if brief:
if language == 'python':
print('%%feature("docstring") %s "%s";' % (class_name, brief))
elif language == 'java':
print('%%typemap(javaclassmodifiers) %s "/** %s */\npublic class"' % (
class_name, brief))
constants = []
for section in cls.findall('sectiondef'):
kind = section.attrib['kind']
if kind not in ('public-func', 'public-static-attrib'):
continue
for member in section.findall('memberdef'):
member_name = member.find('name').text
brief = get_text(member.find('briefdescription')).replace('"', '\\"')
parameters = {}
for para in member.find('detaileddescription').findall('para'):
paramlist = para.find('parameterlist')
if paramlist is not None:
for param in paramlist.findall('parameteritem'):
namelist = param.find('parameternamelist')
name = namelist.find('parametername').text
description = get_text(param.find('parameterdescription'))
if description:
parameters[name] = description
if brief:
if language == 'python' and kind == 'public-func':
print(str.join('\n', [
'%%feature("docstring") %s::%s "%s' % (
class_name, member_name, brief)] + [
'@param %s %s' % (name, desc)
for name, desc in parameters.items()]) + '";')
elif language == 'java' and kind == 'public-func':
print(str.join('\n', [
'%%javamethodmodifiers %s::%s "/** %s' % (
class_name, member_name, brief)] + [
' * @param %s %s' % (name, desc)
for name, desc in parameters.items()])
+ ' */\npublic"')
elif kind == 'public-static-attrib':
constants.append((member_name, brief))
if language == 'java' and constants:
print('%%typemap(javacode) %s %%{' % class_name)
for member_name, brief in constants:
print(' /** %s */\n public static final %s %s = new %s(classesJNI.%s_%s_get(), false);\n' % (
brief, trimmed_name, member_name, trimmed_name,
trimmed_name, member_name))
print('%}')
elif language == 'python' and constants:
if mode == 'start':
print('%%extend %s {\n%%pythoncode %%{' % class_name)
for member_name, brief in constants:
print(' ## @brief %s\n %s = None' % (brief, member_name))
print('%}\n}')
elif mode == 'end':
print('%pythoncode %{')
for member_name, brief in constants:
print('%s.%s.__doc__ = """%s"""' % (
trimmed_name, member_name, brief))
print('%}')
| mtitinger/libsigrok | bindings/swig/doc.py | Python | gpl-3.0 | 4,655 |
# -*- coding: utf-8 -*-
from datetime import date, datetime, timedelta
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.exceptions import UserError
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
class MaintenanceStage(models.Model):
""" Model for case stages. This models the main stages of a Maintenance Request management flow. """
_name = 'maintenance.stage'
_description = 'Maintenance Stage'
_order = 'sequence, id'
name = fields.Char('Name', required=True, translate=True)
sequence = fields.Integer('Sequence', default=20)
fold = fields.Boolean('Folded in Maintenance Pipe')
done = fields.Boolean('Request Done')
class MaintenanceEquipmentCategory(models.Model):
_name = 'maintenance.equipment.category'
_inherit = ['mail.alias.mixin', 'mail.thread']
_description = 'Asset Category'
@api.one
@api.depends('equipment_ids')
def _compute_fold(self):
self.fold = False if self.equipment_count else True
name = fields.Char('Category Name', required=True, translate=True)
technician_user_id = fields.Many2one('res.users', 'Responsible', track_visibility='onchange', default=lambda self: self.env.uid, oldname='user_id')
color = fields.Integer('Color Index')
note = fields.Text('Comments', translate=True)
equipment_ids = fields.One2many('maintenance.equipment', 'category_id', string='Equipments', copy=False)
equipment_count = fields.Integer(string="Equipment", compute='_compute_equipment_count')
maintenance_ids = fields.One2many('maintenance.request', 'category_id', copy=False)
maintenance_count = fields.Integer(string="Maintenance", compute='_compute_maintenance_count')
alias_id = fields.Many2one(
'mail.alias', 'Alias', ondelete='restrict', required=True,
help="Email alias for this equipment category. New emails will automatically "
"create new maintenance request for this equipment category.")
fold = fields.Boolean(string='Folded in Maintenance Pipe', compute='_compute_fold', store=True)
@api.multi
def _compute_equipment_count(self):
equipment_data = self.env['maintenance.equipment'].read_group([('category_id', 'in', self.ids)], ['category_id'], ['category_id'])
mapped_data = dict([(m['category_id'][0], m['category_id_count']) for m in equipment_data])
for category in self:
category.equipment_count = mapped_data.get(category.id, 0)
@api.multi
def _compute_maintenance_count(self):
maintenance_data = self.env['maintenance.request'].read_group([('category_id', 'in', self.ids)], ['category_id'], ['category_id'])
mapped_data = dict([(m['category_id'][0], m['category_id_count']) for m in maintenance_data])
for category in self:
category.maintenance_count = mapped_data.get(category.id, 0)
@api.model
def create(self, vals):
self = self.with_context(alias_model_name='maintenance.request', alias_parent_model_name=self._name)
if not vals.get('alias_name'):
vals['alias_name'] = vals.get('name')
category_id = super(MaintenanceEquipmentCategory, self).create(vals)
category_id.alias_id.write({'alias_parent_thread_id': category_id.id, 'alias_defaults': {'category_id': category_id.id}})
return category_id
@api.multi
def unlink(self):
MailAlias = self.env['mail.alias']
for category in self:
if category.equipment_ids or category.maintenance_ids:
raise UserError(_("You cannot delete an equipment category containing equipments or maintenance requests."))
MailAlias += category.alias_id
res = super(MaintenanceEquipmentCategory, self).unlink()
MailAlias.unlink()
return res
def get_alias_model_name(self, vals):
return vals.get('alias_model', 'maintenance.equipment')
def get_alias_values(self):
values = super(MaintenanceEquipmentCategory, self).get_alias_values()
values['alias_defaults'] = {'category_id': self.id}
return values
class MaintenanceEquipment(models.Model):
_name = 'maintenance.equipment'
_inherit = ['mail.thread', 'mail.activity.mixin']
_description = 'Equipment'
@api.multi
def _track_subtype(self, init_values):
self.ensure_one()
if 'owner_user_id' in init_values and self.owner_user_id:
return 'maintenance.mt_mat_assign'
return super(MaintenanceEquipment, self)._track_subtype(init_values)
@api.multi
def name_get(self):
result = []
for record in self:
if record.name and record.serial_no:
result.append((record.id, record.name + '/' + record.serial_no))
if record.name and not record.serial_no:
result.append((record.id, record.name))
return result
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
recs = self.browse()
if name:
recs = self.search([('name', '=', name)] + args, limit=limit)
if not recs:
recs = self.search([('name', operator, name)] + args, limit=limit)
return recs.name_get()
name = fields.Char('Equipment Name', required=True, translate=True)
active = fields.Boolean(default=True)
technician_user_id = fields.Many2one('res.users', string='Technician', track_visibility='onchange', oldname='user_id')
owner_user_id = fields.Many2one('res.users', string='Owner', track_visibility='onchange')
category_id = fields.Many2one('maintenance.equipment.category', string='Equipment Category',
track_visibility='onchange', group_expand='_read_group_category_ids')
partner_id = fields.Many2one('res.partner', string='Vendor', domain="[('supplier', '=', 1)]")
partner_ref = fields.Char('Vendor Reference')
location = fields.Char('Location')
model = fields.Char('Model')
serial_no = fields.Char('Serial Number', copy=False)
assign_date = fields.Date('Assigned Date', track_visibility='onchange')
cost = fields.Float('Cost')
note = fields.Text('Note')
warranty = fields.Date('Warranty')
color = fields.Integer('Color Index')
scrap_date = fields.Date('Scrap Date')
maintenance_ids = fields.One2many('maintenance.request', 'equipment_id')
maintenance_count = fields.Integer(compute='_compute_maintenance_count', string="Maintenance", store=True)
maintenance_open_count = fields.Integer(compute='_compute_maintenance_count', string="Current Maintenance", store=True)
period = fields.Integer('Days between each preventive maintenance')
next_action_date = fields.Date(compute='_compute_next_maintenance', string='Date of the next preventive maintenance', store=True)
maintenance_team_id = fields.Many2one('maintenance.team', string='Maintenance Team')
maintenance_duration = fields.Float(help="Maintenance Duration in hours.")
@api.depends('period', 'maintenance_ids.request_date', 'maintenance_ids.close_date')
def _compute_next_maintenance(self):
date_now = fields.Date.context_today(self)
for equipment in self.filtered(lambda x: x.period > 0):
next_maintenance_todo = self.env['maintenance.request'].search([
('equipment_id', '=', equipment.id),
('maintenance_type', '=', 'preventive'),
('stage_id.done', '!=', True),
('close_date', '=', False)], order="request_date asc", limit=1)
last_maintenance_done = self.env['maintenance.request'].search([
('equipment_id', '=', equipment.id),
('maintenance_type', '=', 'preventive'),
('stage_id.done', '=', True),
('close_date', '!=', False)], order="close_date desc", limit=1)
if next_maintenance_todo and last_maintenance_done:
next_date = next_maintenance_todo.request_date
date_gap = fields.Date.from_string(next_maintenance_todo.request_date) - fields.Date.from_string(last_maintenance_done.close_date)
                # If the gap between last_maintenance_done and next_maintenance_todo is bigger than 2 times the period and the next request is in the future
                # We use 2 times the period to avoid creating a request too close to one that was created manually
if date_gap > timedelta(0) and date_gap > timedelta(days=equipment.period) * 2 and fields.Date.from_string(next_maintenance_todo.request_date) > fields.Date.from_string(date_now):
# If the new date still in the past, we set it for today
if fields.Date.from_string(last_maintenance_done.close_date) + timedelta(days=equipment.period) < fields.Date.from_string(date_now):
next_date = date_now
else:
next_date = fields.Date.to_string(fields.Date.from_string(last_maintenance_done.close_date) + timedelta(days=equipment.period))
elif next_maintenance_todo:
next_date = next_maintenance_todo.request_date
date_gap = fields.Date.from_string(next_maintenance_todo.request_date) - fields.Date.from_string(date_now)
                # If the next maintenance to do is in the future by more than 2 times the period, we insert a new request
                # We use 2 times the period to avoid creating a request too close to one that was created manually
if date_gap > timedelta(0) and date_gap > timedelta(days=equipment.period) * 2:
next_date = fields.Date.to_string(fields.Date.from_string(date_now)+timedelta(days=equipment.period))
elif last_maintenance_done:
next_date = fields.Date.from_string(last_maintenance_done.close_date)+timedelta(days=equipment.period)
# If when we add the period to the last maintenance done and we still in past, we plan it for today
if next_date < fields.Date.from_string(date_now):
next_date = date_now
else:
next_date = fields.Date.to_string(fields.Date.from_string(date_now) + timedelta(days=equipment.period))
equipment.next_action_date = next_date
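        # Worked example (hypothetical dates): with period = 30 days and the
        # last preventive request closed on 2018-01-01, next_action_date is
        # 2018-01-31; if that date is already in the past when this compute
        # runs, it falls back to today. With no preventive history at all,
        # next_action_date is today + 30 days.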
@api.one
@api.depends('maintenance_ids.stage_id.done')
def _compute_maintenance_count(self):
self.maintenance_count = len(self.maintenance_ids)
self.maintenance_open_count = len(self.maintenance_ids.filtered(lambda x: not x.stage_id.done))
@api.onchange('category_id')
def _onchange_category_id(self):
self.technician_user_id = self.category_id.technician_user_id
_sql_constraints = [
('serial_no', 'unique(serial_no)', "Another asset already exists with this serial number!"),
]
@api.model
def create(self, vals):
equipment = super(MaintenanceEquipment, self).create(vals)
if equipment.owner_user_id:
equipment.message_subscribe_users(user_ids=[equipment.owner_user_id.id])
return equipment
@api.multi
def write(self, vals):
if vals.get('owner_user_id'):
self.message_subscribe_users(user_ids=[vals['owner_user_id']])
return super(MaintenanceEquipment, self).write(vals)
@api.model
def _read_group_category_ids(self, categories, domain, order):
""" Read group customization in order to display all the categories in
the kanban view, even if they are empty.
"""
category_ids = categories._search([], order=order, access_rights_uid=SUPERUSER_ID)
return categories.browse(category_ids)
def _create_new_request(self, date):
self.ensure_one()
self.env['maintenance.request'].create({
'name': _('Preventive Maintenance - %s') % self.name,
'request_date': date,
'schedule_date': date,
'category_id': self.category_id.id,
'equipment_id': self.id,
'maintenance_type': 'preventive',
'owner_user_id': self.owner_user_id.id,
'technician_user_id': self.technician_user_id.id,
'maintenance_team_id': self.maintenance_team_id.id,
'duration': self.maintenance_duration,
})
@api.model
def _cron_generate_requests(self):
"""
            Generates a maintenance request on the next_action_date, or today if none exists
"""
for equipment in self.search([('period', '>', 0)]):
next_requests = self.env['maintenance.request'].search([('stage_id.done', '=', False),
('equipment_id', '=', equipment.id),
('maintenance_type', '=', 'preventive'),
('request_date', '=', equipment.next_action_date)])
if not next_requests:
equipment._create_new_request(equipment.next_action_date)
class MaintenanceRequest(models.Model):
_name = 'maintenance.request'
_inherit = ['mail.thread', 'mail.activity.mixin']
_description = 'Maintenance Requests'
_order = "id desc"
@api.returns('self')
def _default_stage(self):
return self.env['maintenance.stage'].search([], limit=1)
@api.multi
def _track_subtype(self, init_values):
self.ensure_one()
if 'stage_id' in init_values and self.stage_id.sequence <= 1:
return 'maintenance.mt_req_created'
elif 'stage_id' in init_values and self.stage_id.sequence > 1:
return 'maintenance.mt_req_status'
return super(MaintenanceRequest, self)._track_subtype(init_values)
def _get_default_team_id(self):
return self.env.ref('maintenance.equipment_team_maintenance', raise_if_not_found=False)
name = fields.Char('Subjects', required=True)
description = fields.Text('Description')
request_date = fields.Date('Request Date', track_visibility='onchange', default=fields.Date.context_today,
help="Date requested for the maintenance to happen")
owner_user_id = fields.Many2one('res.users', string='Created by', default=lambda s: s.env.uid)
category_id = fields.Many2one('maintenance.equipment.category', related='equipment_id.category_id', string='Category', store=True, readonly=True)
equipment_id = fields.Many2one('maintenance.equipment', string='Equipment', index=True)
technician_user_id = fields.Many2one('res.users', string='Owner', track_visibility='onchange', oldname='user_id')
stage_id = fields.Many2one('maintenance.stage', string='Stage', track_visibility='onchange',
group_expand='_read_group_stage_ids', default=_default_stage)
priority = fields.Selection([('0', 'Very Low'), ('1', 'Low'), ('2', 'Normal'), ('3', 'High')], string='Priority')
color = fields.Integer('Color Index')
close_date = fields.Date('Close Date', help="Date the maintenance was finished. ")
kanban_state = fields.Selection([('normal', 'In Progress'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')],
string='Kanban State', required=True, default='normal', track_visibility='onchange')
# active = fields.Boolean(default=True, help="Set active to false to hide the maintenance request without deleting it.")
archive = fields.Boolean(default=False, help="Set archive to true to hide the maintenance request without deleting it.")
maintenance_type = fields.Selection([('corrective', 'Corrective'), ('preventive', 'Preventive')], string='Maintenance Type', default="corrective")
schedule_date = fields.Datetime('Scheduled Date', help="Date the maintenance team plans the maintenance. It should not differ much from the Request Date. ")
maintenance_team_id = fields.Many2one('maintenance.team', string='Team', required=True, default=_get_default_team_id)
duration = fields.Float(help="Duration in minutes and seconds.")
@api.multi
def archive_equipment_request(self):
self.write({'archive': True})
@api.multi
def reset_equipment_request(self):
""" Reinsert the maintenance request into the maintenance pipe in the first stage"""
first_stage_obj = self.env['maintenance.stage'].search([], order="sequence asc", limit=1)
# self.write({'active': True, 'stage_id': first_stage_obj.id})
self.write({'archive': False, 'stage_id': first_stage_obj.id})
@api.onchange('equipment_id')
def onchange_equipment_id(self):
if self.equipment_id:
self.technician_user_id = self.equipment_id.technician_user_id if self.equipment_id.technician_user_id else self.equipment_id.category_id.technician_user_id
self.category_id = self.equipment_id.category_id
if self.equipment_id.maintenance_team_id:
self.maintenance_team_id = self.equipment_id.maintenance_team_id.id
@api.onchange('category_id')
def onchange_category_id(self):
if not self.technician_user_id or not self.equipment_id or (self.technician_user_id and not self.equipment_id.technician_user_id):
self.technician_user_id = self.category_id.technician_user_id
@api.model
def create(self, vals):
# context: no_log, because subtype already handle this
self = self.with_context(mail_create_nolog=True)
request = super(MaintenanceRequest, self).create(vals)
if request.owner_user_id or request.technician_user_id:
request._add_followers()
if request.equipment_id and not request.maintenance_team_id:
request.maintenance_team_id = request.equipment_id.maintenance_team_id
return request
@api.multi
def write(self, vals):
# Overridden to reset the kanban_state to normal whenever
# the stage (stage_id) of the Maintenance Request changes.
if vals and 'kanban_state' not in vals and 'stage_id' in vals:
vals['kanban_state'] = 'normal'
res = super(MaintenanceRequest, self).write(vals)
if vals.get('owner_user_id') or vals.get('technician_user_id'):
self._add_followers()
if self.stage_id.done and 'stage_id' in vals:
self.write({'close_date': fields.Date.today()})
return res
def _add_followers(self):
for request in self:
user_ids = (request.owner_user_id + request.technician_user_id).ids
request.message_subscribe_users(user_ids=user_ids)
@api.model
def _read_group_stage_ids(self, stages, domain, order):
""" Read group customization in order to display all the stages in the
kanban view, even if they are empty
"""
stage_ids = stages._search([], order=order, access_rights_uid=SUPERUSER_ID)
return stages.browse(stage_ids)
class MaintenanceTeam(models.Model):
_name = 'maintenance.team'
_description = 'Maintenance Teams'
name = fields.Char(required=True)
member_ids = fields.Many2many('res.users', 'maintenance_team_users_rel', string="Team Members")
color = fields.Integer("Color Index", default=1)
request_ids = fields.One2many('maintenance.request', 'maintenance_team_id', copy=False)
equipment_ids = fields.One2many('maintenance.equipment', 'maintenance_team_id', copy=False)
# For the dashboard only
todo_request_ids = fields.One2many('maintenance.request', copy=False, compute='_compute_todo_requests')
todo_request_count = fields.Integer(compute='_compute_todo_requests')
todo_request_count_date = fields.Integer(compute='_compute_todo_requests')
todo_request_count_high_priority = fields.Integer(compute='_compute_todo_requests')
todo_request_count_block = fields.Integer(compute='_compute_todo_requests')
todo_request_count_unscheduled = fields.Integer(compute='_compute_todo_requests')
@api.one
@api.depends('request_ids.stage_id.done')
def _compute_todo_requests(self):
self.todo_request_ids = self.request_ids.filtered(lambda e: e.stage_id.done==False)
self.todo_request_count = len(self.todo_request_ids)
self.todo_request_count_date = len(self.todo_request_ids.filtered(lambda e: e.schedule_date != False))
self.todo_request_count_high_priority = len(self.todo_request_ids.filtered(lambda e: e.priority == '3'))
self.todo_request_count_block = len(self.todo_request_ids.filtered(lambda e: e.kanban_state == 'blocked'))
self.todo_request_count_unscheduled = len(self.todo_request_ids.filtered(lambda e: not e.schedule_date))
@api.one
@api.depends('equipment_ids')
def _compute_equipment(self):
self.equipment_count = len(self.equipment_ids)
| richard-willowit/odoo | addons/maintenance/models/maintenance.py | Python | gpl-3.0 | 20,858 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
VERSION = '1.0.0'
README = open('README.rst').read()
setup(
name='mach',
description='Generic command line command dispatching framework.',
long_description=README,
license='MPL 2.0',
author='Gregory Szorc',
author_email='[email protected]',
url='https://developer.mozilla.org/en-US/docs/Developer_Guide/mach',
packages=['mach', 'mach.mixin'],
version=VERSION,
classifiers=[
'Environment :: Console',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
install_requires=[
'blessings',
'mozfile',
'mozprocess',
'six',
],
tests_require=['mock'],
)
| CYBAI/servo | python/mach/setup.py | Python | mpl-2.0 | 1,204 |
"""add timezone to each station
Revision ID: 4d0be367f095
Revises: 6722b0ef4e1
Create Date: 2014-03-19 16:43:00.326820
"""
# revision identifiers, used by Alembic.
revision = '4d0be367f095'
down_revision = '6722b0ef4e1'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('radio_station', sa.Column('timezone', sa.String(length=32), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('radio_station', 'timezone')
### end Alembic commands ###
| rootio/rootio_web | alembic/versions/4d0be367f095_station_timezone.py | Python | agpl-3.0 | 644 |
# -*- coding: utf-8 -*-
from . import models
from .hooks import set_default_map_settings
| brain-tec/partner-contact | partner_external_map/__init__.py | Python | agpl-3.0 | 90 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
from openerp import models, fields, api
class PostlogisticsLicense(models.Model):
_name = 'postlogistics.license'
_description = 'PostLogistics Franking License'
_order = 'sequence'
name = fields.Char(string='Description',
translate=True,
required=True)
number = fields.Char(string='Number',
required=True)
company_id = fields.Many2one(comodel_name='res.company',
string='Company',
required=True)
sequence = fields.Integer(
string='Sequence',
help="Gives the sequence on company to define priority on license "
"when multiple licenses are available for the same group of "
"service."
)
class PostlogisticsServiceGroup(models.Model):
_name = 'postlogistics.service.group'
_description = 'PostLogistics Service Group'
name = fields.Char(string='Description', translate=True, required=True)
group_extid = fields.Integer(string='Group ID', required=True)
postlogistics_license_ids = fields.Many2many(
comodel_name='postlogistics.license',
relation='postlogistics_license_service_groups_rel',
column1='license_id',
column2='group_id',
string='PostLogistics Franking License')
_sql_constraints = [
('group_extid_uniq', 'unique(group_extid)',
"A service group ID must be unique.")
]
POSTLOGISTIC_TYPES = [
('label_layout', 'Label Layout'),
('output_format', 'Output Format'),
('resolution', 'Output Resolution'),
('basic', 'Basic Service'),
('additional', 'Additional Service'),
('delivery', 'Delivery Instructions')
]
class DeliveryCarrierTemplateOption(models.Model):
""" Set name translatable and add service group """
_inherit = 'delivery.carrier.template.option'
name = fields.Char(translate=True)
postlogistics_service_group_id = fields.Many2one(
comodel_name='postlogistics.service.group',
string='PostLogistics Service Group',
)
postlogistics_type = fields.Selection(
selection=POSTLOGISTIC_TYPES,
string="PostLogistics option type",
)
    # relation tables to manage compatibility between basic services
# and other services
postlogistics_basic_service_ids = fields.Many2many(
comodel_name='delivery.carrier.template.option',
relation='postlogistics_compatibility_service_rel',
column1='service_id',
column2='basic_service_id',
string="Basic Services",
domain=[('postlogistics_type', '=', 'basic')],
help="List of basic service for which this service is compatible",
)
postlogistics_additonial_service_ids = fields.Many2many(
comodel_name='delivery.carrier.template.option',
relation='postlogistics_compatibility_service_rel',
column1='basic_service_id',
column2='service_id',
string="Compatible Additional Services",
domain=[('postlogistics_type', '=', 'additional')],
)
postlogistics_delivery_instruction_ids = fields.Many2many(
comodel_name='delivery.carrier.template.option',
relation='postlogistics_compatibility_service_rel',
column1='basic_service_id',
column2='service_id',
string="Compatible Delivery Instructions",
domain=[('postlogistics_type', '=', 'delivery')],
)
class DeliveryCarrierOption(models.Model):
""" Set name translatable and add service group """
_inherit = 'delivery.carrier.option'
name = fields.Char(translate=True)
def fields_view_get(self, cr, uid, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
_super = super(DeliveryCarrierOption, self)
result = _super.fields_view_get(cr, uid, view_id=view_id,
view_type=view_type, context=context,
toolbar=toolbar, submenu=submenu)
xmlid = 'delivery_carrier_label_postlogistics.postlogistics'
ref = self.pool['ir.model.data'].xmlid_to_object
postlogistics_partner = ref(cr, uid, xmlid, context=context)
if context.get('default_carrier_id'):
carrier_obj = self.pool['delivery.carrier']
carrier = carrier_obj.browse(cr, uid,
context['default_carrier_id'],
context=context)
if carrier.partner_id == postlogistics_partner:
arch = result['arch']
doc = etree.fromstring(arch)
for node in doc.xpath("//field[@name='tmpl_option_id']"):
node.set(
'domain',
"[('partner_id', '=', %s), "
" ('id', 'in', parent.allowed_option_ids[0][2])]" %
postlogistics_partner.id
)
result['arch'] = etree.tostring(doc)
return result
class DeliveryCarrier(models.Model):
""" Add service group """
_inherit = 'delivery.carrier'
@api.model
def _get_carrier_type_selection(self):
""" Add postlogistics carrier type """
res = super(DeliveryCarrier, self)._get_carrier_type_selection()
res.append(('postlogistics', 'Postlogistics'))
return res
@api.depends('partner_id',
'available_option_ids',
'available_option_ids.tmpl_option_id',
'available_option_ids.postlogistics_type',
)
def _get_basic_service_ids(self):
""" Search in all options for PostLogistics basic services if set """
xmlid = 'delivery_carrier_label_postlogistics.postlogistics'
postlogistics_partner = self.env.ref(xmlid)
for carrier in self:
if carrier.partner_id != postlogistics_partner:
continue
options = carrier.available_option_ids.filtered(
lambda option: option.postlogistics_type == 'basic'
).mapped('tmpl_option_id')
if not options:
continue
            carrier.postlogistics_basic_service_ids = options
@api.depends('partner_id',
'postlogistics_service_group_id',
'postlogistics_basic_service_ids',
'postlogistics_basic_service_ids',
'available_option_ids',
'available_option_ids.postlogistics_type',
)
def _get_allowed_option_ids(self):
""" Return a list of possible options
A domain would be too complicated.
We do this to ensure the user first select a basic service. And
then he adds additional services.
"""
option_template_obj = self.env['delivery.carrier.template.option']
xmlid = 'delivery_carrier_label_postlogistics.postlogistics'
postlogistics_partner = self.env.ref(xmlid)
for carrier in self:
allowed = option_template_obj.browse()
if carrier.partner_id != postlogistics_partner:
continue
service_group = carrier.postlogistics_service_group_id
if service_group:
basic_services = carrier.postlogistics_basic_service_ids
services = option_template_obj.search(
[('postlogistics_service_group_id', '=', service_group.id)]
)
allowed |= services
if basic_services:
related_services = option_template_obj.search(
[('postlogistics_basic_service_ids', 'in',
basic_services.ids)]
)
allowed |= related_services
# Allows to set multiple optional single option in order to
# let the user select them
single_option_types = [
'label_layout',
'output_format',
'resolution',
]
selected_single_options = [
opt.tmpl_option_id.postlogistics_type
for opt in carrier.available_option_ids
if opt.postlogistics_type in single_option_types and
opt.mandatory]
if selected_single_options != single_option_types:
services = option_template_obj.search(
[('postlogistics_type', 'in', single_option_types),
('postlogistics_type', 'not in',
selected_single_options)],
)
allowed |= services
carrier.allowed_option_ids = allowed
postlogistics_license_id = fields.Many2one(
comodel_name='postlogistics.license',
string='PostLogistics Franking License',
)
postlogistics_service_group_id = fields.Many2one(
comodel_name='postlogistics.service.group',
string='PostLogistics Service Group',
help="Service group defines the available options for "
"this delivery method.",
)
postlogistics_basic_service_ids = fields.One2many(
comodel_name='delivery.carrier.template.option',
compute='_get_basic_service_ids',
string='PostLogistics Service Group',
help="Basic Service defines the available "
"additional options for this delivery method",
)
allowed_option_ids = fields.Many2many(
comodel_name='delivery.carrier.template.option',
compute='_get_allowed_option_ids',
string='Allowed options',
help="Compute allowed options according to selected options.",
)
| Antiun/carrier-delivery | delivery_carrier_label_postlogistics/delivery.py | Python | agpl-3.0 | 10,691 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_balance_report(osv.osv_memory):
_inherit = "account.common.account.report"
_name = 'account.balance.report'
_description = 'Trial Balance Report'
_columns = {
'journal_ids': fields.many2many('account.journal', 'account_balance_report_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
'journal_ids': [],
}
def _print_report(self, cr, uid, ids, data, context=None):
data = self.pre_print_report(cr, uid, ids, data, context=context)
return {'type': 'ir.actions.report.xml', 'report_name': 'account.account.balance', 'datas': data}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jmesteve/saas3 | openerp/addons/account/wizard/account_report_account_balance.py | Python | agpl-3.0 | 1,729 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <[email protected]>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Spanish Fiscal Year Closing Wizards
"""
import wizard_run
| jmesteve/saas3 | openerp/addons_extra/l10n_es_fiscal_year_closing/wizard/__init__.py | Python | agpl-3.0 | 1,141 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account Cut-off Base module for OpenERP
# Copyright (C) 2013 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Cut-off Base',
'version': '0.1',
'category': 'Accounting & Finance',
'license': 'AGPL-3',
'summary': 'Base module for Account Cut-offs',
'description': """
This module contains objects, fields and menu entries that are used by other
cut-off modules. So you need to install other cut-off modules to get the
additional functionalities:
* the module *account_cutoff_prepaid* will manage prepaid cut-offs based on
start date and end date,
* the module *account_cutoff_accrual_picking* will manage the accruals based
on the status of the pickings.
Please contact Alexis de Lattre from Akretion <[email protected]>
for any help or question about this module.
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com',
'depends': ['account_accountant'],
'data': [
'company_view.xml',
'account_cutoff_view.xml',
'security/ir.model.access.csv',
'security/account_cutoff_base_security.xml',
],
'installable': True,
'active': False,
}
| AlceConsorcio/account-closing | account_cutoff_base/__openerp__.py | Python | agpl-3.0 | 2,130 |
#!/usr/bin/env python
import os, sys
usage = "usage: %s [infile [outfile]]" % os.path.basename(sys.argv[0])
if len(sys.argv) < 1:
print (usage)
else:
stext = "<insert_a_suppression_name_here>"
rtext = "memcheck problem #"
input = sys.stdin
output = sys.stdout
hit = 0
if len(sys.argv) > 1:
input = open(sys.argv[1])
if len(sys.argv) > 2:
output = open(sys.argv[2], 'w')
for s in input.readlines():
if s.replace(stext, "") != s:
hit = hit + 1
output.write(s.replace(stext, "memcheck problem #%d" % hit))
else:
output.write(s)
| lubosz/gst-plugins-vr | valgrind_helpers/valgrind-make-fix-list.py | Python | lgpl-2.1 | 628 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the :class:`iris.coord_systems.VerticalPerspective` class."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import cartopy.crs as ccrs
from iris.coord_systems import GeogCS, VerticalPerspective
class Test(tests.IrisTest):
def setUp(self):
self.latitude_of_projection_origin = 0.0
self.longitude_of_projection_origin = 0.0
self.perspective_point_height = 38204820000.0
self.false_easting = 0.0
self.false_northing = 0.0
self.semi_major_axis = 6377563.396
self.semi_minor_axis = 6356256.909
self.ellipsoid = GeogCS(self.semi_major_axis, self.semi_minor_axis)
self.globe = ccrs.Globe(
semimajor_axis=self.semi_major_axis,
semiminor_axis=self.semi_minor_axis,
ellipse=None,
)
# Actual and expected coord system can be re-used for
# VerticalPerspective.test_crs_creation and test_projection_creation.
self.expected = ccrs.NearsidePerspective(
central_longitude=self.longitude_of_projection_origin,
central_latitude=self.latitude_of_projection_origin,
satellite_height=self.perspective_point_height,
false_easting=self.false_easting,
false_northing=self.false_northing,
globe=self.globe,
)
self.vp_cs = VerticalPerspective(
self.latitude_of_projection_origin,
self.longitude_of_projection_origin,
self.perspective_point_height,
self.false_easting,
self.false_northing,
self.ellipsoid,
)
def test_crs_creation(self):
res = self.vp_cs.as_cartopy_crs()
self.assertEqual(res, self.expected)
def test_projection_creation(self):
res = self.vp_cs.as_cartopy_projection()
self.assertEqual(res, self.expected)
def test_set_optional_args(self):
# Check that setting the optional (non-ellipse) args works.
crs = VerticalPerspective(
0, 0, 1000, false_easting=100, false_northing=-203.7
)
self.assertEqualAndKind(crs.false_easting, 100.0)
self.assertEqualAndKind(crs.false_northing, -203.7)
def _check_crs_defaults(self, crs):
# Check for property defaults when no kwargs options were set.
# NOTE: except ellipsoid, which is done elsewhere.
self.assertEqualAndKind(crs.false_easting, 0.0)
self.assertEqualAndKind(crs.false_northing, 0.0)
def test_no_optional_args(self):
# Check expected defaults with no optional args.
crs = VerticalPerspective(0, 0, 1000)
self._check_crs_defaults(crs)
def test_optional_args_None(self):
# Check expected defaults with optional args=None.
crs = VerticalPerspective(
0, 0, 1000, false_easting=None, false_northing=None
)
self._check_crs_defaults(crs)
if __name__ == "__main__":
tests.main()
| SciTools/iris | lib/iris/tests/unit/coord_systems/test_VerticalPerspective.py | Python | lgpl-3.0 | 3,251 |
# encoding: utf-8
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
from ObjCDataFormatterTestCase import ObjCDataFormatterTestCase
class ObjCDataFormatterNSError(ObjCDataFormatterTestCase):
@skipUnlessDarwin
def test_nserror_with_run_command(self):
"""Test formatters for NSError."""
self.appkit_tester_impl(self.nserror_data_formatter_commands)
def nserror_data_formatter_commands(self):
self.expect(
'frame variable nserror', substrs=['domain: @"Foobar" - code: 12'])
self.expect(
'frame variable nserrorptr',
substrs=['domain: @"Foobar" - code: 12'])
self.expect(
'frame variable nserror->_userInfo', substrs=['2 key/value pairs'])
self.expect(
'frame variable nserror->_userInfo --ptr-depth 1 -d run-target',
substrs=['@"a"', '@"b"', "1", "2"])
| llvm-mirror/lldb | packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-objc/TestDataFormatterObjCNSError.py | Python | apache-2.0 | 1,050 |
# Copyright (c) 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import importutils
from cinder import test
import cinder.volume.drivers.fujitsu.eternus_dx_common as eternus_dx_common
CONF = cfg.CONF
FUJITSU_FC_MODULE = ('cinder.volume.drivers.fujitsu.'
'eternus_dx_fc.FJDXFCDriver')
FUJITSU_ISCSI_MODULE = ('cinder.volume.drivers.fujitsu.'
'eternus_dx_iscsi.FJDXISCSIDriver')
class FJDriverCompatibility(test.TestCase):
def setUp(self):
super(FJDriverCompatibility, self).setUp()
self.manager = importutils.import_object(CONF.volume_manager)
# Stub definition
self.stubs.Set(
eternus_dx_common.FJDXCommon, '__init__', self.fake_init)
def _load_driver(self, driver):
self.manager.__init__(volume_driver=driver)
def _driver_module_name(self):
return "%s.%s" % (self.manager.driver.__class__.__module__,
self.manager.driver.__class__.__name__)
def fake_init(self, prtcl, configuration=None):
msg = "selected protocol is %s" % prtcl
self.assertTrue((prtcl == 'FC') or (prtcl == 'iSCSI'), msg=msg)
def test_fujitsu_driver_fc_old(self):
self._load_driver(
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver')
self.assertEqual(FUJITSU_FC_MODULE, self._driver_module_name())
def test_fujitsu_driver_fc_new(self):
self._load_driver(FUJITSU_FC_MODULE)
self.assertEqual(FUJITSU_FC_MODULE, self._driver_module_name())
def test_fujitsu_driver_iscsi_old(self):
self._load_driver(
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver')
self.assertEqual(FUJITSU_ISCSI_MODULE, self._driver_module_name())
def test_fujitsu_driver_iscsi_new(self):
self._load_driver(FUJITSU_ISCSI_MODULE)
self.assertEqual(FUJITSU_ISCSI_MODULE, self._driver_module_name())
| Akrog/cinder | cinder/tests/test_fujitsu_compatibility.py | Python | apache-2.0 | 2,530 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for image bundling tool."""
import logging
import os
import subprocess
import time
import urllib2
METADATA_URL_PREFIX = 'http://169.254.169.254/computeMetadata/'
METADATA_V1_URL_PREFIX = METADATA_URL_PREFIX + 'v1/'
class MakeFileSystemException(Exception):
"""Error occurred in file system creation."""
class TarAndGzipFileException(Exception):
"""Error occurred in creating the tarball."""
class LoadDiskImage(object):
"""Loads raw disk image using kpartx."""
def __init__(self, file_path):
"""Initializes LoadDiskImage object.
Args:
file_path: a path to a file containing raw disk image.
Returns:
A list of devices for every partition found in an image.
"""
self._file_path = file_path
def __enter__(self):
"""Map disk image as a device."""
SyncFileSystem()
kpartx_cmd = ['kpartx', '-a', '-v', '-s', self._file_path]
output = RunCommand(kpartx_cmd)
devs = []
for line in output.splitlines():
split_line = line.split()
if (len(split_line) > 2 and split_line[0] == 'add'
and split_line[1] == 'map'):
devs.append('/dev/mapper/' + split_line[2])
time.sleep(2)
return devs
def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
"""Unmap disk image as a device.
Args:
unused_exc_type: unused.
unused_exc_value: unused.
unused_exc_tb: unused.
"""
SyncFileSystem()
time.sleep(2)
kpartx_cmd = ['kpartx', '-d', '-v', '-s', self._file_path]
RunCommand(kpartx_cmd)
class MountFileSystem(object):
"""Mounts a file system."""
def __init__(self, dev_path, dir_path, fs_type):
"""Initializes MountFileSystem object.
Args:
dev_path: A path to a device to mount.
      dir_path: A path to a directory where a device is to be mounted.
      fs_type: The type of the file system on the device, e.g. ext4 or xfs.
    """
self._dev_path = dev_path
self._dir_path = dir_path
self._fs_type = fs_type
def __enter__(self):
"""Mounts a device.
"""
# Since the bundled image can have the same uuid as the root disk,
# we should prevent uuid conflicts for xfs mounts.
    if self._fs_type == 'xfs':
mount_cmd = ['mount', '-o', 'nouuid', self._dev_path, self._dir_path]
else:
mount_cmd = ['mount', self._dev_path, self._dir_path]
RunCommand(mount_cmd)
def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
"""Unmounts a file system.
Args:
unused_exc_type: unused.
unused_exc_value: unused.
unused_exc_tb: unused.
"""
umount_cmd = ['umount', self._dir_path]
RunCommand(umount_cmd)
SyncFileSystem()
def SyncFileSystem():
RunCommand(['sync'])
def GetMounts(root='/'):
"""Find all mount points under the specified root.
Args:
root: a path to look for a mount points.
Returns:
A list of mount points.
"""
output = RunCommand(['/bin/mount', '-l'])
mounts = []
for line in output.splitlines():
split_line = line.split()
mount_point = split_line[2]
if mount_point == root:
continue
    # We are simply ignoring the fs_type for now, but we could use it later.
    # Just verify that these are actually mount points.
if os.path.ismount(mount_point) and mount_point.startswith(root):
mounts.append(mount_point)
return mounts
def MakePartitionTable(file_path):
"""Create a partition table in a file.
Args:
file_path: A path to a file where a partition table will be created.
"""
RunCommand(['parted', file_path, 'mklabel', 'msdos'])
def MakePartition(file_path, partition_type, fs_type, start, end):
"""Create a partition in a file.
Args:
file_path: A path to a file where a partition will be created.
partition_type: A type of a partition to be created. Tested option is msdos.
fs_type: A type of a file system to be created. For example, ext2, ext3,
etc.
start: Start offset of a partition in bytes.
end: End offset of a partition in bytes.
"""
parted_cmd = ['parted', file_path, 'unit B', 'mkpart', partition_type,
fs_type, str(start), str(end)]
RunCommand(parted_cmd)
def MakeFileSystem(dev_path, fs_type, uuid=None):
"""Create a file system in a device.
Args:
dev_path: A path to a device.
fs_type: A type of a file system to be created. For example ext2, ext3, etc.
uuid: The value to use as the UUID for the filesystem. If none, a random
UUID will be generated and used.
Returns:
The uuid of the filesystem. This will be the same as the passed value if
a value was specified. If no uuid was passed in, this will be the randomly
generated uuid.
Raises:
MakeFileSystemException: If mkfs encounters an error.
"""
if uuid is None:
uuid = RunCommand(['uuidgen']).strip()
if uuid is None:
raise MakeFileSystemException(dev_path)
mkfs_cmd = ['mkfs', '-t', fs_type, dev_path]
RunCommand(mkfs_cmd)
  if fs_type == 'xfs':
set_uuid_cmd = ['xfs_admin', '-U', uuid, dev_path]
else:
set_uuid_cmd = ['tune2fs', '-U', uuid, dev_path]
RunCommand(set_uuid_cmd)
return uuid
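# Illustrative usage (hypothetical device path): MakeFileSystem('/dev/mapper/loop0p1', 'ext4')
# runs 'mkfs -t ext4' on the device, then 'tune2fs -U <uuid>' with a freshly
# generated uuid, and returns that uuid so callers can reuse it (e.g. in fstab).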
def Rsync(src, dest, exclude_file, ignore_hard_links, recursive, xattrs):
"""Copy files from specified directory using rsync.
Args:
src: Source location to copy.
dest: Destination to copy files to.
exclude_file: A path to a file which contains a list of exclude from copy
filters.
    ignore_hard_links: If True, hard links are copied as separate files. If
      False, hard links are recreated in dest.
recursive: Specifies if directories are copied recursively or not.
xattrs: Specifies if extended attributes are preserved or not.
"""
rsync_cmd = ['rsync', '--times', '--perms', '--owner', '--group', '--links',
'--devices', '--acls', '--sparse']
if not ignore_hard_links:
rsync_cmd.append('--hard-links')
if recursive:
rsync_cmd.append('--recursive')
else:
rsync_cmd.append('--dirs')
if xattrs:
rsync_cmd.append('--xattrs')
if exclude_file:
rsync_cmd.append('--exclude-from=' + exclude_file)
rsync_cmd.extend([src, dest])
logging.debug('Calling: %s', repr(rsync_cmd))
if exclude_file:
logging.debug('Contents of exclude file %s:', exclude_file)
with open(exclude_file, 'rb') as excludes:
for line in excludes:
logging.debug(' %s', line.rstrip())
RunCommand(rsync_cmd)
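# Illustrative invocation (hypothetical paths): Rsync('/mnt/src/', '/mnt/dest', None, False, True, False)
# builds and runs: rsync --times --perms --owner --group --links --devices --acls
# --sparse --hard-links --recursive /mnt/src/ /mnt/dest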
def GetUUID(partition_path):
"""Fetches the UUID of the filesystem on the specified partition.
Args:
partition_path: The path to the partition.
Returns:
The uuid of the filesystem.
"""
output = RunCommand(['blkid', partition_path])
for token in output.split():
if token.startswith('UUID='):
uuid = token.strip()[len('UUID="'):-1]
logging.debug('found uuid = %s', uuid)
return uuid
def CopyBytes(src, dest, count):
"""Copies count bytes from the src to dest file.
Args:
src: The source to read bytes from.
dest: The destination to copy bytes to.
count: Number of bytes to copy.
"""
block_size = 4096
block_count = count / block_size
dd_command = ['dd',
'if=%s' % src,
'of=%s' % dest,
'conv=notrunc',
'bs=%s' % block_size,
'count=%s' % block_count]
RunCommand(dd_command)
remaining_bytes = count - block_count * block_size
if remaining_bytes:
logging.debug('remaining bytes to copy = %s', remaining_bytes)
    # With bs=1, dd interprets seek/skip in single bytes, so express the offset
    # of the tail copy in bytes rather than in block_size-sized blocks.
    dd_command = ['dd',
                  'if=%s' % src,
                  'of=%s' % dest,
                  'seek=%s' % (block_count * block_size),
                  'skip=%s' % (block_count * block_size),
                  'conv=notrunc',
                  'bs=1',
                  'count=%s' % remaining_bytes]
RunCommand(dd_command)
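# Worked example (hypothetical paths): CopyBytes('/dev/sdb', '/tmp/disk.raw', 10000)
# copies 10000 // 4096 = 2 full blocks with the first dd, then the remaining
# 10000 - 2 * 4096 = 1808 bytes with the byte-granular dd above.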
def GetPartitionStart(disk_path, partition_number):
"""Returns the starting position in bytes of the partition.
Args:
disk_path: The path to disk device.
partition_number: The partition number to lookup. 1 based.
Returns:
    The starting position of the requested partition in bytes.
Raises:
subprocess.CalledProcessError: If running parted fails.
IndexError: If there is no partition at the given number.
"""
parted_cmd = ['parted',
disk_path,
'unit B',
'print']
# In case the device is not valid and parted throws the retry/cancel prompt
# write c to stdin.
output = RunCommand(parted_cmd, input_str='c')
for line in output.splitlines():
split_line = line.split()
if len(split_line) > 4 and split_line[0] == str(partition_number):
return int(split_line[1][:-1])
raise IndexError()
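# Illustrative parted output line (hypothetical sizes): ' 1  1048576B  10737418239B ...';
# GetPartitionStart(disk, 1) matches the leading '1', strips the trailing 'B' and
# returns 1048576.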
def RemovePartition(disk_path, partition_number):
"""Removes the partition number from the disk.
Args:
disk_path: The disk to remove the partition from.
partition_number: The partition number to remove.
"""
parted_cmd = ['parted',
disk_path,
'rm',
str(partition_number)]
# In case the device is not valid and parted throws the retry/cancel prompt
# write c to stdin.
RunCommand(parted_cmd, input_str='c')
def GetDiskSize(disk_file):
"""Returns the size of the disk device in bytes.
Args:
disk_file: The full path to the disk device.
Returns:
The size of the disk device in bytes.
Raises:
subprocess.CalledProcessError: If fdisk command fails for the disk file.
"""
output = RunCommand(['fdisk', '-s', disk_file])
return int(output) * 1024
def RunCommand(command, input_str=None):
"""Runs the command and returns the output printed on stdout.
Args:
command: The command to run.
input_str: The input to pass to subprocess via stdin.
Returns:
The stdout from running the command.
Raises:
subprocess.CalledProcessError: if the command fails.
"""
logging.debug('running %s with input=%s', command, input_str)
p = subprocess.Popen(command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmd_output = p.communicate(input_str)
logging.debug('stdout %s', cmd_output[0])
logging.debug('stderr %s', cmd_output[1])
logging.debug('returncode %s', p.returncode)
if p.returncode:
logging.warning('Error while running %s return_code = %s\n'
'stdout=%s\nstderr=%s',
command, p.returncode, cmd_output[0],
cmd_output[1])
raise subprocess.CalledProcessError(p.returncode,
cmd=command)
return cmd_output[0]
def TarAndGzipFile(src_paths, dest):
"""Pack file in tar archive and optionally gzip it.
Args:
src_paths: A list of files that will be archived.
(Must be in the same directory.)
    dest: An archive name. If the name ends with .gz or .tgz the archive is
      gzipped as well.
Raises:
TarAndGzipFileException: If tar encounters an error.
"""
if dest.endswith('.gz') or dest.endswith('.tgz'):
mode = 'czSf'
else:
mode = 'cSf'
src_names = [os.path.basename(src_path) for src_path in src_paths]
# Take the directory of the first file in the list, all files are expected
# to be in the same directory.
src_dir = os.path.dirname(src_paths[0])
tar_cmd = ['tar', mode, dest, '-C', src_dir] + src_names
retcode = subprocess.call(tar_cmd)
if retcode:
raise TarAndGzipFileException(','.join(src_paths))
class Http(object):
def Get(self, request, timeout=None):
return urllib2.urlopen(request, timeout=timeout).read()
def GetMetadata(self, url_path, recursive=False, timeout=None):
"""Retrieves instance metadata.
Args:
url_path: The path of the metadata url after the api version.
http://169.254.169.254/computeMetadata/v1/url_path
recursive: If set, returns the tree of metadata starting at url_path as
a json string.
timeout: How long to wait for blocking operations (in seconds).
A value of None uses urllib2's default timeout.
Returns:
The metadata returned based on the url path.
"""
# Use the latest version of the metadata.
suffix = ''
if recursive:
suffix = '?recursive=true'
url = '{0}{1}{2}'.format(METADATA_V1_URL_PREFIX, url_path, suffix)
request = urllib2.Request(url)
request.add_unredirected_header('Metadata-Flavor', 'Google')
return self.Get(request, timeout=timeout)
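# Illustrative call: Http().GetMetadata('instance/id', timeout=1) issues a GET to
# http://169.254.169.254/computeMetadata/v1/instance/id with the required
# 'Metadata-Flavor: Google' header and returns the response body.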
def IsRunningOnGCE():
"""Detect if we are running on GCE.
Returns:
True if we are running on GCE, False otherwise.
"""
  # Try accessing DMI/SMBIOS information through dmidecode first
try:
dmidecode_cmd = ['dmidecode', '-s', 'bios-vendor']
output = RunCommand(dmidecode_cmd)
return 'Google' in output
except subprocess.CalledProcessError:
# We fail if dmidecode doesn't exist or we have insufficient privileges
pass
# If dmidecode is not working, fallback to contacting the metadata server
try:
Http().GetMetadata('instance/id', timeout=1)
return True
except urllib2.HTTPError as e:
logging.warning('HTTP error: %s (http status code=%s)' % (e.reason, e.code))
except urllib2.URLError as e:
logging.warning('Cannot reach metadata server: %s' % e.reason)
return False
| tweksteen/compute-image-packages | gcimagebundle/gcimagebundlelib/utils.py | Python | apache-2.0 | 13,831 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
'''
Usage:
advogato.py <name> <diary entry file>
'''
from twisted.web.xmlrpc import Proxy
from twisted.internet import reactor
from getpass import getpass
import sys
class AddDiary:
def __init__(self, name, password):
self.name = name
self.password = password
self.proxy = Proxy('http://advogato.org/XMLRPC')
def __call__(self, filename):
self.data = open(filename).read()
d = self.proxy.callRemote('authenticate', self.name, self.password)
d.addCallbacks(self.login, self.noLogin)
def noLogin(self, reason):
print "could not login"
reactor.stop()
def login(self, cookie):
d = self.proxy.callRemote('diary.set', cookie, -1, self.data)
d.addCallbacks(self.setDiary, self.errorSetDiary)
def setDiary(self, response):
reactor.stop()
def errorSetDiary(self, error):
print "could not set diary", error
reactor.stop()
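# Callback flow, as wired above: authenticate -> login -> diary.set -> setDiary on
# success; noLogin and errorSetDiary report the failure and stop the reactor.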
diary = AddDiary(sys.argv[1], getpass())
diary(sys.argv[2])
reactor.run()
| jxta/cc | vendor/Twisted-10.0.0/doc/web/examples/advogato.py | Python | apache-2.0 | 1,110 |
from openflow.optin_manager.sfa.rspecs.elements.element import Element
class PLTag(Element):
fields = [
'tagname',
'value',
]
| dana-i2cat/felix | optin_manager/src/python/openflow/optin_manager/sfa/rspecs/elements/pltag.py | Python | apache-2.0 | 161 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a campaign in a given advertiser.
To create an advertiser, run create_advertiser.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfa
ADVERTISER_ID = 'INSERT_ADVERTISER_ID_HERE'
CAMPAIGN_NAME = 'INSERT_CAMPAIGN_NAME_HERE'
URL = 'INSERT_LANDING_PAGE_URL_HERE'
LANDING_PAGE_NAME = 'INSERT_LANDING_PAGE_NAME_HERE'
START_DATE = '%(year)s-%(month)02d-%(day)02dT12:00:00' % {
'year': 'INSERT_START_YEAR_HERE',
'month': int('INSERT_START_MONTH_HERE'),
'day': int('INSERT_START_DAY_HERE')}
END_DATE = '%(year)s-%(month)02d-%(day)02dT12:00:00' % {
'year': 'INSERT_END_YEAR_HERE',
'month': int('INSERT_END_MONTH_HERE'),
'day': int('INSERT_END_DAY_HERE')}
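# Illustrative values: with year 2015, month 6 and day 1 the templates above render
# as '2015-06-01T12:00:00', which is what this example passes as startDate/endDate.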
def main(client, advertiser_id, campaign_name, url, landing_page_name,
start_date, end_date):
# Initialize appropriate service.
campaign_service = client.GetService(
'campaign', 'v1.20', 'https://advertisersapitest.doubleclick.net')
# Create a default landing page for the campaign and save it.
default_landing_page = {
'url': url,
'name': landing_page_name
}
default_landing_page_id = campaign_service.saveLandingPage(
default_landing_page)['id']
# Construct and save the campaign.
campaign = {
'name': campaign_name,
'advertiserId': advertiser_id,
'defaultLandingPageId': default_landing_page_id,
'archived': 'false',
'startDate': start_date,
'endDate': end_date
}
result = campaign_service.saveCampaign(campaign)
# Display results.
print 'Campaign with ID \'%s\' was created.' % result['id']
if __name__ == '__main__':
# Initialize client object.
dfa_client = dfa.DfaClient.LoadFromStorage()
main(dfa_client, ADVERTISER_ID, CAMPAIGN_NAME, URL, LANDING_PAGE_NAME,
START_DATE, END_DATE)
| wubr2000/googleads-python-lib | examples/dfa/v1_20/create_campaign.py | Python | apache-2.0 | 2,699 |
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import mock
from neutron.common import constants
from neutron.common import exceptions
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from neutron.tests.unit.vmware.nsxlib import base
_uuid = test_api_v2._uuid
class LogicalSwitchesTestCase(base.NsxlibTestCase):
def test_create_and_get_lswitches_single(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id,
'fake-switch',
transport_zones_config)
res_lswitch = switchlib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['uuid'],
lswitch['uuid'])
def test_create_and_get_lswitches_single_name_exceeds_40_chars(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
tenant_id,
_uuid(),
'*' * 50,
transport_zones_config)
res_lswitch = switchlib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['uuid'], lswitch['uuid'])
self.assertEqual(res_lswitch[0]['display_name'], '*' * 40)
def test_create_and_get_lswitches_multiple(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
network_id = _uuid()
main_lswitch = switchlib.create_lswitch(
self.fake_cluster, network_id,
tenant_id, 'fake-switch', transport_zones_config,
tags=[{'scope': 'multi_lswitch', 'tag': 'True'}])
# Create secondary lswitch
second_lswitch = switchlib.create_lswitch(
self.fake_cluster, network_id,
tenant_id, 'fake-switch-2', transport_zones_config)
res_lswitch = switchlib.get_lswitches(self.fake_cluster,
network_id)
self.assertEqual(len(res_lswitch), 2)
switch_uuids = [ls['uuid'] for ls in res_lswitch]
self.assertIn(main_lswitch['uuid'], switch_uuids)
self.assertIn(second_lswitch['uuid'], switch_uuids)
for ls in res_lswitch:
if ls['uuid'] == main_lswitch['uuid']:
main_ls = ls
else:
second_ls = ls
main_ls_tags = self._build_tag_dict(main_ls['tags'])
second_ls_tags = self._build_tag_dict(second_ls['tags'])
self.assertIn('multi_lswitch', main_ls_tags)
self.assertNotIn('multi_lswitch', second_ls_tags)
self.assertIn('quantum_net_id', main_ls_tags)
self.assertIn('quantum_net_id', second_ls_tags)
self.assertEqual(main_ls_tags['quantum_net_id'],
network_id)
self.assertEqual(second_ls_tags['quantum_net_id'],
network_id)
def test_update_lswitch(self):
new_name = 'new-name'
new_tags = [{'scope': 'new_tag', 'tag': 'xxx'}]
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
switchlib.update_lswitch(self.fake_cluster, lswitch['uuid'],
new_name, tags=new_tags)
res_lswitch = switchlib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['display_name'], new_name)
switch_tags = self._build_tag_dict(res_lswitch[0]['tags'])
self.assertIn('new_tag', switch_tags)
self.assertEqual(switch_tags['new_tag'], 'xxx')
def test_update_non_existing_lswitch_raises(self):
self.assertRaises(exceptions.NetworkNotFound,
switchlib.update_lswitch,
self.fake_cluster, 'whatever',
'foo', 'bar')
def test_delete_networks(self):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
switchlib.delete_networks(self.fake_cluster, lswitch['uuid'],
[lswitch['uuid']])
self.assertRaises(exceptions.NotFound,
switchlib.get_lswitches,
self.fake_cluster,
lswitch['uuid'])
def test_delete_non_existing_lswitch_raises(self):
self.assertRaises(exceptions.NetworkNotFound,
switchlib.delete_networks,
self.fake_cluster, 'whatever', ['whatever'])
class LogicalPortsTestCase(base.NsxlibTestCase):
def _create_switch_and_port(self, tenant_id='pippo',
neutron_port_id='whatever',
name='name', device_id='device_id'):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(), tenant_id, 'fake-switch',
transport_zones_config)
lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'],
tenant_id, neutron_port_id,
name, device_id, True)
return lswitch, lport
def test_create_and_get_port(self):
lswitch, lport = self._create_switch_and_port()
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
# Try again with relation
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'],
relations='LogicalPortStatus')
self.assertEqual(lport['uuid'], lport_res['uuid'])
def test_plug_interface(self):
lswitch, lport = self._create_switch_and_port()
switchlib.plug_vif_interface(self.fake_cluster, lswitch['uuid'],
lport['uuid'], 'VifAttachment', 'fake')
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
def test_get_port_by_tag(self):
lswitch, lport = self._create_switch_and_port()
lport2 = switchlib.get_port_by_neutron_tag(self.fake_cluster,
lswitch['uuid'],
'whatever')
self.assertIsNotNone(lport2)
self.assertEqual(lport['uuid'], lport2['uuid'])
def test_get_port_by_tag_not_found_with_switch_id_raises_not_found(self):
tenant_id = 'pippo'
neutron_port_id = 'whatever'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(
self.fake_cluster, tenant_id, _uuid(),
'fake-switch', transport_zones_config)
self.assertRaises(exceptions.NotFound,
switchlib.get_port_by_neutron_tag,
self.fake_cluster, lswitch['uuid'],
neutron_port_id)
def test_get_port_by_tag_not_find_wildcard_lswitch_returns_none(self):
tenant_id = 'pippo'
neutron_port_id = 'whatever'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
switchlib.create_lswitch(
self.fake_cluster, tenant_id, _uuid(),
'fake-switch', transport_zones_config)
lport = switchlib.get_port_by_neutron_tag(
self.fake_cluster, '*', neutron_port_id)
self.assertIsNone(lport)
def test_get_port_status(self):
lswitch, lport = self._create_switch_and_port()
status = switchlib.get_port_status(
self.fake_cluster, lswitch['uuid'], lport['uuid'])
self.assertEqual(constants.PORT_STATUS_ACTIVE, status)
def test_get_port_status_non_existent_raises(self):
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.get_port_status,
self.fake_cluster,
'boo', 'boo')
def test_update_port(self):
lswitch, lport = self._create_switch_and_port()
switchlib.update_port(
self.fake_cluster, lswitch['uuid'], lport['uuid'],
'neutron_port_id', 'pippo2', 'new_name', 'device_id', False)
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
self.assertEqual('new_name', lport_res['display_name'])
self.assertEqual('False', lport_res['admin_status_enabled'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertIn('vm_id', port_tags)
def test_create_port_device_id_less_than_40_chars(self):
lswitch, lport = self._create_switch_and_port()
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertEqual('device_id', port_tags['vm_id'])
def test_create_port_device_id_more_than_40_chars(self):
dev_id = "this_is_a_very_long_device_id_with_lots_of_characters"
lswitch, lport = self._create_switch_and_port(device_id=dev_id)
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertNotEqual(len(dev_id), len(port_tags['vm_id']))
def test_get_ports_with_obsolete_and_new_vm_id_tag(self):
def obsolete(device_id, obfuscate=False):
return hashlib.sha1(device_id).hexdigest()
with mock.patch.object(utils, 'device_id_to_vm_id', new=obsolete):
dev_id1 = "short-dev-id-1"
_, lport1 = self._create_switch_and_port(device_id=dev_id1)
dev_id2 = "short-dev-id-2"
_, lport2 = self._create_switch_and_port(device_id=dev_id2)
lports = switchlib.get_ports(self.fake_cluster, None, [dev_id1])
port_tags = self._build_tag_dict(lports['whatever']['tags'])
self.assertNotEqual(dev_id1, port_tags['vm_id'])
lports = switchlib.get_ports(self.fake_cluster, None, [dev_id2])
port_tags = self._build_tag_dict(lports['whatever']['tags'])
self.assertEqual(dev_id2, port_tags['vm_id'])
def test_update_non_existent_port_raises(self):
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.update_port, self.fake_cluster,
'boo', 'boo', 'boo', 'boo', 'boo', 'boo', False)
def test_delete_port(self):
lswitch, lport = self._create_switch_and_port()
switchlib.delete_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.get_port, self.fake_cluster,
lswitch['uuid'], lport['uuid'])
def test_delete_non_existent_port_raises(self):
lswitch = self._create_switch_and_port()[0]
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.delete_port, self.fake_cluster,
lswitch['uuid'], 'bad_port_uuid')
def test_query_lswitch_ports(self):
lswitch, lport = self._create_switch_and_port()
switch_port_uuids = [
switchlib.create_lport(
self.fake_cluster, lswitch['uuid'], 'pippo', 'qportid-%s' % k,
'port-%s' % k, 'deviceid-%s' % k, True)['uuid']
for k in range(2)]
switch_port_uuids.append(lport['uuid'])
ports = switchlib.query_lswitch_lports(
self.fake_cluster, lswitch['uuid'])
self.assertEqual(len(ports), 3)
for res_port in ports:
self.assertIn(res_port['uuid'], switch_port_uuids)
| sajuptpm/neutron-ipam | neutron/tests/unit/vmware/nsxlib/test_switch.py | Python | apache-2.0 | 14,400 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all first party audience segments.
To create first party audience segments, run create_audience_segments.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
audience_segment_service = client.GetService(
'AudienceSegmentService', version='v201505')
# Create statement object to only select first party audience segments.
values = [{
'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'FIRST_PARTY'
}
}]
query = 'WHERE Type = :type'
statement = dfp.FilterStatement(query, values)
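  # The bound value above fills the :type placeholder, so the effective PQL filter
  # is "WHERE Type = 'FIRST_PARTY'".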
# Get audience segments by statement.
while True:
response = audience_segment_service.getAudienceSegmentsByStatement(
statement.ToStatement())
if 'results' in response:
segments = response['results']
for segment in segments:
print ('Audience segment with id \'%s\' and name \'%s\' of size '
'%s was found. ' %
(segment['id'], segment['name'], segment['size']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| wubr2000/googleads-python-lib | examples/dfp/v201505/audience_segment_service/get_first_party_audience_segments.py | Python | apache-2.0 | 2,062 |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import subprocess
from mock import Mock, MagicMock, patch
import pexpect
from trove.common import exception
from trove.common import utils
from trove.guestagent import pkg
from trove.tests.unittests import trove_testtools
"""
Unit tests for the classes and functions in pkg.py.
"""
class PkgDEBInstallTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgDEBInstallTestCase, self).setUp()
self.pkg = pkg.DebianPackagerMixin()
self.pkg_fix = self.pkg._fix
self.pkg_fix_package_selections = self.pkg._fix_package_selections
p0 = patch('pexpect.spawn')
p0.start()
self.addCleanup(p0.stop)
p1 = patch('trove.common.utils.execute')
p1.start()
self.addCleanup(p1.stop)
self.pkg._fix = Mock(return_value=None)
self.pkg._fix_package_selections = Mock(return_value=None)
self.pkgName = 'packageName'
def tearDown(self):
super(PkgDEBInstallTestCase, self).tearDown()
self.pkg._fix = self.pkg_fix
self.pkg._fix_package_selections = self.pkg_fix_package_selections
def test_pkg_is_installed_no_packages(self):
packages = []
self.assertTrue(self.pkg.pkg_is_installed(packages))
def test_pkg_is_installed_yes(self):
packages = ["package1=1.0", "package2"]
self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0"])
self.assertTrue(self.pkg.pkg_is_installed(packages))
def test_pkg_is_installed_no(self):
packages = ["package1=1.0", "package2", "package3=3.1"]
self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0", "3.0"])
self.assertFalse(self.pkg.pkg_is_installed(packages))
def test_success_install(self):
# test
pexpect.spawn.return_value.expect.return_value = 7
pexpect.spawn.return_value.match = False
self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
def test_success_install_with_config_opts(self):
# test
config_opts = {'option': 'some_opt'}
pexpect.spawn.return_value.expect.return_value = 7
pexpect.spawn.return_value.match = False
self.assertTrue(
self.pkg.pkg_install(self.pkgName, config_opts, 5000) is None)
def test_permission_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 0
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_not_found_1(self):
# test
pexpect.spawn.return_value.expect.return_value = 1
pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_not_found_2(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_run_DPKG_bad_State(self):
# test _fix method is called and PackageStateError is thrown
pexpect.spawn.return_value.expect.return_value = 4
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
self.assertTrue(self.pkg._fix.called)
def test_admin_lock_error(self):
# test 'Unable to lock the administration directory' error
pexpect.spawn.return_value.expect.return_value = 5
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_broken_error(self):
pexpect.spawn.return_value.expect.return_value = 6
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgBrokenError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_timeout_error(self):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install,
self.pkgName, {}, 5000)
class PkgDEBRemoveTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgDEBRemoveTestCase, self).setUp()
self.pkg = pkg.DebianPackagerMixin()
self.pkg_version = self.pkg.pkg_version
self.pkg_install = self.pkg._install
self.pkg_fix = self.pkg._fix
p0 = patch('pexpect.spawn')
p0.start()
self.addCleanup(p0.stop)
p1 = patch('trove.common.utils.execute')
p1.start()
self.addCleanup(p1.stop)
self.pkg.pkg_version = Mock(return_value="OK")
self.pkg._install = Mock(return_value=None)
self.pkg._fix = Mock(return_value=None)
self.pkgName = 'packageName'
def tearDown(self):
super(PkgDEBRemoveTestCase, self).tearDown()
self.pkg.pkg_version = self.pkg_version
self.pkg._install = self.pkg_install
self.pkg._fix = self.pkg_fix
def test_remove_no_pkg_version(self):
# test
pexpect.spawn.return_value.expect.return_value = 6
pexpect.spawn.return_value.match = False
with patch.object(self.pkg, 'pkg_version', return_value=None):
self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
def test_success_remove(self):
# test
pexpect.spawn.return_value.expect.return_value = 6
pexpect.spawn.return_value.match = False
self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
def test_permission_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 0
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_package_not_found(self):
# test
pexpect.spawn.return_value.expect.return_value = 1
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_package_reinstall_first_1(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove,
self.pkgName, 5000)
self.assertTrue(self.pkg._install.called)
self.assertFalse(self.pkg._fix.called)
def test_package_reinstall_first_2(self):
# test
pexpect.spawn.return_value.expect.return_value = 3
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove,
self.pkgName, 5000)
self.assertTrue(self.pkg._install.called)
self.assertFalse(self.pkg._fix.called)
def test_package_DPKG_first(self):
# test
pexpect.spawn.return_value.expect.return_value = 4
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove,
self.pkgName, 5000)
self.assertFalse(self.pkg._install.called)
self.assertTrue(self.pkg._fix.called)
def test_admin_lock_error(self):
# test 'Unable to lock the administration directory' error
pexpect.spawn.return_value.expect.return_value = 5
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_timeout_error(self):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove,
self.pkgName, 5000)
@patch.object(subprocess, 'call')
def test_timeout_error_with_exception(self, mock_call):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
pexpect.spawn.return_value.close.side_effect = (
pexpect.ExceptionPexpect('error'))
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove,
self.pkgName, 5000)
self.assertEqual(1, mock_call.call_count)
class PkgDEBVersionTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgDEBVersionTestCase, self).setUp()
self.pkgName = 'mysql-server-5.5'
self.pkgVersion = '5.5.28-0'
self.getoutput = pkg.getoutput
def tearDown(self):
super(PkgDEBVersionTestCase, self).tearDown()
pkg.getoutput = self.getoutput
def test_version_success(self):
cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, self.pkgVersion)
pkg.getoutput = Mock(return_value=cmd_out)
version = pkg.DebianPackagerMixin().pkg_version(self.pkgName)
self.assertTrue(version)
self.assertEqual(self.pkgVersion, version)
def test_version_unknown_package(self):
cmd_out = "N: Unable to locate package %s" % self.pkgName
pkg.getoutput = Mock(return_value=cmd_out)
self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName))
def test_version_no_version(self):
cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, "(none)")
pkg.getoutput = Mock(return_value=cmd_out)
self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName))
class PkgRPMVersionTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgRPMVersionTestCase, self).setUp()
self.pkgName = 'python-requests'
self.pkgVersion = '0.14.2-1.el6'
self.getoutput = pkg.getoutput
def tearDown(self):
super(PkgRPMVersionTestCase, self).tearDown()
pkg.getoutput = self.getoutput
@patch('trove.guestagent.pkg.LOG')
def test_version_no_output(self, mock_logging):
cmd_out = ''
pkg.getoutput = Mock(return_value=cmd_out)
self.assertIsNone(pkg.RedhatPackagerMixin().pkg_version(self.pkgName))
def test_version_success(self):
cmd_out = self.pkgVersion
pkg.getoutput = Mock(return_value=cmd_out)
version = pkg.RedhatPackagerMixin().pkg_version(self.pkgName)
self.assertTrue(version)
self.assertEqual(self.pkgVersion, version)
class PkgRPMInstallTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgRPMInstallTestCase, self).setUp()
self.pkg = pkg.RedhatPackagerMixin()
self.getoutput = pkg.getoutput
self.pkgName = 'packageName'
p0 = patch('pexpect.spawn')
p0.start()
self.addCleanup(p0.stop)
p1 = patch('trove.common.utils.execute')
p1.start()
self.addCleanup(p1.stop)
def tearDown(self):
super(PkgRPMInstallTestCase, self).tearDown()
pkg.getoutput = self.getoutput
def test_pkg_is_installed_no_packages(self):
packages = []
self.assertTrue(self.pkg.pkg_is_installed(packages))
def test_pkg_is_installed_yes(self):
packages = ["package1=1.0", "package2"]
with patch.object(pkg, 'getoutput', MagicMock(
return_value="package1=1.0\n" "package2=2.0")):
self.assertTrue(self.pkg.pkg_is_installed(packages))
def test_pkg_is_installed_no(self):
packages = ["package1=1.0", "package2", "package3=3.0"]
with patch.object(pkg, 'getoutput', MagicMock(
return_value="package1=1.0\n" "package2=2.0")):
self.assertFalse(self.pkg.pkg_is_installed(packages))
def test_permission_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 0
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_not_found(self):
# test
pexpect.spawn.return_value.expect.return_value = 1
pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_conflict_remove(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
self.pkg._rpm_remove_nodeps = Mock()
# test and verify
self.pkg._install(self.pkgName, 5000)
self.assertTrue(self.pkg._rpm_remove_nodeps.called)
def test_package_conflict_remove_install(self):
with patch.object(self.pkg, '_install', side_effect=[3, 3, 0]):
self.assertTrue(
self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
self.assertEqual(3, self.pkg._install.call_count)
@patch.object(utils, 'execute')
def test__rpm_remove_nodeps(self, mock_execute):
self.pkg._rpm_remove_nodeps(self.pkgName)
mock_execute.assert_called_with('rpm', '-e', '--nodeps', self.pkgName,
run_as_root=True, root_helper='sudo')
def test_package_scriptlet_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 5
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgScriptletError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_http_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 6
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_nomirrors_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 7
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_sign_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 8
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgSignError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_already_installed(self):
# test
pexpect.spawn.return_value.expect.return_value = 9
pexpect.spawn.return_value.match = False
# test and verify
self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
def test_package_success_updated(self):
# test
pexpect.spawn.return_value.expect.return_value = 10
pexpect.spawn.return_value.match = False
# test and verify
self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
def test_package_success_installed(self):
# test
pexpect.spawn.return_value.expect.return_value = 11
pexpect.spawn.return_value.match = False
# test and verify
self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
def test_timeout_error(self):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install,
self.pkgName, {}, 5000)
class PkgRPMRemoveTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgRPMRemoveTestCase, self).setUp()
self.pkg = pkg.RedhatPackagerMixin()
self.pkg_version = self.pkg.pkg_version
self.pkg_install = self.pkg._install
p0 = patch('pexpect.spawn')
p0.start()
self.addCleanup(p0.stop)
p1 = patch('trove.common.utils.execute')
p1.start()
self.addCleanup(p1.stop)
self.pkg.pkg_version = Mock(return_value="OK")
self.pkg._install = Mock(return_value=None)
self.pkgName = 'packageName'
def tearDown(self):
super(PkgRPMRemoveTestCase, self).tearDown()
self.pkg.pkg_version = self.pkg_version
self.pkg._install = self.pkg_install
def test_permission_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 0
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_package_not_found(self):
# test
pexpect.spawn.return_value.expect.return_value = 1
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_remove_no_pkg_version(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = False
with patch.object(self.pkg, 'pkg_version', return_value=None):
self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
def test_success_remove(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = False
self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
def test_timeout_error(self):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove,
self.pkgName, 5000)
class PkgDEBFixPackageSelections(trove_testtools.TestCase):
def setUp(self):
super(PkgDEBFixPackageSelections, self).setUp()
self.pkg = pkg.DebianPackagerMixin()
self.getoutput = pkg.getoutput
def tearDown(self):
super(PkgDEBFixPackageSelections, self).tearDown()
pkg.getoutput = self.getoutput
@patch.object(os, 'remove')
@patch.object(pkg, 'NamedTemporaryFile')
@patch.object(utils, 'execute')
def test__fix_package_selections(self, mock_execute, mock_temp_file,
mock_remove):
packages = ["package1"]
config_opts = {'option': 'some_opt'}
pkg.getoutput = Mock(
return_value="* package1/option: some_opt")
self.pkg._fix_package_selections(packages, config_opts)
self.assertEqual(2, mock_execute.call_count)
self.assertEqual(1, mock_remove.call_count)
@patch.object(os, 'remove')
@patch.object(pkg, 'NamedTemporaryFile')
@patch.object(utils, 'execute',
side_effect=exception.ProcessExecutionError)
def test_fail__fix_package_selections(self, mock_execute, mock_temp_file,
mock_remove):
packages = ["package1"]
config_opts = {'option': 'some_opt'}
pkg.getoutput = Mock(
return_value="* package1/option: some_opt")
self.assertRaises(pkg.PkgConfigureError,
self.pkg._fix_package_selections,
packages, config_opts)
self.assertEqual(1, mock_remove.call_count)
@patch.object(utils, 'execute')
def test__fix(self, mock_execute):
self.pkg._fix(30)
mock_execute.assert_called_with('dpkg', '--configure', '-a',
run_as_root=True, root_helper='sudo')
| mmasaki/trove | trove/tests/unittests/guestagent/test_pkg.py | Python | apache-2.0 | 21,209 |
# -*- coding: utf-8 -*-
"""
sphinx.environment.managers.indexentries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Index entries manager for sphinx.environment.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import bisect
import unicodedata
import string
from itertools import groupby
from six import text_type
from sphinx import addnodes
from sphinx.util import iteritems, split_index_msg, split_into
from sphinx.locale import _
from sphinx.environment.managers import EnvironmentManager
class IndexEntries(EnvironmentManager):
name = 'indices'
def __init__(self, env):
super(IndexEntries, self).__init__(env)
self.data = env.indexentries
def clear_doc(self, docname):
self.data.pop(docname, None)
def merge_other(self, docnames, other):
for docname in docnames:
self.data[docname] = other.indexentries[docname]
def process_doc(self, docname, doctree):
entries = self.data[docname] = []
for node in doctree.traverse(addnodes.index):
try:
for entry in node['entries']:
split_index_msg(entry[0], entry[1])
except ValueError as exc:
self.env.warn_node(exc, node)
node.parent.remove(node)
else:
for entry in node['entries']:
if len(entry) == 5:
# Since 1.4: new index structure including index_key (5th column)
entries.append(entry)
else:
entries.append(entry + (None,))
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
"""Create the real index from the collected index entries."""
from sphinx.environment import NoUri
new = {}
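        # Shape note: each value in 'new' is [targets, subentries, category_key],
        # where targets is a list of (main, uri) links kept sorted and subentries
        # maps a subword to the same triple (see add_entry below).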
def add_entry(word, subword, main, link=True, dic=new, key=None):
# Force the word to be unicode if it's a ASCII bytestring.
# This will solve problems with unicode normalization later.
# For instance the RFC role will add bytestrings at the moment
word = text_type(word)
entry = dic.get(word)
if not entry:
dic[word] = entry = [[], {}, key]
if subword:
add_entry(subword, '', main, link=link, dic=entry[1], key=key)
elif link:
try:
uri = builder.get_relative_uri('genindex', fn) + '#' + tid
except NoUri:
pass
else:
# maintain links in sorted/deterministic order
bisect.insort(entry[0], (main, uri))
for fn, entries in iteritems(self.data):
# new entry types must be listed in directives/other.py!
for type, value, tid, main, index_key in entries:
try:
if type == 'single':
try:
entry, subentry = split_into(2, 'single', value)
except ValueError:
entry, = split_into(1, 'single', value)
subentry = ''
add_entry(entry, subentry, main, key=index_key)
elif type == 'pair':
first, second = split_into(2, 'pair', value)
add_entry(first, second, main, key=index_key)
add_entry(second, first, main, key=index_key)
elif type == 'triple':
first, second, third = split_into(3, 'triple', value)
add_entry(first, second + ' ' + third, main, key=index_key)
add_entry(second, third + ', ' + first, main, key=index_key)
add_entry(third, first + ' ' + second, main, key=index_key)
elif type == 'see':
first, second = split_into(2, 'see', value)
add_entry(first, _('see %s') % second, None,
link=False, key=index_key)
elif type == 'seealso':
first, second = split_into(2, 'see', value)
add_entry(first, _('see also %s') % second, None,
link=False, key=index_key)
else:
self.env.warn(fn, 'unknown index entry type %r' % type)
except ValueError as err:
self.env.warn(fn, str(err))
# sort the index entries; put all symbols at the front, even those
# following the letters in ASCII, this is where the chr(127) comes from
def keyfunc(entry, lcletters=string.ascii_lowercase + '_'):
key, (void, void, category_key) = entry
if category_key:
# using specified category key to sort
key = category_key
lckey = unicodedata.normalize('NFD', key.lower())
if lckey[0:1] in lcletters:
lckey = chr(127) + lckey
            # ensure a deterministic order *within* letters by also sorting on
# the entry itself
return (lckey, entry[0])
newlist = sorted(new.items(), key=keyfunc)
if group_entries:
# fixup entries: transform
# func() (in module foo)
# func() (in module bar)
# into
# func()
# (in module foo)
# (in module bar)
oldkey = ''
oldsubitems = None
i = 0
while i < len(newlist):
key, (targets, subitems, _key) = newlist[i]
# cannot move if it has subitems; structure gets too complex
if not subitems:
m = _fixre.match(key)
if m:
if oldkey == m.group(1):
# prefixes match: add entry as subitem of the
# previous entry
oldsubitems.setdefault(m.group(2), [[], {}, _key])[0].\
extend(targets)
del newlist[i]
continue
oldkey = m.group(1)
else:
oldkey = key
oldsubitems = subitems
i += 1
# group the entries by letter
def keyfunc2(item, letters=string.ascii_uppercase + '_'):
# hack: mutating the subitems dicts to a list in the keyfunc
k, v = item
v[1] = sorted((si, se) for (si, (se, void, void)) in iteritems(v[1]))
if v[2] is None:
# now calculate the key
letter = unicodedata.normalize('NFD', k[0])[0].upper()
if letter in letters:
return letter
else:
# get all other symbols under one heading
return _('Symbols')
else:
return v[2]
return [(key_, list(group))
for (key_, group) in groupby(newlist, keyfunc2)]
| axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/environment/managers/indexentries.py | Python | apache-2.0 | 7,329 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.losses.python.losses.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
import tensorflow as tf
class AbsoluteDifferenceLossTest(tf.test.TestCase):
def setUp(self):
self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._targets = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.absolute_difference(
self._predictions, self._predictions, weight=None)
def testAllCorrectNoLossWeight(self):
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets)
with self.test_session():
self.assertAlmostEqual(5.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(5.5 * weight, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weight = 2.3
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(5.5 * weight, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 0.0], shape=[2,])
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 0.0], shape=[2, 1])
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weight = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(16.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weight = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(6.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weight = tf.zeros((2, 3))
loss = tf.contrib.losses.absolute_difference(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class SoftmaxCrossEntropyLossTest(tf.test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.softmax_cross_entropy(logits, labels, weight=None)
def testAllCorrect(self):
with self.test_session():
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrong(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = 2.3
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = 2.3
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(
logits, labels, tf.constant(weight))
self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = tf.constant([1.2, 3.4, 5.6], shape=[3])
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
self.assertAlmostEqual(loss.eval(), (1.2 + 3.4 + 5.6) * 10.0 / 3.0, 3)
def testAllWrongAllWeightsMissing(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = tf.constant([0, 0, 0], shape=[3])
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testSomeWeightsMissing(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = tf.constant([1.2, 0, 0], shape=[3])
with self.test_session():
loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
self.assertAlmostEqual(loss.eval(), 12.0, 3)
def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
weight = tf.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
with self.assertRaises(ValueError):
tf.contrib.losses.softmax_cross_entropy(
logits, labels, weight=weight).eval()
def testSoftmaxLabelSmoothing(self):
with self.test_session():
# Softmax Cross Entropy Loss is:
# -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
      # For our activations [100, -100, -100], the log partition function becomes
# \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# so our cross entropy loss is:
# -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
logits = tf.constant([[100.0, -100.0, -100.0]])
labels = tf.constant([[1, 0, 0]])
label_smoothing = 0.1
loss = tf.contrib.losses.softmax_cross_entropy(
logits, labels, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
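# Illustrative NumPy cross-check (not part of the original tests) of the
# label-smoothing expectation in testSoftmaxLabelSmoothing above:
#
#     smoothed = np.array([1., 0., 0.]) * (1 - 0.1) + 0.1 / 3
#     log_softmax = np.array([0., -200., -200.])
#     -np.sum(smoothed * log_softmax)   # == 400 * 0.1 / 3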
class SparseSoftmaxCrossEntropyLossTest(tf.test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0], [1], [2]])
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight=None)
def testAllCorrectInt32Labels(self):
with self.test_session():
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0], [1], [2]], dtype=tf.int32)
loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectInt64Labels(self):
with self.test_session():
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0], [1], [2]], dtype=tf.int64)
loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectNonColumnLabels(self):
with self.test_session():
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([0, 1, 2])
loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrongInt32Labels(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[2], [0], [1]], dtype=tf.int32)
with self.test_session():
loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongInt64Labels(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[2], [0], [1]], dtype=tf.int64)
with self.test_session():
loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongNonColumnLabels(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([2, 0, 1])
with self.test_session():
loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[2], [0], [1]])
weight = 2.3
with self.test_session():
loss = tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight)
self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[2], [0], [1]])
weight = 2.3
with self.test_session():
loss = tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, tf.constant(weight))
self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[2], [0], [1]])
weight = tf.constant([1.2, 3.4, 5.6], shape=[3])
with self.test_session():
loss = tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight)
self.assertAlmostEqual(loss.eval(), (1.2 + 3.4 + 5.6) * 10.0 / 3.0, 3)
def testNonZeroLossWithColumnWeights(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[2], [0], [1]])
weight = tf.constant([[1.2], [3.4], [5.6]])
with self.test_session():
loss = tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight)
self.assertAlmostEqual(loss.eval(), (1.2 + 3.4 + 5.6) * 10.0 / 3.0, 3)
def testAllWrongAllWeightsMissing(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[2], [0], [1]])
weight = tf.constant([0, 0, 0], shape=[3])
with self.test_session():
loss = tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testSomeWeightsMissing(self):
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[2], [0], [1]])
weight = tf.constant([1.2, 0, 0], shape=[3])
with self.test_session():
loss = tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight)
self.assertAlmostEqual(loss.eval(), 12.0, 3)
def testMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[0], [1], [2]])
weight = tf.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
with self.assertRaises(ValueError):
tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight=weight).eval()
def testInconsistentWeightSizeRaisesException(self):
"""The weight tensor has incorrect number of elements."""
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[0], [1], [2]])
weight = tf.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(ValueError):
tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight=weight).eval()
def testInconsistentLabelSizeRaisesException(self):
"""The label tensor has incorrect number of elements."""
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[0], [1], [2], [3]])
weight = tf.constant([1.2, 3.4, 5.6])
with self.assertRaises(ValueError):
tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight=weight).eval()
def testInconsistentWeightShapeRaisesException(self):
"""The weight tensor has incorrect shape."""
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = tf.constant([[0], [1], [2], [3]])
weight = tf.constant([[1.2, 3.4], [5.6, 7.8]])
with self.assertRaises(ValueError):
tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight=weight).eval()
def testInconsistentLabelShapeRaisesException(self):
"""The label tensor has incorrect shape."""
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = tf.constant([[0, 1], [2, 3]])
weight = tf.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(tf.errors.InvalidArgumentError):
tf.contrib.losses.sparse_softmax_cross_entropy(
logits, labels, weight=weight).eval()
class SigmoidCrossEntropyLossTest(tf.test.TestCase):
def testAllCorrectSigmoid(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
logits = tf.placeholder(tf.float32, shape=(None, 1))
labels = tf.placeholder(tf.float32, shape=(None, 1))
weight = tf.ones_like(logits, dtype=tf.float32)
loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels, weight)
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={
logits: np.ones((32, 1)),
labels: np.ones((32, 1)),
})
self.assertAlmostEqual(loss, 0.313, 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
logits = tf.placeholder(tf.float32, shape=(None, 2))
labels = tf.placeholder(tf.float32, shape=(None, 2))
weight = tf.ones_like(logits, dtype=tf.float32)
loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels, weight)
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={
logits: np.ones((32, 2)),
labels: np.ones((32, 2)),
})
self.assertAlmostEqual(loss, 0.313, 3)
def testAllWrongSigmoid(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)
def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weight = tf.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
loss = tf.contrib.losses.sigmoid_cross_entropy(
logits, labels, weight=weight)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 1700.0 / 7.0, 3)
def testMultiCorrectSigmoid(self):
logits = tf.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0],
[-100.0, 100.0, 100.0]])
labels = tf.constant([[1, 0, 1],
[1, 1, 0],
[0, 1, 1]])
loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
with self.test_session():
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testSigmoidLabelSmoothingCorrect(self):
with self.test_session():
logits = tf.constant([[100.0, -100.0, -100.0]])
labels = tf.constant([[1, 0, 1]])
# Sigmoid cross entropy loss is:
# max(x,0) - x*z + log(1 + exp(-abs(x)))
# The new labels are:
# z' = z * (1 - L) + 0.5 L
# 1 -> 1 - 0.5 L
# 0 -> 0.5 L
# here we expect:
# 1/3 * (100 - 100 * (1 - 0.5 L) + 0
# + 0 + 100 * (0.5 L) + 0
# + 0 + 100 * (1 - 0.5 L) + 0)
# = 1/3 * (100 + 50 L)
label_smoothing = 0.1
loss = tf.contrib.losses.sigmoid_cross_entropy(
logits, labels, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
with self.test_session():
label_smoothing = 0.1
sigmoid_logits = tf.constant([[100.0, -100.0, -100.0]])
sigmoid_labels = tf.constant([[1, 0, 1]])
sigmoid_loss = tf.contrib.losses.sigmoid_cross_entropy(
sigmoid_logits, sigmoid_labels, label_smoothing=label_smoothing)
softmax_logits = tf.constant([[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
softmax_labels = tf.constant([[0, 1], [1, 0], [0, 1]])
softmax_loss = tf.contrib.losses.softmax_cross_entropy(
softmax_logits, softmax_labels, label_smoothing=label_smoothing)
self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
class LogLossTest(tf.test.TestCase):
def setUp(self):
predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
targets = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
self._np_predictions = predictions
self._np_targets = targets
epsilon = 1e-7
self._expected_losses = np.multiply(
targets, np.log(predictions + epsilon)) + np.multiply(
1 - targets, np.log(1 - predictions + epsilon))
self._predictions = tf.constant(predictions)
self._targets = tf.constant(targets)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.log_loss(self._targets, self._targets, weight=None)
def testAllCorrectNoLossWeight(self):
loss = tf.contrib.losses.log_loss(self._targets, self._targets)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testAllCorrectNoLossWeightWithPlaceholder(self):
tf_predictions = tf.placeholder(tf.float32, shape=self._np_targets.shape)
loss = tf.contrib.losses.log_loss(tf_predictions, self._targets)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(feed_dict={
tf_predictions: self._np_targets}), 3)
def testNonZeroLoss(self):
loss = tf.contrib.losses.log_loss(self._predictions, self._targets)
with self.test_session():
self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weight = 2.3
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
tf_predictions = tf.placeholder(tf.float32,
shape=self._np_predictions.shape)
weight = 2.3
loss = tf.contrib.losses.log_loss(
tf_predictions, self._targets, tf.constant(weight))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
tf_predictions = tf.placeholder(tf.float32, shape=[None, None])
weight = 2.3
loss = tf.contrib.losses.log_loss(
tf_predictions, self._targets, tf.constant(weight))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 3.4], shape=[2])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
weight = tf.constant([1.2, 0], shape=[2])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3)))
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
weight = tf.constant([1.2, 0], shape=[2, 1])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3)))
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
loss.eval(), 3)
def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
weight = tf.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.log_loss(self._predictions, self._targets, weight)
def testNonZeroLossWithMeasurementSpecificWeights(self):
weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weight)
loss = tf.contrib.losses.log_loss(
self._predictions,
self._targets,
weight=tf.constant(weight, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)
def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weight)
tf_predictions = tf.placeholder(tf.float32, shape=[2, 3])
loss = tf.contrib.losses.log_loss(
tf_predictions,
self._targets,
weight=tf.constant(weight, shape=(2, 3)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weight = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weight)
loss = tf.contrib.losses.log_loss(
self._predictions,
self._targets,
weight=tf.constant(weight, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
weight = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weight)
tf_predictions = tf.placeholder(tf.float32, shape=[2, 3])
tf_weight = tf.constant(weight, shape=(2, 3))
loss = tf.contrib.losses.log_loss(tf_predictions, self._targets, tf_weight)
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)
def testLossWithSampleSpecificWeightsAllZero(self):
tf_weight = tf.zeros(shape=(2, 3))
loss = tf.contrib.losses.log_loss(
self._predictions, self._targets, tf_weight)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class HingeLossTest(tf.test.TestCase):
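  # These tests use {0, 1} targets; hinge_loss maps them to {-1, +1} and the
  # per-element loss is max(0, 1 - y * logit). Illustrative check: a logit of
  # -0.7 with target 0 (y = -1) gives max(0, 1 - 0.7) = 0.3, as asserted in
  # testSomeInsideMargin below.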
def testIncompatibleShapes(self):
with self.test_session():
logits = tf.constant([[-1.0], [2.1]])
target = tf.constant([0.0, 1.0])
with self.assertRaises(ValueError):
_ = tf.contrib.losses.hinge_loss(logits, target).eval()
def testAllOutsideMargin(self):
with self.test_session():
logits = tf.constant([1.2, -1.4, -1.0, 2.1])
target = tf.constant([1.0, 0.0, 0.0, 1.0])
loss = tf.contrib.losses.hinge_loss(logits, target)
self.assertAllClose(loss.eval(), [0.0, 0.0, 0.0, 0.0], atol=1e-3)
def testSomeInsideMargin(self):
with self.test_session():
logits = tf.constant([[-0.7], [-1.4], [1.4], [0.6]])
target = tf.constant([[0.0], [0.0], [1.0], [1.0]])
loss = tf.contrib.losses.hinge_loss(logits, target)
# Examples 1 and 4 are on the correct side of the hyperplane but within
# the margin so they incur some (small) loss.
self.assertAllClose(loss.eval(), [[0.3], [0.0], [0.0], [0.4]], atol=1e-3)
def testSomeMisclassified(self):
with self.test_session():
logits = tf.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
target = tf.constant([[[1.0], [0.0], [0.0], [1.0]]])
loss = tf.contrib.losses.hinge_loss(logits, target)
# Examples 2 and 4 are on the wrong side of the hyperplane so they incur
# some (fairly large) loss.
self.assertAllClose(
loss.eval(), [[[0.0], [1.4], [0.0], [2.1]]], atol=1e-3)
class MeanSquaredErrorTest(tf.test.TestCase):
def setUp(self):
self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._targets = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testDeprecatedName(self):
loss = tf.contrib.losses.sum_of_squares(
self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.mean_squared_error(
self._predictions, self._predictions, weight=None)
def testAllCorrectNoLossWeight(self):
loss = tf.contrib.losses.mean_squared_error(
self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = tf.contrib.losses.mean_squared_error(
self._predictions, self._targets)
with self.test_session():
self.assertAlmostEqual(49.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
loss = tf.contrib.losses.mean_squared_error(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(49.5 * weight, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weight = 2.3
loss = tf.contrib.losses.mean_squared_error(
self._predictions, self._targets, tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(49.5 * weight, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 3.4], shape=[2,])
loss = tf.contrib.losses.mean_squared_error(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weight = tf.constant([1.2, 3.4], shape=[2, 1])
loss = tf.contrib.losses.mean_squared_error(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weight = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = tf.contrib.losses.mean_squared_error(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(587 / 5.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weight = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = tf.contrib.losses.mean_squared_error(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(18.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weight = tf.zeros((2, 3))
loss = tf.contrib.losses.mean_squared_error(
self._predictions, self._targets, weight)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class MeanPairwiseSquaresErrorTest(tf.test.TestCase):
def setUp(self):
self._predictions = np.array([[4, 8, 12],
[8, 1, 3]])
self._targets = np.array([[1, 9, 2],
[-5, -5, 7]])
batch_size, dims = self._targets.shape
# Compute the expected loss 'manually'.
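    # For each sample, this is the mean over all ordered index pairs (i, j),
    # including i == j, of ((p_i - p_j) - (t_i - t_j)) ** 2, i.e. total / dims ** 2.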
total = np.zeros((batch_size, 1))
for b in range(batch_size):
for i in range(dims):
for j in range(dims):
x = self._predictions[b, i].item() - self._predictions[b, j].item()
y = self._targets[b, i].item() - self._targets[b, j].item()
tmp = (x-y) * (x-y)
total[b] += tmp
self._expected_losses = np.divide(total, 9.0)
def testDeprecatedName(self):
loss = tf.contrib.losses.sum_of_pairwise_squares(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets))
with self.test_session():
self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf.constant(self._targets),
targets=tf.constant(self._targets),
weight=None)
def testAllCorrectNoLossWeight(self):
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf.constant(self._targets),
targets=tf.constant(self._targets))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets))
with self.test_session():
self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)
def testGradientWithZeroWeight(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
inputs = tf.ones((2, 3))
weights = tf.get_variable('weights',
shape=[3, 4],
initializer=tf.truncated_normal_initializer())
predictions = tf.matmul(inputs, weights)
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions,
predictions,
0)
gradients_to_variables = optimizer.compute_gradients(loss)
init_op = tf.initialize_all_variables()
with self.test_session() as sess:
sess.run(init_op)
for grad, _ in gradients_to_variables:
np_grad = sess.run(grad)
self.assertFalse(np.isnan(np_grad).any())
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=weight)
with self.test_session():
self.assertAlmostEqual(weight * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weight = 2.3
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(weight * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarZeroWeight(self):
weight = 0
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
weight = 2.3
tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape)
tf_targets = tf.placeholder(tf.float32, shape=self._targets.shape)
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf_predictions,
targets=tf_targets,
weight=tf.constant(weight))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={
tf_predictions: self._predictions,
tf_targets: self._targets,
})
self.assertAlmostEqual(weight * np.sum(self._expected_losses), loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weight = np.asarray([2.0, 1.0]).reshape((2, 1))
expected_losses = np.multiply(weight, self._expected_losses)
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight, shape=[2]))
with self.test_session():
self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)
def testZeroLossWithOneDimBatchZeroWeights(self):
weight = np.asarray([0.0, 0.0]).reshape((2, 1))
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
weight = np.asarray([1.2, 3.4]).reshape((2, 1))
expected_losses = np.multiply(weight, self._expected_losses)
tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape)
tf_targets = tf.placeholder(tf.int32, shape=self._targets.shape)
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf_predictions,
targets=tf_targets,
weight=tf.constant(weight, shape=[2]))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={
tf_predictions: self._predictions,
tf_targets: self._targets,
})
self.assertAlmostEqual(np.sum(expected_losses), loss, 3)
def testLossWithAllZeroBatchSpecificWeights(self):
weight = np.zeros((2, 1))
loss = tf.contrib.losses.mean_pairwise_squared_error(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
weight=tf.constant(weight, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class CosineDistanceLossTest(tf.test.TestCase):
def setUp(self):
self._predictions = np.asarray([[1, 0, 0], # Batch 1
[0, 0, -1],
[1, 0, 0], # Batch 2
[1, 0, 0],
[0, 0, -1], # Batch 3
[1, 0, 0]]).reshape((3, 2, 3))
self._targets = np.asarray([[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0]]).reshape((3, 2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._targets),
targets=tf.constant(self._targets),
dim=2,
weight=None)
def testAllCorrectNoWeights(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._targets),
targets=tf.constant(self._targets),
dim=2)
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 5)
def testPartiallyCorrectWithIntegerValues(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2)
with self.test_session():
self.assertAlmostEqual(1, loss.eval(), 5)
def testPartiallyCorrectFloatingPointValues(self):
predictions = np.matrix((
'0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
targets = np.matrix((
'0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
tf_preds = tf.constant(predictions, shape=(3, 1, 3), dtype=tf.float32)
tf_targets = tf.constant(targets, shape=(3, 1, 3), dtype=tf.float32)
loss = tf.contrib.losses.cosine_distance(tf_preds, tf_targets, dim=2)
with self.test_session():
self.assertAlmostEqual(1.0, loss.eval(), 5)
def testSampleSpecificWeights(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2,
weight=tf.constant([1, 0, 0]))
with self.test_session():
self.assertEqual(1.0, loss.eval())
def testMeasurementSpecificWeights(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2,
weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session():
self.assertEqual(3.0 / 4.0, loss.eval())
def testValueErrorThrownWithShapelessPlaceholder(self):
tf_predictions = tf.placeholder(tf.float32)
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.losses.cosine_distance(
predictions=tf_predictions,
targets=tf.constant(self._targets),
dim=2,
weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
tf_predictions = tf.placeholder(tf.float32, shape=self._targets.shape)
loss = tf.contrib.losses.cosine_distance(
predictions=tf_predictions,
targets=tf.constant(self._targets),
dim=2,
weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
self.assertEqual(3.0 / 4.0, loss)
def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2,
weight=tf.zeros((3,)))
with self.test_session():
self.assertEqual(0, loss.eval())
def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
loss = tf.contrib.losses.cosine_distance(
predictions=tf.constant(self._predictions),
targets=tf.constant(self._targets),
dim=2,
weight=tf.zeros((3, 2)))
with self.test_session():
self.assertEqual(0, loss.eval())
class ComputeWeightedLossTest(tf.test.TestCase):
def testHingeLoss(self):
logits = tf.constant([1.2, 0.4, -1.0, -1.1])
target = tf.constant([1.0, 0.0, 0.0, 1.0])
losses = tf.contrib.losses.hinge_loss(logits, target)
self.assertFalse(tf.contrib.losses.get_losses())
loss = tf.contrib.losses.compute_weighted_loss(losses)
self.assertTrue(tf.contrib.losses.get_losses())
with self.test_session():
self.assertAllClose(losses.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
self.assertAllClose(loss.eval(), 3.5/4.0, atol=1e-3)
class AddLossTest(tf.test.TestCase):
def testAddExternalLoss(self):
logits = tf.constant([1.2, 0.4, -1.0, -1.1])
target = tf.constant([1.0, 0.0, 0.0, 1.0])
losses = tf.contrib.losses.hinge_loss(logits, target)
self.assertFalse(tf.contrib.losses.get_losses())
tf.contrib.losses.add_loss(tf.reduce_mean(losses))
self.assertTrue(tf.contrib.losses.get_losses())
total_loss = tf.contrib.losses.get_total_loss()
with self.test_session():
self.assertAllClose(losses.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
self.assertAllClose(total_loss.eval(), 3.5/4.0, atol=1e-3)
def testNoneLossCollection(self):
logits = tf.constant([1.2, 0.4, -1.0, -1.1])
target = tf.constant([1.0, 0.0, 0.0, 1.0])
losses = tf.contrib.losses.hinge_loss(logits, target)
self.assertFalse(tf.contrib.losses.get_losses())
tf.contrib.losses.add_loss(tf.reduce_mean(losses), loss_collection=None)
self.assertFalse(tf.contrib.losses.get_losses())
with self.test_session():
self.assertAllClose(losses.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
def testNoCollectLosses(self):
logits = tf.constant([1.2, 0.4, -1.0, -1.1])
target = tf.constant([1.0, 0.0, 0.0, 1.0])
self.assertFalse(tf.contrib.losses.get_losses())
with tf.contrib.framework.arg_scope([tf.contrib.losses.add_loss],
loss_collection=None):
tf.contrib.losses.absolute_difference(logits, target)
tf.contrib.losses.log_loss(logits, target)
tf.contrib.losses.mean_squared_error(logits, target)
tf.contrib.losses.sigmoid_cross_entropy(logits, target)
tf.contrib.losses.softmax_cross_entropy(logits, target)
self.assertFalse(tf.contrib.losses.get_losses())
if __name__ == '__main__':
tf.test.main()
| neilhan/tensorflow | tensorflow/contrib/losses/python/losses/loss_ops_test.py | Python | apache-2.0 | 48,390 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2021-11-10 19:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0238_abstractprovider_allow_updates'),
]
operations = [
migrations.AddIndex(
model_name='schemaresponse',
index=models.Index(fields=['object_id', 'content_type'], name='osf_schemar_object__8cc95e_idx'),
),
]
| Johnetordoff/osf.io | osf/migrations/0239_auto_20211110_1921.py | Python | apache-2.0 | 497 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import django
from django.conf import settings
from django.utils import html
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from oslo_utils import strutils
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images \
import utils as image_utils
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
def create_upload_form_attributes(prefix, input_type, name):
"""Creates attribute dicts for the switchable upload form
:type prefix: str
:param prefix: prefix (environment, template) of field
:type input_type: str
:param input_type: field type (file, raw, url)
:type name: str
:param name: translated text label to display to user
:rtype: dict
:return: an attribute set to pass to form build
"""
attributes = {'class': 'switched', 'data-switch-on': prefix + 'source'}
attributes['data-' + prefix + 'source-' + input_type] = name
return attributes
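# Example (illustrative): the helper above produces the attribute dicts that
# drive the switchable widgets in the forms below, e.g.
#
#     create_upload_form_attributes('template', 'url', _('Template URL'))
#
# returns a dict along the lines of
#
#     {'class': 'switched',
#      'data-switch-on': 'templatesource',
#      'data-templatesource-url': 'Template URL'}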
class TemplateForm(forms.SelfHandlingForm):
class Meta(object):
name = _('Select Template')
help_text = _('Select a template to launch a stack.')
# TODO(jomara) - update URL choice for template & environment files
# w/ client side download when applicable
base_choices = [('file', _('File')),
('raw', _('Direct Input'))]
url_choice = [('url', _('URL'))]
attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
template_source = forms.ChoiceField(label=_('Template Source'),
choices=base_choices + url_choice,
widget=forms.Select(attrs=attributes))
attributes = create_upload_form_attributes(
'template',
'file',
_('Template File'))
template_upload = forms.FileField(
label=_('Template File'),
help_text=_('A local template to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'url',
_('Template URL'))
template_url = forms.URLField(
label=_('Template URL'),
help_text=_('An external (HTTP) URL to load the template from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'raw',
_('Template Data'))
template_data = forms.CharField(
label=_('Template Data'),
help_text=_('The raw contents of the template.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
attributes = {'data-slug': 'envsource', 'class': 'switchable'}
environment_source = forms.ChoiceField(
label=_('Environment Source'),
choices=base_choices,
widget=forms.Select(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'file',
_('Environment File'))
environment_upload = forms.FileField(
label=_('Environment File'),
help_text=_('A local environment to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'raw',
_('Environment Data'))
environment_data = forms.CharField(
label=_('Environment Data'),
help_text=_('The raw contents of the environment file.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
if django.VERSION >= (1, 9):
        # Note(Itxaka): On django>=1.9 CharField has a strip option that
        # we need to set to False so as not to hit
# https://bugs.launchpad.net/python-heatclient/+bug/1546166
environment_data.strip = False
template_data.strip = False
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(TemplateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned = super(TemplateForm, self).clean()
files = self.request.FILES
self.clean_uploaded_files('template', _('template'), cleaned, files)
self.clean_uploaded_files('environment', _('environment'), cleaned,
files)
# Validate the template and get back the params.
kwargs = {}
if cleaned['environment_data']:
kwargs['environment'] = cleaned['environment_data']
try:
files, tpl =\
api.heat.get_template_files(cleaned.get('template_data'),
cleaned.get('template_url'))
kwargs['files'] = files
kwargs['template'] = tpl
validated = api.heat.template_validate(self.request, **kwargs)
cleaned['template_validate'] = validated
cleaned['template_validate']['files'] = files
cleaned['template_validate']['template'] = tpl
except Exception as e:
raise forms.ValidationError(six.text_type(e))
return cleaned
def clean_uploaded_files(self, prefix, field_label, cleaned, files):
"""Cleans Template & Environment data from form upload.
Does some of the crunchy bits for processing uploads vs raw
data depending on what the user specified. Identical process
for environment data & template data.
:type prefix: str
:param prefix: prefix (environment, template) of field
:type field_label: str
:param field_label: translated prefix str for messages
        :type cleaned: dict
        :param cleaned: existing cleaned fields from form
:rtype: dict
:return: cleaned dict including environment & template data
"""
upload_str = prefix + "_upload"
data_str = prefix + "_data"
url = cleaned.get(prefix + '_url')
data = cleaned.get(prefix + '_data')
has_upload = upload_str in files
# Uploaded file handler
if has_upload and not url:
log_template_name = files[upload_str].name
LOG.info('got upload %s' % log_template_name)
tpl = files[upload_str].read()
if tpl.startswith('{'):
try:
json.loads(tpl)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': six.text_type(e)}
raise forms.ValidationError(msg)
cleaned[data_str] = tpl
# URL handler
elif url and (has_upload or data):
msg = _('Please specify a %s using only one source method.')
msg = msg % field_label
raise forms.ValidationError(msg)
elif prefix == 'template':
# Check for raw template input - blank environment allowed
if not url and not data:
msg = _('You must specify a template via one of the '
'available sources.')
raise forms.ValidationError(msg)
def create_kwargs(self, data):
kwargs = {'parameters': data['template_validate'],
'environment_data': data['environment_data']}
if data.get('stack_id'):
kwargs['stack_id'] = data['stack_id']
return kwargs
def handle(self, request, data):
kwargs = self.create_kwargs(data)
# NOTE (gabriel): This is a bit of a hack, essentially rewriting this
# request so that we can chain it as an input to the next view...
# but hey, it totally works.
request.method = 'GET'
return self.next_view.as_view()(request, **kwargs)
class ChangeTemplateForm(TemplateForm):
class Meta(object):
name = _('Edit Template')
help_text = _('Select a new template to re-launch a stack.')
stack_id = forms.CharField(label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly':
'readonly'}))
class PreviewTemplateForm(TemplateForm):
class Meta(object):
name = _('Preview Template')
help_text = _('Select a new template to preview a stack.')
class CreateStackForm(forms.SelfHandlingForm):
param_prefix = '__param_'
class Meta(object):
name = _('Create Stack')
environment_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
if django.VERSION >= (1, 9):
        # Note(Itxaka): On django>=1.9 CharField has a strip option that
        # we need to set to False so as not to hit
# https://bugs.launchpad.net/python-heatclient/+bug/1546166
environment_data.strip = False
parameters = forms.CharField(
widget=forms.widgets.HiddenInput)
stack_name = forms.RegexField(
max_length=255,
label=_('Stack Name'),
help_text=_('Name of the stack to create.'),
regex=r"^[a-zA-Z][a-zA-Z0-9_.-]*$",
error_messages={'invalid':
_('Name must start with a letter and may '
'only contain letters, numbers, underscores, '
'periods and hyphens.')})
timeout_mins = forms.IntegerField(
initial=60,
label=_('Creation Timeout (minutes)'),
help_text=_('Stack creation timeout in minutes.'))
enable_rollback = forms.BooleanField(
label=_('Rollback On Failure'),
help_text=_('Enable rollback on create/update failure.'),
required=False)
def __init__(self, *args, **kwargs):
parameters = kwargs.pop('parameters')
# special case: load template data from API, not passed in params
if kwargs.get('validate_me'):
parameters = kwargs.pop('validate_me')
super(CreateStackForm, self).__init__(*args, **kwargs)
if self._stack_password_enabled():
self.fields['password'] = forms.CharField(
label=_('Password for user "%s"') % self.request.user.username,
help_text=_('This is required for operations to be performed '
'throughout the lifecycle of the stack'),
widget=forms.PasswordInput())
self._build_parameter_fields(parameters)
def _stack_password_enabled(self):
stack_settings = getattr(settings, 'OPENSTACK_HEAT_STACK', {})
return stack_settings.get('enable_user_pass', True)
def _build_parameter_fields(self, template_validate):
self.help_text = template_validate['Description']
params = template_validate.get('Parameters', {})
if template_validate.get('ParameterGroups'):
params_in_order = []
for group in template_validate['ParameterGroups']:
for param in group.get('parameters', []):
if param in params:
params_in_order.append((param, params[param]))
else:
# no parameter groups, simply sorted to make the order fixed
params_in_order = sorted(params.items())
for param_key, param in params_in_order:
field = None
field_key = self.param_prefix + param_key
field_args = {
'initial': param.get('Default', None),
'label': param.get('Label', param_key),
'help_text': html.escape(param.get('Description', '')),
'required': param.get('Default', None) is None
}
param_type = param.get('Type', None)
hidden = strutils.bool_from_string(param.get('NoEcho', 'false'))
if 'CustomConstraint' in param:
choices = self._populate_custom_choices(
param['CustomConstraint'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif 'AllowedValues' in param:
choices = map(lambda x: (x, x), param['AllowedValues'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif param_type == 'Json' and 'Default' in param:
field_args['initial'] = json.dumps(param['Default'])
field = forms.CharField(**field_args)
elif param_type in ('CommaDelimitedList', 'String', 'Json'):
if 'MinLength' in param:
field_args['min_length'] = int(param['MinLength'])
field_args['required'] = field_args['min_length'] > 0
if 'MaxLength' in param:
field_args['max_length'] = int(param['MaxLength'])
if hidden:
field_args['widget'] = forms.PasswordInput(
render_value=True)
field = forms.CharField(**field_args)
elif param_type == 'Number':
if 'MinValue' in param:
field_args['min_value'] = int(param['MinValue'])
if 'MaxValue' in param:
field_args['max_value'] = int(param['MaxValue'])
field = forms.IntegerField(**field_args)
# heat-api currently returns the boolean type in lowercase
# (see https://bugs.launchpad.net/heat/+bug/1361448)
# so for better compatibility both are checked here
elif param_type in ('Boolean', 'boolean'):
field_args['required'] = False
field = forms.BooleanField(**field_args)
if field:
self.fields[field_key] = field
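    # Illustrative mapping (the parameter name is hypothetical): a validated
    # Heat parameter such as
    #
    #     'instance_count': {'Type': 'Number', 'Default': 1, 'MinValue': '1',
    #                        'MaxValue': '10', 'Description': 'How many'}
    #
    # becomes a forms.IntegerField stored under '__param_instance_count' with
    # initial=1, min_value=1, max_value=10 and required=False, per the
    # branches in _build_parameter_fields above.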
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'files': json.loads(data.get('parameters')).get('files'),
'template': json.loads(data.get('parameters')).get('template')
}
if data.get('password'):
fields['password'] = data.get('password')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
api.heat.stack_create(self.request, **fields)
messages.info(request, _("Stack creation started."))
return True
except Exception:
exceptions.handle(request)
def _populate_custom_choices(self, custom_type):
if custom_type == 'neutron.network':
return instance_utils.network_field_data(self.request, True)
if custom_type == 'nova.keypair':
return instance_utils.keypair_field_data(self.request, True)
if custom_type == 'glance.image':
return image_utils.image_field_data(self.request, True)
if custom_type == 'nova.flavor':
return instance_utils.flavor_field_data(self.request, True)
return []
class EditStackForm(CreateStackForm):
class Meta(object):
name = _('Update Stack Parameters')
stack_id = forms.CharField(
label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(
label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
stack_id = data.get('stack_id')
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'files': json.loads(data.get('parameters')).get('files'),
'template': json.loads(data.get('parameters')).get('template')
}
if data.get('password'):
fields['password'] = data.get('password')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
api.heat.stack_update(self.request, stack_id=stack_id, **fields)
messages.info(request, _("Stack update started."))
return True
except Exception:
exceptions.handle(request)
class PreviewStackForm(CreateStackForm):
class Meta(object):
name = _('Preview Stack Parameters')
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(CreateStackForm, self).__init__(*args, **kwargs)
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'files': json.loads(data.get('parameters')).get('files'),
'template': json.loads(data.get('parameters')).get('template')
}
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
stack_preview = api.heat.stack_preview(self.request, **fields)
request.method = 'GET'
return self.next_view.as_view()(request,
stack_preview=stack_preview)
except Exception:
exceptions.handle(request)
| bigswitch/horizon | openstack_dashboard/dashboards/project/stacks/forms.py | Python | apache-2.0 | 18,998 |
"""
Statistics for astronomy
"""
import numpy as np
from scipy.stats.distributions import rv_continuous
def bivariate_normal(mu=[0, 0], sigma_1=1, sigma_2=1, alpha=0,
size=None, return_cov=False):
"""Sample points from a 2D normal distribution
Parameters
----------
mu : array-like (length 2)
The mean of the distribution
sigma_1 : float
The unrotated x-axis width
sigma_2 : float
The unrotated y-axis width
alpha : float
The rotation counter-clockwise about the origin
size : tuple of ints, optional
Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are
generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because
each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.
If no shape is specified, a single (`N`-D) sample is returned.
return_cov : boolean, optional
If True, return the computed covariance matrix.
Returns
-------
out : ndarray
The drawn samples, of shape *size*, if that was provided. If not,
the shape is ``(N,)``.
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
cov : ndarray
The 2x2 covariance matrix. Returned only if return_cov == True.
Notes
-----
This function works by computing a covariance matrix from the inputs,
and calling ``np.random.multivariate_normal()``. If the covariance
matrix is available, this function can be called directly.
"""
# compute covariance matrix
sigma_xx = ((sigma_1 * np.cos(alpha)) ** 2
+ (sigma_2 * np.sin(alpha)) ** 2)
sigma_yy = ((sigma_1 * np.sin(alpha)) ** 2
+ (sigma_2 * np.cos(alpha)) ** 2)
sigma_xy = (sigma_1 ** 2 - sigma_2 ** 2) * np.sin(alpha) * np.cos(alpha)
cov = np.array([[sigma_xx, sigma_xy],
[sigma_xy, sigma_yy]])
# draw points from the distribution
x = np.random.multivariate_normal(mu, cov, size)
if return_cov:
return x, cov
else:
return x
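# Illustrative usage: draw correlated samples and compare their empirical
# covariance with the matrix used to generate them.
#
#     x, cov = bivariate_normal(mu=[0, 0], sigma_1=2.0, sigma_2=0.5,
#                               alpha=np.pi / 4, size=10000, return_cov=True)
#     # x.shape == (10000, 2); np.cov(x, rowvar=False) approximates cov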
#----------------------------------------------------------------------
# Define some new distributions based on rv_continuous
class trunc_exp_gen(rv_continuous):
"""A truncated positive exponential continuous random variable.
The probability distribution is::
p(x) ~ exp(k * x) between a and b
= 0 otherwise
The arguments are (a, b, k)
%(before_notes)s
%(example)s
"""
def _argcheck(self, a, b, k):
self._const = k / (np.exp(k * b) - np.exp(k * a))
return (a != b) and not np.isinf(k)
def _pdf(self, x, a, b, k):
pdf = self._const * np.exp(k * x)
pdf[(x < a) | (x > b)] = 0
return pdf
    def _rvs(self, a, b, k):
        # Invert the CDF, F(x) = const / k * (exp(k * x) - exp(k * a)),
        # so that draws fall in [a, b] for any lower bound a.
        y = np.random.random(self._size)
        return (1. / k) * np.log(np.exp(k * a) + y * k / self._const)
trunc_exp = trunc_exp_gen(name="trunc_exp", shapes='a, b, k')
class linear_gen(rv_continuous):
"""A truncated positive exponential continuous random variable.
The probability distribution is::
p(x) ~ c * x + d between a and b
= 0 otherwise
The arguments are (a, b, c). d is set by the normalization
%(before_notes)s
%(example)s
"""
def _argcheck(self, a, b, c):
return (a != b) and not np.isinf(c)
def _pdf(self, x, a, b, c):
d = 1. / (b - a) - 0.5 * c * (b + a)
pdf = c * x + d
pdf[(x < a) | (x > b)] = 0
return pdf
def _rvs(self, a, b, c):
mu = 0.5 * (a + b)
W = (b - a)
x0 = 1. / c / W - mu
r = np.random.random(self._size)
return -x0 + np.sqrt(2. * r / c + a * a
+ 2. * a * x0 + x0 * x0)
linear = linear_gen(name="linear", shapes='a, b, c')
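# Illustrative sanity check (assumes the scipy.stats interface these
# subclasses were written against):
#
#     x = np.linspace(0., 2., 10001)
#     # with (a, b, c) = (0, 2, 0.3) the pdf above is 0.3 * x + 0.2,
#     # which integrates to one over [a, b]:
#     np.trapz(linear.pdf(x, 0., 2., 0.3), x)   # ~= 1.0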
| nhuntwalker/astroML | astroML/stats/random.py | Python | bsd-2-clause | 3,890 |
import copy
import datetime
import decimal
import math
import warnings
from itertools import tee
from django.db import connection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils.ipv6 import clean_ipv6_address
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
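# For example (illustrative), a ForeignKey declared as
#
#     author = models.ForeignKey(Author, db_column='author_fk')
#
# has name='author', attname='author_id', db_column='author_fk' and
# column='author_fk'; without db_column, column would fall back to the
# attname 'author_id'.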
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _(u'Value %r is not a valid choice.'),
'null': _(u'This field cannot be null.'),
'blank': _(u'This field cannot be blank.'),
'unique': _(u'%(model_name)s with this %(field_label)s '
u'already exists.'),
}
    # Generic field type description, usually overridden by subclasses
def _description(self):
return _(u'Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (self.empty_strings_allowed and
connection.features.interprets_empty_strings_as_nulls):
self.null = True
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = (unique_for_date,
unique_for_month)
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __cmp__(self, other):
# This is needed because bisect does not take a comparison function.
return cmp(self.creation_counter, other.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
msg = self.error_messages['invalid_choice'] % value
raise exceptions.ValidationError(msg)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise exceptions.ValidationError(self.error_messages['blank'])
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
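# A minimal sketch of such a direct db_type() override (HandField is a
# hypothetical example, not part of Django):
#
#     class HandField(Field):
#         def db_type(self, connection):
#             return 'char(104)'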
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return (connection.creation.data_types[self.get_internal_type()]
% data)
except KeyError:
return None
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save`` and
``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'regex', 'iregex', 'month', 'day', 'week_day', 'search',
'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
'endswith', 'iendswith', 'isnull'
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day',
'search'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if self.get_internal_type() == 'DateField':
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return connection.ops.year_lookup_bounds(value)
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_unicode(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_unicode(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_unicode(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
return first_choice + lst
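# Illustration of get_choices() above (hypothetical field, example values
# only): for a field declared with choices=[('S', 'Small'), ('L', 'Large')],
# get_choices() returns [('', '---------'), ('S', 'Small'), ('L', 'Large')].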
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_unicode(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if hasattr(self._choices, 'next'):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
class AutoField(Field):
description = _("Automatic key")
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, \
"%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return value
return connection.ops.value_to_db_auto(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
# if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(
include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _(u'Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid date format. It must be "
u"in YYYY-MM-DD format."),
'invalid_date': _(u"'%s' value has the correct format (YYYY-MM-DD) "
u"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
value = smart_str(value)
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
u"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn(u"DateTimeField received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
value = smart_str(value)
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_datetime'] % value
raise exceptions.ValidationError(msg)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn(u"DateTimeField received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def _format(self, value):
if isinstance(value, basestring) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("E-mail address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return smart_unicode(value)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
Field.__init__(self, verbose_name, name, *args, **kwargs)
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
if value is None:
return value
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'form_class': forms.GenericIPAddressField}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _(u"'%s' value has the correct format "
u"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
value = smart_str(value)
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_time'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
description = _("URL")
def __init__(self, verbose_name=None, name=None, verify_exists=False,
**kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
self.validators.append(
validators.URLValidator(verify_exists=verify_exists))
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
| klnprj/testapp | django/db/models/fields/__init__.py | Python | bsd-3-clause | 47,219 |
from __future__ import print_function
import numpy as nm
try:
import matplotlib.pyplot as plt
import matplotlib as mpl
except (ImportError, RuntimeError):
plt = mpl = None
#print 'matplotlib import failed!'
from sfepy.base.base import output, pause
def spy(mtx, eps=None, color='b', **kwargs):
"""
Show sparsity structure of a `scipy.sparse` matrix.
"""
aux = mtx.tocoo()
ij, val = nm.concatenate((aux.row[:,nm.newaxis],
aux.col[:,nm.newaxis]), 1), aux.data
n_item = aux.getnnz()
n_row, n_col = aux.shape
if eps is not None:
output('using eps =', eps)
ij = nm.compress(nm.absolute(val) > eps, ij, 0)
n_item = ij.shape[0]
else:
output('showing all')
output('n_item:', n_item)
if n_item:
args = {'marker' : '.'}
args.update(kwargs)
plt.plot(ij[:,1], ij[:,0], color, linestyle='None', **args)
plt.axis('image')
plt.axis([-0.5, n_row+0.5, -0.5, n_col+0.5])
plt.xlabel(r'%d x %d: %d nnz, %.2f%% fill'
% (n_row, n_col, n_item, 100. * n_item /
(float(n_row) * float(n_col))))
ax = plt.gca()
ax.set_ylim(ax.get_ylim()[::-1])
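# A brief usage sketch of spy() (example matrix, not part of the original
# module):
#
# import scipy.sparse as sps
# spy(sps.eye(5, format='csr'))  # plots the 5 nonzero diagonal entries
# plt.show()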
def spy_and_show(mtx, **kwargs):
spy(mtx, **kwargs)
plt.show()
##
# 13.12.2005, c
def print_matrix_diff(title, legend, mtx1, mtx2, mtx_da, mtx_dr, iis):
print('%s: ir, ic, %s, %s, adiff, rdiff' % ((title,) + tuple(legend)))
aux = mtx_da.copy().tocsc() # mtx_da should be CSC, cast for safety anyway.
aux.data = nm.ones(mtx_da.data.shape[0])
ics, irs = aux.nonzero()
for ii in iis:
ir, ic = irs[ii], ics[ii]
print('%5d %5d %11.4e %11.4e %9.2e %9.2e'
% (ir, ic, mtx1[ir,ic], mtx2[ir,ic],
mtx_da[ir,ic], mtx_dr[ir,ic]))
print('total: %d' % len(iis))
##
# 13.12.2005, c
# 14.12.2005
# 15.12.2005
# 18.07.2007
def plot_matrix_diff(mtx1, mtx2, delta, legend, mode):
eps = 1e-16
print("min", legend[0] , legend[1], ":", nm.amin(mtx1.data), nm.amin(mtx2.data))
print("max", legend[0] , legend[1], ":", nm.amax(mtx1.data), nm.amax(mtx2.data))
mtx_da = mtx1.copy() # To preserve structure of mtx1.
mtx_da.data[:] = nm.abs(mtx1.data - mtx2.data)
mtx_dr = mtx_da.copy()
mtx_dr.data[:] = -1
iin = nm.where(nm.abs(mtx1.data) > eps)[0]
mtx_dr.data[iin] = mtx_da.data[iin] / nm.abs(mtx1.data[iin])
print("err abs min max:", nm.amin(mtx_da.data), nm.amax(mtx_da.data))
print("err rel min max:", nm.amin(mtx_dr.data), nm.amax(mtx_dr.data))
epsilon = max(1e-5, 10 * delta)
print('epsilon:', epsilon)
pause()
ija = nm.where(mtx_da.data > epsilon)[0]
print_matrix_diff('--- absolute diff', legend,
mtx1, mtx2, mtx_da, mtx_dr, ija)
pause()
iin = nm.where(nm.abs(mtx1.data) > epsilon)[0]
ij = nm.where(nm.abs(mtx_dr.data[iin]) > epsilon)[0]
ij = iin[ij]
print_matrix_diff('--- relative diff', legend,
mtx1, mtx2, mtx_da, mtx_dr, ij)
pause()
ijb = nm.intersect1d(ija, ij)
print_matrix_diff('--- a-r', legend,
mtx1, mtx2, mtx_da, mtx_dr, ijb)
pause()
ii = nm.argsort(mtx_dr.data[ijb])
n_s = min(20, len(ii))
ijbs = ijb[ii[-1:-n_s-1:-1]]
print_matrix_diff('--- a-r 20 biggest (by r)', legend,
mtx1, mtx2, mtx_da, mtx_dr, ijbs)
pause()
if mode < 2: return
h = 100
plt.figure(h); plt.clf()
plt.axes([0.04, 0.6, 0.3, 0.3], frameon=True)
spy(mtx_da, epsilon)
plt.title('absolute diff')
plt.axes([0.68, 0.6, 0.3, 0.3], frameon=True)
iia = nm.where(mtx_dr.data)[0]
mtx_dr.data[nm.setdiff1d(iia, iin)] = 0.0
spy(mtx_dr, epsilon)
plt.title('relative diff')
plt.axes([0.36, 0.6, 0.3, 0.3], frameon=True)
mtx = mtx_dr.copy()
mtx.data[:] = 0.0
ii = nm.intersect1d(nm.where(mtx_dr.data > epsilon)[0],
nm.where(mtx_da.data > epsilon)[0])
mtx.data[ii] = 1.0
spy(mtx, epsilon)
plt.title('a-r intersection')
plt.axes([0.04, 0.08, 0.42, 0.42], frameon=True)
spy(mtx1, epsilon)
plt.title(legend[0])
plt.axes([0.54, 0.08, 0.42, 0.42], frameon=True)
spy(mtx2, epsilon)
plt.title(legend[1])
plt.show()
##
# 02.05.2006, c
def set_axes_font_size(ax, size):
labels = ax.get_xticklabels() + ax.get_yticklabels()
for label in labels:
label.set_size(size)
##
# 27.09.2006, c
def font_size(size):
return mpl.font_manager.FontProperties(size=size)
##
# 28.08.2007, c
def iplot(*args, **kwargs):
plt.ion()
plt.plot(*args, **kwargs)
plt.draw()
plt.ioff()
pause()
| vlukes/sfepy | sfepy/base/plotutils.py | Python | bsd-3-clause | 4,706 |
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker wavelet (also
known as the Mexican hat or the second derivative of a Gaussian) is not a
particularly good kernel to represent piecewise constant signals like this one.
The comparison therefore shows how much adding atoms of different widths
matters, and it motivates learning the dictionary to best fit your type of
signals. The richer dictionary on the right is not larger in size; heavier
subsampling is performed in order to stay on the same order of magnitude.
"""
print __doc__
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
# Normalization constant for the Ricker wavelet: 2 / (sqrt(3 * width) * pi ** (1 / 4)).
x = (2 / (np.sqrt(3 * width) * np.pi ** 0.25)) * (
1 - ((x - center) ** 2 / width ** 2)) * np.exp(
(-(x - center) ** 2) / (2 * width ** 2))
return x
def ricker_matrix(width, resolution, n_atoms):
"""Dictionary of Ricker (mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_atoms)
D = np.empty((n_atoms, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
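# Quick sanity check of the construction above (assumed values, illustrative
# only): ricker_matrix(width=10, resolution=64, n_atoms=16) returns an array
# of shape (16, 64) whose rows are unit-norm atoms, so for that result D,
# np.allclose(np.sum(D ** 2, axis=1), 1.0) holds.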
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_atoms = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution, n_atoms=n_atoms)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_atoms=np.floor(n_atoms / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15),
('Lasso', 'lasso_cd', 2, None),
]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error' %
(title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| cdegroc/scikit-learn | examples/decomposition/plot_sparse_coding.py | Python | bsd-3-clause | 3,808 |
# -*- coding: utf-8 -*-
"""
CSS Testing
:copyright: (C) 2014 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
from os.path import join
from cssutils import CSSParser
import unittest
import trytond.tests.test_tryton
dir = 'static/css/'
class CSSTest(unittest.TestCase):
"""
Test case for CSS.
"""
def validate(self, filename):
"""
Uses cssutils to validate a css file.
Prints output using a logger.
"""
CSSParser(raiseExceptions=True).parseFile(filename, validate=True)
def test_css(self):
"""
Test for CSS validation using W3C standards.
"""
cssfile = join(dir, 'style.css')
self.validate(cssfile)
def suite():
"""
Define suite
"""
test_suite = trytond.tests.test_tryton.suite()
test_suite.addTests(
unittest.TestLoader().loadTestsFromTestCase(CSSTest)
)
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| gautampanday/nereid-webshop | tests/test_css.py | Python | bsd-3-clause | 1,062 |
# Copyright (c) 2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Vendor core functionality used from xarray.
This code has been reproduced with modification under the terms of the Apache License, Version
2.0 (notice included below).
Copyright 2014-2019, xarray Developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def expanded_indexer(key, ndim):
"""Expand an indexer to a tuple with length ndim.
Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError('too many indices')
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key)
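# A small usage sketch (illustrative values, not part of the vendored code):
# expanded_indexer((0, Ellipsis), 4) -> (0, slice(None), slice(None), slice(None))
# expanded_indexer(2, 3) -> (2, slice(None), slice(None))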
def is_dict_like(value):
"""Check if value is dict-like."""
return hasattr(value, 'keys') and hasattr(value, '__getitem__')
def either_dict_or_kwargs(pos_kwargs, kw_kwargs, func_name):
"""Ensure dict-like argument from either positional or keyword arguments."""
if pos_kwargs is not None:
if not is_dict_like(pos_kwargs):
raise ValueError('the first argument to .{} must be a '
'dictionary'.format(func_name))
if kw_kwargs:
raise ValueError('cannot specify both keyword and positional arguments to '
'.{}'.format(func_name))
return pos_kwargs
else:
return kw_kwargs
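# A brief usage sketch (hypothetical caller, illustrative values only):
# either_dict_or_kwargs({'x': 1}, {}, 'sel') -> {'x': 1}
# either_dict_or_kwargs(None, {'x': 1}, 'sel') -> {'x': 1}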
| ahaberlie/MetPy | src/metpy/_vendor/xarray.py | Python | bsd-3-clause | 2,816 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ImageAttachment'
db.create_table('upload_imageattachment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('file', self.gf('django.db.models.fields.files.ImageField')(max_length=250)),
('thumbnail', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name='image_attachments', to=orm['auth.User'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('upload', ['ImageAttachment'])
def backwards(self, orm):
# Deleting model 'ImageAttachment'
db.delete_table('upload_imageattachment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'upload.imageattachment': {
'Meta': {'object_name': 'ImageAttachment'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'image_attachments'", 'to': "orm['auth.User']"}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '250'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'})
}
}
complete_apps = ['upload'] | safwanrahman/linuxdesh | kitsune/upload/migrations/0001_initial.py | Python | bsd-3-clause | 4,996 |
# Copyright (c) 2013 David Holm <[email protected]>
# This file is part of SimpleGUITk - https://github.com/dholm/simpleguitk
# See the file 'COPYING' for copying permission.
from .plot import plot_lines
| dholm/simpleguitk | simpleplot/__init___flymake.py | Python | bsd-3-clause | 207 |
"""This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from scipy.special import gamma
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
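# Quick illustration of the conversions above (example values only):
# Odds(0.75) -> 3.0 (3:1 odds in favor), Probability(3) -> 0.75,
# Probability2(3, 1) -> 0.75.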
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
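# A short usage sketch of Interpolator (example data, not part of the
# original module):
# interp = Interpolator([0, 10], [0, 100])
# interp.Lookup(5) -> 50.0; interp.Reverse(25) -> 2.5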
# When we plot Hist, Pmf and Cdf objects, they don't appear in
# the legend unless we override the default label.
DEFAULT_LABEL = '_nolegend_'
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else DEFAULT_LABEL
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s)' % (cls, str(self.d))
else:
return self.label
def __repr__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s)' % (cls, repr(self.d))
else:
return '%s(%s, %s)' % (cls, repr(self.d), repr(self.label))
def __eq__(self, other):
try:
return self.d == other.d
except AttributeError:
return False
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def SortedItems(self):
"""Gets a sorted sequence of (value, freq/prob) pairs.
If items are unsortable, the result is unsorted.
"""
def isnan(x):
try:
return math.isnan(x)
except TypeError:
return False
if any([isnan(x) for x in self.Values()]):
msg = 'Keys contain NaN, may not sort correctly.'
logging.warning(msg)
try:
return sorted(self.d.items())
except TypeError:
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
return zip(*self.SortedItems())
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in self.SortedItems():
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
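# Usage sketch (illustrative only; `_example_hist_usage` is not part of the
# original API): a Hist maps hashable values to integer frequencies.
def _example_hist_usage():
    hist = Hist([1, 2, 2, 3, 5])
    assert hist.Freq(2) == 2      # frequency of an observed value
    assert hist.Freq(4) == 0      # missing values default to 0
    assert hist.Total() == 5
    hist.Incr(5)                  # record one more observation of 5
    assert hist.Freq(5) == 2
    return hist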
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def ProbEqual(self, x):
"""Probability that a sample from this Pmf is exactly x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbEqual(self, x)
else:
return self[x]
# NOTE: I've decided to remove the magic comparators because they
# have the side-effect of making Pmf sortable, but in fact they
# don't support sorting.
def Normalize(self, fraction=1):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0:
raise ValueError('Normalize: total probability is zero.')
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
return self.MakeCdf().Sample(n)
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
return sum(p * x for x, p in self.Items())
def Median(self):
"""Computes the median of a PMF.
Returns:
float median
"""
return self.MakeCdf().Percentile(50)
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
return sum(p * (x-mu)**2 for x, p in self.Items())
def Expect(self, func):
"""Computes the expectation of func(x).
Returns:
expectation
"""
        return np.sum([p * func(x) for x, p in self.Items()])
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def Mode(self):
"""Returns the value with the highest probability.
        Returns: the value with the highest probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
    # The mode of a posterior is the maximum a posteriori probability (MAP)
MAP = Mode
# If the distribution contains likelihoods only, the peak is the
# maximum likelihood estimator.
MaximumLikelihood = Mode
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
__radd__ = __add__
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf[v1 + v2] += p1 * p2
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
if other == 0:
return self.Copy()
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
cdf.ps **= k
return cdf
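# Usage sketch (illustrative only; `_example_pmf_usage` is not part of the
# original API): a Pmf normalizes frequencies into probabilities and
# supports arithmetic on independent distributions.
def _example_pmf_usage():
    pmf = Pmf([1, 2, 2, 3])               # -> {1: 0.25, 2: 0.5, 3: 0.25}
    assert abs(pmf.Prob(2) - 0.5) < 1e-9
    assert abs(pmf.Mean() - 2.0) < 1e-9
    total = pmf + pmf                     # distribution of the sum of two draws
    assert abs(total.Prob(4) - 0.375) < 1e-9
    return total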
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix[x] += p1 * p2
return mix
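# Usage sketch (illustrative only; names prefixed with `_example` are not part
# of the original API): MakeMixture combines component Pmfs weighted by a
# meta-Pmf; Pmf objects hash by identity, so they can be used as keys.
def _example_mixture_usage():
    fair = Pmf([1, 2, 3, 4, 5, 6])           # fair six-sided die
    loaded = Pmf({6: 1.0})                    # die that always shows 6
    metapmf = Pmf({fair: 0.5, loaded: 0.5})   # 50/50 chance of either die
    mix = MakeMixture(metapmf)
    assert abs(mix[6] - (0.5 / 6 + 0.5)) < 1e-9
    return mix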
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
    high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
class Cdf:
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else DEFAULT_LABEL
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
        self.ps = np.cumsum(freqs, dtype=float)
self.ps /= self.ps[-1]
def __str__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s, %s)' % (cls, str(self.xs), str(self.ps))
else:
return self.label
def __repr__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s, %s)' % (cls, str(self.xs), str(self.ps))
else:
return '%s(%s, %s, %s)' % (cls, str(self.xs), str(self.ps),
repr(self.label))
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
    def __setitem__(self, x, p):
        raise UnimplementedMethodException()
    def __delitem__(self, x):
        raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in zip(self.xs, self.ps):
print(val, prob)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def Values(self, ps=None):
"""Returns InverseCDF(p), the value that corresponds to probability p.
If ps is not provided, returns all values.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
if ps is None:
return self.xs
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
ValueArray = Values
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100)
def Percentiles(self, ps):
"""Returns the value that corresponds to percentiles ps.
Args:
ps: numbers in the range [0, 100]
Returns:
array of values
"""
ps = np.asarray(ps)
return self.Values(ps / 100)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100
def PercentileRanks(self, xs):
"""Returns the percentile ranks of the values in xs.
xs: potential value in the CDF
returns: array of percentile ranks in the range 0 to 100
"""
        return self.Probs(xs) * 100
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
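# Usage sketch (illustrative only; `_example_cdf_usage` is not part of the
# original API): a Cdf answers cumulative-probability and percentile queries
# and samples via the inverse CDF.
def _example_cdf_usage():
    cdf = Cdf([1, 2, 2, 3, 5])
    assert abs(cdf.Prob(2) - 0.6) < 1e-9      # P(X <= 2)
    assert cdf.Value(0.5) == 2                # inverse CDF
    assert cdf.Percentile(50) == 2
    assert len(cdf.Sample(10)) == 10          # NumPy array of 10 draws
    return cdf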
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
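# Usage sketch (illustrative only; `_ExampleDice` is not part of the original
# API): Suite is used by subclassing and defining Likelihood; this is the
# classic "which die was rolled?" problem.
class _ExampleDice(Suite):
    """Posterior over the number of sides of the die that produced the data."""
    def Likelihood(self, data, hypo):
        # probability of rolling `data` on a die with `hypo` sides
        return 0 if data > hypo else 1.0 / hypo
def _example_suite_usage():
    suite = _ExampleDice([4, 6, 8, 12, 20])   # uniform prior over dice
    suite.Update(6)                           # observed a roll of 6
    assert suite.Prob(4) == 0                 # a 4-sided die cannot roll a 6
    return suite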
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
        in that case the density is evaluated at n locations between
        low and high, including both.
        If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def Sample(self, n):
"""Generates a random sample from the estimated Pdf.
n: size of sample
"""
# NOTE: we have to flatten because resample returns a 2-D
# array for some reason.
return self.kde.resample(n).flatten()
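# Usage sketch (illustrative only; `_example_estimated_pdf_usage` is not part
# of the original API): a KDE-based Pdf estimated from a sample, then
# discretized into a normalized Pmf.
def _example_estimated_pdf_usage():
    sample = [random.gauss(0, 1) for _ in range(200)]
    pdf = EstimatedPdf(sample)
    pmf = pdf.MakePmf()                       # evaluated on pdf.GetLinspace()
    assert abs(pmf.Total() - 1.0) < 1e-6
    return pmf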
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
    Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def MakeBinomialPmf(n, p):
"""Evaluates the binomial PMF.
Returns the distribution of successes in n trials with probability p.
"""
pmf = Pmf()
for k in range(n+1):
pmf[k] = stats.binom.pmf(k, n, p)
return pmf
def EvalGammaPdf(x, a):
"""Computes the Gamma PDF.
x: where to evaluate the PDF
a: parameter of the gamma distribution
returns: float probability
"""
return x**(a-1) * np.exp(-x) / gamma(a)
def MakeGammaPmf(xs, a):
"""Makes a PMF discrete approx to a Gamma distribution.
    xs: values where the PDF is evaluated
    a: shape parameter of the gamma distribution
returns: normalized Pmf
"""
xs = np.asarray(xs)
ps = EvalGammaPdf(xs, a)
pmf = Pmf(dict(zip(xs, ps)))
pmf.Normalize()
return pmf
def EvalGeometricPmf(k, p, loc=0):
"""Evaluates the geometric PMF.
With loc=0: Probability of `k` trials to get one success.
With loc=-1: Probability of `k` trials before first success.
k: number of trials
p: probability of success on each trial
"""
return stats.geom.pmf(k, p, loc=loc)
def MakeGeometricPmf(p, loc=0, high=10):
"""Evaluates the binomial PMF.
With loc=0: PMF of trials to get one success.
With loc=-1: PMF of trials before first success.
p: probability of success
high: upper bound where PMF is truncated
"""
pmf = Pmf()
for k in range(high):
pmf[k] = stats.geom.pmf(k, p, loc=loc)
pmf.Normalize()
return pmf
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
    Returns the probability of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
return stats.poisson.pmf(k, lam)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = stats.poisson.pmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalWeibullPdf(x, lam, k):
"""Computes the Weibull PDF.
x: value
lam: parameter lambda in events per unit time
k: parameter
returns: float probability density
"""
arg = (x / lam)
return k / lam * arg**(k-1) * np.exp(-arg**k)
def EvalWeibullCdf(x, lam, k):
"""Evaluates CDF of the Weibull distribution."""
arg = (x / lam)
return 1 - np.exp(-arg**k)
def MakeWeibullPmf(lam, k, high, n=200):
"""Makes a PMF discrete approx to a Weibull distribution.
lam: parameter lambda in events per unit time
k: parameter
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
xs = np.linspace(0, high, n)
ps = EvalWeibullPdf(xs, lam, k)
ps[np.isinf(ps)] = 0
return Pmf(dict(zip(xs, ps)))
def EvalParetoPdf(x, xm, alpha):
"""Computes the Pareto.
xm: minimum value (scale parameter)
alpha: shape parameter
returns: float probability density
"""
return stats.pareto.pdf(x, alpha, scale=xm)
def MakeParetoPmf(xm, alpha, high, num=101):
"""Makes a PMF discrete approx to a Pareto distribution.
xm: minimum value (scale parameter)
alpha: shape parameter
high: upper bound value
num: number of values
returns: normalized Pmf
"""
xs = np.linspace(xm, high, num)
ps = stats.pareto.pdf(xs, alpha, scale=xm)
pmf = Pmf(dict(zip(xs, ps)))
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
    # scipy parametrizes lognorm with shape s=sigma and scale=exp(mu)
    return stats.lognorm.cdf(x, sigma, scale=np.exp(mu))
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta:
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def MAP(self):
"""Computes the value with maximum a posteori probability."""
a = self.alpha - 1
b = self.beta - 1
return a / (a + b)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
The result is a little funny, because the values at 0 and 1
are not symmetric. Nevertheless, it is a reasonable discrete
model of the continuous distribution, and behaves well as
the number of values increases.
"""
if label is None and self.label is not None:
label = self.label
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = special.betainc(self.alpha, self.beta, xs)
cdf = Cdf(xs, ps)
return cdf
def Percentile(self, ps):
"""Returns the given percentiles from this distribution.
ps: scalar, array, or list of [0-100]
"""
ps = np.asarray(ps) / 100
xs = special.betaincinv(self.alpha, self.beta, ps)
return xs
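# Usage sketch (illustrative only; `_example_beta_usage` is not part of the
# original API): conjugate update of a Beta prior with observed coin flips.
def _example_beta_usage():
    beta = Beta(1, 1)                 # uniform prior
    beta.Update((140, 110))           # 140 heads, 110 tails
    assert abs(beta.Mean() - 141.0 / 252.0) < 1e-9
    return beta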
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
        self.params = np.ones(n, dtype=float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
        return Pmf(dict(zip(xs, ps)), label=label)
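# Usage sketch (illustrative only; `_example_dirichlet_usage` is not part of
# the original API): updating a Dirichlet with category counts and reading
# off the marginal Beta of one category.
def _example_dirichlet_usage():
    dirichlet = Dirichlet(3)          # three categories, concentration 1
    dirichlet.Update((10, 5, 1))      # observed counts per category
    marginal = dirichlet.MarginalBeta(0)
    assert abs(marginal.Mean() - 11.0 / 19.0) < 1e-9
    return marginal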
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
    return special.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.normal(0, jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
mu: option known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
mu: option known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
    t = sorted(t)[n:len(t)-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
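# Usage sketch (illustrative only; `_example_least_squares_usage` is not part
# of the original API): a least-squares fit plus residuals and R^2 on toy data.
def _example_least_squares_usage():
    xs = [1, 2, 3, 4, 5]
    ys = [2.1, 3.9, 6.2, 7.8, 10.1]   # roughly ys = 2 * xs
    inter, slope = LeastSquares(xs, ys)
    res = Residuals(xs, ys, inter, slope)
    r2 = CoefDetermination(ys, res)
    assert 1.5 < slope < 2.5 and r2 > 0.9
    return inter, slope, r2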
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
        self.colspecs = self.colspecs.astype(int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float,
double=float, numeric=float)
var_info = []
with open(dct_file, **options) as f:
for line in f:
match = re.search( r'_column\(([^)]*)\)', line)
if not match:
continue
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
returns: DataDf
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column].copy()
weights /= sum(weights)
indices = np.random.choice(df.index, len(df), replace=True, p=weights)
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
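# Usage sketch (illustrative only; `_ExampleDiffMeansPermute` is not part of
# the original API): a permutation test for a difference in group means,
# built on the HypothesisTest scaffold above.
class _ExampleDiffMeansPermute(HypothesisTest):
    """Tests an observed difference in means by permuting group labels."""
    def TestStatistic(self, data):
        group1, group2 = data
        return abs(np.mean(group1) - np.mean(group2))
    def MakeModel(self):
        group1, group2 = self.data
        self.n = len(group1)
        self.pool = np.hstack((group1, group2))
    def RunModel(self):
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]
def _example_hypothesis_test_usage():
    group1 = np.random.normal(0, 1, 100)
    group2 = np.random.normal(0.2, 1, 100)
    ht = _ExampleDiffMeansPermute((group1, group2))
    return ht.PValue(iters=100)       # float p-value in [0, 1]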
def main():
pass
if __name__ == '__main__':
main()
| AllenDowney/MarriageNSFG | thinkstats2.py | Python | mit | 75,264 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Problema unic."""
from __future__ import print_function
def gaseste_unic(istoric):
"""unic"""
result = istoric.pop()
for numar in istoric:
result = result ^ numar
return result
if __name__ == "__main__":
assert gaseste_unic([1, 2, 3, 2, 1]) == 3
assert gaseste_unic([1, 1, 1, 2, 2]) == 1
| alexandrucoman/labs | python/solutii/monica_vizitiu/unic/unic.py | Python | mit | 374 |
# Author: Fred L. Drake, Jr.
# [email protected]
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
import warnings
from cStringIO import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=1, width=80, depth=None):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth)
printer.pprint(object)
def pformat(object, indent=1, width=80, depth=None):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
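# Usage sketch (illustrative only; `_example_usage` is not part of the
# original module): pformat() returns the pretty-printed text, pprint()
# writes it to a stream, and isreadable() reports whether the repr could be
# round-tripped with eval().
def _example_usage():
    data = {'alpha': [0, 1, 2, 3, 4], 'nested': {'beta': ('x', 'y')}}
    text = pformat(data, indent=2, width=30)
    assert isreadable(data)
    return text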
def _sorted(iterable):
with warnings.catch_warnings():
if _sys.py3kwarning:
warnings.filterwarnings("ignore", "comparing unequal types "
"not supported", DeprecationWarning)
return sorted(iterable)
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0, "indent must be >= 0"
assert depth is None or depth > 0, "depth must be > 0"
assert width, "width must be != 0"
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
level = level + 1
objid = _id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level - 1)
typ = _type(object)
sepLines = _len(rep) > (self._width - 1 - indent - allowance)
write = stream.write
if self._depth and level > self._depth:
write(rep)
return
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r == dict.__repr__:
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self._indent_per_level
items = _sorted(object.items())
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
if sepLines:
write(',\n%s%s: ' % (' '*indent, rep))
else:
write(', %s: ' % rep)
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
write('}')
return
if ((issubclass(typ, list) and r == list.__repr__) or
(issubclass(typ, tuple) and r == tuple.__repr__) or
(issubclass(typ, set) and r == set.__repr__) or
(issubclass(typ, frozenset) and r == frozenset.__repr__)
):
length = _len(object)
if issubclass(typ, list):
write('[')
endchar = ']'
elif issubclass(typ, set):
if not length:
write('set()')
return
write('set([')
endchar = '])'
object = _sorted(object)
indent += 4
elif issubclass(typ, frozenset):
if not length:
write('frozenset()')
return
write('frozenset([')
endchar = '])'
object = _sorted(object)
indent += 10
else:
write('(')
endchar = ')'
if self._indent_per_level > 1 and sepLines:
write((self._indent_per_level - 1) * ' ')
if length:
context[objid] = 1
indent = indent + self._indent_per_level
self._format(object[0], stream, indent, allowance + 1,
context, level)
if length > 1:
for ent in object[1:]:
if sepLines:
write(',\n' + ' '*indent)
else:
write(', ')
self._format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
if issubclass(typ, tuple) and length == 1:
write(',')
write(endchar)
return
write(rep)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = _type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r == dict.__repr__:
if not object:
return "{}", True, False
objid = _id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in _sorted(object.items()):
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % _commajoin(components), readable, recursive
if (issubclass(typ, list) and r == list.__repr__) or \
(issubclass(typ, tuple) and r == tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = _id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % _commajoin(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (_type(object).__name__, _id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print "_safe_repr:", t2 - t1
print "pformat:", t3 - t2
if __name__ == "__main__":
_perfcheck()
| bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/pprint.py | Python | mit | 11,932 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Yann GUIBET <[email protected]>
# See LICENSE for details.
from .openssl import OpenSSL
# For python3
def _equals_bytes(a, b):
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= x ^ y
return result == 0
def _equals_str(a, b):
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def equals(a, b):
if isinstance(a, str):
return _equals_str(a, b)
else:
return _equals_bytes(a, b)
def hmac_sha256(k, m):
"""
    Compute the HMAC-SHA256 of the message m keyed with k
"""
key = OpenSSL.malloc(k, len(k))
d = OpenSSL.malloc(m, len(m))
md = OpenSSL.malloc(0, 32)
i = OpenSSL.pointer(OpenSSL.c_int(0))
OpenSSL.HMAC(OpenSSL.EVP_sha256(), key, len(k), d, len(m), md, i)
return md.raw
def hmac_sha512(k, m):
"""
    Compute the HMAC-SHA512 of the message m keyed with k
"""
key = OpenSSL.malloc(k, len(k))
d = OpenSSL.malloc(m, len(m))
md = OpenSSL.malloc(0, 64)
i = OpenSSL.pointer(OpenSSL.c_int(0))
OpenSSL.HMAC(OpenSSL.EVP_sha512(), key, len(k), d, len(m), md, i)
return md.raw
def pbkdf2(password, salt=None, i=10000, keylen=64):
if salt is None:
salt = OpenSSL.rand(8)
p_password = OpenSSL.malloc(password, len(password))
p_salt = OpenSSL.malloc(salt, len(salt))
output = OpenSSL.malloc(0, keylen)
OpenSSL.PKCS5_PBKDF2_HMAC(p_password, len(password), p_salt,
len(p_salt), i, OpenSSL.EVP_sha256(),
keylen, output)
return salt, output.raw
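# Illustrative usage of the helpers above (a sketch; key, message and passphrase
# values are made up, and the digests depend on the linked OpenSSL):
#
#     mac = hmac_sha256(b'secret-key', b'payload')       # 32-byte raw digest
#     mac512 = hmac_sha512(b'secret-key', b'payload')    # 64-byte raw digest
#     salt, derived = pbkdf2(b'passphrase', i=10000)     # random 8-byte salt, 64-byte key
#     assert equals(mac, hmac_sha256(b'secret-key', b'payload'))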
| cpacia/Subspace | subspace/pyelliptic/hash.py | Python | mit | 1,739 |
from __future__ import absolute_import
import nsq
import unittest
class WriterUnitTest(unittest.TestCase):
def setUp(self):
super(WriterUnitTest, self).setUp()
def test_constructor(self):
name = 'test'
reconnect_interval = 10.0
writer = nsq.Writer(nsqd_tcp_addresses=['127.0.0.1:4150'],
reconnect_interval=reconnect_interval,
name=name)
self.assertEqual(writer.name, name)
self.assertEqual(0, len(writer.conn_kwargs))
self.assertEqual(writer.reconnect_interval, reconnect_interval)
def test_bad_writer_arguments(self):
bad_options = dict(foo=10)
self.assertRaises(
AssertionError,
nsq.Writer,
nsqd_tcp_addresses=['127.0.0.1:4150'],
reconnect_interval=15.0,
name='test', **bad_options)
| goller/pynsq | tests/test_writer.py | Python | mit | 892 |
"""Plugin for filesystem tasks."""
from __future__ import unicode_literals, division, absolute_import
import os
import logging
from path import Path
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.config_schema import one_or_more
log = logging.getLogger('listdir')
class Listdir(object):
"""
Uses local path content as an input.
Example::
listdir: /storage/movies/
"""
schema = one_or_more({'type': 'string', 'format': 'path'})
def on_task_input(self, task, config):
# If only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
entries = []
for folder in config:
folder = Path(folder).expanduser()
try:
dir_files = folder.listdir()
except OSError as e:
log.error('Path %s could not be accessed: %s' % (folder, e.strerror))
continue
for filepath in dir_files:
try:
filepath.exists()
except UnicodeError:
log.error('file %s not decodable with filesystem encoding' % filepath)
continue
e = Entry()
if filepath.isfile():
e['title'] = filepath.namebase
else:
e['title'] = filepath.name
e['location'] = filepath
                # Windows paths need an extra / prepended to them
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % filepath
e['filename'] = filepath.name
entries.append(e)
return entries
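# A sketch of the entry produced for a hypothetical file /storage/movies/clip.mkv
# (values are illustrative):
#
#     title='clip', location=Path('/storage/movies/clip.mkv'),
#     url='file:///storage/movies/clip.mkv', filename='clip.mkv'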
@event('plugin.register')
def register_plugin():
plugin.register(Listdir, 'listdir', api_ver=2)
| ratoaq2/Flexget | flexget/plugins/input/listdir.py | Python | mit | 1,908 |
#!/usr/bin/python
import participantCollection
participantCollection = participantCollection.ParticipantCollection()
numberStillIn = participantCollection.sizeOfParticipantsWhoAreStillIn()
initialNumber = participantCollection.size()
print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%**."
print "These participants have checked in at least once in the last 15 days:"
print ""
for participant in participantCollection.participantsWhoAreStillInAndHaveCheckedIn():
print "/u/" + participant.name
print ""
print "These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:"
print ""
for participant in participantCollection.participantsWhoAreStillInAndHaveNotCheckedIn():
print "/u/" + participant.name + " ~"
print ""
| foobarbazblarg/stayclean | stayclean-2015-january/display-on-last-day-before-participants-must-check-in.py | Python | mit | 1,018 |
# unit tests for Mini-project 7 (The Fifteen Puzzle), by k., 08/02/2014
import unittest
from mini_project7 import Puzzle
class TestFunctions(unittest.TestCase):
def setUp(self):
pass
def test_lower_row_invariant(self):
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 10], [9, 1, 0, 11], [12, 13, 14, 15]])
self.assertTrue(state.lower_row_invariant(2, 2))
self.assertIs(type(state.lower_row_invariant(2, 2)), bool)
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 10], [9, 1, 11, 0], [12, 13, 14, 15]])
self.assertFalse(state.lower_row_invariant(2, 2))
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 10], [9, 0, 1, 11], [12, 13, 14, 15]])
self.assertFalse(state.lower_row_invariant(2, 2))
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 10], [9, 1, 0, 12], [11, 13, 14, 15]])
self.assertFalse(state.lower_row_invariant(2, 2))
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 1], [9, 0, 10, 11], [12, 13, 14, 15]])
self.assertTrue(state.lower_row_invariant(2, 1))
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 1], [9, 0, 10, 11], [13, 12, 14, 15]])
self.assertFalse(state.lower_row_invariant(2, 1))
state = Puzzle(3, 3, [[8, 7, 6], [5, 4, 3], [2, 1, 0]])
self.assertTrue(state.lower_row_invariant(2, 2))
state = Puzzle(3, 3, [[2, 3, 4], [1, 0, 5], [6, 7, 8]])
self.assertTrue(state.lower_row_invariant(1, 1))
state = Puzzle(3, 3, [[2, 3, 4], [5, 0, 1], [6, 7, 8]])
self.assertFalse(state.lower_row_invariant(1, 1))
state = Puzzle(3, 5, [[13, 1, 2, 3, 11], [5, 6, 7, 8, 10], [11, 12, 4, 0, 14]])
self.assertTrue(state.lower_row_invariant(2, 3))
state = Puzzle(4, 4, [[1, 2, 3, 7], [5, 0, 6, 4], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertFalse(state.lower_row_invariant(1, 1))
def test_solve_interior_tile(self):
state = Puzzle(4, 4, [[4, 13, 1, 3], [5, 10, 2, 7], [8, 12, 6, 11], [9, 0, 14, 15]])
self.assertIs(type(state.solve_interior_tile(3, 1)), str)
state = Puzzle(4, 4, [[4, 13, 1, 3], [5, 10, 2, 7], [8, 12, 6, 11], [9, 0, 14, 15]])
self.assertEqual(state.solve_interior_tile(3, 1), 'uuulddrulddruld')
state = Puzzle(4, 4, [[1, 2, 3, 7], [5, 4, 9, 6], [8, 0, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_interior_tile(2, 1), 'urullddruld')
state = Puzzle(3, 3, [[8, 7, 6], [5, 4, 3], [2, 1, 0]])
self.assertEqual(state.solve_interior_tile(2, 2) , 'uulldrruldrulddruld')
state = Puzzle(3, 3, [[1, 2, 3], [4, 5, 6], [7, 0, 8]])
self.assertEqual(state.solve_interior_tile(2, 1) , 'l')
state = Puzzle(4, 4, [[1, 2, 3, 4], [5, 6, 10, 7], [8, 9, 0, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_interior_tile(2, 2) , 'uld')
state = Puzzle(3, 5, [[13, 2, 3, 4, 5], [6, 7, 8, 9, 11], [10, 12, 1, 0, 14]])
self.assertEqual(state.solve_interior_tile(2, 3) , 'uullldrruldrruldrulddruld')
state = Puzzle(4, 4, [[1, 2, 3, 4], [5, 6, 7, 9], [8, 0, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_interior_tile(2, 1) , 'urrulldrullddruld')
state = Puzzle(3, 3, [[1, 2, 3], [4, 5, 7], [6, 0, 8]])
self.assertEqual(state.solve_interior_tile(2, 1) , 'urullddruld')
state = Puzzle(4, 5, [[15, 16, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [1, 0, 17, 18, 19]])
self.assertEqual(state.solve_interior_tile(3, 1), 'uuulddrulddruld')
def test_solve_col0_tile(self):
state = Puzzle(3, 3, [[1, 2, 3], [6, 4, 5], [0, 7, 8]])
self.assertIs(type(state.solve_col0_tile(2)), str)
state = Puzzle(3, 3, [[1, 2, 3], [6, 4, 5], [0, 7, 8]])
self.assertEqual(state.solve_col0_tile(2), 'urr')
state = Puzzle(3, 3, [[2, 3, 6], [1, 4, 5], [0, 7, 8]])
self.assertEqual(state.solve_col0_tile(2), 'ururdlludruldruldrdlurdluurddlurr')
state = Puzzle(3, 3, [[2, 6, 1], [3, 4, 5], [0, 7, 8]])
self.assertEqual(state.solve_col0_tile(2), 'uruldruldrdlurdluurddlurr')
state = Puzzle(3, 3, [[6, 2, 1], [3, 4, 5], [0, 7, 8]])
self.assertEqual(state.solve_col0_tile(2), 'uruldruldruldrdlurdluurddlurr')
state = Puzzle(3, 5, [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [0, 11, 12, 13, 14]])
self.assertEqual(state.solve_col0_tile(2), 'urrrrulldrulldrulldruldrdlurdluurddlurrrr')
state = Puzzle(3, 5, [[10, 2, 3, 4, 5], [6, 7, 8, 9, 1], [0, 11, 12, 13, 14]])
self.assertEqual(state.solve_col0_tile(2), 'uruldruldruldrdlurdluurddlurrrr')
state = Puzzle(3, 5, [[1, 2, 10, 4, 5], [6, 7, 8, 9, 3], [0, 11, 12, 13, 14]])
self.assertEqual(state.solve_col0_tile(2), 'ururdlludruldruldrdlurdluurddlurrrr')
def test_invariant_row0(self):
state = Puzzle(3, 3, [[2, 0, 1], [3, 4, 5], [6, 7, 8]])
self.assertFalse(state.row0_invariant(1))
self.assertIs(type(state.row0_invariant(1)), bool)
state = Puzzle(4, 4, [[1, 0, 3, 2], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertFalse(state.row0_invariant(1))
state = Puzzle(3, 3, [[1, 0, 2], [3, 4, 5], [6, 7, 8]])
self.assertTrue(state.row0_invariant(1))
state = Puzzle(4, 4, [[1, 0, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertTrue(state.row0_invariant(1))
state = Puzzle(3, 5, [[1, 2, 3, 4, 0], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]])
self.assertTrue(state.row0_invariant(4))
state = Puzzle(3, 5, [[2, 4, 1, 0, 3], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]])
self.assertFalse(state.row0_invariant(3))
state = Puzzle(4, 4, [[4, 2, 0, 3], [5, 1, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertTrue(state.row0_invariant(2))
# from the grader
state = Puzzle(4, 5, [[15, 16, 0, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [1, 2, 17, 18, 19]])
self.assertFalse(state.row0_invariant(2))
def test_invariant_row1(self):
state = Puzzle(3, 3, [[2, 3, 4], [1, 0, 5], [6, 7, 8]])
self.assertTrue(state.row1_invariant(1))
self.assertIs(type(state.row1_invariant(1)), bool)
state = Puzzle(3, 3, [[4, 3, 2], [1, 0, 5], [6, 7, 8]])
self.assertTrue(state.row1_invariant(1))
state = Puzzle(3, 3, [[2, 3, 4], [5, 1, 0], [6, 7, 8]])
self.assertTrue(state.row1_invariant(2))
state = Puzzle(4, 4, [[1, 3, 4, 2], [0, 6, 5, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertFalse(state.row1_invariant(0))
state = Puzzle(3, 5, [[1, 2, 3, 4, 5], [8, 9, 0, 6, 7], [10, 11, 12, 13, 14]])
self.assertFalse(state.row1_invariant(2))
state = Puzzle(3, 5, [[1, 5, 2, 3, 4], [7, 6, 0, 8, 9], [10, 11, 12, 13, 14]])
self.assertTrue(state.row1_invariant(2))
state = Puzzle(3, 5, [[1, 2, 3, 4, 5], [6, 7, 8, 9, 0], [10, 11, 12, 13, 14]])
self.assertTrue(state.row1_invariant(4))
state = Puzzle(4, 4, [[4, 6, 1, 3], [5, 2, 0, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertTrue(state.row1_invariant(2))
# from the grader
state = Puzzle(3, 3, [[4, 3, 2], [1, 0, 5], [6, 7, 8]])
self.assertFalse(state.row1_invariant(0))
self.assertIs(type(state.row1_invariant(1)), bool)
state = Puzzle(4, 5, [[15, 6, 5, 3, 4], [2, 1, 0, 8, 9], [10, 11, 12, 13, 14], [7, 16, 17, 18, 19]])
self.assertFalse(state.row1_invariant(2))
def test_solve_row0(self):
state = Puzzle(3, 3, [[1, 2, 0], [3, 4, 5], [6, 7, 8]])
self.assertEqual(state.solve_row0_tile(2), 'ld')
state = Puzzle(4, 4, [[2, 4, 5, 0], [3, 6, 1, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_row0_tile(3), 'ldllurrdlurdlurrdluldrruld')
state = Puzzle(4, 4, [[1, 3, 5, 0], [2, 6, 4, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_row0_tile(3), 'lduldruldurdlurrdluldrruld')
state = Puzzle(4, 5, [[1, 5, 6, 0, 4], [7, 2, 3, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]])
self.assertEqual(state.solve_row0_tile(3), 'lduldurdlurrdluldrruld')
def test_solve_row1(self):
state = Puzzle(3, 3, [[2, 5, 4], [1, 3, 0], [6, 7, 8]])
self.assertEqual(state.solve_row1_tile(2), 'uldruldur')
self.assertIs(type(state.solve_row1_tile(2)), str)
state = Puzzle(3, 3, [[1, 4, 2], [3, 5, 0], [6, 7, 8]])
self.assertEqual(state.solve_row1_tile(2), 'lur')
state = Puzzle(3, 5, [[1, 2, 7, 3, 4], [6, 5, 0, 8, 9], [10, 11, 12, 13, 14]])
self.assertEqual(state.solve_row1_tile(2), 'uldur')
state = puzzle = Puzzle(4, 4, [[1, 2, 6, 3], [7, 4, 5, 0], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_row1_tile(3), 'lllurrdlurrdlur')
state = Puzzle(4, 4, [[1, 7, 4, 2], [3, 5, 6, 0], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_row1_tile(3), 'ulldrruldruldur')
state = Puzzle(3, 5, [[1, 7, 2, 3, 4], [6, 5, 0, 8, 9], [10, 11, 12, 13, 14]])
self.assertEqual(state.solve_row1_tile(2), 'uldruldur')
state = Puzzle(3, 5, [[1, 2, 3, 4, 5], [6, 7, 8, 9, 0], [10, 11, 12, 13, 14]])
self.assertEqual(state.solve_row1_tile(4), 'lur')
def test_two_by_two(self):
state = Puzzle(3, 3, [[4, 3, 2], [1, 0, 5], [6, 7, 8]])
self.assertEqual(state.solve_2x2(), 'uldrul')
self.assertIs(type(state.solve_2x2()), str)
state = Puzzle(3, 5, [[5, 1, 2, 3, 4], [6, 0, 7, 8, 9], [10, 11, 12, 13, 14]])
self.assertEqual(state.solve_2x2(), 'ulrdlu')
state = Puzzle(2, 2, [[3, 2], [1, 0]])
self.assertEqual(state.solve_2x2(), 'uldrul')
state = Puzzle(2, 2, [[1, 3], [2, 0]])
self.assertEqual(state.solve_2x2(), 'ul')
state = Puzzle(2, 2, [[0, 1], [2, 3]])
self.assertEqual(state.solve_2x2(), '')
def test_finale(self):
state = Puzzle(4, 5, [[15, 16, 0, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [1, 2, 17, 18, 19]])
self.assertEqual(state.solve_puzzle(), 'rrdddulduldulduuulddrulddrulduruulddruldruldrdlurdluurddlurrrrulduldulduldurlruldrdlurdluurddlurrrruldurlduldurlduldurlduldurdlurrdluldrrulduldrul')
state = Puzzle(4, 4, [[14, 12, 8, 5], [0, 2, 15, 6], [4, 13, 7, 9], [10, 11, 3, 1]])
self.assertEqual(state.solve_puzzle(), 'rrrdduullurrdldrulddrulduuulldrruldrulddrulddrulduurullddrulddrulduruuldrulddruldruldrdlurdluurddlurrrllurrdlllurrdluulddruldururdlludruldruldrdlurdluurddlurrrulldrruldruldurldlurdlurrdluldrruldlurldulrdlu')
state = Puzzle(4,4,[[2,11,12,13],[9,4,6,1],[5,7,8,3],[10,0,14,15]])
self.assertEqual(state.solve_puzzle(), 'rrlluuurrdllurdlludrulddrulddruldururullddruldruldrdlurdluurddlurrruldruldllurrdluulddruldurrulldruldrdlurdluurddlurrrlllurrdlurrdlurldulldrruldruldurlduldurdlurrdluldrrulduldrul')
# let's run it in IDLE
if __name__ == '__main__':
unittest.main(exit=False)
| lifeloverxg/principles-of-computing-2 | test_mini_project7.py | Python | mit | 11,001 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
rgb2pct.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class rgb2pct(GdalAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NCOLORS = 'NCOLORS'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', '24-to-8-bits.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('RGB to PCT')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Conversion')
self.addParameter(ParameterRaster(rgb2pct.INPUT,
self.tr('Input layer'), False))
self.addParameter(ParameterNumber(rgb2pct.NCOLORS,
self.tr('Number of colors'), 1, None, 2))
self.addOutput(OutputRaster(rgb2pct.OUTPUT, self.tr('RGB to PCT')))
def getConsoleCommands(self):
arguments = []
arguments.append('-n')
arguments.append(str(self.getParameterValue(rgb2pct.NCOLORS)))
arguments.append('-of')
out = self.getOutputValue(rgb2pct.OUTPUT)
arguments.append(GdalUtils.getFormatShortNameFromFilename(out))
arguments.append(self.getParameterValue(rgb2pct.INPUT))
arguments.append(out)
if isWindows():
commands = ['cmd.exe', '/C ', 'rgb2pct.bat',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['rgb2pct.py', GdalUtils.escapeAndJoin(arguments)]
return commands
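# On a non-Windows host, with NCOLORS=2 and a GeoTIFF output path, the command
# assembled above comes out as (paths are illustrative):
#
#     rgb2pct.py -n 2 -of GTiff /data/input_rgb.tif /data/output_pct.tif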
| drnextgis/QGIS | python/plugins/processing/algs/gdal/rgb2pct.py | Python | gpl-2.0 | 2,969 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Implement upgrade recipes."""
| inveniosoftware/invenio-upgrader | invenio_upgrader/upgrades/__init__.py | Python | gpl-2.0 | 812 |
# emacs: -*- mode: python; coding: utf-8; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import re
from abc import abstractmethod
from .strptime import reGroupDictStrptime, timeRE
from ..helpers import getLogger
logSys = getLogger(__name__)
class DateTemplate(object):
"""A template which searches for and returns a date from a log line.
    This is a non-functional abstract class which other templates should
inherit from.
Attributes
----------
name
regex
"""
def __init__(self):
self._name = ""
self._regex = ""
self._cRegex = None
self.hits = 0
@property
def name(self):
"""Name assigned to template.
"""
return self._name
@name.setter
def name(self, name):
self._name = name
def getRegex(self):
return self._regex
def setRegex(self, regex, wordBegin=True):
"""Sets regex to use for searching for date in log line.
Parameters
----------
regex : str
The regex the template will use for searching for a date.
wordBegin : bool
Defines whether the regex should be modified to search at
beginning of a word, by adding "\\b" to start of regex.
Default True.
Raises
------
re.error
If regular expression fails to compile
"""
regex = regex.strip()
if (wordBegin and not re.search(r'^\^', regex)):
regex = r'\b' + regex
self._regex = regex
self._cRegex = re.compile(regex, re.UNICODE | re.IGNORECASE)
regex = property(getRegex, setRegex, doc=
"""Regex used to search for date.
""")
def matchDate(self, line):
"""Check if regex for date matches on a log line.
"""
dateMatch = self._cRegex.search(line)
return dateMatch
@abstractmethod
def getDate(self, line):
"""Abstract method, which should return the date for a log line
This should return the date for a log line, typically taking the
date from the part of the line which matched the templates regex.
This requires abstraction, therefore just raises exception.
Parameters
----------
line : str
Log line, of which the date should be extracted from.
Raises
------
NotImplementedError
Abstract method, therefore always returns this.
"""
raise NotImplementedError("getDate() is abstract")
class DateEpoch(DateTemplate):
"""A date template which searches for Unix timestamps.
This includes Unix timestamps which appear at start of a line, optionally
within square braces (nsd), or on SELinux audit log lines.
Attributes
----------
name
regex
"""
def __init__(self):
DateTemplate.__init__(self)
self.regex = "(?:^|(?P<square>(?<=^\[))|(?P<selinux>(?<=audit\()))\d{10}(?:\.\d{3,6})?(?(selinux)(?=:\d+\))(?(square)(?=\])))"
def getDate(self, line):
"""Method to return the date for a log line.
Parameters
----------
line : str
Log line, of which the date should be extracted from.
Returns
-------
(float, str)
Tuple containing a Unix timestamp, and the string of the date
            which was matched and in turn used to calculate the timestamp.
"""
dateMatch = self.matchDate(line)
if dateMatch:
# extract part of format which represents seconds since epoch
return (float(dateMatch.group()), dateMatch)
return None
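# Log fragments the DateEpoch regex above is meant to match (illustrative lines,
# not taken from real logs):
#
#     "1385713332.012 dropped connection"             -> 1385713332.012
#     "[1385713332] nsd style entry"                  -> 1385713332
#     "type=AVC msg=audit(1385713332.012:42): ..."    -> 1385713332.012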
class DatePatternRegex(DateTemplate):
"""Date template, with regex/pattern
Parameters
----------
pattern : str
Sets the date templates pattern.
Attributes
----------
name
regex
pattern
"""
_patternRE = r"%%(%%|[%s])" % "".join(timeRE.keys())
_patternName = {
'a': "DAY", 'A': "DAYNAME", 'b': "MON", 'B': "MONTH", 'd': "Day",
'H': "24hour", 'I': "12hour", 'j': "Yearday", 'm': "Month",
'M': "Minute", 'p': "AMPM", 'S': "Second", 'U': "Yearweek",
'w': "Weekday", 'W': "Yearweek", 'y': 'Year2', 'Y': "Year", '%': "%",
'z': "Zone offset", 'f': "Microseconds", 'Z': "Zone name"}
for _key in set(timeRE) - set(_patternName): # may not have them all...
_patternName[_key] = "%%%s" % _key
def __init__(self, pattern=None):
super(DatePatternRegex, self).__init__()
self._pattern = None
if pattern is not None:
self.pattern = pattern
@property
def pattern(self):
"""The pattern used for regex with strptime "%" time fields.
This should be a valid regular expression, of which matching string
will be extracted from the log line. strptime style "%" fields will
be replaced by appropriate regular expressions, or custom regex
groups with names as per the strptime fields can also be used
instead.
"""
return self._pattern
@pattern.setter
def pattern(self, pattern):
self._pattern = pattern
self._name = re.sub(
self._patternRE, r'%(\1)s', pattern) % self._patternName
super(DatePatternRegex, self).setRegex(
re.sub(self._patternRE, r'%(\1)s', pattern) % timeRE)
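    # Illustrative pattern (an assumption, not a shipped default): setting
    # pattern = "%Y-%m-%d %H:%M:%S" gives this template the name
    # "Year-Month-Day 24hour:Minute:Second" and a regex built from timeRE, so a
    # line such as "2014-01-25 14:02:03 sshd[123]: error" is matched and
    # getDate() turns the captured groups into an epoch timestamp.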
def setRegex(self, value):
raise NotImplementedError("Regex derived from pattern")
@DateTemplate.name.setter
def name(self, value):
raise NotImplementedError("Name derived from pattern")
def getDate(self, line):
"""Method to return the date for a log line.
This uses a custom version of strptime, using the named groups
from the instances `pattern` property.
Parameters
----------
line : str
Log line, of which the date should be extracted from.
Returns
-------
(float, str)
Tuple containing a Unix timestamp, and the string of the date
            which was matched and in turn used to calculate the timestamp.
"""
dateMatch = self.matchDate(line)
if dateMatch:
groupdict = dict(
(key, value)
for key, value in dateMatch.groupdict().iteritems()
if value is not None)
return reGroupDictStrptime(groupdict), dateMatch
class DateTai64n(DateTemplate):
    """A date template which matches TAI64N format timestamps.
Attributes
----------
name
regex
"""
def __init__(self):
DateTemplate.__init__(self)
# We already know the format for TAI64N
# yoh: we should not add an additional front anchor
self.setRegex("@[0-9a-f]{24}", wordBegin=False)
def getDate(self, line):
"""Method to return the date for a log line.
Parameters
----------
line : str
Log line, of which the date should be extracted from.
Returns
-------
(float, str)
Tuple containing a Unix timestamp, and the string of the date
            which was matched and in turn used to calculate the timestamp.
"""
dateMatch = self.matchDate(line)
if dateMatch:
# extract part of format which represents seconds since epoch
value = dateMatch.group()
seconds_since_epoch = value[2:17]
# convert seconds from HEX into local time stamp
return (int(seconds_since_epoch, 16), dateMatch)
return None
| jakesyl/fail2ban | fail2ban/server/datetemplate.py | Python | gpl-2.0 | 7,428 |
###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at [email protected] with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# [email protected]
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
from zope.component import getMultiAdapter
class SpaceIconViewlet(ViewletBase):
render = ViewPageTemplateFile('space_icon.pt')
def update(self):
portal_state = getMultiAdapter((self.context, self.request),
name=u'plone_portal_state')
cportal_url = portal_state.portal_url()
current_object = self.context.aq_inner
self.has_space_icon = False
self.space_icon = ""
self.space_url = ""
parentslist = current_object.aq_chain
new_object = None
found = 0
try:
for type in parentslist:
if type.portal_type == 'Space' and type.meta_type == 'Space':
new_object = type
found = 1
if found == 1:
break
except AttributeError:
a = self.space_icon
if new_object <> None:
#implement code here for binding space icon
if new_object.space_icon <> "":
self.space_icon = cportal_url + "/" + new_object.space_icon
else:
self.space_icon = default_space_icon
self.space_url = new_object.absolute_url()
self.has_space_icon = True
else:
self.site_icon = portal_state.portal_url() + "/logo.jpg"
self.site_url = portal_state.portal_url()
self.render = ViewPageTemplateFile('site_logo.pt')
| collective/cyn.in | src/ubify.viewlets/ubify/viewlets/browser/spaceicon.py | Python | gpl-3.0 | 3,414 |
"""A module that is accepted by Python but rejected by tokenize.
The problem is the trailing line continuation at the end of the line,
which produces a TokenError."""
""\
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/pylint/test/input/func_tokenize_error.py | Python | agpl-3.0 | 173 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Gengo Translator',
'category': 'Website/Website',
'summary': 'Translate website in one-click',
'description': """
This module allows to send website content to Gengo translation service in a single click. Gengo then gives back the translated terms in the destination language.
""",
'depends': [
'website',
'base_gengo'
],
'data': [
'views/website_gengo_templates.xml',
]
}
| ddico/odoo | addons/website_gengo/__manifest__.py | Python | agpl-3.0 | 544 |
"""
Serializers and ModelSerializers are similar to Forms and ModelForms.
Unlike forms, they are not constrained to dealing with HTML output, and
form encoded input.
Serialization in REST framework is a two-phase process:
1. Serializers marshal between complex types like model instances, and
python primitives.
2. The process of marshalling between python primitives and request and
response content is handled by parsers and renderers.
"""
from __future__ import unicode_literals
import copy
import datetime
import inspect
import types
from decimal import Decimal
from django.contrib.contenttypes.generic import GenericForeignKey
from django.core.paginator import Page
from django.db import models
from django.forms import widgets
from django.utils.datastructures import SortedDict
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.compat import get_concrete_model, six
from rest_framework.settings import api_settings
# Note: We do the following so that users of the framework can use this style:
#
# example_field = serializers.CharField(...)
#
# This helps keep the separation between model fields, form fields, and
# serializer fields more explicit.
from rest_framework.relations import * # NOQA
from rest_framework.fields import * # NOQA
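# A sketch of the two-phase flow described in the module docstring, using classes
# defined later in this module (the serializer, its fields and the input dict are
# hypothetical):
#
#     class CommentSerializer(Serializer):
#         email = EmailField()
#         content = CharField(max_length=200)
#
#     serializer = CommentSerializer(data={'email': '[email protected]', 'content': 'hi'})
#     serializer.is_valid()     # deserialization: primitives -> validated attrs
#     serializer.object         # the restored value (a dict, absent a restore_object override)
#     CommentSerializer(serializer.object).data   # serialization back to primitives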
def _resolve_model(obj):
"""
Resolve supplied `obj` to a Django model class.
`obj` must be a Django model class itself, or a string
    representation of one. Useful in situations like GH #1225 where
Django may not have resolved a string-based reference to a model in
another model's foreign key definition.
String representations should have the format:
'appname.ModelName'
"""
if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
app_name, model_name = obj.split('.')
return models.get_model(app_name, model_name)
elif inspect.isclass(obj) and issubclass(obj, models.Model):
return obj
else:
raise ValueError("{0} is not a Django model".format(obj))
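# For example, _resolve_model('auth.User') and _resolve_model(User) both return
# django.contrib.auth's User model class (illustrative; any installed
# "appname.ModelName" pair resolves the same way).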
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return ''
return name.replace('_', ' ').capitalize()
class RelationsList(list):
_deleted = []
class NestedValidationError(ValidationError):
"""
The default ValidationError behavior is to stringify each item in the list
if the messages are a list of error messages.
In the case of nested serializers, where the parent has many children,
then the child's `serializer.errors` will be a list of dicts. In the case
of a single child, the `serializer.errors` will be a dict.
We need to override the default behavior to get properly nested error dicts.
"""
def __init__(self, message):
if isinstance(message, dict):
self._messages = [message]
else:
self._messages = message
@property
def messages(self):
return self._messages
class DictWithMetadata(dict):
"""
A dict-like object, that can have additional properties attached.
"""
def __getstate__(self):
"""
Used by pickle (e.g., caching).
Overridden to remove the metadata from the dict, since it shouldn't be
pickled and may in some instances be unpickleable.
"""
return dict(self)
class SortedDictWithMetadata(SortedDict):
"""
A sorted dict-like object, that can have additional properties attached.
"""
def __getstate__(self):
"""
Used by pickle (e.g., caching).
        Overridden to remove the metadata from the dict, since it shouldn't be
        pickled and may in some instances be unpickleable.
"""
return SortedDict(self).__dict__
def _is_protected_type(obj):
"""
True if the object is a native datatype that does not need to
be serialized further.
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal,
basestring)
)
def _get_declared_fields(bases, attrs):
"""
Create a list of serializer field instances from the passed in 'attrs',
plus any fields on the base classes (in 'bases').
Note that all fields from the base classes are used.
"""
fields = [(field_name, attrs.pop(field_name))
for field_name, obj in list(six.iteritems(attrs))
if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Serializer, add that Serializer's
# fields. Note that we loop over the bases in *reverse*. This is necessary
# in order to maintain the correct order of fields.
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = list(base.base_fields.items()) + fields
return SortedDict(fields)
class SerializerMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = _get_declared_fields(bases, attrs)
return super(SerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class SerializerOptions(object):
"""
Meta class options for Serializer
"""
def __init__(self, meta):
self.depth = getattr(meta, 'depth', 0)
self.fields = getattr(meta, 'fields', ())
self.exclude = getattr(meta, 'exclude', ())
class BaseSerializer(WritableField):
"""
This is the Serializer implementation.
We need to implement it as `BaseSerializer` due to metaclass magicks.
"""
class Meta(object):
pass
_options_class = SerializerOptions
_dict_class = SortedDictWithMetadata
def __init__(self, instance=None, data=None, files=None,
context=None, partial=False, many=None,
allow_add_remove=False, **kwargs):
super(BaseSerializer, self).__init__(**kwargs)
self.opts = self._options_class(self.Meta)
self.parent = None
self.root = None
self.partial = partial
self.many = many
self.allow_add_remove = allow_add_remove
self.context = context or {}
self.init_data = data
self.init_files = files
self.object = instance
self.fields = self.get_fields()
self._data = None
self._files = None
self._errors = None
if many and instance is not None and not hasattr(instance, '__iter__'):
raise ValueError('instance should be a queryset or other iterable with many=True')
if allow_add_remove and not many:
raise ValueError('allow_add_remove should only be used for bulk updates, but you have not set many=True')
#####
# Methods to determine which fields to use when (de)serializing objects.
def get_default_fields(self):
"""
Return the complete set of default fields for the object, as a dict.
"""
return {}
def get_fields(self):
"""
Returns the complete set of fields for the object as a dict.
This will be the set of any explicitly declared fields,
plus the set of fields returned by get_default_fields().
"""
ret = SortedDict()
# Get the explicitly declared fields
base_fields = copy.deepcopy(self.base_fields)
for key, field in base_fields.items():
ret[key] = field
# Add in the default fields
default_fields = self.get_default_fields()
for key, val in default_fields.items():
if key not in ret:
ret[key] = val
# If 'fields' is specified, use those fields, in that order.
if self.opts.fields:
assert isinstance(self.opts.fields, (list, tuple)), '`fields` must be a list or tuple'
new = SortedDict()
for key in self.opts.fields:
new[key] = ret[key]
ret = new
# Remove anything in 'exclude'
if self.opts.exclude:
assert isinstance(self.opts.exclude, (list, tuple)), '`exclude` must be a list or tuple'
for key in self.opts.exclude:
ret.pop(key, None)
for key, field in ret.items():
field.initialize(parent=self, field_name=key)
return ret
#####
# Methods to convert or revert from objects <--> primitive representations.
def get_field_key(self, field_name):
"""
Return the key that should be used for a given field.
"""
return field_name
def restore_fields(self, data, files):
"""
Core of deserialization, together with `restore_object`.
Converts a dictionary of data into a dictionary of deserialized fields.
"""
reverted_data = {}
if data is not None and not isinstance(data, dict):
self._errors['non_field_errors'] = ['Invalid data']
return None
for field_name, field in self.fields.items():
field.initialize(parent=self, field_name=field_name)
try:
field.field_from_native(data, files, field_name, reverted_data)
except ValidationError as err:
self._errors[field_name] = list(err.messages)
return reverted_data
def perform_validation(self, attrs):
"""
Run `validate_<fieldname>()` and `validate()` methods on the serializer
"""
for field_name, field in self.fields.items():
if field_name in self._errors:
continue
source = field.source or field_name
if self.partial and source not in attrs:
continue
try:
validate_method = getattr(self, 'validate_%s' % field_name, None)
if validate_method:
attrs = validate_method(attrs, source)
except ValidationError as err:
self._errors[field_name] = self._errors.get(field_name, []) + list(err.messages)
# If there are already errors, we don't run .validate() because
# field-validation failed and thus `attrs` may not be complete.
# which in turn can cause inconsistent validation errors.
if not self._errors:
try:
attrs = self.validate(attrs)
except ValidationError as err:
if hasattr(err, 'message_dict'):
for field_name, error_messages in err.message_dict.items():
self._errors[field_name] = self._errors.get(field_name, []) + list(error_messages)
elif hasattr(err, 'messages'):
self._errors['non_field_errors'] = err.messages
return attrs
def validate(self, attrs):
"""
Stub method, to be overridden in Serializer subclasses
"""
return attrs
def restore_object(self, attrs, instance=None):
"""
Deserialize a dictionary of attributes into an object instance.
You should override this method to control how deserialized objects
are instantiated.
"""
if instance is not None:
instance.update(attrs)
return instance
return attrs
def to_native(self, obj):
"""
Serialize objects -> primitives.
"""
ret = self._dict_class()
ret.fields = self._dict_class()
for field_name, field in self.fields.items():
if field.read_only and obj is None:
continue
field.initialize(parent=self, field_name=field_name)
key = self.get_field_key(field_name)
value = field.field_to_native(obj, field_name)
method = getattr(self, 'transform_%s' % field_name, None)
if callable(method):
value = method(obj, value)
if not getattr(field, 'write_only', False):
ret[key] = value
ret.fields[key] = self.augment_field(field, field_name, key, value)
return ret
def from_native(self, data, files=None):
"""
Deserialize primitives -> objects.
"""
self._errors = {}
if data is not None or files is not None:
attrs = self.restore_fields(data, files)
if attrs is not None:
attrs = self.perform_validation(attrs)
else:
self._errors['non_field_errors'] = ['No input provided']
if not self._errors:
return self.restore_object(attrs, instance=getattr(self, 'object', None))
def augment_field(self, field, field_name, key, value):
# This horrible stuff is to manage serializers rendering to HTML
field._errors = self._errors.get(key) if self._errors else None
field._name = field_name
field._value = self.init_data.get(key) if self._errors and self.init_data else value
if not field.label:
field.label = pretty_name(key)
return field
def field_to_native(self, obj, field_name):
"""
Override default so that the serializer can be used as a nested field
across relationships.
"""
if self.write_only:
return None
if self.source == '*':
return self.to_native(obj)
# Get the raw field value
try:
source = self.source or field_name
value = obj
for component in source.split('.'):
if value is None:
break
value = get_component(value, component)
except ObjectDoesNotExist:
return None
if is_simple_callable(getattr(value, 'all', None)):
return [self.to_native(item) for item in value.all()]
if value is None:
return None
if self.many is not None:
many = self.many
else:
many = hasattr(value, '__iter__') and not isinstance(value, (Page, dict, six.text_type))
if many:
return [self.to_native(item) for item in value]
return self.to_native(value)
def field_from_native(self, data, files, field_name, into):
"""
Override default so that the serializer can be used as a writable
nested field across relationships.
"""
if self.read_only:
return
try:
value = data[field_name]
except KeyError:
if self.default is not None and not self.partial:
# Note: partial updates shouldn't set defaults
value = copy.deepcopy(self.default)
else:
if self.required:
raise ValidationError(self.error_messages['required'])
return
if self.source == '*':
if value:
reverted_data = self.restore_fields(value, {})
if not self._errors:
into.update(reverted_data)
else:
if value in (None, ''):
into[(self.source or field_name)] = None
else:
# Set the serializer object if it exists
obj = get_component(self.parent.object, self.source or field_name) if self.parent.object else None
# If we have a model manager or similar object then we need
# to iterate through each instance.
if (self.many and
not hasattr(obj, '__iter__') and
is_simple_callable(getattr(obj, 'all', None))):
obj = obj.all()
kwargs = {
'instance': obj,
'data': value,
'context': self.context,
'partial': self.partial,
'many': self.many,
'allow_add_remove': self.allow_add_remove
}
serializer = self.__class__(**kwargs)
if serializer.is_valid():
into[self.source or field_name] = serializer.object
else:
# Propagate errors up to our parent
raise NestedValidationError(serializer.errors)
def get_identity(self, data):
"""
This hook is required for bulk update.
It is used to determine the canonical identity of a given object.
Note that the data has not been validated at this point, so we need
to make sure that we catch any cases of incorrect datatypes being
passed to this method.
"""
try:
return data.get('id', None)
except AttributeError:
return None
@property
def errors(self):
"""
Run deserialization and return error data,
setting self.object if no errors occurred.
"""
if self._errors is None:
data, files = self.init_data, self.init_files
if self.many is not None:
many = self.many
else:
many = hasattr(data, '__iter__') and not isinstance(data, (Page, dict, six.text_type))
if many:
warnings.warn('Implicit list/queryset serialization is deprecated. '
'Use the `many=True` flag when instantiating the serializer.',
DeprecationWarning, stacklevel=3)
if many:
ret = RelationsList()
errors = []
update = self.object is not None
if update:
# If this is a bulk update we need to map all the objects
# to a canonical identity so we can determine which
# individual object is being updated for each item in the
# incoming data
objects = self.object
identities = [self.get_identity(self.to_native(obj)) for obj in objects]
identity_to_objects = dict(zip(identities, objects))
if hasattr(data, '__iter__') and not isinstance(data, (dict, six.text_type)):
for item in data:
if update:
# Determine which object we're updating
identity = self.get_identity(item)
self.object = identity_to_objects.pop(identity, None)
if self.object is None and not self.allow_add_remove:
ret.append(None)
errors.append({'non_field_errors': ['Cannot create a new item, only existing items may be updated.']})
continue
ret.append(self.from_native(item, None))
errors.append(self._errors)
if update and self.allow_add_remove:
ret._deleted = identity_to_objects.values()
self._errors = any(errors) and errors or []
else:
self._errors = {'non_field_errors': ['Expected a list of items.']}
else:
ret = self.from_native(data, files)
if not self._errors:
self.object = ret
return self._errors
def is_valid(self):
return not self.errors
@property
def data(self):
"""
Returns the serialized data on the serializer.
"""
if self._data is None:
obj = self.object
if self.many is not None:
many = self.many
else:
many = hasattr(obj, '__iter__') and not isinstance(obj, (Page, dict))
if many:
warnings.warn('Implicit list/queryset serialization is deprecated. '
'Use the `many=True` flag when instantiating the serializer.',
DeprecationWarning, stacklevel=2)
if many:
self._data = [self.to_native(item) for item in obj]
else:
self._data = self.to_native(obj)
return self._data
def save_object(self, obj, **kwargs):
obj.save(**kwargs)
def delete_object(self, obj):
obj.delete()
def save(self, **kwargs):
"""
Save the deserialized object and return it.
"""
# Clear cached _data, which may be invalidated by `save()`
self._data = None
if isinstance(self.object, list):
[self.save_object(item, **kwargs) for item in self.object]
if self.object._deleted:
[self.delete_object(item) for item in self.object._deleted]
else:
self.save_object(self.object, **kwargs)
return self.object
def metadata(self):
"""
Return a dictionary of metadata about the fields on the serializer.
Useful for things like responding to OPTIONS requests, or generating
API schemas for auto-documentation.
"""
return SortedDict(
[(field_name, field.metadata())
for field_name, field in six.iteritems(self.fields)]
)
class Serializer(six.with_metaclass(SerializerMetaclass, BaseSerializer)):
pass
class ModelSerializerOptions(SerializerOptions):
"""
Meta class options for ModelSerializer
"""
def __init__(self, meta):
super(ModelSerializerOptions, self).__init__(meta)
self.model = getattr(meta, 'model', None)
self.read_only_fields = getattr(meta, 'read_only_fields', ())
self.write_only_fields = getattr(meta, 'write_only_fields', ())
class ModelSerializer(Serializer):
"""
A serializer that deals with model instances and querysets.
"""
_options_class = ModelSerializerOptions
field_mapping = {
models.AutoField: IntegerField,
models.FloatField: FloatField,
models.IntegerField: IntegerField,
models.PositiveIntegerField: IntegerField,
models.SmallIntegerField: IntegerField,
models.PositiveSmallIntegerField: IntegerField,
models.DateTimeField: DateTimeField,
models.DateField: DateField,
models.TimeField: TimeField,
models.DecimalField: DecimalField,
models.EmailField: EmailField,
models.CharField: CharField,
models.URLField: URLField,
models.SlugField: SlugField,
models.TextField: CharField,
models.CommaSeparatedIntegerField: CharField,
models.BooleanField: BooleanField,
models.NullBooleanField: BooleanField,
models.FileField: FileField,
models.ImageField: ImageField,
}
def get_default_fields(self):
"""
Return all the fields that should be serialized for the model.
"""
cls = self.opts.model
assert cls is not None, \
"Serializer class '%s' is missing 'model' Meta option" % self.__class__.__name__
opts = get_concrete_model(cls)._meta
ret = SortedDict()
nested = bool(self.opts.depth)
# Deal with adding the primary key field
pk_field = opts.pk
while pk_field.rel and pk_field.rel.parent_link:
# If model is a child via multitable inheritance, use parent's pk
pk_field = pk_field.rel.to._meta.pk
field = self.get_pk_field(pk_field)
if field:
ret[pk_field.name] = field
# Deal with forward relationships
forward_rels = [field for field in opts.fields if field.serialize]
forward_rels += [field for field in opts.many_to_many if field.serialize]
for model_field in forward_rels:
has_through_model = False
if model_field.rel:
to_many = isinstance(model_field,
models.fields.related.ManyToManyField)
related_model = _resolve_model(model_field.rel.to)
if to_many and not model_field.rel.through._meta.auto_created:
has_through_model = True
if model_field.rel and nested:
if len(inspect.getargspec(self.get_nested_field).args) == 2:
warnings.warn(
'The `get_nested_field(model_field)` call signature '
'is due to be deprecated. '
'Use `get_nested_field(model_field, related_model, '
'to_many) instead',
PendingDeprecationWarning
)
field = self.get_nested_field(model_field)
else:
field = self.get_nested_field(model_field, related_model, to_many)
elif model_field.rel:
                if len(inspect.getargspec(self.get_related_field).args) == 3:
warnings.warn(
'The `get_related_field(model_field, to_many)` call '
'signature is due to be deprecated. '
'Use `get_related_field(model_field, related_model, '
'to_many) instead',
PendingDeprecationWarning
)
field = self.get_related_field(model_field, to_many=to_many)
else:
field = self.get_related_field(model_field, related_model, to_many)
else:
field = self.get_field(model_field)
if field:
if has_through_model:
field.read_only = True
ret[model_field.name] = field
# Deal with reverse relationships
if not self.opts.fields:
reverse_rels = []
else:
# Reverse relationships are only included if they are explicitly
# present in the `fields` option on the serializer
reverse_rels = opts.get_all_related_objects()
reverse_rels += opts.get_all_related_many_to_many_objects()
for relation in reverse_rels:
accessor_name = relation.get_accessor_name()
if not self.opts.fields or accessor_name not in self.opts.fields:
continue
related_model = relation.model
to_many = relation.field.rel.multiple
has_through_model = False
is_m2m = isinstance(relation.field,
models.fields.related.ManyToManyField)
if (is_m2m and
hasattr(relation.field.rel, 'through') and
not relation.field.rel.through._meta.auto_created):
has_through_model = True
if nested:
field = self.get_nested_field(None, related_model, to_many)
else:
field = self.get_related_field(None, related_model, to_many)
if field:
if has_through_model:
field.read_only = True
ret[accessor_name] = field
# Ensure that 'read_only_fields' is an iterable
assert isinstance(self.opts.read_only_fields, (list, tuple)), '`read_only_fields` must be a list or tuple'
# Add the `read_only` flag to any fields that have been specified
# in the `read_only_fields` option
for field_name in self.opts.read_only_fields:
assert field_name not in self.base_fields.keys(), (
"field '%s' on serializer '%s' specified in "
"`read_only_fields`, but also added "
"as an explicit field. Remove it from `read_only_fields`." %
(field_name, self.__class__.__name__))
            assert field_name in ret, (
                "Non-existent field '%s' specified in `read_only_fields` "
"on serializer '%s'." %
(field_name, self.__class__.__name__))
ret[field_name].read_only = True
# Ensure that 'write_only_fields' is an iterable
assert isinstance(self.opts.write_only_fields, (list, tuple)), '`write_only_fields` must be a list or tuple'
for field_name in self.opts.write_only_fields:
assert field_name not in self.base_fields.keys(), (
"field '%s' on serializer '%s' specified in "
"`write_only_fields`, but also added "
"as an explicit field. Remove it from `write_only_fields`." %
(field_name, self.__class__.__name__))
assert field_name in ret, (
"Non-existant field '%s' specified in `write_only_fields` "
"on serializer '%s'." %
(field_name, self.__class__.__name__))
ret[field_name].write_only = True
return ret
def get_pk_field(self, model_field):
"""
Returns a default instance of the pk field.
"""
return self.get_field(model_field)
def get_nested_field(self, model_field, related_model, to_many):
"""
Creates a default instance of a nested relational field.
Note that model_field will be `None` for reverse relationships.
"""
class NestedModelSerializer(ModelSerializer):
class Meta:
model = related_model
depth = self.opts.depth - 1
return NestedModelSerializer(many=to_many)
def get_related_field(self, model_field, related_model, to_many):
"""
Creates a default instance of a flat relational field.
Note that model_field will be `None` for reverse relationships.
"""
# TODO: filter queryset using:
# .using(db).complex_filter(self.rel.limit_choices_to)
kwargs = {
'queryset': related_model._default_manager,
'many': to_many
}
if model_field:
kwargs['required'] = not(model_field.null or model_field.blank)
if model_field.help_text is not None:
kwargs['help_text'] = model_field.help_text
if model_field.verbose_name is not None:
kwargs['label'] = model_field.verbose_name
if not model_field.editable:
kwargs['read_only'] = True
return PrimaryKeyRelatedField(**kwargs)
def get_field(self, model_field):
"""
Creates a default instance of a basic non-relational field.
"""
kwargs = {}
if model_field.null or model_field.blank:
kwargs['required'] = False
if isinstance(model_field, models.AutoField) or not model_field.editable:
kwargs['read_only'] = True
if model_field.has_default():
kwargs['default'] = model_field.get_default()
if issubclass(model_field.__class__, models.TextField):
kwargs['widget'] = widgets.Textarea
if model_field.verbose_name is not None:
kwargs['label'] = model_field.verbose_name
if model_field.help_text is not None:
kwargs['help_text'] = model_field.help_text
# TODO: TypedChoiceField?
if model_field.flatchoices: # This ModelField contains choices
kwargs['choices'] = model_field.flatchoices
if model_field.null:
kwargs['empty'] = None
return ChoiceField(**kwargs)
# put this below the ChoiceField because min_value isn't a valid initializer
if issubclass(model_field.__class__, models.PositiveIntegerField) or\
issubclass(model_field.__class__, models.PositiveSmallIntegerField):
kwargs['min_value'] = 0
attribute_dict = {
models.CharField: ['max_length'],
models.CommaSeparatedIntegerField: ['max_length'],
models.DecimalField: ['max_digits', 'decimal_places'],
models.EmailField: ['max_length'],
models.FileField: ['max_length'],
models.ImageField: ['max_length'],
models.SlugField: ['max_length'],
models.URLField: ['max_length'],
}
if model_field.__class__ in attribute_dict:
attributes = attribute_dict[model_field.__class__]
for attribute in attributes:
kwargs.update({attribute: getattr(model_field, attribute)})
try:
return self.field_mapping[model_field.__class__](**kwargs)
except KeyError:
return ModelField(model_field=model_field, **kwargs)
def get_validation_exclusions(self, instance=None):
"""
Return a list of field names to exclude from model validation.
"""
cls = self.opts.model
opts = get_concrete_model(cls)._meta
exclusions = [field.name for field in opts.fields + opts.many_to_many]
for field_name, field in self.fields.items():
field_name = field.source or field_name
if field_name in exclusions \
and not field.read_only \
and (field.required or hasattr(instance, field_name)) \
and not isinstance(field, Serializer):
exclusions.remove(field_name)
return exclusions
def full_clean(self, instance):
"""
Perform Django's full_clean, and populate the `errors` dictionary
if any validation errors occur.
Note that we don't perform this inside the `.restore_object()` method,
so that subclasses can override `.restore_object()`, and still get
the full_clean validation checking.
"""
try:
instance.full_clean(exclude=self.get_validation_exclusions(instance))
except ValidationError as err:
self._errors = err.message_dict
return None
return instance
def restore_object(self, attrs, instance=None):
"""
Restore the model instance.
"""
m2m_data = {}
related_data = {}
nested_forward_relations = {}
meta = self.opts.model._meta
# Reverse fk or one-to-one relations
for (obj, model) in meta.get_all_related_objects_with_model():
field_name = obj.get_accessor_name()
if field_name in attrs:
related_data[field_name] = attrs.pop(field_name)
# Reverse m2m relations
for (obj, model) in meta.get_all_related_m2m_objects_with_model():
field_name = obj.get_accessor_name()
if field_name in attrs:
m2m_data[field_name] = attrs.pop(field_name)
# Forward m2m relations
for field in meta.many_to_many + meta.virtual_fields:
if isinstance(field, GenericForeignKey):
continue
if field.name in attrs:
m2m_data[field.name] = attrs.pop(field.name)
# Nested forward relations - These need to be marked so we can save
# them before saving the parent model instance.
for field_name in attrs.keys():
if isinstance(self.fields.get(field_name, None), Serializer):
nested_forward_relations[field_name] = attrs[field_name]
# Create an empty instance of the model
if instance is None:
instance = self.opts.model()
for key, val in attrs.items():
try:
setattr(instance, key, val)
except ValueError:
self._errors[key] = self.error_messages['required']
# Any relations that cannot be set until we've
# saved the model get hidden away on these
# private attributes, so we can deal with them
# at the point of save.
instance._related_data = related_data
instance._m2m_data = m2m_data
instance._nested_forward_relations = nested_forward_relations
return instance
def from_native(self, data, files):
"""
Override the default method to also include model field validation.
"""
instance = super(ModelSerializer, self).from_native(data, files)
if not self._errors:
return self.full_clean(instance)
def save_object(self, obj, **kwargs):
"""
Save the deserialized object.
"""
if getattr(obj, '_nested_forward_relations', None):
# Nested relationships need to be saved before we can save the
# parent instance.
for field_name, sub_object in obj._nested_forward_relations.items():
if sub_object:
self.save_object(sub_object)
setattr(obj, field_name, sub_object)
obj.save(**kwargs)
if getattr(obj, '_m2m_data', None):
for accessor_name, object_list in obj._m2m_data.items():
setattr(obj, accessor_name, object_list)
del(obj._m2m_data)
if getattr(obj, '_related_data', None):
related_fields = dict([
(field.get_accessor_name(), field)
for field, model
in obj._meta.get_all_related_objects_with_model()
])
for accessor_name, related in obj._related_data.items():
if isinstance(related, RelationsList):
# Nested reverse fk relationship
for related_item in related:
fk_field = related_fields[accessor_name].field.name
setattr(related_item, fk_field, obj)
self.save_object(related_item)
# Delete any removed objects
if related._deleted:
[self.delete_object(item) for item in related._deleted]
elif isinstance(related, models.Model):
# Nested reverse one-one relationship
fk_field = obj._meta.get_field_by_name(accessor_name)[0].field.name
setattr(related, fk_field, obj)
self.save_object(related)
else:
# Reverse FK or reverse one-one
setattr(obj, accessor_name, related)
del(obj._related_data)
class HyperlinkedModelSerializerOptions(ModelSerializerOptions):
"""
Options for HyperlinkedModelSerializer
"""
def __init__(self, meta):
super(HyperlinkedModelSerializerOptions, self).__init__(meta)
self.view_name = getattr(meta, 'view_name', None)
self.lookup_field = getattr(meta, 'lookup_field', None)
self.url_field_name = getattr(meta, 'url_field_name', api_settings.URL_FIELD_NAME)
class HyperlinkedModelSerializer(ModelSerializer):
"""
A subclass of ModelSerializer that uses hyperlinked relationships,
instead of primary key relationships.
"""
_options_class = HyperlinkedModelSerializerOptions
_default_view_name = '%(model_name)s-detail'
_hyperlink_field_class = HyperlinkedRelatedField
_hyperlink_identify_field_class = HyperlinkedIdentityField
def get_default_fields(self):
fields = super(HyperlinkedModelSerializer, self).get_default_fields()
if self.opts.view_name is None:
self.opts.view_name = self._get_default_view_name(self.opts.model)
if self.opts.url_field_name not in fields:
url_field = self._hyperlink_identify_field_class(
view_name=self.opts.view_name,
lookup_field=self.opts.lookup_field
)
ret = self._dict_class()
ret[self.opts.url_field_name] = url_field
ret.update(fields)
fields = ret
return fields
def get_pk_field(self, model_field):
if self.opts.fields and model_field.name in self.opts.fields:
return self.get_field(model_field)
def get_related_field(self, model_field, related_model, to_many):
"""
Creates a default instance of a flat relational field.
"""
# TODO: filter queryset using:
# .using(db).complex_filter(self.rel.limit_choices_to)
kwargs = {
'queryset': related_model._default_manager,
'view_name': self._get_default_view_name(related_model),
'many': to_many
}
if model_field:
kwargs['required'] = not(model_field.null or model_field.blank)
if model_field.help_text is not None:
kwargs['help_text'] = model_field.help_text
if model_field.verbose_name is not None:
kwargs['label'] = model_field.verbose_name
if self.opts.lookup_field:
kwargs['lookup_field'] = self.opts.lookup_field
return self._hyperlink_field_class(**kwargs)
def get_identity(self, data):
"""
This hook is required for bulk update.
We need to override the default, to use the url as the identity.
"""
try:
return data.get(self.opts.url_field_name, None)
except AttributeError:
return None
def _get_default_view_name(self, model):
"""
Return the view name to use if 'view_name' is not specified in 'Meta'
"""
model_meta = model._meta
format_kwargs = {
'app_label': model_meta.app_label,
'model_name': model_meta.object_name.lower()
}
return self._default_view_name % format_kwargs
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/rest_framework/serializers.py | Python | agpl-3.0 | 41,575 |
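# --- Hedged usage sketch (not part of rest_framework/serializers.py above) ---
# A minimal example of the ModelSerializer machinery defined above, assuming a
# configured Django project. The `Author` model, its fields, and the serializer
# below are hypothetical, introduced only for illustration.
from django.db import models
from rest_framework import serializers


class Author(models.Model):
    # Hypothetical model with one required field and one optional one.
    name = models.CharField(max_length=100)
    bio = models.TextField(blank=True)


class AuthorSerializer(serializers.ModelSerializer):
    class Meta:
        model = Author
        # get_default_fields() introspects the model fields; read_only_fields
        # marks an introspected field read-only without redeclaring it.
        fields = ('id', 'name', 'bio')
        read_only_fields = ('bio',)

# Typical round trip (DRF 2.x style, as in the module above):
#   serializer = AuthorSerializer(data={'name': 'Ada'})
#   serializer.is_valid()   # runs from_native() and then full_clean()
#   serializer.object       # the restored, unsaved Author instance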
from sympy import (symbols, product, factorial, rf, sqrt, cos,
Function, Product, Rational)
a, k, n = symbols('a,k,n', integer=True)
def test_simple_products():
assert product(2, (k, a, n)) == 2**(n-a+1)
assert product(k, (k, 1, n)) == factorial(n)
assert product(k**3, (k, 1, n)) == factorial(n)**3
assert product(k+1, (k, 0, n-1)) == factorial(n)
assert product(k+1, (k, a, n-1)) == rf(1+a, n-a)
assert product(cos(k), (k, 0, 5)) == cos(1)*cos(2)*cos(3)*cos(4)*cos(5)
assert product(cos(k), (k, 3, 5)) == cos(3)*cos(4)*cos(5)
assert product(cos(k), (k, 1, Rational(5, 2))) == cos(1)*cos(2)
assert isinstance(product(k**k, (k, 1, n)), Product)
def test_rational_products():
assert product(1+1/k, (k, 1, n)) == rf(2, n)/factorial(n)
def test_special_products():
# Wallis product
assert product((4*k)**2 / (4*k**2-1), (k, 1, n)) == \
4**n*factorial(n)**2/rf(Rational(1, 2), n)/rf(Rational(3, 2), n)
# Euler's product formula for sin
assert product(1 + a/k**2, (k, 1, n)) == \
rf(1 - sqrt(-a), n)*rf(1 + sqrt(-a), n)/factorial(n)**2
def test__eval_product():
from sympy.abc import i, n
# 1710
a = Function('a')
assert product(2*a(i), (i, 1, n)) == 2**n * Product(a(i), (i, 1, n))
# 1711
assert product(2**i, (i, 1, n)) == 2**(n/2 + n**2/2)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/concrete/tests/test_products.py | Python | agpl-3.0 | 1,362 |
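# --- Hedged aside (not part of the sympy test file above) --------------------
# A small sketch of the same sympy product API the tests exercise: product()
# evaluates eagerly when it can, while Product() stays symbolic until .doit().
from sympy import Product, factorial, product, symbols

k, n = symbols('k n', integer=True)

assert product(k, (k, 1, n)) == factorial(n)   # symbolic upper bound
p = Product(k**2, (k, 1, 4))                   # unevaluated product object
assert p.doit() == 576                         # (1*2*3*4)**2 == 24**2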
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class UtilMacros(AutotoolsPackage):
"""This is a set of autoconf macros used by the configure.ac scripts in
other Xorg modular packages, and is needed to generate new versions
of their configure scripts with autoconf."""
homepage = "http://cgit.freedesktop.org/xorg/util/macros/"
url = "https://www.x.org/archive/individual/util/util-macros-1.19.1.tar.bz2"
version('1.19.1', '6e76e546a4e580f15cebaf8019ef1625')
version('1.19.0', '1cf984125e75f8204938d998a8b6c1e1')
| EmreAtes/spack | var/spack/repos/builtin/packages/util-macros/package.py | Python | lgpl-2.1 | 1,752 |
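# --- Hedged sketch (not a real Spack package) ---------------------------------
# Shows how an Xorg-style package would consume util-macros as a build-time
# dependency, using the same AutotoolsPackage idioms as the file above. The
# package name, URL, and checksum are placeholders, not real artifacts.
from spack import *


class Libexample(AutotoolsPackage):
    """Hypothetical Xorg-style library, shown only to illustrate depends_on."""

    homepage = "https://example.org/libexample"
    url = "https://example.org/libexample-1.0.0.tar.gz"

    version('1.0.0', 'ffffffffffffffffffffffffffffffff')  # placeholder md5

    depends_on('util-macros', type='build')
    depends_on('pkgconfig', type='build')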
"""Fixer that changes raw_input(...) into input(...)."""
# Author: Andre Roberge
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixRawInput(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< name='raw_input' trailer< '(' [any] ')' > any* >
"""
def transform(self, node, results):
name = results["name"]
name.replace(Name("input", prefix=name.prefix))
| Orav/kbengine | kbe/src/lib/python/Lib/lib2to3/fixes/fix_raw_input.py | Python | lgpl-3.0 | 471 |
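# --- Hedged usage sketch (not part of lib2to3) --------------------------------
# Applying the fixer above programmatically through lib2to3's RefactoringTool.
# The input string is illustrative only.
from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(['lib2to3.fixes.fix_raw_input'])
tree = tool.refactor_string("name = raw_input('who? ')\n", '<example>')
print(tree)  # -> name = input('who? ')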
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""Python 2<->3 compatibility module"""
import sys
def print_(template, *args, **kwargs):
template = str(template)
if args:
template = template % args
elif kwargs:
template = template % kwargs
sys.stdout.writelines(template)
if sys.version_info < (3, 0):
basestring = basestring
from ConfigParser import ConfigParser
from urllib import unquote
iteritems = lambda d: d.iteritems()
dictkeys = lambda d: d.keys()
def reraise(t, e, tb):
exec('raise t, e, tb', dict(t=t, e=e, tb=tb))
else:
basestring = str
from configparser import ConfigParser
from urllib.parse import unquote
iteritems = lambda d: d.items()
dictkeys = lambda d: list(d.keys())
def reraise(t, e, tb):
raise e.with_traceback(tb)
| grepme/CMPUT410Lab01 | virt_env/virt1/lib/python2.7/site-packages/PasteDeploy-1.5.2-py2.7.egg/paste/deploy/compat.py | Python | apache-2.0 | 961 |
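# --- Hedged usage sketch (not part of paste/deploy/compat.py above) -----------
# Typical use of the reraise() helper: re-raise the active exception with its
# original traceback after running some cleanup, on both Python 2 and 3.
import sys

from paste.deploy.compat import reraise


def run_with_cleanup(func, cleanup):
    try:
        return func()
    except Exception:
        exc_type, exc, tb = sys.exc_info()
        cleanup()
        reraise(exc_type, exc, tb)  # the original traceback is preserved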
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared helper functions for RuntimeConfig API classes."""
def config_name_from_full_name(full_name):
"""Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See:
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format
"""
projects, _, configs, result = full_name.split('/')
if projects != 'projects' or configs != 'configs':
raise ValueError(
'Unexpected format of resource', full_name,
'Expected "projects/{proj}/configs/{cfg}"')
return result
def variable_name_from_full_name(full_name):
"""Extract the variable name from a full resource name.
>>> variable_name_from_full_name(
'projects/my-proj/configs/my-config/variables/var-name')
"var-name"
>>> variable_name_from_full_name(
'projects/my-proj/configs/my-config/variables/another/var/name')
"another/var/name"
:type full_name: str
:param full_name:
The full resource name of a variable. The full resource name looks like
``projects/prj-name/configs/cfg-name/variables/var-name`` and is
returned as the ``name`` field of a variable resource. See:
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.variables
:rtype: str
:returns: The variable's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format
"""
projects, _, configs, _, variables, result = full_name.split('/', 5)
if (projects != 'projects' or configs != 'configs' or
variables != 'variables'):
raise ValueError(
'Unexpected format of resource', full_name,
'Expected "projects/{proj}/configs/{cfg}/variables/..."')
return result
| jgeewax/gcloud-python | runtimeconfig/google/cloud/runtimeconfig/_helpers.py | Python | apache-2.0 | 2,925 |
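# --- Hedged usage sketch (not part of the module above) -----------------------
# Round-tripping resource names through the helpers; the project, config and
# variable names below are made up.
from google.cloud.runtimeconfig._helpers import (
    config_name_from_full_name, variable_name_from_full_name)

full_config = 'projects/demo-proj/configs/app-config'
full_variable = full_config + '/variables/feature/flags/beta'

assert config_name_from_full_name(full_config) == 'app-config'
assert variable_name_from_full_name(full_variable) == 'feature/flags/beta'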
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import metrics
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FalsePositivesTest(test.TestCase):
def test_config(self):
fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
self.assertEqual(fp_obj.name, 'my_fp')
self.assertEqual(len(fp_obj.variables), 1)
self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
self.assertEqual(fp_obj2.name, 'my_fp')
self.assertEqual(len(fp_obj2.variables), 1)
self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(14., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose([7., 4., 2.], result)
def test_weighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([125., 42., 12.], self.evaluate(result))
def test_threshold_limit(self):
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[-1, 2\]'):
metrics.FalsePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[None\]'):
metrics.FalsePositives(thresholds=[None])
@test_util.run_all_in_graph_and_eager_modes
class FalseNegativesTest(test.TestCase):
def test_config(self):
fn_obj = metrics.FalseNegatives(name='my_fn', thresholds=[0.4, 0.9])
self.assertEqual(fn_obj.name, 'my_fn')
self.assertEqual(len(fn_obj.variables), 1)
self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
self.assertEqual(fn_obj2.name, 'my_fn')
self.assertEqual(len(fn_obj2.variables), 1)
self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(5., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose([1., 4., 6.], result)
def test_weighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([4., 16., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TrueNegativesTest(test.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, 'my_tn')
self.assertEqual(len(tn_obj.variables), 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, 'my_tn')
self.assertEqual(len(tn_obj2.variables), 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose([2., 5., 7.], result)
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5., 15., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TruePositivesTest(test.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name='my_tp', thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, 'my_tp')
self.assertEqual(len(tp_obj.variables), 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, 'my_tp')
self.assertEqual(len(tp_obj2.variables), 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(12., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose([6., 3., 1.], result)
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
result = tp_obj(y_true, y_pred, sample_weight=37.)
self.assertAllClose([222., 111., 37.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class PrecisionTest(test.TestCase):
def test_config(self):
p_obj = metrics.Precision(
name='my_precision', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(p_obj.name, 'my_precision')
self.assertEqual(len(p_obj.variables), 2)
self.assertEqual([v.name for v in p_obj.variables],
['true_positives:0', 'false_positives:0'])
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
self.assertEqual(p_obj.top_k, 15)
self.assertEqual(p_obj.class_id, 12)
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, 'my_precision')
self.assertEqual(len(p_obj2.variables), 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
self.assertEqual(p_obj2.top_k, 15)
self.assertEqual(p_obj2.class_id, 12)
def test_value_is_idempotent(self):
p_obj = metrics.Precision(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = p_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(p_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(p_obj.result())
for _ in range(10):
self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
1e-3)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
1e-3)
def test_unweighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1. / 3, self.evaluate(result))
def test_weighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
self.evaluate(
p_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = p_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
tp = (2 + 5) + (3 + 3)
predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
expected_precision = tp / predicted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_unweighted_class_id(self):
p_obj = metrics.Precision(class_id=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_class_id(self):
p_obj = metrics.Precision(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_threshold(self):
p_obj = metrics.Precision(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
@test_util.run_all_in_graph_and_eager_modes
class RecallTest(test.TestCase):
def test_config(self):
r_obj = metrics.Recall(
name='my_recall', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(r_obj.name, 'my_recall')
self.assertEqual(len(r_obj.variables), 2)
self.assertEqual([v.name for v in r_obj.variables],
['true_positives:0', 'false_negatives:0'])
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
self.assertEqual(r_obj.top_k, 15)
self.assertEqual(r_obj.class_id, 12)
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, 'my_recall')
self.assertEqual(len(r_obj2.variables), 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
self.assertEqual(r_obj2.top_k, 15)
self.assertEqual(r_obj2.class_id, 12)
def test_value_is_idempotent(self):
r_obj = metrics.Recall(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = r_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(r_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(r_obj.result())
for _ in range(10):
self.assertArrayNear(initial_recall, self.evaluate(r_obj.result()), 1e-3)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(r_obj.result()),
1e-3)
def test_unweighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_weighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
self.evaluate(
r_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = r_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
tp = (2 + 5) + (3 + 3)
positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
expected_recall = tp / positives
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_unweighted_class_id(self):
r_obj = metrics.Recall(class_id=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_class_id(self):
r_obj = metrics.Recall(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_threshold(self):
r_obj = metrics.Recall(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([1, 1, 1, 0, 1], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.25, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(3, self.evaluate(r_obj.false_negatives))
@test_util.run_all_in_graph_and_eager_modes
class SensitivityAtSpecificityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4, num_thresholds=100, name='sensitivity_at_specificity_1')
self.assertEqual(s_obj.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_sensitivity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.8, self.evaluate(result))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, self.evaluate(result))
def test_invalid_specificity(self):
with self.assertRaisesRegexp(
ValueError, r'`specificity` must be in the range \[0, 1\].'):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class SpecificityAtSensitivityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4, num_thresholds=100, name='specificity_at_sensitivity_1')
self.assertEqual(s_obj.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_specificity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_specificity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_invalid_sensitivity(self):
with self.assertRaisesRegexp(
ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class AUCTest(test.TestCase):
def setup(self):
self.num_thresholds = 3
self.y_pred = constant_op.constant([0, 0.5, 0.3, 0.9], dtype=dtypes.float32)
self.y_true = constant_op.constant([0, 0, 1, 1])
self.sample_weight = [1, 2, 3, 4]
# threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
# y_pred when threshold = 0 - 1e-7 : [1, 1, 1, 1]
# y_pred when threshold = 0.5 : [0, 0, 0, 1]
# y_pred when threshold = 1 + 1e-7 : [0, 0, 0, 0]
# without sample_weight:
# tp = np.sum([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]], axis=1)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# with sample_weight:
# tp = np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 3, 0], [0, 0, 3, 4]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 2, 0, 0], [1, 2, 0, 0]], axis=1)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
def test_config(self):
auc_obj = metrics.AUC(
num_thresholds=100,
curve='PR',
summation_method='majoring',
name='auc_1')
self.assertEqual(auc_obj.name, 'auc_1')
self.assertEqual(len(auc_obj.variables), 4)
self.assertEqual(auc_obj.num_thresholds, 100)
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
self.assertEqual(auc_obj2.name, 'auc_1')
self.assertEqual(len(auc_obj2.variables), 4)
self.assertEqual(auc_obj2.num_thresholds, 100)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj2.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_config_manual_thresholds(self):
auc_obj = metrics.AUC(
num_thresholds=None,
curve='PR',
summation_method='majoring',
name='auc_1',
thresholds=[0.3, 0.5])
self.assertEqual(auc_obj.name, 'auc_1')
self.assertEqual(len(auc_obj.variables), 4)
self.assertEqual(auc_obj.num_thresholds, 4)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.3, 0.5, 1.0])
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
self.assertEqual(auc_obj2.name, 'auc_1')
self.assertEqual(len(auc_obj2.variables), 4)
self.assertEqual(auc_obj2.num_thresholds, 4)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj2.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_value_is_idempotent(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=3)
self.evaluate(variables.variables_initializer(auc_obj.variables))
# Run several updates.
update_op = auc_obj.update_state(self.y_true, self.y_pred)
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_auc = self.evaluate(auc_obj.result())
for _ in range(10):
self.assertAllClose(initial_auc, self.evaluate(auc_obj.result()), 1e-3)
def test_unweighted_all_correct(self):
self.setup()
auc_obj = metrics.AUC()
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_true)
self.assertEqual(self.evaluate(result), 1)
def test_unweighted(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.75 * 1 + 0.25 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_manual_thresholds(self):
self.setup()
# Verify that when specified, thresholds are used instead of num_thresholds.
auc_obj = metrics.AUC(num_thresholds=2, thresholds=[0.5])
self.assertEqual(auc_obj.num_thresholds, 3)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.75 * 1 + 0.25 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.571)/2, (0.571 + 0)/2] = [0.7855, 0.2855]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.7855 * 1 + 0.2855 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method='majoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [max(1, 0.571), max(0.571, 0)] = [1, 0.571]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (1 * 1 + 0.571 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method='minoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [min(1, 0.571), min(0.571, 0)] = [0.571, 0]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.571 * 1 + 0 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve='PR',
summation_method='majoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [max(0.7, 1), max(1, 0)] = [1, 1]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (1 * 0.429 + 1 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve='PR',
summation_method='minoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [min(0.7, 1), min(1, 0)] = [0.7, 0]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (0.7 * 0.429 + 0 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve='PR')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# auc = (slope / Total Pos) * [dTP - intercept * log(Pb/Pa)]
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# P = tp + fp = [10, 4, 0]
# dTP = [7-4, 4-0] = [3, 4]
# dP = [10-4, 4-0] = [6, 4]
# slope = dTP/dP = [0.5, 1]
# intercept = (TPa+(slope*Pa) = [(4 - 0.5*4), (0 - 1*0)] = [2, 0]
# (Pb/Pa) = (Pb/Pa) if Pb > 0 AND Pa > 0 else 1 = [10/4, 4/0] = [2.5, 1]
# auc * TotalPos = [(0.5 * (3 + 2 * log(2.5))), (1 * (4 + 0))]
# = [2.416, 4]
# auc = [2.416, 4]/(tp[1:]+fn[1:])
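    # (0.5 * (3 + 2 * log(2.5))) uses the natural log: 0.5 * (3 + 2 * 0.9163)
    # ~= 2.416, which is where the 2.416 value above comes from.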
expected_result = (2.416/7 + 4/7)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=-1)
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=1)
def test_invalid_curve(self):
with self.assertRaisesRegexp(ValueError,
'Invalid AUC curve value "Invalid".'):
metrics.AUC(curve='Invalid')
def test_invalid_summation_method(self):
with self.assertRaisesRegexp(
ValueError, 'Invalid AUC summation method value "Invalid".'):
metrics.AUC(summation_method='Invalid')
if __name__ == '__main__':
test.main()
| chemelnucfin/tensorflow | tensorflow/python/keras/metrics_confusion_matrix_test.py | Python | apache-2.0 | 51,243 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetTag
# Retrieves a specified tag object.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetTag(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetTag Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetTag, self).__init__(temboo_session, '/Library/GitHub/GitDataAPI/Tags/GetTag')
def new_input_set(self):
return GetTagInputSet()
def _make_result_set(self, result, path):
return GetTagResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetTagChoreographyExecution(session, exec_id, path)
class GetTagInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetTag
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((conditional, string) The Access Token retrieved during the OAuth process. Required when accessing a protected resource.)
"""
super(GetTagInputSet, self)._set_input('AccessToken', value)
def set_Repo(self, value):
"""
Set the value of the Repo input for this Choreo. ((required, string) The name of the repo associated with the tag to retrieve.)
"""
super(GetTagInputSet, self)._set_input('Repo', value)
def set_SHA(self, value):
"""
Set the value of the SHA input for this Choreo. ((required, string) The SHA associated with the tag to retrieve.)
"""
super(GetTagInputSet, self)._set_input('SHA', value)
def set_User(self, value):
"""
Set the value of the User input for this Choreo. ((required, string) The GitHub username.)
"""
super(GetTagInputSet, self)._set_input('User', value)
class GetTagResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetTag Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from GitHub.)
"""
return self._output.get('Response', None)
def get_Limit(self):
"""
Retrieve the value for the "Limit" output from this Choreo execution. ((integer) The available rate limit for your account. This is returned in the GitHub response header.)
"""
return self._output.get('Limit', None)
def get_Remaining(self):
"""
Retrieve the value for the "Remaining" output from this Choreo execution. ((integer) The remaining number of API requests available to you. This is returned in the GitHub response header.)
"""
return self._output.get('Remaining', None)
class GetTagChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetTagResultSet(response, path)
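# Illustrative usage sketch (hedged): the session construction and the literal
# input values below are placeholders and are not part of this Choreo file;
# treat the exact TembooSession import path as an assumption.
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = GetTag(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken('<GITHUB_OAUTH_TOKEN>')
#   inputs.set_User('octocat')
#   inputs.set_Repo('Hello-World')
#   inputs.set_SHA('<TAG_SHA>')
#   results = choreo.execute_with_results(inputs)
#   tag = results.getJSONFromString(results.get_Response())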
| jordanemedlock/psychtruths | temboo/core/Library/GitHub/GitDataAPI/Tags/GetTag.py | Python | apache-2.0 | 4,161 |
#!/usr/bin/env python
# update-dependencies-bad.py - Fails on bad.swift -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Fails if the input file is named "bad.swift" or "crash.swift"; otherwise
# dispatches to update-dependencies.py. "crash.swift" gives an exit code
# other than 1.
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import shutil
import sys
assert sys.argv[1] == '-frontend'
primaryFile = sys.argv[sys.argv.index('-primary-file') + 1]
if (os.path.basename(primaryFile) == 'bad.swift' or
os.path.basename(primaryFile) == 'crash.swift'):
print("Handled", os.path.basename(primaryFile))
# Replace the dependencies file with the input file.
try:
depsFile = sys.argv[sys.argv.index(
'-emit-reference-dependencies-path') + 1]
shutil.copyfile(primaryFile, depsFile)
except ValueError:
pass
if os.path.basename(primaryFile) == 'bad.swift':
sys.exit(1)
else:
sys.exit(129)
execDir = os.path.dirname(os.path.abspath(__file__))
execfile(os.path.join(execDir, "update-dependencies.py"))
| gottesmm/swift | test/Driver/Dependencies/Inputs/update-dependencies-bad.py | Python | apache-2.0 | 1,560 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Badges
# Returns badges for a given user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Badges(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Badges Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Badges, self).__init__(temboo_session, '/Library/Foursquare/Users/Badges')
def new_input_set(self):
return BadgesInputSet()
def _make_result_set(self, result, path):
return BadgesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return BadgesChoreographyExecution(session, exec_id, path)
class BadgesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Badges
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_OauthToken(self, value):
"""
Set the value of the OauthToken input for this Choreo. ((required, string) The Foursquare API OAuth token string.)
"""
super(BadgesInputSet, self)._set_input('OauthToken', value)
def set_ResponseFormat(self, value):
"""
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(BadgesInputSet, self)._set_input('ResponseFormat', value)
def set_UserID(self, value):
"""
        Set the value of the UserID input for this Choreo. ((optional, string) Identity of the user to get badges for. Defaults to "self" to get badges for the acting user.)
"""
super(BadgesInputSet, self)._set_input('UserID', value)
class BadgesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Badges Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
class BadgesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return BadgesResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Foursquare/Users/Badges.py | Python | apache-2.0 | 3,462 |
import six
import sys
from optparse import make_option, NO_DEFAULT
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django_extensions.management.modelviz import generate_dot
try:
import pygraphviz
HAS_PYGRAPHVIZ = True
except ImportError:
HAS_PYGRAPHVIZ = False
try:
import pydot
HAS_PYDOT = True
except ImportError:
HAS_PYDOT = False
class Command(BaseCommand):
graph_models_options = (
make_option('--pygraphviz', action='store_true', dest='pygraphviz',
help='Use PyGraphViz to generate the image.'),
make_option('--pydot', action='store_true', dest='pydot',
help='Use PyDot to generate the image.'),
make_option('--disable-fields', '-d', action='store_true', dest='disable_fields',
help='Do not show the class member fields'),
make_option('--group-models', '-g', action='store_true', dest='group_models',
help='Group models together respective to their application'),
make_option('--all-applications', '-a', action='store_true', dest='all_applications',
help='Automatically include all applications from INSTALLED_APPS'),
make_option('--output', '-o', action='store', dest='outputfile',
                    help='Render output file. Type of output depends on the file extension. Use png or jpg to render graph to image.'),
make_option('--layout', '-l', action='store', dest='layout', default='dot',
help='Layout to be used by GraphViz for visualization. Layouts: circo dot fdp neato nop nop1 nop2 twopi'),
make_option('--verbose-names', '-n', action='store_true', dest='verbose_names',
help='Use verbose_name of models and fields'),
make_option('--language', '-L', action='store', dest='language',
help='Specify language used for verbose_name localization'),
make_option('--exclude-columns', '-x', action='store', dest='exclude_columns',
help='Exclude specific column(s) from the graph. Can also load exclude list from file.'),
make_option('--exclude-models', '-X', action='store', dest='exclude_models',
help='Exclude specific model(s) from the graph. Can also load exclude list from file.'),
make_option('--include-models', '-I', action='store', dest='include_models',
help='Restrict the graph to specified models.'),
make_option('--inheritance', '-e', action='store_true', dest='inheritance', default=True,
help='Include inheritance arrows (default)'),
make_option('--no-inheritance', '-E', action='store_false', dest='inheritance',
help='Do not include inheritance arrows'),
make_option('--hide-relations-from-fields', '-R', action='store_false', dest="relations_as_fields",
default=True, help="Do not show relations as fields in the graph."),
make_option('--disable-sort-fields', '-S', action="store_false", dest="sort_fields",
default=True, help="Do not sort fields"),
)
option_list = BaseCommand.option_list + graph_models_options
help = "Creates a GraphViz dot file for the specified app names. You can pass multiple app names and they will all be combined into a single model. Output is usually directed to a dot file."
args = "[appname]"
label = 'application name'
requires_model_validation = True
can_import_settings = True
def handle(self, *args, **options):
self.options_from_settings(options)
if len(args) < 1 and not options['all_applications']:
raise CommandError("need one or more arguments for appname")
use_pygraphviz = options.get('pygraphviz', False)
use_pydot = options.get('pydot', False)
cli_options = ' '.join(sys.argv[2:])
dotdata = generate_dot(args, cli_options=cli_options, **options)
dotdata = dotdata.encode('utf-8')
if options['outputfile']:
if not use_pygraphviz and not use_pydot:
if HAS_PYGRAPHVIZ:
use_pygraphviz = True
elif HAS_PYDOT:
use_pydot = True
if use_pygraphviz:
self.render_output_pygraphviz(dotdata, **options)
elif use_pydot:
self.render_output_pydot(dotdata, **options)
else:
raise CommandError("Neither pygraphviz nor pydot could be found to generate the image")
else:
self.print_output(dotdata)
def options_from_settings(self, options):
defaults = getattr(settings, 'GRAPH_MODELS', None)
if defaults:
for option in self.graph_models_options:
long_opt = option._long_opts[0]
if long_opt:
long_opt = long_opt.lstrip("-").replace("-", "_")
if long_opt in defaults:
default_value = None
if not option.default == NO_DEFAULT:
default_value = option.default
if options[option.dest] == default_value:
options[option.dest] = defaults[long_opt]
def print_output(self, dotdata):
if six.PY3 and isinstance(dotdata, six.binary_type):
dotdata = dotdata.decode()
print(dotdata)
def render_output_pygraphviz(self, dotdata, **kwargs):
"""Renders the image using pygraphviz"""
if not HAS_PYGRAPHVIZ:
raise CommandError("You need to install pygraphviz python module")
version = pygraphviz.__version__.rstrip("-svn")
try:
if tuple(int(v) for v in version.split('.')) < (0, 36):
# HACK around old/broken AGraph before version 0.36 (ubuntu ships with this old version)
import tempfile
tmpfile = tempfile.NamedTemporaryFile()
tmpfile.write(dotdata)
tmpfile.seek(0)
dotdata = tmpfile.name
except ValueError:
pass
graph = pygraphviz.AGraph(dotdata)
graph.layout(prog=kwargs['layout'])
graph.draw(kwargs['outputfile'])
def render_output_pydot(self, dotdata, **kwargs):
"""Renders the image using pydot"""
if not HAS_PYDOT:
raise CommandError("You need to install pydot python module")
graph = pydot.graph_from_dot_data(dotdata)
if not graph:
raise CommandError("pydot returned an error")
output_file = kwargs['outputfile']
formats = ['bmp', 'canon', 'cmap', 'cmapx', 'cmapx_np', 'dot', 'dia', 'emf',
'em', 'fplus', 'eps', 'fig', 'gd', 'gd2', 'gif', 'gv', 'imap',
'imap_np', 'ismap', 'jpe', 'jpeg', 'jpg', 'metafile', 'pdf',
'pic', 'plain', 'plain-ext', 'png', 'pov', 'ps', 'ps2', 'svg',
'svgz', 'tif', 'tiff', 'tk', 'vml', 'vmlz', 'vrml', 'wbmp', 'xdot']
ext = output_file[output_file.rfind('.') + 1:]
format = ext if ext in formats else 'raw'
graph.write(output_file, format=format)
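# Hedged CLI examples (app names are hypothetical; the flags map to the
# options defined in graph_models_options above):
#
#   ./manage.py graph_models -a -g -o all_models.png
#   ./manage.py graph_models myapp otherapp -d -o selected.dot
#   ./manage.py graph_models myapp -X Person,Address -o filtered.png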
| WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/django_extensions/management/commands/graph_models.py | Python | bsd-3-clause | 7,278 |
import json
from tempfile import mkdtemp
from os.path import join, basename
from shutil import rmtree
from distutils.dir_util import copy_tree
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks
from slyd.projectspec import create_project_resource
from slyd.projectspec import convert_template
from .utils import TestSite, test_spec_manager
from .settings import SPEC_DATA_DIR
class CrawlerSpecTest(unittest.TestCase):
spider = """
{
"exclude_patterns": [],
"follow_patterns": [
".+MobileHomePark.php?key=d+"
],
"links_to_follow": "patterns",
"respect_nofollow": true,
"start_urls": [
"http://www.mhvillage.com/"
],
"templates": []
}
"""
def setUp(self):
sm = test_spec_manager()
spec_resource = create_project_resource(sm)
self.temp_project_dir = mkdtemp(dir=SPEC_DATA_DIR,
prefix='test-run-')
self.project = basename(self.temp_project_dir)
self.specsite = TestSite(spec_resource, project=self.project)
test_project_dir = join(SPEC_DATA_DIR, 'test')
copy_tree(test_project_dir, self.temp_project_dir)
@inlineCallbacks
def _get_check_resource(self, resource, converter=None):
result = yield self.specsite.get(resource)
ffile = join(self.temp_project_dir, resource + ".json")
fdata = json.load(open(ffile))
if converter:
converter(fdata)
rdata = json.loads(result.value())
self.assertEqual(fdata, rdata)
def test_get_resource(self):
self._get_check_resource("project")
self._get_check_resource("spiders/pinterest.com",
convert_template)
@inlineCallbacks
def post_command(self, spider, cmd, *args, **kwargs):
obj = {'cmd': cmd, 'args': args}
result = yield self.specsite.post(spider, data=json.dumps(obj))
self.assertEqual(result.responseCode, kwargs.get('expect', 200))
@inlineCallbacks
def test_updating(self):
result = yield self.specsite.post('spiders/testpost', data=self.spider)
self.assertEqual(result.responseCode, 200)
result = yield self.specsite.get('spiders/testpost')
self.assertEqual(json.loads(result.value()), json.loads(self.spider))
# should fail - missing required fields
result = yield self.specsite.post('spiders/testpost', data='{}')
self.assertEqual(result.responseCode, 400)
@inlineCallbacks
def test_commands(self):
self.post_command('spiders', 'unknown', expect=400)
self.post_command('spiders', 'mv', expect=400)
self.post_command('spiders', 'mv', '../notallowed', 'whatever',
expect=400)
self.post_command('spiders', 'mv', 'notallowedexists', 'whatever',
expect=404)
self.post_command('spiders', 'rm', 'notexists', expect=404)
# TODO: mv to existing spider - 400
yield self.specsite.post('spiders/c', data=self.spider)
self._get_check_resource('spiders/c')
self.post_command('spiders', 'mv', 'c', 'c2')
result = yield self.specsite.get('spiders/c')
self.assertEqual(result.value(), '{}\n')
self._get_check_resource('spiders/c2')
yield self.specsite.post('spiders/c3', data=self.spider)
# overwrites
self.post_command('spiders', 'mv', 'c2', 'c3')
result = yield self.specsite.get('spiders/c2')
self.assertEqual(result.value(), '{}\n')
self.post_command('spiders', 'rm', 'c3')
result = yield self.specsite.get('spiders/c3')
self.assertEqual(result.value(), '{}\n')
def tearDown(self):
rmtree(self.temp_project_dir)
| CENDARI/portia | slyd/tests/test_spec.py | Python | bsd-3-clause | 3,884 |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 24 15:08:01 2013
@author: steve
"""
import numpy as np
import mdptoolbox
from .utils import SMALLNUM, P_forest, R_forest, P_small, R_small, P_sparse
from .utils import P_forest_sparse, R_forest_sparse
def test_ValueIterationGS_small():
sdp = mdptoolbox.mdp.ValueIterationGS(P_small, R_small, 0.9)
sdp.run()
p = (1, 0)
itr = 28 # from Octave MDPtoolbox
v = np.matrix('42.27744026138212, 35.89524504047155')
assert sdp.iter == itr
assert sdp.policy == p
assert (np.absolute(np.array(sdp.V) - v) < SMALLNUM).all()
def test_ValueIterationGS_small_sparse():
sdp = mdptoolbox.mdp.ValueIterationGS(P_sparse, R_small, 0.9)
sdp.run()
p = (1, 0)
itr = 28 # from Octave MDPtoolbox
v = np.matrix('42.27744026138212, 35.89524504047155')
assert sdp.iter == itr
assert sdp.policy == p
assert (np.absolute(np.array(sdp.V) - v) < SMALLNUM).all()
def test_ValueIterationGS_forest():
sdp = mdptoolbox.mdp.ValueIterationGS(P_forest, R_forest, 0.96)
sdp.run()
p = (0, 0, 0)
v = np.matrix('69.98910821400665, 73.46560194552877, 77.46560194552877')
itr = 63 # from Octave MDPtoolbox
assert sdp.max_iter == 63
assert sdp.policy == p
assert sdp.iter == itr
assert (np.absolute(np.array(sdp.V) - v) < SMALLNUM).all()
def test_ValueIterationGS_forest_sparse():
sdp = mdptoolbox.mdp.ValueIterationGS(P_forest_sparse, R_forest_sparse,
0.96)
sdp.run()
p = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
itr = 16 # from Octave MDPtoolbox
assert sdp.policy == p
assert sdp.iter == itr
| silgon/pymdptoolbox | src/tests/test_ValueIterationGS.py | Python | bsd-3-clause | 1,658 |
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
try:
import cPickle as pickle
except ImportError:
import pickle
# Handle the case where the requests module has been patched to not have
# urllib3 bundled as part of its source.
try:
from pip._vendor.requests.packages.urllib3.response import HTTPResponse
except ImportError:
from pip._vendor.urllib3.response import HTTPResponse
try:
from pip._vendor.requests.packages.urllib3.util import is_fp_closed
except ImportError:
from pip._vendor.urllib3.util import is_fp_closed
# Replicate some six behaviour
try:
text_type = unicode
except NameError:
text_type = str
| ncos/lisa | src/lisa_drive/scripts/venv/lib/python3.5/site-packages/pip-10.0.1-py3.5.egg/pip/_vendor/cachecontrol/compat.py | Python | mit | 724 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from qsrlib_qsrs.qsr_rcc_abstractclass import QSR_RCC_Abstractclass
class QSR_RCC8(QSR_RCC_Abstractclass):
"""Symmetrical RCC5 relations.
Values of the abstract properties
* **_unique_id** = "rcc8"
* **_all_possible_relations** = ("dc", "ec", "po", "eq", "tpp", "ntpp", "tppi", "ntppi")
* **_dtype** = "bounding_boxes_2d"
QSR specific `dynamic_args`
* **'quantisation_factor'** (*float*) = 0.0: Threshold that determines whether two rectangle regions are disconnected.
.. seealso:: For further details about RCC8, refer to its :doc:`description. <../handwritten/qsrs/rcc8>`
"""
_unique_id = "rcc8"
"""str: Unique identifier name of the QSR."""
_all_possible_relations = ("dc", "ec", "po", "eq", "tpp", "ntpp", "tppi", "ntppi")
"""tuple: All possible relations of the QSR."""
def __init__(self):
"""Constructor."""
super(QSR_RCC8, self).__init__()
def _convert_to_requested_rcc_type(self, qsr):
"""No need for remapping.
:param qsr: RCC8 value.
:type qsr: str
:return: RCC8 value.
:rtype: str
"""
return qsr
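# Illustrative request sketch (hedged): QSRlib_Request_Message and the world
# trace object come from qsrlib/qsrlib_io, outside this module, so treat the
# exact call signatures below as assumptions rather than part of this file.
#
#   dynamic_args = {"rcc8": {"quantisation_factor": 1.0}}
#   req = QSRlib_Request_Message(which_qsr="rcc8", input_data=world_trace,
#                                dynamic_args=dynamic_args)
#   res = qsrlib.request_qsrs(req_msg=req)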
| cdondrup/strands_qsr_lib | qsr_lib/src/qsrlib_qsrs/qsr_rcc8.py | Python | mit | 1,237 |
#! /usr/bin/python
# Script for increasing versions numbers across the code
import sys
import glob
import re
import argparse
def check_version_format(version):
"""Check format of version number"""
pattern = '^[0-9]+[\.][0-9]+[\.][0-9]+(\-.+)*$'
return re.match(pattern, version) is not None
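# Examples of what the pattern above accepts and rejects (illustrative only):
#   check_version_format("5.1.2")           -> True
#   check_version_format("5.1.2-SNAPSHOT")  -> True
#   check_version_format("5.1")             -> False  (three numeric parts required)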
BIO_FORMATS_ARTIFACT = (
r"(<groupId>%s</groupId>\n"
".*<artifactId>pom-bio-formats</artifactId>\n"
".*<version>).*(</version>)")
class Replacer(object):
def __init__(self, old_group="ome", new_group="ome"):
self.old_group = old_group
self.new_group = new_group
self.group_pattern = \
r"(<groupId>)%s(</groupId>)" % \
old_group
self.artifact_pattern = BIO_FORMATS_ARTIFACT % old_group
self.release_version_pattern = \
r"(<release.version>).*(</release.version>)"
self.stableversion_pattern = \
r"(STABLE_VERSION = \").*(\";)"
self.upgradecheck = \
"components/formats-bsd/src/loci/formats/UpgradeChecker.java"
def replace_file(self, input_path, pattern, version):
"""Substitute a pattern with version in a file"""
with open(input_path, "r") as infile:
regexp = re.compile(pattern)
new_content = regexp.sub(r"\g<1>%s\g<2>" % version, infile.read())
with open(input_path, "w") as output:
output.write(new_content)
output.close()
infile.close()
def bump_pom_versions(self, version):
"""Replace versions in pom.xml files"""
# Replace versions in components pom.xml
for pomfile in (glob.glob("*/*/pom.xml") + glob.glob("*/*/*/pom.xml")):
self.replace_file(pomfile, self.artifact_pattern, version)
self.replace_file(pomfile, self.group_pattern, self.new_group)
# Replace versions in top-level pom.xml
toplevelpomfile = "pom.xml"
self.replace_file(
toplevelpomfile, self.artifact_pattern, version)
self.replace_file(
toplevelpomfile, self.release_version_pattern, version)
self.replace_file(
toplevelpomfile, self.group_pattern, self.new_group)
def bump_stable_version(self, version):
"""Replace UpgradeChecker stable version"""
self.replace_file(
self.upgradecheck, self.stableversion_pattern, version)
if __name__ == "__main__":
# Input check
parser = argparse.ArgumentParser()
parser.add_argument("--old-group", type=str, default="ome")
parser.add_argument("--new-group", type=str, default="ome")
parser.add_argument("version", type=str)
ns = parser.parse_args()
if not check_version_format(ns.version):
print "Invalid version format"
sys.exit(1)
replacer = Replacer(old_group=ns.old_group, new_group=ns.new_group)
replacer.bump_pom_versions(ns.version)
if not ns.version.endswith('SNAPSHOT'):
replacer.bump_stable_version(ns.version)
| stelfrich/bioformats | tools/bump_maven_version.py | Python | gpl-2.0 | 3,006 |
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: puppet
short_description: Runs puppet
description:
- Runs I(puppet) agent or apply in a reliable manner
version_added: "2.0"
options:
timeout:
description:
- How long to wait for I(puppet) to finish.
required: false
default: 30m
puppetmaster:
description:
- The hostname of the puppetmaster to contact.
required: false
default: None
modulepath:
description:
- Path to an alternate location for puppet modules
required: false
default: None
version_added: "2.4"
manifest:
description:
- Path to the manifest file to run puppet apply on.
required: false
default: None
facts:
description:
- A dict of values to pass in as persistent external facter facts
required: false
default: None
facter_basename:
description:
- Basename of the facter output file
required: false
default: ansible
environment:
description:
- Puppet environment to be used.
required: false
default: None
logdest:
description:
- Where the puppet logs should go, if puppet apply is being used
required: false
default: stdout
choices: [ 'stdout', 'syslog' ]
version_added: "2.1"
certname:
description:
- The name to use when handling certificates.
required: false
default: None
version_added: "2.1"
tags:
description:
- A comma-separated list of puppet tags to be used.
required: false
default: None
version_added: "2.1"
execute:
description:
- Execute a specific piece of Puppet code. It has no effect with
a puppetmaster.
required: false
default: None
version_added: "2.1"
requirements: [ puppet ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Run puppet agent and fail if anything goes wrong
- puppet
# Run puppet and timeout in 5 minutes
- puppet:
timeout: 5m
# Run puppet using a different environment
- puppet:
environment: testing
# Run puppet using a specific certname
- puppet:
certname: agent01.example.com
# Run puppet using a specific piece of Puppet code. Has no effect with a
# puppetmaster.
- puppet:
execute: 'include ::mymodule'
# Run puppet using a specific tags
- puppet:
tags: update,nginx
'''
import os
import pipes
import stat
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
def _get_facter_dir():
if os.getuid() == 0:
return '/etc/facter/facts.d'
else:
return os.path.expanduser('~/.facter/facts.d')
def _write_structured_data(basedir, basename, data):
if not os.path.exists(basedir):
os.makedirs(basedir)
file_path = os.path.join(basedir, "{0}.json".format(basename))
# This is more complex than you might normally expect because we want to
# open the file with only u+rw set. Also, we use the stat constants
# because ansible still supports python 2.4 and the octal syntax changed
out_file = os.fdopen(
os.open(
file_path, os.O_CREAT | os.O_WRONLY,
stat.S_IRUSR | stat.S_IWUSR), 'w')
out_file.write(json.dumps(data).encode('utf8'))
out_file.close()
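# Illustrative effect (hypothetical values): _write_structured_data(
# '/etc/facter/facts.d', 'ansible', {'role': 'web'}) creates
# /etc/facter/facts.d/ansible.json containing {"role": "web"}, readable and
# writable only by the owner.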
def main():
module = AnsibleModule(
argument_spec=dict(
timeout=dict(default="30m"),
puppetmaster=dict(required=False, default=None),
modulepath=dict(required=False, default=None),
manifest=dict(required=False, default=None),
logdest=dict(
required=False, default='stdout',
choices=['stdout', 'syslog']),
show_diff=dict(
# internal code to work with --diff, do not use
default=False, aliases=['show-diff'], type='bool'),
facts=dict(default=None),
facter_basename=dict(default='ansible'),
environment=dict(required=False, default=None),
certname=dict(required=False, default=None),
tags=dict(required=False, default=None, type='list'),
execute=dict(required=False, default=None),
),
supports_check_mode=True,
mutually_exclusive=[
('puppetmaster', 'manifest'),
('puppetmaster', 'manifest', 'execute'),
('puppetmaster', 'modulepath')
],
)
p = module.params
global PUPPET_CMD
PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
if not PUPPET_CMD:
module.fail_json(
msg="Could not find puppet. Please ensure it is installed.")
global TIMEOUT_CMD
TIMEOUT_CMD = module.get_bin_path("timeout", False)
if p['manifest']:
if not os.path.exists(p['manifest']):
module.fail_json(
msg="Manifest file %(manifest)s not found." % dict(
manifest=p['manifest']))
# Check if puppet is disabled here
if not p['manifest']:
rc, stdout, stderr = module.run_command(
PUPPET_CMD + " config print agent_disabled_lockfile")
if os.path.exists(stdout.strip()):
module.fail_json(
msg="Puppet agent is administratively disabled.",
disabled=True)
elif rc != 0:
module.fail_json(
msg="Puppet agent state could not be determined.")
if module.params['facts'] and not module.check_mode:
_write_structured_data(
_get_facter_dir(),
module.params['facter_basename'],
module.params['facts'])
if TIMEOUT_CMD:
base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
timeout_cmd=TIMEOUT_CMD,
timeout=pipes.quote(p['timeout']),
puppet_cmd=PUPPET_CMD)
else:
base_cmd = PUPPET_CMD
if not p['manifest']:
cmd = ("%(base_cmd)s agent --onetime"
" --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
" --detailed-exitcodes --verbose --color 0") % dict(
base_cmd=base_cmd,
)
if p['puppetmaster']:
cmd += " --server %s" % pipes.quote(p['puppetmaster'])
if p['show_diff']:
cmd += " --show_diff"
if p['environment']:
cmd += " --environment '%s'" % p['environment']
if p['tags']:
cmd += " --tags '%s'" % ','.join(p['tags'])
if p['certname']:
cmd += " --certname='%s'" % p['certname']
if module.check_mode:
cmd += " --noop"
else:
cmd += " --no-noop"
else:
cmd = "%s apply --detailed-exitcodes " % base_cmd
if p['logdest'] == 'syslog':
cmd += "--logdest syslog "
if p['modulepath']:
cmd += "--modulepath='%s'" % p['modulepath']
if p['environment']:
cmd += "--environment '%s' " % p['environment']
if p['certname']:
cmd += " --certname='%s'" % p['certname']
if p['execute']:
cmd += " --execute '%s'" % p['execute']
if p['tags']:
cmd += " --tags '%s'" % ','.join(p['tags'])
if module.check_mode:
cmd += "--noop "
else:
cmd += "--no-noop "
cmd += pipes.quote(p['manifest'])
rc, stdout, stderr = module.run_command(cmd)
if rc == 0:
# success
module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
elif rc == 1:
# rc==1 could be because it's disabled
# rc==1 could also mean there was a compilation failure
disabled = "administratively disabled" in stdout
if disabled:
msg = "puppet is disabled"
else:
msg = "puppet did not run"
module.exit_json(
rc=rc, disabled=disabled, msg=msg,
error=True, stdout=stdout, stderr=stderr)
elif rc == 2:
# success with changes
module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
elif rc == 124:
# timeout
module.exit_json(
rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
else:
# failure
module.fail_json(
rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
stdout=stdout, stderr=stderr)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| mensler/ansible | lib/ansible/modules/system/puppet.py | Python | gpl-3.0 | 9,396 |
# coding=utf-8
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Item",
"_doctype": "Item",
"color": "#f39c12",
"icon": "octicon octicon-package",
"type": "link",
"link": "List/Item"
},
{
"module_name": "Customer",
"_doctype": "Customer",
"color": "#1abc9c",
"icon": "octicon octicon-tag",
"type": "link",
"link": "List/Customer"
},
{
"module_name": "Supplier",
"_doctype": "Supplier",
"color": "#c0392b",
"icon": "octicon octicon-briefcase",
"type": "link",
"link": "List/Supplier"
},
{
"_doctype": "Employee",
"module_name": "Employee",
"color": "#2ecc71",
"icon": "octicon octicon-organization",
"type": "link",
"link": "List/Employee"
},
{
"module_name": "Project",
"_doctype": "Project",
"color": "#8e44ad",
"icon": "octicon octicon-rocket",
"type": "link",
"link": "List/Project"
},
{
"module_name": "Issue",
"color": "#2c3e50",
"icon": "octicon octicon-issue-opened",
"_doctype": "Issue",
"type": "link",
"link": "List/Issue"
},
{
"module_name": "Lead",
"icon": "octicon octicon-broadcast",
"_doctype": "Lead",
"type": "link",
"link": "List/Lead"
},
{
"module_name": "Profit and Loss Statement",
"_doctype": "Account",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "link",
"link": "query-report/Profit and Loss Statement"
},
# old
{
"module_name": "Accounts",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "module",
"hidden": 1
},
{
"module_name": "Stock",
"color": "#f39c12",
"icon": "octicon octicon-package",
"type": "module",
"hidden": 1
},
{
"module_name": "CRM",
"color": "#EF4DB6",
"icon": "octicon octicon-broadcast",
"type": "module",
"hidden": 1
},
{
"module_name": "Selling",
"color": "#1abc9c",
"icon": "octicon octicon-tag",
"type": "module",
"hidden": 1
},
{
"module_name": "Buying",
"color": "#c0392b",
"icon": "octicon octicon-briefcase",
"type": "module",
"hidden": 1
},
{
"module_name": "HR",
"color": "#2ecc71",
"icon": "octicon octicon-organization",
"label": _("Human Resources"),
"type": "module",
"hidden": 1
},
{
"module_name": "Manufacturing",
"color": "#7f8c8d",
"icon": "octicon octicon-tools",
"type": "module",
"hidden": 1
},
{
"module_name": "POS",
"color": "#589494",
"icon": "octicon octicon-credit-card",
"type": "page",
"link": "pos",
"label": _("POS")
},
{
"module_name": "Leaderboard",
"color": "#589494",
"icon": "octicon octicon-graph",
"type": "page",
"link": "leaderboard",
"label": _("Leaderboard")
},
{
"module_name": "Projects",
"color": "#8e44ad",
"icon": "octicon octicon-rocket",
"type": "module",
"hidden": 1
},
{
"module_name": "Support",
"color": "#2c3e50",
"icon": "octicon octicon-issue-opened",
"type": "module",
"hidden": 1
},
{
"module_name": "Learn",
"color": "#FF888B",
"icon": "octicon octicon-device-camera-video",
"type": "module",
"is_help": True,
"label": _("Learn"),
"hidden": 1
},
{
"module_name": "Maintenance",
"color": "#FF888B",
"icon": "octicon octicon-tools",
"type": "module",
"label": _("Maintenance"),
"hidden": 1
},
{
"module_name": "Student",
"color": "#c0392b",
"icon": "octicon octicon-person",
"label": _("Student"),
"link": "List/Student",
"_doctype": "Student",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Group",
"color": "#d59919",
"icon": "octicon octicon-organization",
"label": _("Student Group"),
"link": "List/Student Group",
"_doctype": "Student Group",
"type": "list",
"hidden": 1
},
{
"module_name": "Course Schedule",
"color": "#fd784f",
"icon": "octicon octicon-calendar",
"label": _("Course Schedule"),
"link": "List/Course Schedule/Calendar",
"_doctype": "Course Schedule",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Attendance Tool",
"color": "#C0392B",
"icon": "octicon octicon-checklist",
"label": _("Student Attendance Tool"),
"link": "List/Student Attendance Tool",
"_doctype": "Student Attendance Tool",
"type": "list",
"hidden": 1
},
{
"module_name": "Course",
"color": "#8e44ad",
"icon": "octicon octicon-book",
"label": _("Course"),
"link": "List/Course",
"_doctype": "Course",
"type": "list",
"hidden": 1
},
{
"module_name": "Program",
"color": "#9b59b6",
"icon": "octicon octicon-repo",
"label": _("Program"),
"link": "List/Program",
"_doctype": "Program",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Applicant",
"color": "#4d927f",
"icon": "octicon octicon-clippy",
"label": _("Student Applicant"),
"link": "List/Student Applicant",
"_doctype": "Student Applicant",
"type": "list",
"hidden": 1
},
{
"module_name": "Fees",
"color": "#83C21E",
"icon": "fa fa-money",
"label": _("Fees"),
"link": "List/Fees",
"_doctype": "Fees",
"type": "list",
"hidden": 1
},
{
"module_name": "Instructor",
"color": "#a99e4c",
"icon": "octicon octicon-broadcast",
"label": _("Instructor"),
"link": "List/Instructor",
"_doctype": "Instructor",
"type": "list",
"hidden": 1
},
{
"module_name": "Room",
"color": "#f22683",
"icon": "fa fa-map-marker",
"label": _("Room"),
"link": "List/Room",
"_doctype": "Room",
"type": "list",
"hidden": 1
},
{
"module_name": "Education",
"color": "#428B46",
"icon": "octicon octicon-mortar-board",
"type": "module",
"label": _("Education"),
"hidden": 1
},
{
"module_name": "Healthcare",
"color": "#FF888B",
"icon": "fa fa-heartbeat",
"type": "module",
"label": _("Healthcare"),
"hidden": 1
},
{
"module_name": "Patient",
"color": "#6BE273",
"icon": "fa fa-user",
"doctype": "Patient",
"type": "link",
"link": "List/Patient",
"label": _("Patient"),
"hidden": 1
},
{
"module_name": "Healthcare Practitioner",
"color": "#2ecc71",
"icon": "fa fa-user-md",
"doctype": "Healthcare Practitioner",
"type": "link",
"link": "List/Healthcare Practitioner",
"label": _("Healthcare Practitioner"),
"hidden": 1
},
{
"module_name": "Patient Appointment",
"color": "#934F92",
"icon": "fa fa-calendar-plus-o",
"doctype": "Patient Appointment",
"type": "link",
"link": "List/Patient Appointment",
"label": _("Patient Appointment"),
"hidden": 1
},
{
"module_name": "Patient Encounter",
"color": "#2ecc71",
"icon": "fa fa-stethoscope",
"doctype": "Patient Encounter",
"type": "link",
"link": "List/Patient Encounter",
"label": _("Patient Encounter"),
"hidden": 1
},
{
"module_name": "Lab Test",
"color": "#7578f6",
"icon": "octicon octicon-beaker",
"doctype": "Lab Test",
"type": "list",
"link": "List/Lab Test",
"label": _("Lab Test"),
"hidden": 1
},
{
"module_name": "Vital Signs",
"color": "#2ecc71",
"icon": "fa fa-thermometer-empty",
"doctype": "Vital Signs",
"type": "list",
"link": "List/Vital Signs",
"label": _("Vital Signs"),
"hidden": 1
},
{
"module_name": "Clinical Procedure",
"color": "#FF888B",
"icon": "fa fa-medkit",
"doctype": "Clinical Procedure",
"type": "list",
"link": "List/Clinical Procedure",
"label": _("Clinical Procedure"),
"hidden": 1
},
{
"module_name": "Inpatient Record",
"color": "#7578f6",
"icon": "fa fa-list-alt",
"doctype": "Inpatient Record",
"type": "list",
"link": "List/Inpatient Record",
"label": _("Inpatient Record"),
"hidden": 1
},
{
"module_name": "Hub",
"color": "#009248",
"icon": "/assets/erpnext/images/hub_logo.svg",
"type": "page",
"link": "Hub/Item",
"label": _("Hub")
},
{
"module_name": "Data Import",
"color": "#FFF168",
"reverse": 1,
"doctype": "Data Import",
"icon": "octicon octicon-cloud-upload",
"label": _("Data Import"),
"link": "List/Data Import",
"type": "list"
},
{
"module_name": "Restaurant",
"color": "#EA81E8",
"icon": "🍔",
"_doctype": "Restaurant",
"type": "module",
"link": "List/Restaurant",
"label": _("Restaurant"),
"hidden": 1
},
{
"module_name": "Hotels",
"color": "#EA81E8",
"icon": "fa fa-bed",
"type": "module",
"label": _("Hotels"),
"hidden": 1
},
{
"module_name": "Agriculture",
"color": "#8BC34A",
"icon": "octicon octicon-globe",
"type": "module",
"label": _("Agriculture"),
"hidden": 1
},
{
"module_name": "Crop",
"_doctype": "Crop",
"label": _("Crop"),
"color": "#8BC34A",
"icon": "fa fa-tree",
"type": "list",
"link": "List/Crop",
"hidden": 1
},
{
"module_name": "Crop Cycle",
"_doctype": "Crop Cycle",
"label": _("Crop Cycle"),
"color": "#8BC34A",
"icon": "fa fa-circle-o-notch",
"type": "list",
"link": "List/Crop Cycle",
"hidden": 1
},
{
"module_name": "Fertilizer",
"_doctype": "Fertilizer",
"label": _("Fertilizer"),
"color": "#8BC34A",
"icon": "fa fa-leaf",
"type": "list",
"link": "List/Fertilizer",
"hidden": 1
},
{
"module_name": "Location",
"_doctype": "Location",
"label": _("Location"),
"color": "#8BC34A",
"icon": "fa fa-map",
"type": "list",
"link": "List/Location",
"hidden": 1
},
{
"module_name": "Disease",
"_doctype": "Disease",
"label": _("Disease"),
"color": "#8BC34A",
"icon": "octicon octicon-bug",
"type": "list",
"link": "List/Disease",
"hidden": 1
},
{
"module_name": "Plant Analysis",
"_doctype": "Plant Analysis",
"label": _("Plant Analysis"),
"color": "#8BC34A",
"icon": "fa fa-pagelines",
"type": "list",
"link": "List/Plant Analysis",
"hidden": 1
},
{
"module_name": "Soil Analysis",
"_doctype": "Soil Analysis",
"label": _("Soil Analysis"),
"color": "#8BC34A",
"icon": "fa fa-flask",
"type": "list",
"link": "List/Soil Analysis",
"hidden": 1
},
{
"module_name": "Soil Texture",
"_doctype": "Soil Texture",
"label": _("Soil Texture"),
"color": "#8BC34A",
"icon": "octicon octicon-beaker",
"type": "list",
"link": "List/Soil Texture",
"hidden": 1
},
{
"module_name": "Water Analysis",
"_doctype": "Water Analysis",
"label": _("Water Analysis"),
"color": "#8BC34A",
"icon": "fa fa-tint",
"type": "list",
"link": "List/Water Analysis",
"hidden": 1
},
{
"module_name": "Weather",
"_doctype": "Weather",
"label": _("Weather"),
"color": "#8BC34A",
"icon": "fa fa-sun-o",
"type": "list",
"link": "List/Weather",
"hidden": 1
},
{
"module_name": "Assets",
"color": "#4286f4",
"icon": "octicon octicon-database",
"hidden": 1,
"label": _("Assets"),
"type": "module"
},
{
"module_name": "Grant Application",
"color": "#E9AB17",
"icon": "fa fa-gift",
"_doctype": "Grant Application",
"type": "list",
"link": "List/Grant Application",
"label": _("Grant Application"),
"hidden": 1
},
{
"module_name": "Donor",
"color": "#7F5A58",
"icon": "fa fa-tint",
"_doctype": "Donor",
"type": "list",
"link": "List/Donor",
"label": _("Donor"),
"hidden": 1
},
{
"module_name": "Volunteer",
"color": "#7E587E",
"icon": "fa fa-angellist",
"_doctype": "Volunteer",
"type": "list",
"link": "List/Volunteer",
"label": _("Volunteer"),
"hidden": 1
},
{
"module_name": "Member",
"color": "#79BAEC",
"icon": "fa fa-users",
"_doctype": "Member",
"type": "list",
"link": "List/Member",
"label": _("Member"),
"hidden": 1
},
{
"module_name": "Chapter",
"color": "#3B9C9C",
"icon": "fa fa-handshake-o",
"_doctype": "Chapter",
"type": "list",
"link": "List/Chapter",
"label": _("Chapter"),
"hidden": 1
},
{
"module_name": "Non Profit",
"color": "#DE2B37",
"icon": "octicon octicon-heart",
"type": "module",
"label": _("Non Profit"),
"hidden": 1
}
]
| ovresko/erpnext | erpnext/config/desktop.py | Python | gpl-3.0 | 12,428 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
from importlib import import_module
from django.apps import AppConfig
from django.conf import settings
from django.db.models import signals
from geonode.tasks.tasks import send_queued_notifications
E = getattr(settings, 'NOTIFICATION_ENABLED', False)
M = getattr(settings, 'NOTIFICATIONS_MODULE', None)
notifications = None
has_notifications = E and M and M in settings.INSTALLED_APPS
if has_notifications:
notifications = import_module(M)
class NotificationsAppConfigBase(AppConfig):
"""
Base class for AppConfig notifications setup
You should subclass it and provide list of notifications
in NOTIFICATIONS attribute to automatically register to
post_migrate signal.
"""
# override in subclass
NOTIFICATIONS = tuple()
def _get_logger(self):
return logging.getLogger(self.__class__.__module__)
def _register_notifications(self, *args, **kwargs):
if has_notifications and notifications:
self._get_logger().debug("Creating notifications")
for label, display, description in self.NOTIFICATIONS:
notifications.models.NoticeType.create(
label, display, description)
def ready(self):
signals.post_migrate.connect(self._register_notifications, sender=self)
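# Minimal illustrative subclass (hypothetical app and notice labels, not part
# of this module): each NOTIFICATIONS entry is (label, display, description),
# matching what _register_notifications() passes to NoticeType.create().
#
#   class LayersAppConfig(NotificationsAppConfigBase):
#       name = 'geonode.layers'
#       NOTIFICATIONS = (
#           ("layer_created", "Layer Created", "A layer was created"),
#       )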
def call_celery(func):
def wrap(*args, **kwargs):
ret = func(*args, **kwargs)
if settings.PINAX_NOTIFICATIONS_QUEUE_ALL:
send_queued_notifications.delay()
return ret
return wrap
def send_now_notification(*args, **kwargs):
"""
    Simple wrapper around notifications.models.send_now().
This can be called safely if notifications are not installed.
"""
if has_notifications:
return notifications.models.send_now(*args, **kwargs)
@call_celery
def send_notification(*args, **kwargs):
"""
    Simple wrapper around notifications.models.send().
This can be called safely if notifications are not installed.
"""
if has_notifications:
# queue for further processing if required
if settings.PINAX_NOTIFICATIONS_QUEUE_ALL:
return queue_notification(*args, **kwargs)
try:
return notifications.models.send(*args, **kwargs)
except Exception:
logging.exception("Could not send notifications.")
return False
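# Illustrative call (hypothetical label and context; mirrors the
# notifications.models.send(users, label, extra_context) signature wrapped
# above):
#   send_notification(recipients, "layer_created", {"resource": layer})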
def queue_notification(*args, **kwargs):
if has_notifications:
return notifications.models.queue(*args, **kwargs)
def get_notification_recipients(notice_type_label, exclude_user=None):
""" Get notification recipients
"""
if not has_notifications:
return []
recipients_ids = notifications.models.NoticeSetting.objects \
.filter(notice_type__label=notice_type_label) \
.values('user')
from geonode.people.models import Profile
profiles = Profile.objects.filter(id__in=recipients_ids)
if exclude_user:
profiles.exclude(username=exclude_user.username)
return profiles
| kartoza/geonode | geonode/notifications_helper.py | Python | gpl-3.0 | 3,847 |