| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1) | license (stringclasses 15) | size (int32 2-1.05M) |
---|---|---|---|---|---|
"""The tests for the Tasmota binary sensor platform."""
import copy
from datetime import timedelta
import json
from unittest.mock import patch
from hatasmota.utils import (
get_topic_stat_result,
get_topic_stat_status,
get_topic_tele_sensor,
get_topic_tele_will,
)
from homeassistant.components import binary_sensor
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import (
ATTR_ASSUMED_STATE,
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
import homeassistant.util.dt as dt_util
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message, async_fire_time_changed
async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
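    # Switchmode 1 for switch 1 ("swc") makes discovery expose it as a binary sensor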
config["swc"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test periodic state update
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"ON"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"OFF"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test polled state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test force update flag
entity = hass.data["entity_components"]["binary_sensor"].get_entity(
"binary_sensor.tasmota_binary_sensor_1"
)
assert entity.force_update
async def test_controlling_state_via_mqtt_switchname(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Custom Name"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.custom_name")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Custom Name":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Custom Name":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
# Test periodic state update
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Custom Name":"ON"}')
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Custom Name":"OFF"}')
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
# Test polled state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Custom Name":"ON"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Custom Name":"OFF"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
async def test_pushon_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
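    # Switchmode 13 (PUSHON): only stat/RESULT messages update the state; periodic and polled updates are ignored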
config["swc"][0] = 13
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test periodic state update is ignored
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"ON"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test polled state update is ignored
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
async def test_friendly_names(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swc"][1] = 1
config["swn"][1] = "Beer"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert state.attributes.get("friendly_name") == "Tasmota binary_sensor 1"
state = hass.states.get("binary_sensor.beer")
assert state.state == "unavailable"
assert state.attributes.get("friendly_name") == "Beer"
async def test_off_delay(hass, mqtt_mock, setup_tasmota):
"""Test off_delay option."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 13 # PUSHON: 1s off_delay
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
events = []
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event.data["new_state"].state)
hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
assert events == ["off"]
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
assert events == ["off", "on"]
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
assert events == ["off", "on", "on"]
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert events == ["off", "on", "on", "off"]
async def test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test availability after MQTT disconnection."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, binary_sensor.DOMAIN, config
)
async def test_availability(hass, mqtt_mock, setup_tasmota):
"""Test availability."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability(hass, mqtt_mock, binary_sensor.DOMAIN, config)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test availability discovery update."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability_discovery_update(
hass, mqtt_mock, binary_sensor.DOMAIN, config
)
async def test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test polling after MQTT connection (re)established."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
poll_topic = "tasmota_49A3BC/cmnd/STATUS"
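    # Polling publishes "10" to the cmnd/STATUS topic; STATUS 10 reports sensor data (StatusSNS)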
await help_test_availability_poll_state(
hass,
mqtt_client_mock,
mqtt_mock,
binary_sensor.DOMAIN,
config,
poll_topic,
"10",
)
async def test_discovery_removal_binary_sensor(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered binary_sensor."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config2 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = 1
config2["swc"][0] = 0
config1["swn"][0] = "Test"
config2["swn"][0] = "Test"
await help_test_discovery_removal(
hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_binary_sensor(
hass, mqtt_mock, caplog, setup_tasmota
):
"""Test update of discovered binary_sensor."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
with patch(
"homeassistant.components.tasmota.binary_sensor.TasmotaBinarySensor.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config, discovery_update
)
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
unique_id = f"{DEFAULT_CONFIG['mac']}_binary_sensor_switch_0"
await help_test_discovery_device_remove(
hass, mqtt_mock, binary_sensor.DOMAIN, unique_id, config
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
topics = [
get_topic_stat_result(config),
get_topic_tele_sensor(config),
get_topic_stat_status(config, 10),
get_topic_tele_will(config),
]
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, binary_sensor.DOMAIN, config, topics
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test MQTT discovery update when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, binary_sensor.DOMAIN, config
)
| jawilson/home-assistant | tests/components/tasmota/test_binary_sensor.py | Python | apache-2.0 | 13,393 |
# Copyright 2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code that creates simple startup projects."""
from pathlib import Path
from enum import Enum
import subprocess
import shutil
import sys
import os
import re
from glob import glob
from mesonbuild import mesonlib
from mesonbuild.environment import detect_ninja
from mesonbuild.templates.samplefactory import sameple_generator
import typing as T
if T.TYPE_CHECKING:
import argparse
'''
We currently have only one Meson template.
'''
from mesonbuild.templates.mesontemplates import create_meson_build
FORTRAN_SUFFIXES = {'.f', '.for', '.F', '.f90', '.F90'}
LANG_SUFFIXES = {'.c', '.cc', '.cpp', '.cs', '.cu', '.d', '.m', '.mm', '.rs', '.java', '.vala'} | FORTRAN_SUFFIXES
LANG_SUPPORTED = {'c', 'cpp', 'cs', 'cuda', 'd', 'fortran', 'java', 'rust', 'objc', 'objcpp', 'vala'}
DEFAULT_PROJECT = 'executable'
DEFAULT_VERSION = '0.1'
class DEFAULT_TYPES(Enum):
EXE = 'executable'
LIB = 'library'
INFO_MESSAGE = '''Sample project created. To build it run the
following commands:
meson setup builddir
meson compile -C builddir
'''
def create_sample(options: 'argparse.Namespace') -> None:
'''
    Based on the arguments passed, check for a matching language,
    then check the project type and create a new Meson sample project.
'''
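    # The sample factory returns a language-specific generator based on the options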
sample_gen = sameple_generator(options)
if options.type == DEFAULT_TYPES['EXE'].value:
sample_gen.create_executable()
elif options.type == DEFAULT_TYPES['LIB'].value:
sample_gen.create_library()
else:
raise RuntimeError('Unreachable code')
print(INFO_MESSAGE)
def autodetect_options(options: 'argparse.Namespace', sample: bool = False) -> None:
'''
    Autodetect options for any arguments that were not passed in, so the
    user does not have to think about them.
'''
if not options.name:
options.name = Path().resolve().stem
if not re.match('[a-zA-Z_][a-zA-Z0-9]*', options.name) and sample:
raise SystemExit(f'Name of current directory "{options.name}" is not usable as a sample project name.\n'
'Specify a project name with --name.')
print(f'Using "{options.name}" (name of current directory) as project name.')
if not options.executable:
options.executable = options.name
print(f'Using "{options.executable}" (project name) as name of executable to build.')
if sample:
# The rest of the autodetection is not applicable to generating sample projects.
return
if not options.srcfiles:
srcfiles = []
for f in (f for f in Path().iterdir() if f.is_file()):
if f.suffix in LANG_SUFFIXES:
srcfiles.append(f)
if not srcfiles:
raise SystemExit('No recognizable source files found.\n'
'Run meson init in an empty directory to create a sample project.')
options.srcfiles = srcfiles
print("Detected source files: " + ' '.join(map(str, srcfiles)))
options.srcfiles = [Path(f) for f in options.srcfiles]
if not options.language:
for f in options.srcfiles:
if f.suffix == '.c':
options.language = 'c'
break
if f.suffix in ('.cc', '.cpp'):
options.language = 'cpp'
break
if f.suffix == '.cs':
options.language = 'cs'
break
if f.suffix == '.cu':
options.language = 'cuda'
break
if f.suffix == '.d':
options.language = 'd'
break
if f.suffix in FORTRAN_SUFFIXES:
options.language = 'fortran'
break
if f.suffix == '.rs':
options.language = 'rust'
break
if f.suffix == '.m':
options.language = 'objc'
break
if f.suffix == '.mm':
options.language = 'objcpp'
break
if f.suffix == '.java':
options.language = 'java'
break
if f.suffix == '.vala':
options.language = 'vala'
break
if not options.language:
raise SystemExit("Can't autodetect language, please specify it with -l.")
print("Detected language: " + options.language)
def add_arguments(parser: 'argparse.ArgumentParser') -> None:
'''
    Add the arguments that the user can pass when creating a new
    Meson project.
'''
parser.add_argument("srcfiles", metavar="sourcefile", nargs="*", help="source files. default: all recognized files in current directory")
parser.add_argument('-C', dest='wd', action=mesonlib.RealPathAction,
help='directory to cd into before running')
parser.add_argument("-n", "--name", help="project name. default: name of current directory")
parser.add_argument("-e", "--executable", help="executable name. default: project name")
parser.add_argument("-d", "--deps", help="dependencies, comma-separated")
parser.add_argument("-l", "--language", choices=sorted(LANG_SUPPORTED), help="project language. default: autodetected based on source files")
parser.add_argument("-b", "--build", action='store_true', help="build after generation")
parser.add_argument("--builddir", default='build', help="directory for build")
parser.add_argument("-f", "--force", action="store_true", help="force overwrite of existing files and directories.")
parser.add_argument('--type', default=DEFAULT_PROJECT, choices=('executable', 'library'), help=f"project type. default: {DEFAULT_PROJECT} based project")
parser.add_argument('--version', default=DEFAULT_VERSION, help=f"project version. default: {DEFAULT_VERSION}")
def run(options: 'argparse.Namespace') -> int:
'''
Here we generate the new Meson sample project.
'''
if not Path(options.wd).exists():
sys.exit('Project source root directory not found. Run this command in source directory root.')
os.chdir(options.wd)
if not glob('*'):
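        # Empty source directory: generate a sample project instead of wrapping existing sources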
autodetect_options(options, sample=True)
if not options.language:
print('Defaulting to generating a C language project.')
options.language = 'c'
create_sample(options)
else:
autodetect_options(options)
if Path('meson.build').is_file() and not options.force:
raise SystemExit('meson.build already exists. Use --force to overwrite.')
create_meson_build(options)
if options.build:
if Path(options.builddir).is_dir() and options.force:
print('Build directory already exists, deleting it.')
shutil.rmtree(options.builddir)
print('Building...')
cmd = mesonlib.get_meson_command() + [options.builddir]
ret = subprocess.run(cmd)
if ret.returncode:
raise SystemExit
cmd = detect_ninja() + ['-C', options.builddir]
ret = subprocess.run(cmd)
if ret.returncode:
raise SystemExit
return 0
| mesonbuild/meson | mesonbuild/minit.py | Python | apache-2.0 | 7,660 |
from SimpleCV import Camera
# Initialize the camera
cam = Camera()
# Loop to continuously get images
while True:
# Get Image from camera
img = cam.getImage()
# Make image black and white
img = img.binarize()
# Draw the text "Hello World" on image
img.drawText("Hello World!")
# Show the image
img.show()
| RoboticaBrasil/-ComputerVision | camera.py | Python | apache-2.0 | 336 |
#import In.entity
class RelationHistory(In.entity.Entity):
'''RelationHistory Entity class.
'''
def __init__(self, data = None, items = None, **args):
# default
self.relation_id = 0
self.action = ''
self.actor_entity_type = ''
self.actor_entity_id = 0
self.message = ''
super().__init__(data, items, **args)
@IN.register('RelationHistory', type = 'Entitier')
class RelationHistoryEntitier(In.entity.EntityEntitier):
'''Base RelationHistory Entitier'''
# RelationHistory needs entity insert/update/delete hooks
invoke_entity_hook = False
# load all is very heavy
entity_load_all = False
@IN.register('RelationHistory', type = 'Model')
class RelationHistoryModel(In.entity.EntityModel):
'''RelationHistory Model'''
@IN.hook
def entity_model():
return {
'RelationHistory' : { # entity name
'table' : { # table
'name' : 'relation_history',
'columns' : { # table columns / entity attributes
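                    # Empty specs presumably fall back to the framework's default definitions for these base entity columns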
'id' : {},
'type' : {},
'created' : {},
'status' : {},
'nabar_id' : {},
'relation_id' : {
'type' : 'int', 'unsigned' : True, 'not null' : True,
'description' : 'RelationHistory Id',
},
'actor_entity_type' : {
'type' : 'varchar', 'length' : 32, 'not null' : True,
},
'actor_entity_id' : {
                        'type' : 'int', 'unsigned' : True, 'not null' : True, 'default' : 0,
},
'message' : {
'type' : 'varchar', 'length' : 32, 'not null' : True,
},
},
'keys' : {
'primary' : 'id',
},
},
},
}
@IN.register('RelationHistory', type = 'Themer')
class RelationHistoryThemer(In.entity.EntityThemer):
'''RelationHistory themer'''
| vinoth3v/In_addon_relation | relation/entity_relation_history.py | Python | apache-2.0 | 1,675 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ToIndexStore.basemodel_ptr'
db.delete_column(u'catalog_toindexstore', u'basemodel_ptr_id')
# Adding field 'ToIndexStore.id'
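        # Raw SQL is used here (rather than the add_column call kept below) so the id column is created as a SERIAL primary key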
db.execute('ALTER TABLE "catalog_toindexstore" ADD COLUMN "id" SERIAL NOT NULL PRIMARY KEY')
# db.add_column(u'catalog_toindexstore', u'id',
# self.gf('django.db.models.fields.AutoField')(default=1, primary_key=True),
# keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'ToIndexStore.basemodel_ptr'
raise RuntimeError("Cannot reverse this migration. 'ToIndexStore.basemodel_ptr' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'ToIndexStore.basemodel_ptr'
db.add_column(u'catalog_toindexstore', u'basemodel_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(to=orm['catalog.BaseModel'], unique=True, primary_key=True),
keep_default=False)
# Deleting field 'ToIndexStore.id'
db.delete_column(u'catalog_toindexstore', u'id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'catalog.abstractlike': {
'Meta': {'object_name': 'AbstractLike', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'liked_time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.abstracttop': {
'Meta': {'object_name': 'AbstractTop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'recorded_time': ('django.db.models.fields.DateTimeField', [], {})
},
'catalog.basemodel': {
'Meta': {'object_name': 'BaseModel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'})
},
'catalog.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"})
},
'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproducttutorial': {
'Meta': {'object_name': 'LikeProductTutorial', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.list': {
'Meta': {'object_name': 'List', '_ormbases': ['catalog.BaseModel']},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.location': {
'Meta': {'object_name': 'Location', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.note': {
'Meta': {'object_name': 'Note', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
    complete_apps = ['catalog']
| Makeystreet/makeystreet | woot/apps/catalog/migrations/0029_auto__del_field_toindexstore_basemodel_ptr__add_field_toindexstore_id.py | Python | apache-2.0 | 26,077 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import image_metadata as image_metadata_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import image_fixtures
IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
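# Patch target for the image metadata quota check; stubbed out in most of the tests below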
CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
def get_image_123():
return copy.deepcopy(IMAGE_FIXTURES)[0]
class ImageMetaDataTestV21(test.NoDBTestCase):
controller_class = image_metadata_v21.ImageMetadataController
invalid_request = exception.ValidationError
base_path = '/v2/%s/images/' % fakes.FAKE_PROJECT_ID
def setUp(self):
super(ImageMetaDataTestV21, self).setUp()
self.controller = self.controller_class()
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_index(self, get_all_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
res_dict = self.controller.index(req, '123')
expected = {'metadata': {'key1': 'value1'}}
self.assertEqual(res_dict, expected)
get_all_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show(self, get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
res_dict = self.controller.show(req, '123', 'key1')
self.assertIn('meta', res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
get_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key9')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '123', 'key9')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_show_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '100', 'key9')
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_create(self, get_mocked, update_mocked, quota_mocked):
mock_result = copy.deepcopy(get_image_123())
mock_result['properties']['key7'] = 'value7'
update_mocked.return_value = mock_result
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'value1', # existing meta
'key7': 'value7' # new meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_create_image_not_found(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, '100', body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_all(self, get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.update_all(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key9': 'value9' # replace meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key9': 'value9'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.update(req, '123', 'key1', body=body)
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'zz' # changed meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(res, expected_output)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, '100', 'key1',
body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_bad_body(self, get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"key1": "zz"}
req.body = b''
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPBadRequest())
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_too_many_keys(self, get_mocked, update_mocked,
_quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"foo": "bar"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'bad',
body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete(self, _get_mocked, update_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {}
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
self.assertIsNone(res)
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/blah')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '123', 'blah')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_delete_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '100', 'key1')
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_create(self, _get_mocked,
update_mocked, _quota_mocked):
body = {"metadata": {"foo": "bar"}}
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, '123', body=body)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_put(self, _get_mocked,
update_mocked, _quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/blah')
req.method = 'PUT'
body = {"meta": {"blah": "blah", "blah1": "blah1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'blah',
body=body)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update, req, '123', 'key1',
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update_all(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank(self.base_path +
'%s/metadata/key1' % image_id)
req.method = 'PUT'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update_all, req, image_id,
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_create(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank(self.base_path +
'%s/metadata/key1' % image_id)
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, image_id,
body=body)
class ImageMetadataControllerV239(test.NoDBTestCase):
def setUp(self):
super(ImageMetadataControllerV239, self).setUp()
self.controller = image_metadata_v21.ImageMetadataController()
self.req = fakes.HTTPRequest.blank('', version='2.39')
def test_not_found_for_all_image_metadata_api(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.index, self.req)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.show, self.req, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.create, self.req,
fakes.FAKE_UUID, {'metadata': {}})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.update, self.req,
fakes.FAKE_UUID, 'id', {'metadata': {}})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.update_all, self.req,
fakes.FAKE_UUID, {'metadata': {}})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.delete, self.req, fakes.FAKE_UUID)
| rahulunair/nova | nova/tests/unit/api/openstack/compute/test_image_metadata.py | Python | apache-2.0 | 17,106 |
"""File system module."""
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file may adapt small portions of https://github.com/mtth/hdfs (MIT
# license), see the LICENSES directory.
import posixpath
from functools import wraps as implements
import ibis.common.exceptions as com
from ibis.config import options
class HDFSError(com.IbisError):
"""HDFS Error class."""
pass
class HDFS:
"""Interface class to HDFS.
Interface class to HDFS for ibis that abstracts away (and protects
user/developer against) various 3rd party library API differences.
"""
def exists(self, path: str) -> bool:
"""Check if the file exists.
Parameters
----------
path : string
Returns
-------
bool
Raises
------
NotImplementedError
"""
raise NotImplementedError
def status(self, path: str) -> dict:
"""Check if the status of the path.
Parameters
----------
path : string
Returns
-------
status : dict
Raises
------
NotImplementedError
"""
raise NotImplementedError
def chmod(self, hdfs_path: str, permissions: str):
"""Change permissions of a file of directory.
Parameters
----------
hdfs_path : string
Directory or path
permissions : string
Octal permissions string
Raises
------
NotImplementedError
"""
raise NotImplementedError
def chown(self, hdfs_path: str, owner: str = None, group: str = None):
"""Change owner (and/or group) of a file or directory.
Parameters
----------
hdfs_path : string
Directory or path
owner : string, optional
Name of owner
group : string, optional
Name of group
Raises
------
NotImplementedError
"""
raise NotImplementedError
def head(
self, hdfs_path: str, nbytes: int = 1024, offset: int = 0
) -> bytes:
"""Retrieve the requested number of bytes from a file.
Parameters
----------
hdfs_path : string
Absolute HDFS path
nbytes : int, default 1024 (1K)
Number of bytes to retrieve
offset : int, default 0
Number of bytes at beginning of file to skip before retrieving data
Returns
-------
head_data : bytes
Raises
------
NotImplementedError
"""
raise NotImplementedError
def get(
self, hdfs_path: str, local_path: str = '.', overwrite: bool = False
) -> str:
"""
Download remote file or directory to the local filesystem.
Parameters
----------
hdfs_path : string
local_path : string, default '.'
overwrite : bool, default False
Further keyword arguments passed down to any internal API used.
Returns
-------
written_path : string
The path to the written file or directory
Raises
------
NotImplementedError
"""
raise NotImplementedError
def put(
self,
hdfs_path: str,
resource,
overwrite: bool = False,
verbose: bool = None,
**kwargs,
) -> str:
"""
Write file or directory to HDFS.
Parameters
----------
hdfs_path : string
Directory or path
resource : string or buffer-like
Relative or absolute path to local resource, or a file-like object
overwrite : boolean, default False
verbose : boolean, default ibis options.verbose
Further keyword arguments passed down to any internal API used.
Returns
-------
written_path : string
The path to the written file or directory
Raises
------
NotImplementedError
"""
raise NotImplementedError
def put_tarfile(
self,
hdfs_path: str,
local_path: str,
compression: str = 'gzip',
verbose: bool = None,
overwrite: bool = False,
):
"""
Write contents of tar archive to HDFS.
Write contents of tar archive to HDFS directly without having to
decompress it locally first.
Parameters
----------
hdfs_path : string
local_path : string
compression : {'gzip', 'bz2', None}
overwrite : boolean, default False
verbose : boolean, default None (global default)
Raises
------
ValueError
            If the given compression is not one of: None, 'gzip' or 'bz2'.
"""
import tarfile
modes = {None: 'r', 'gzip': 'r:gz', 'bz2': 'r:bz2'}
if compression not in modes:
raise ValueError(f'Invalid compression type {compression}')
mode = modes[compression]
tf = tarfile.open(local_path, mode=mode)
for info in tf:
if not info.isfile():
continue
buf = tf.extractfile(info)
abspath = posixpath.join(hdfs_path, info.path)
self.put(abspath, buf, verbose=verbose, overwrite=overwrite)
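    # A minimal usage sketch (hypothetical paths): stream the members of a
    # local gzip tarball into an HDFS directory without unpacking it first,
    # assuming `hdfs` is a concrete HDFS implementation such as WebHDFS below.
    #
    #   hdfs.put_tarfile('/user/ibis/tables', '/tmp/tables.tar.gz',
    #                    compression='gzip', overwrite=True)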
def put_zipfile(self, hdfs_path: str, local_path: str):
"""Write contents of zipfile archive to HDFS.
Parameters
----------
hdfs_path : string
local_path : string
Raises
------
NotImplementedError
"""
raise NotImplementedError
def write(
self,
hdfs_path: str,
buf,
overwrite: bool = False,
blocksize: int = None,
replication=None,
buffersize: int = None,
):
"""HDFS Write function.
Parameters
----------
hdfs_path : string
buf
        overwrite : bool, default False
blocksize : int
replication
buffersize : int
Raises
------
NotImplementedError
"""
raise NotImplementedError
def mkdir(self, path: str):
"""Create new directory.
Parameters
----------
path : string
"""
pass
def ls(self, hdfs_path: str, status: bool = False) -> list:
"""Return contents of directory.
Parameters
----------
hdfs_path : string
status : bool
Returns
-------
list
Raises
------
NotImplementedError
"""
raise NotImplementedError
def size(self, hdfs_path: str) -> int:
"""Return total size of file or directory.
Parameters
----------
        hdfs_path : string
Returns
-------
size : int
Raises
------
NotImplementedError
"""
raise NotImplementedError
def tail(self, hdfs_path: str, nbytes: int = 1024) -> bytes:
"""Retrieve the requested number of bytes from the end of a file.
Parameters
----------
hdfs_path : string
nbytes : int
Returns
-------
data_tail : bytes
Raises
------
NotImplementedError
"""
raise NotImplementedError
def mv(
self, hdfs_path_src: str, hdfs_path_dest: str, overwrite: bool = True
):
"""Move hdfs_path_src to hdfs_path_dest.
Parameters
----------
hdfs_path_src: string
hdfs_path_dest: string
overwrite : boolean, default True
Overwrite hdfs_path_dest if it exists.
Raises
------
NotImplementedError
"""
raise NotImplementedError
def cp(self, hdfs_path_src: str, hdfs_path_dest: str):
"""Copy hdfs_path_src to hdfs_path_dest.
Parameters
----------
hdfs_path_src : string
hdfs_path_dest : string
Raises
------
NotImplementedError
"""
raise NotImplementedError
def rm(self, path: str):
"""Delete a single file.
Parameters
----------
path : string
"""
return self.delete(path)
def rmdir(self, path: str):
"""Delete a directory and all its contents.
Parameters
----------
path : string
"""
self.client.delete(path, recursive=True)
def _find_any_file(self, hdfs_dir):
contents = self.ls(hdfs_dir, status=True)
def valid_filename(name):
head, tail = posixpath.split(name)
tail = tail.lower()
return (
not tail.endswith('.tmp')
and not tail.endswith('.copying')
and not tail.startswith('_')
and not tail.startswith('.')
)
for filename, meta in contents:
if meta['type'].lower() == 'file' and valid_filename(filename):
return filename
raise com.IbisError('No files found in the passed directory')
class WebHDFS(HDFS):
"""A WebHDFS-based interface to HDFS using the HDFSCli library."""
def __init__(self, client):
self.client = client
@property
def protocol(self) -> str:
"""Return the protocol used by WebHDFS.
Returns
-------
protocol : string
"""
return 'webhdfs'
def status(self, path: str) -> dict:
"""Retrieve HDFS metadata for path.
Parameters
----------
path : str
Returns
-------
status : dict
Client status
"""
return self.client.status(path)
@implements(HDFS.chmod)
def chmod(self, path: str, permissions: str):
"""Change the permissions of a HDFS file.
Parameters
----------
path : string
permissions : string
New octal permissions string of the file.
"""
self.client.set_permission(path, permissions)
@implements(HDFS.chown)
def chown(self, path: str, owner=None, group=None):
"""
        Change the owner of an HDFS file.
        At least one of `owner` and `group` must be specified.
        Parameters
        ----------
        path : string
            HDFS path.
        owner : string, optional
        group : string, optional
"""
self.client.set_owner(path, owner, group)
@implements(HDFS.exists)
    def exists(self, path: str) -> bool:
"""Check if the HDFS file exists.
Parameters
----------
path : string
Returns
-------
bool
"""
        return self.client.status(path, strict=False) is not None
@implements(HDFS.ls)
def ls(self, hdfs_path: str, status: bool = False) -> list:
"""Return contents of directory.
Parameters
----------
hdfs_path : string
status : bool
Returns
-------
list
"""
return self.client.list(hdfs_path, status=status)
@implements(HDFS.mkdir)
def mkdir(self, dir_path: str):
"""Create new directory.
Parameters
----------
        dir_path : string
"""
self.client.makedirs(dir_path)
@implements(HDFS.size)
def size(self, hdfs_path: str) -> int:
"""Return total size of file or directory.
Parameters
----------
hdfs_path : string
Returns
-------
size : int
"""
return self.client.content(hdfs_path)['length']
@implements(HDFS.mv)
def mv(
self, hdfs_path_src: str, hdfs_path_dest: str, overwrite: bool = True
):
"""Move hdfs_path_src to hdfs_path_dest.
Parameters
----------
hdfs_path_src: string
hdfs_path_dest: string
overwrite : boolean, default True
Overwrite hdfs_path_dest if it exists.
"""
if overwrite and self.exists(hdfs_path_dest):
if self.status(hdfs_path_dest)['type'] == 'FILE':
self.rm(hdfs_path_dest)
self.client.rename(hdfs_path_src, hdfs_path_dest)
def delete(self, hdfs_path: str, recursive: bool = False) -> bool:
"""Delete a file located at `hdfs_path`.
Parameters
----------
hdfs_path : string
recursive : bool, default False
Returns
-------
bool
True if the function was successful.
"""
return self.client.delete(hdfs_path, recursive=recursive)
@implements(HDFS.head)
def head(
self, hdfs_path: str, nbytes: int = 1024, offset: int = 0
) -> bytes:
"""Retrieve the requested number of bytes from a file.
Parameters
----------
hdfs_path : string
Absolute HDFS path
nbytes : int, default 1024 (1K)
Number of bytes to retrieve
offset : int, default 0
Number of bytes at beginning of file to skip before retrieving data
Returns
-------
head_data : bytes
"""
_reader = self.client.read(hdfs_path, offset=offset, length=nbytes)
with _reader as reader:
return reader.read()
@implements(HDFS.put)
def put(
self,
hdfs_path: str,
resource,
overwrite: bool = False,
verbose: bool = None,
**kwargs,
):
"""
Write file or directory to HDFS.
Parameters
----------
hdfs_path : string
Directory or path
resource : string or buffer-like
Relative or absolute path to local resource, or a file-like object
overwrite : boolean, default False
verbose : boolean, default ibis options.verbose
Further keyword arguments passed down to any internal API used.
Returns
-------
written_path : string
The path to the written file or directory
"""
verbose = verbose or options.verbose
if isinstance(resource, str):
# `resource` is a path.
return self.client.upload(
hdfs_path, resource, overwrite=overwrite, **kwargs
)
else:
# `resource` is a file-like object.
hdfs_path = self.client.resolve(hdfs_path)
self.client.write(
hdfs_path, data=resource, overwrite=overwrite, **kwargs
)
return hdfs_path
@implements(HDFS.get)
def get(
self,
hdfs_path: str,
local_path: str,
overwrite: bool = False,
verbose: bool = None,
**kwargs,
) -> str:
"""
Download remote file or directory to the local filesystem.
Parameters
----------
hdfs_path : string
local_path : string, default '.'
overwrite : bool, default False
Further keyword arguments passed down to any internal API used.
Returns
-------
written_path : string
The path to the written file or directory
"""
verbose = verbose or options.verbose
return self.client.download(
hdfs_path, local_path, overwrite=overwrite, **kwargs
)
def hdfs_connect(
host='localhost',
port=50070,
protocol='webhdfs',
use_https='default',
auth_mechanism='NOSASL',
verify=True,
session=None,
**kwds,
):
"""Connect to HDFS.
Parameters
----------
host : str
Host name of the HDFS NameNode
port : int
NameNode's WebHDFS port
protocol : str,
The protocol used to communicate with HDFS. The only valid value is
``'webhdfs'``.
    use_https : bool or 'default'
        Connect to WebHDFS with HTTPS, otherwise plain HTTP. With the default
        setting, HTTPS is used for Kerberos-secured (GSSAPI/LDAP)
        authentication and plain HTTP otherwise.
auth_mechanism : str
Set to NOSASL or PLAIN for non-secure clusters.
Set to GSSAPI or LDAP for Kerberos-secured clusters.
verify : bool
Set to :data:`False` to turn off verifying SSL certificates.
session : Optional[requests.Session]
A custom :class:`requests.Session` object.
Notes
-----
Other keywords are forwarded to HDFS library classes.
Returns
-------
WebHDFS
"""
import requests
if session is None:
session = requests.Session()
session.verify = verify
if auth_mechanism in ('GSSAPI', 'LDAP'):
from hdfs.ext.kerberos import KerberosClient
if use_https == 'default':
prefix = 'https'
else:
prefix = 'https' if use_https else 'http'
        # Kerberos-secured clusters default to HTTPS (SSL).
url = f'{prefix}://{host}:{port}'
kwds.setdefault('mutual_auth', 'OPTIONAL')
hdfs_client = KerberosClient(url, session=session, **kwds)
else:
if use_https == 'default':
prefix = 'http'
else:
prefix = 'https' if use_https else 'http'
from hdfs.client import InsecureClient
url = f'{prefix}://{host}:{port}'
hdfs_client = InsecureClient(url, session=session, **kwds)
return WebHDFS(hdfs_client)
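# A minimal usage sketch (hypothetical host and paths): connect to a WebHDFS
# endpoint and exercise a few of the methods defined above. Only functions and
# methods defined in this module are used; names are illustrative.
#
#   hdfs = hdfs_connect(host='namenode.example.com', port=50070)
#   if not hdfs.exists('/user/ibis/staging'):
#       hdfs.mkdir('/user/ibis/staging')
#   hdfs.put('/user/ibis/staging/data.csv', '/tmp/data.csv', overwrite=True)
#   print(hdfs.ls('/user/ibis/staging'))
#   print(hdfs.size('/user/ibis/staging/data.csv'))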
| cloudera/ibis | ibis/backends/impala/hdfs.py | Python | apache-2.0 | 17,817 |
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import common
import ports
import os
import subprocess
import time
await_seconds = 1  # Async invocations must be verified (or used) after a delay.
def setup_container_port(docker_pid=None, port_name=None, port_ip_addr=None):
"""
Push the vport to the container namespace.
"""
# Push the KNI port to namespace.
common.run_local_cmd(
"ip link set {} netns {}".format(port_name, docker_pid))
time.sleep(await_seconds)
common.run_local_cmd(
"ip netns exec {} ifconfig {} up".format(docker_pid, port_name))
time.sleep(await_seconds)
common.run_local_cmd("ip netns exec {} ifconfig {} {}".format(
docker_pid, port_name, port_ip_addr))
time.sleep(await_seconds)
common.run_local_cmd(
"ip netns exec {} ifconfig {} promisc".format(docker_pid, port_name))
time.sleep(await_seconds)
def setup_network_namespace(docker_pid=None, port_name=None):
"""
Setup the network namespace for the docker container.
"""
# Namespace configuration.
proc_filepath = "/proc/{}/ns/net".format(docker_pid)
netns_filepath = "/var/run/netns/{}".format(docker_pid)
    # Proc dir won't exist if the container is not running.
if not os.path.isfile(proc_filepath):
print('proc pid dir does not exist. {}'.format(proc_filepath))
exit()
# Create a symbolic link.
common.run_local_cmd("ln -sf {} {}".format(proc_filepath, netns_filepath))
# Wait for linking to be successful.
time.sleep(await_seconds)
# Check if the netns is correctly setup.
if not os.path.isfile(netns_filepath):
print('netns pid dir does not exist. {}'.format(netns_filepath))
exit()
# Verify that the KNI port (exposed by DPDK) is up.
kni_ports = common.get_kni_ports()
if port_name not in kni_ports:
print('KNI {} not up'.format(port_name))
exit()
# Get port ip and ethernet address.
port_ip_addr = common.run_local_cmd(
common.get_port_ip(port_name), get_output=True)
port_eth_addr = common.run_local_cmd(
common.get_port_ether(port_name), get_output=True)
# Verify port is setup with a valid ip and ethernet address.
if not common.is_ipv4(port_ip_addr):
        print('Port {} does not have an assigned IP addr'.format(port_name))
exit()
if not common.is_mac(port_eth_addr):
        print('Port {} does not have an assigned ether addr'.format(port_name))
exit()
setup_container_port(docker_pid=docker_pid,
port_name=port_name, port_ip_addr=port_ip_addr)
return port_eth_addr, port_ip_addr
def start_container(command=None, port_name=None, name=None):
"""
    Start a docker container and set up its network namespace.
"""
docker_pid_cmd = "%s %s" % ("docker inspect -f {{.State.Pid}}", name)
print('Docker command: {}'.format(command))
# Start the docker.
common.run_local_cmd(command)
# Wait for container to run.
time.sleep(await_seconds)
docker_pid = common.run_local_cmd(docker_pid_cmd, get_output=True)
# If there is no such container with this 'name', then this will error out.
docker_pid = int(docker_pid)
port_eth_addr, port_ip_addr = setup_network_namespace(
docker_pid=docker_pid, port_name=port_name)
# Get the container IP address and Ethernet address.
container_port_ip_addr = common.run_local_cmd(
common.get_container_port_ip(str(docker_pid), port_name), get_output=True)
container_port_eth_addr = common.run_local_cmd(
common.get_container_port_ether(str(docker_pid), port_name), get_output=True)
# Verify the correctness of the port and ethernet addr.
if container_port_ip_addr != port_ip_addr:
print('Incorrect IP within container: Container {}, Host {}'
.format(container_port_ip_addr, port_ip_addr))
print(common.get_container_port_ip(str(docker_pid), port_name))
exit()
if container_port_eth_addr != port_eth_addr:
print('Incorrect Ether within container: Container {}, Host {}'
.format(container_port_eth_addr, port_eth_addr))
print(common.get_container_port_ether(str(docker_pid), port_name))
exit()
# Store the configuration.
ret = {
'name': name,
'netns': docker_pid,
'pid': docker_pid,
'ip_addr': container_port_ip_addr,
'eth_addr': container_port_eth_addr,
'port_name': port_name,
'command': command
}
return ret
def connect(node_a, node_b):
"""
Setup the IP route and ARP table in the containers.
Inputs:
- node_a : Container 'a' conf.
- node_b : Container 'b' conf.
    node_a and node_b are interchangeable.
"""
route_a2b = ("ip netns exec {} ip route add {} dev {}"
.format(node_a['netns'],
node_b['ip_addr'],
node_a['port_name']))
route_b2a = ("ip netns exec {} ip route add {} dev {}"
.format(node_b['netns'],
node_a['ip_addr'],
node_b['port_name']))
arp_a2b = ("ip netns exec {} arp -s {} {}"
.format(node_a['netns'],
node_b['ip_addr'],
node_b['eth_addr']))
arp_b2a = ("ip netns exec {} arp -s {} {}"
.format(node_b['netns'],
node_a['ip_addr'],
node_a['eth_addr']))
common.run_local_cmd(route_a2b)
common.run_local_cmd(route_b2a)
common.run_local_cmd(arp_a2b)
common.run_local_cmd(arp_b2a)
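# For illustration, with hypothetical values node_a = {'netns': 1234,
# 'port_name': 'vEth0_0', 'ip_addr': '11.0.0.2', 'eth_addr': 'aa:bb:cc:00:00:02'}
# and node_b = {'netns': 5678, 'port_name': 'vEth1_0', 'ip_addr': '11.0.0.3',
# 'eth_addr': 'aa:bb:cc:00:00:03'}, connect() issues commands equivalent to:
#
#   ip netns exec 1234 ip route add 11.0.0.3 dev vEth0_0
#   ip netns exec 5678 ip route add 11.0.0.2 dev vEth1_0
#   ip netns exec 1234 arp -s 11.0.0.3 aa:bb:cc:00:00:03
#   ip netns exec 5678 arp -s 11.0.0.2 aa:bb:cc:00:00:02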
def dns(node_this, node_other):
"""
    Set up DNS in 'node_this' so that 'node_other'
    can be reached by name (i.e., resolved) instead of by IP.
    Input:
        - node_this: Container whose DNS is to be updated.
        - node_other: Container that should be reachable.
"""
command = ("docker exec -u root -it {} bash -c \"echo \'{} {}\' >> /etc/hosts\""
.format(node_this['name'], node_other['ip_addr'], node_other['name']))
os.popen(command)
time.sleep(await_seconds)
def number_of_running_processes():
"""
Return the count of running containers.
"""
n_docker = common.run_local_cmd('expr $(docker ps -a | wc -l) - 1', get_output=True)
return int(n_docker)
def stop_all_docker_containers():
"""
Stop all containers.
"""
common.run_local_cmd('docker stop $(docker ps -a -q)')
time.sleep(await_seconds)
def remove_all_docker_containers():
"""
Remove all containers.
"""
common.run_local_cmd('docker rm $(docker ps -a -q)')
time.sleep(await_seconds)
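# A minimal end-to-end sketch (hypothetical container and KNI port names):
# start two containers on DPDK KNI ports, wire them together, and let them
# resolve each other by name. Only functions defined in this module are used.
#
#   node_a = start_container(command='docker run -d --name client my_image',
#                            port_name='vEth0_0', name='client')
#   node_b = start_container(command='docker run -d --name server my_image',
#                            port_name='vEth1_0', name='server')
#   connect(node_a, node_b)
#   dns(node_a, node_b)
#   dns(node_b, node_a)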
| google/ghost-dataplane | orchestrator/src/docker.py | Python | apache-2.0 | 7,385 |
#!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import json
import os
import pdb
import pickle
import sys
import h5py
import numpy as np
import pandas as pd
import pysam
import pyBigWig
import tensorflow as tf
if tf.__version__[0] == '1':
tf.compat.v1.enable_eager_execution()
from basenji import bed
from basenji import dna_io
from basenji import seqnn
from basenji import stream
'''
basenji_predict_bed.py
Predict sequences from a BED file.
'''
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <model_file> <bed_file>'
parser = OptionParser(usage)
parser.add_option('-b', dest='bigwig_indexes',
default=None, help='Comma-separated list of target indexes to write BigWigs')
parser.add_option('-e', dest='embed_layer',
default=None, type='int',
help='Embed sequences using the specified layer index.')
parser.add_option('-f', dest='genome_fasta',
default=None,
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('-g', dest='genome_file',
default=None,
help='Chromosome length information [Default: %default]')
parser.add_option('-l', dest='site_length',
default=None, type='int',
help='Prediction site length. [Default: model seq_length]')
parser.add_option('-o', dest='out_dir',
default='pred_out',
help='Output directory [Default: %default]')
# parser.add_option('--plots', dest='plots',
# default=False, action='store_true',
# help='Make heatmap plots [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Ensemble forward and reverse complement predictions [Default: %default]')
parser.add_option('-s', dest='sum',
default=False, action='store_true',
help='Sum site predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--species', dest='species',
default='human')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
(options, args) = parser.parse_args()
if len(args) == 2:
model_file = args[0]
bed_file = args[1]
elif len(args) == 4:
# multi worker
options_pkl_file = args[0]
model_file = args[1]
bed_file = args[2]
worker_index = int(args[3])
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
# update output directory
options.out_dir = '%s/job%d' % (options.out_dir, worker_index)
else:
parser.error('Must provide parameter and model files and BED file')
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
options.shifts = [int(shift) for shift in options.shifts.split(',')]
if options.bigwig_indexes is not None:
options.bigwig_indexes = [int(bi) for bi in options.bigwig_indexes.split(',')]
else:
options.bigwig_indexes = []
if len(options.bigwig_indexes) > 0:
bigwig_dir = '%s/bigwig' % options.out_dir
if not os.path.isdir(bigwig_dir):
os.mkdir(bigwig_dir)
#################################################################
  # read parameters and collect target information
if options.targets_file is None:
target_slice = None
else:
targets_df = pd.read_table(options.targets_file, index_col=0)
target_slice = targets_df.index
#################################################################
# setup model
seqnn_model = tf.saved_model.load(model_file).model
# query num model targets
seq_length = seqnn_model.predict_on_batch.input_signature[0].shape[1]
null_1hot = np.zeros((1,seq_length,4))
null_preds = seqnn_model.predict_on_batch(null_1hot)
null_preds = null_preds[options.species].numpy()
_, preds_length, preds_depth = null_preds.shape
# hack sizes
preds_window = 128
seq_crop = (seq_length - preds_length*preds_window) // 2
#################################################################
# sequence dataset
if options.site_length is None:
options.site_length = preds_window*preds_length
print('site_length: %d' % options.site_length)
# construct model sequences
model_seqs_dna, model_seqs_coords = bed.make_bed_seqs(
bed_file, options.genome_fasta,
seq_length, stranded=False)
# construct site coordinates
site_seqs_coords = bed.read_bed_coords(bed_file, options.site_length)
  # filter sequences for this worker
if options.processes is not None:
worker_bounds = np.linspace(0, len(model_seqs_dna), options.processes+1, dtype='int')
model_seqs_dna = model_seqs_dna[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
model_seqs_coords = model_seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
site_seqs_coords = site_seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
num_seqs = len(model_seqs_dna)
#################################################################
# setup output
assert(preds_length % 2 == 0)
preds_mid = preds_length // 2
assert(options.site_length % preds_window == 0)
site_preds_length = options.site_length // preds_window
assert(site_preds_length % 2 == 0)
site_preds_start = preds_mid - site_preds_length//2
site_preds_end = site_preds_start + site_preds_length
# initialize HDF5
out_h5_file = '%s/predict.h5' % options.out_dir
if os.path.isfile(out_h5_file):
os.remove(out_h5_file)
out_h5 = h5py.File(out_h5_file, 'w')
# create predictions
if options.sum:
out_h5.create_dataset('preds', shape=(num_seqs, preds_depth), dtype='float16')
else:
out_h5.create_dataset('preds', shape=(num_seqs, site_preds_length, preds_depth), dtype='float16')
# store site coordinates
site_seqs_chr, site_seqs_start, site_seqs_end = zip(*site_seqs_coords)
site_seqs_chr = np.array(site_seqs_chr, dtype='S')
site_seqs_start = np.array(site_seqs_start)
site_seqs_end = np.array(site_seqs_end)
out_h5.create_dataset('chrom', data=site_seqs_chr)
out_h5.create_dataset('start', data=site_seqs_start)
out_h5.create_dataset('end', data=site_seqs_end)
#################################################################
# predict scores, write output
# define sequence generator
def seqs_gen():
for seq_dna in model_seqs_dna:
yield dna_io.dna_1hot(seq_dna)
# initialize predictions stream
preds_stream = stream.PredStreamSonnet(seqnn_model, seqs_gen(),
rc=options.rc, shifts=options.shifts, species=options.species)
for si in range(num_seqs):
preds_seq = preds_stream[si]
# slice site
preds_site = preds_seq[site_preds_start:site_preds_end,:]
# write
if options.sum:
out_h5['preds'][si] = preds_site.sum(axis=0)
else:
out_h5['preds'][si] = preds_site
# write bigwig
for ti in options.bigwig_indexes:
bw_file = '%s/s%d_t%d.bw' % (bigwig_dir, si, ti)
bigwig_write(preds_seq[:,ti], model_seqs_coords[si], bw_file,
options.genome_file, seq_crop)
# close output HDF5
out_h5.close()
def bigwig_open(bw_file, genome_file):
""" Open the bigwig file for writing and write the header. """
bw_out = pyBigWig.open(bw_file, 'w')
chrom_sizes = []
for line in open(genome_file):
a = line.split()
chrom_sizes.append((a[0], int(a[1])))
bw_out.addHeader(chrom_sizes)
return bw_out
def bigwig_write(signal, seq_coords, bw_file, genome_file, seq_crop=0):
""" Write a signal track to a BigWig file over the region
specified by seqs_coords.
Args
signal: Sequences x Length signal array
seq_coords: (chr,start,end)
bw_file: BigWig filename
genome_file: Chromosome lengths file
seq_crop: Sequence length cropped from each side of the sequence.
"""
target_length = len(signal)
# open bigwig
bw_out = bigwig_open(bw_file, genome_file)
# initialize entry arrays
entry_starts = []
entry_ends = []
# set entries
chrm, start, end = seq_coords
preds_pool = (end - start - 2 * seq_crop) // target_length
bw_start = start + seq_crop
for li in range(target_length):
bw_end = bw_start + preds_pool
entry_starts.append(bw_start)
entry_ends.append(bw_end)
bw_start = bw_end
# add
bw_out.addEntries(
[chrm]*target_length,
entry_starts,
ends=entry_ends,
values=[float(s) for s in signal])
bw_out.close()
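# A worked example of the coordinate arithmetic above (hypothetical numbers):
# for seq_coords = ('chr1', 0, 131072), seq_crop = 8192 and a signal of
# target_length = 896 values, preds_pool = (131072 - 0 - 2*8192) // 896 = 128,
# so entries are written for chr1:8192-8320, chr1:8320-8448, ..., one 128 bp
# bin per predicted value.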
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| calico/basenji | bin/sonnet_predict_bed.py | Python | apache-2.0 | 9,710 |
#!/usr/bin/env python
# standard library imports
import signal
# third party related imports
import boto.sqs
import ujson
# local library imports
from mobile_push.config import setting
from mobile_push.logger import logger
from mobile_push.message_router import MessageRouter
keep_running = True
def sigterm_handler(signum, _):
global keep_running
logger.warn('Receive SIGTERM')
keep_running = False
def get_queue():
conn = boto.sqs.connect_to_region(setting.get('sqs', 'region'))
return conn.get_queue(setting.get('sqs', 'queue'))
def poll_message(queue):
message = queue.read(wait_time_seconds=20)
if message is None:
return
try:
body = message.get_body()
units = ujson.loads(body)
except ValueError:
logger.error('Cannot parse: %s', body)
units = []
if not isinstance(units, list):
units = [units]
for unit in units:
try:
MessageRouter(unit).get_actor().run(unit)
except MessageRouter.BaseError:
logger.error('Cannot route message: %s', ujson.dumps(unit))
except Exception as e:
logger.exception(e)
queue.delete_message(message)
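# For illustration, a hedged sketch of the SQS message bodies poll_message
# accepts: either a single JSON object or a JSON list of such objects, each
# routed to an actor by MessageRouter. The exact keys an actor expects are
# defined elsewhere; the field names below are purely illustrative.
#
#   [
#       {"target": "apns", "message": {"alert": "hello"}},
#       {"target": "gcm", "message": {"title": "hello"}}
#   ]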
def main():
signal.signal(signal.SIGTERM, sigterm_handler)
q = get_queue()
while keep_running:
poll_message(q)
if __name__ == '__main__':
main()
| theKono/mobile-push | bin/competing_consumer.py | Python | apache-2.0 | 1,384 |
import os
import subprocess
from ruamel import yaml
import great_expectations as ge
context = ge.get_context()
# NOTE: The following code is only for testing and depends on an environment
# variable to set the gcp_project. You can replace the value with your own
# GCP project information
gcp_project = os.environ.get("GE_TEST_GCP_PROJECT")
if not gcp_project:
raise ValueError(
"Environment Variable GE_TEST_GCP_PROJECT is required to run BigQuery integration tests"
)
# parse great_expectations.yml for comparison
great_expectations_yaml_file_path = os.path.join(
context.root_directory, "great_expectations.yml"
)
with open(great_expectations_yaml_file_path) as f:
great_expectations_yaml = yaml.safe_load(f)
stores = great_expectations_yaml["stores"]
pop_stores = ["checkpoint_store", "evaluation_parameter_store", "validations_store"]
for store in pop_stores:
stores.pop(store)
actual_existing_expectations_store = {}
actual_existing_expectations_store["stores"] = stores
actual_existing_expectations_store["expectations_store_name"] = great_expectations_yaml[
"expectations_store_name"
]
expected_existing_expectations_store_yaml = """
stores:
expectations_store:
class_name: ExpectationsStore
store_backend:
class_name: TupleFilesystemStoreBackend
base_directory: expectations/
expectations_store_name: expectations_store
"""
assert actual_existing_expectations_store == yaml.safe_load(
expected_existing_expectations_store_yaml
)
configured_expectations_store_yaml = """
stores:
expectations_GCS_store:
class_name: ExpectationsStore
store_backend:
class_name: TupleGCSStoreBackend
project: <YOUR GCP PROJECT NAME>
bucket: <YOUR GCS BUCKET NAME>
prefix: <YOUR GCS PREFIX NAME>
expectations_store_name: expectations_GCS_store
"""
# replace example code with integration test configuration
configured_expectations_store = yaml.safe_load(configured_expectations_store_yaml)
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"project"
] = gcp_project
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"bucket"
] = "test_metadata_store"
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"prefix"
] = "how_to_configure_an_expectation_store_in_gcs/expectations"
try:
# remove this bucket if there was a failure in the script last time
result = subprocess.run(
"gsutil rm -r gs://test_metadata_store/how_to_configure_an_expectation_store_in_gcs/expectations".split(),
check=True,
stderr=subprocess.PIPE,
)
except Exception as e:
pass
# add and set the new expectation store
context.add_store(
store_name=configured_expectations_store["expectations_store_name"],
store_config=configured_expectations_store["stores"]["expectations_GCS_store"],
)
with open(great_expectations_yaml_file_path) as f:
great_expectations_yaml = yaml.safe_load(f)
great_expectations_yaml["expectations_store_name"] = "expectations_GCS_store"
great_expectations_yaml["stores"]["expectations_GCS_store"]["store_backend"].pop(
"suppress_store_backend_id"
)
with open(great_expectations_yaml_file_path, "w") as f:
yaml.dump(great_expectations_yaml, f, default_flow_style=False)
expectation_suite_name = "my_expectation_suite"
context.create_expectation_suite(expectation_suite_name=expectation_suite_name)
# try gsutil cp command
copy_expectation_command = """
gsutil cp expectations/my_expectation_suite.json gs://<YOUR GCS BUCKET NAME>/<YOUR GCS PREFIX NAME>/my_expectation_suite.json
"""
local_expectation_suite_file_path = os.path.join(
context.root_directory, "expectations", f"{expectation_suite_name}.json"
)
copy_expectation_command = copy_expectation_command.replace(
"expectations/my_expectation_suite.json", local_expectation_suite_file_path
)
copy_expectation_command = copy_expectation_command.replace(
"<YOUR GCS BUCKET NAME>",
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"bucket"
],
)
copy_expectation_command = copy_expectation_command.replace(
"<YOUR GCS PREFIX NAME>/my_expectation_suite.json",
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"prefix"
]
+ f"/{expectation_suite_name}.json",
)
result = subprocess.run(
copy_expectation_command.strip().split(),
check=True,
stderr=subprocess.PIPE,
)
stderr = result.stderr.decode("utf-8")
copy_expectation_output = """
Operation completed over 1 objects
"""
assert copy_expectation_output.strip() in stderr
# list expectation stores
list_expectation_stores_command = """
great_expectations store list
"""
result = subprocess.run(
list_expectation_stores_command.strip().split(),
check=True,
stdout=subprocess.PIPE,
)
stdout = result.stdout.decode("utf-8")
list_expectation_stores_output = """
- name: expectations_GCS_store
class_name: ExpectationsStore
store_backend:
class_name: TupleGCSStoreBackend
project: <YOUR GCP PROJECT NAME>
bucket: <YOUR GCS BUCKET NAME>
prefix: <YOUR GCS PREFIX NAME>
"""
assert "expectations_GCS_store" in list_expectation_stores_output
assert "expectations_GCS_store" in stdout
assert "TupleGCSStoreBackend" in list_expectation_stores_output
assert "TupleGCSStoreBackend" in stdout
# list expectation suites
list_expectation_suites_command = """
great_expectations suite list
"""
result = subprocess.run(
list_expectation_suites_command.strip().split(),
check=True,
stdout=subprocess.PIPE,
)
stdout = result.stdout.decode("utf-8")
list_expectation_suites_output = """
1 Expectation Suite found:
- my_expectation_suite
"""
assert "1 Expectation Suite found:" in list_expectation_suites_output
assert "1 Expectation Suite found:" in stdout
assert "my_expectation_suite" in list_expectation_suites_output
assert "my_expectation_suite" in stdout
# clean up this bucket for next time
result = subprocess.run(
"gsutil rm -r gs://test_metadata_store/how_to_configure_an_expectation_store_in_gcs/expectations".split(),
check=True,
stderr=subprocess.PIPE,
)
| great-expectations/great_expectations | tests/integration/docusaurus/setup/configuring_metadata_stores/how_to_configure_an_expectation_store_in_gcs.py | Python | apache-2.0 | 6,223 |
"""Support for KNX/IP climate devices."""
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
STATE_DRY, STATE_ECO, STATE_FAN_ONLY, STATE_HEAT, STATE_IDLE, STATE_MANUAL,
SUPPORT_ON_OFF, SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import ATTR_DISCOVER_DEVICES, DATA_KNX
CONF_SETPOINT_SHIFT_ADDRESS = 'setpoint_shift_address'
CONF_SETPOINT_SHIFT_STATE_ADDRESS = 'setpoint_shift_state_address'
CONF_SETPOINT_SHIFT_STEP = 'setpoint_shift_step'
CONF_SETPOINT_SHIFT_MAX = 'setpoint_shift_max'
CONF_SETPOINT_SHIFT_MIN = 'setpoint_shift_min'
CONF_TEMPERATURE_ADDRESS = 'temperature_address'
CONF_TARGET_TEMPERATURE_ADDRESS = 'target_temperature_address'
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = 'target_temperature_state_address'
CONF_OPERATION_MODE_ADDRESS = 'operation_mode_address'
CONF_OPERATION_MODE_STATE_ADDRESS = 'operation_mode_state_address'
CONF_CONTROLLER_STATUS_ADDRESS = 'controller_status_address'
CONF_CONTROLLER_STATUS_STATE_ADDRESS = 'controller_status_state_address'
CONF_CONTROLLER_MODE_ADDRESS = 'controller_mode_address'
CONF_CONTROLLER_MODE_STATE_ADDRESS = 'controller_mode_state_address'
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = \
'operation_mode_frost_protection_address'
CONF_OPERATION_MODE_NIGHT_ADDRESS = 'operation_mode_night_address'
CONF_OPERATION_MODE_COMFORT_ADDRESS = 'operation_mode_comfort_address'
CONF_OPERATION_MODES = 'operation_modes'
CONF_ON_OFF_ADDRESS = 'on_off_address'
CONF_ON_OFF_STATE_ADDRESS = 'on_off_state_address'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
DEFAULT_NAME = 'KNX Climate'
DEFAULT_SETPOINT_SHIFT_STEP = 0.5
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEPENDENCIES = ['knx']
# Map KNX operation modes to HA modes. This list might not be complete.
OPERATION_MODES = {
# Map DPT 201.100 HVAC operating modes
"Frost Protection": STATE_MANUAL,
"Night": STATE_IDLE,
"Standby": STATE_ECO,
"Comfort": STATE_HEAT,
# Map DPT 201.104 HVAC control modes
"Fan only": STATE_FAN_ONLY,
"Dehumidification": STATE_DRY
}
OPERATION_MODES_INV = dict((
reversed(item) for item in OPERATION_MODES.items()))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STEP,
default=DEFAULT_SETPOINT_SHIFT_STEP): vol.All(
float, vol.Range(min=0, max=2)),
vol.Optional(CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX):
vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN):
vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODES):
vol.All(cv.ensure_list, [vol.In(OPERATION_MODES)]),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up climate(s) for KNX platform."""
if discovery_info is not None:
async_add_entities_discovery(hass, discovery_info, async_add_entities)
else:
async_add_entities_config(hass, config, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
"""Set up climates for KNX platform configured within platform."""
entities = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
entities.append(KNXClimate(device))
async_add_entities(entities)
@callback
def async_add_entities_config(hass, config, async_add_entities):
"""Set up climate for KNX platform configured within platform."""
import xknx
climate_mode = xknx.devices.ClimateMode(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME) + " Mode",
group_address_operation_mode=config.get(CONF_OPERATION_MODE_ADDRESS),
group_address_operation_mode_state=config.get(
CONF_OPERATION_MODE_STATE_ADDRESS),
group_address_controller_status=config.get(
CONF_CONTROLLER_STATUS_ADDRESS),
group_address_controller_status_state=config.get(
CONF_CONTROLLER_STATUS_STATE_ADDRESS),
group_address_controller_mode=config.get(
CONF_CONTROLLER_MODE_ADDRESS),
group_address_controller_mode_state=config.get(
CONF_CONTROLLER_MODE_STATE_ADDRESS),
group_address_operation_mode_protection=config.get(
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS),
group_address_operation_mode_night=config.get(
CONF_OPERATION_MODE_NIGHT_ADDRESS),
group_address_operation_mode_comfort=config.get(
CONF_OPERATION_MODE_COMFORT_ADDRESS),
operation_modes=config.get(
CONF_OPERATION_MODES))
hass.data[DATA_KNX].xknx.devices.add(climate_mode)
climate = xknx.devices.Climate(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME),
group_address_temperature=config[CONF_TEMPERATURE_ADDRESS],
group_address_target_temperature=config.get(
CONF_TARGET_TEMPERATURE_ADDRESS),
group_address_target_temperature_state=config[
CONF_TARGET_TEMPERATURE_STATE_ADDRESS],
group_address_setpoint_shift=config.get(CONF_SETPOINT_SHIFT_ADDRESS),
group_address_setpoint_shift_state=config.get(
CONF_SETPOINT_SHIFT_STATE_ADDRESS),
setpoint_shift_step=config.get(CONF_SETPOINT_SHIFT_STEP),
setpoint_shift_max=config.get(CONF_SETPOINT_SHIFT_MAX),
setpoint_shift_min=config.get(CONF_SETPOINT_SHIFT_MIN),
group_address_on_off=config.get(CONF_ON_OFF_ADDRESS),
group_address_on_off_state=config.get(CONF_ON_OFF_STATE_ADDRESS),
min_temp=config.get(CONF_MIN_TEMP),
max_temp=config.get(CONF_MAX_TEMP),
mode=climate_mode)
hass.data[DATA_KNX].xknx.devices.add(climate)
async_add_entities([KNXClimate(climate)])
class KNXClimate(ClimateDevice):
"""Representation of a KNX climate device."""
def __init__(self, device):
"""Initialize of a KNX climate device."""
self.device = device
self._unit_of_measurement = TEMP_CELSIUS
@property
def supported_features(self):
"""Return the list of supported features."""
support = SUPPORT_TARGET_TEMPERATURE
if self.device.mode.supports_operation_mode:
support |= SUPPORT_OPERATION_MODE
if self.device.supports_on_off:
support |= SUPPORT_ON_OFF
return support
async def async_added_to_hass(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
await self.async_update_ha_state()
self.device.register_device_updated_cb(after_update_callback)
@property
def name(self):
"""Return the name of the KNX device."""
return self.device.name
@property
def available(self):
"""Return True if entity is available."""
return self.hass.data[DATA_KNX].connected
@property
def should_poll(self):
"""No polling needed within KNX."""
return False
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self.device.temperature.value
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.device.setpoint_shift_step
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.device.target_temperature.value
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.device.target_temperature_min
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.device.target_temperature_max
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
await self.device.set_target_temperature(temperature)
await self.async_update_ha_state()
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if self.device.mode.supports_operation_mode:
return OPERATION_MODES.get(self.device.mode.operation_mode.value)
return None
@property
def operation_list(self):
"""Return the list of available operation modes."""
return [OPERATION_MODES.get(operation_mode.value) for
operation_mode in
self.device.mode.operation_modes]
async def async_set_operation_mode(self, operation_mode):
"""Set operation mode."""
if self.device.mode.supports_operation_mode:
from xknx.knx import HVACOperationMode
knx_operation_mode = HVACOperationMode(
OPERATION_MODES_INV.get(operation_mode))
await self.device.mode.set_operation_mode(knx_operation_mode)
await self.async_update_ha_state()
@property
def is_on(self):
"""Return true if the device is on."""
if self.device.supports_on_off:
return self.device.is_on
return None
async def async_turn_on(self):
"""Turn on."""
await self.device.turn_on()
async def async_turn_off(self):
"""Turn off."""
await self.device.turn_off()
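# A minimal configuration.yaml sketch for this platform (the KNX group
# addresses are illustrative; only temperature_address and
# target_temperature_state_address are required by PLATFORM_SCHEMA above):
#
#   climate:
#     - platform: knx
#       name: Living room
#       temperature_address: '5/1/1'
#       target_temperature_state_address: '5/1/2'
#       target_temperature_address: '5/1/3'
#       setpoint_shift_address: '5/1/4'
#       setpoint_shift_state_address: '5/1/5'
#       operation_mode_address: '5/1/6'
#       operation_mode_state_address: '5/1/7'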
| jamespcole/home-assistant | homeassistant/components/knx/climate.py | Python | apache-2.0 | 11,048 |
#!/usr/bin/env python3
############################################################################
# Copyright 2017 RIFT.IO Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
class ConfigurationError(Exception):
pass
def configure_waf_haproxy_cp(logger, run_dir, mgmt_ip, haproxy_cp_ip):
sh_file = "{}/waf_set_haproxy_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "echo \"<VirtualHost *:80>\r"
send " AddDefaultCharset UTF-8\r"
send " ProxyPreserveHost On\r"
send " ProxyRequests off\r"
send " ProxyVia Off\r"
send " ProxyPass / http://{haproxy_cp_ip}:5000/\r"
send " ProxyPassReverse / http://{haproxy_cp_ip}:5000/\r"
send " </VirtualHost>\" > /etc/httpd/conf.d/waf_proxy.conf\r"
expect "]# "
send "echo \"<IfModule mod_security2.c>\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/modsecurity_crs_10_setup.conf\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/base_rules/*.conf\r\r"
send " SecRuleEngine On\r"
send " SecRequestBodyAccess On\r"
send " SecResponseBodyAccess On\r"
send " SecDebugLog /var/log/httpd/modsec-debug.log\r"
send " SecDebugLogLevel 3\r"
send "</IfModule>\" > /etc/httpd/conf.d/mod_security.conf\r"
expect "]# "
send "systemctl stop httpd\r"
expect "]# "
send "systemctl start httpd\r"
expect "]# "
'''.format(mgmt_ip=mgmt_ip, haproxy_cp_ip=haproxy_cp_ip))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name):
sh_file = "{}/haproxy_add_waf_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "grep \"server {waf_server_name} {waf_cp_ip}\" /etc/haproxy/haproxy.cfg && echo \"Already configured\" && exit 0\r"
expect {{
"]$ " {{ exit }}
"]# "
}}
send "sed -i \'s/\\(.*WAF list.*\\)/\\1\\n server {waf_server_name} {waf_cp_ip}:80 check/g\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_cp_ip=waf_cp_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name):
sh_file = "{}/haproxy_remove_httpd_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "sed -i \'/server {waf_server_name}/d\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy remove waf config failed: {}".format(rc))
def main(argv=sys.argv[1:]):
try:
parser = argparse.ArgumentParser()
parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
args = parser.parse_args()
run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
if not os.path.exists(run_dir):
os.makedirs(run_dir)
log_file = "{}/rift_waf_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logging.basicConfig(filename=log_file, level=logging.DEBUG)
logger = logging.getLogger()
ch = logging.StreamHandler()
if args.verbose:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
except Exception as e:
print("Got exception:{}".format(e))
raise
try:
dry_run = args.dry_run
yaml_str = args.yaml_cfg_file.read()
logger.debug("Input YAML file: %s", yaml_str)
yaml_cfg = yaml.load(yaml_str)
logger.debug("Input YAML cfg: %s", yaml_cfg)
# Check if this is post scale out trigger
def find_cp_ip(vnfr_list, vnfd_name, cp_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
for cp in vnfr['connection_points']:
logger.debug("Connection point: %s", format(cp))
if cp_name in cp['name']:
return cp['ip_address']
raise ValueError("Could not find vnfd %s connection point %s", vnfd_name, cp_name)
def find_mgmt_ip(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr['rw_mgmt_ip']
raise ValueError("Could not find vnfd %s mgmt ip", vnfd_name)
def find_vnfr(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr
raise ValueError("Could not find vnfd %s", vnfd_name)
haproxy_cp_ip = find_cp_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd", "cp0")
haproxy_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd")
waf_cp_ip = find_cp_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd", "cp0")
waf_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
waf_vnfr = find_vnfr(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
# HAProxy wants to use a name without .'s
waf_server_name = waf_vnfr["name"].replace(".", "__")
if yaml_cfg['trigger'] == 'post_scale_out':
logger.debug("Sleeping for 60 seconds to give VNFD mgmt VM a chance to boot up")
time.sleep(60)
configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name)
configure_waf_haproxy_cp(logger, run_dir, waf_mgmt_ip, haproxy_cp_ip)
elif yaml_cfg['trigger'] == 'pre_scale_in':
configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name)
else:
raise ValueError("Unexpected trigger {}".format(yaml_cfg['trigger']))
except Exception as e:
logger.exception(e)
raise
if __name__ == "__main__":
main()
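# For illustration, a hedged sketch of the YAML configuration passed to this
# script (IPs and VNFR names are hypothetical; only the keys read above are
# shown):
#
#   trigger: post_scale_out
#   vnfrs_in_group:
#     - name: waf_vnfd.2
#       rw_mgmt_ip: 10.66.0.5
#       connection_points:
#         - name: cp0
#           ip_address: 11.0.0.5
#   vnfrs_others:
#     - name: haproxy_vnfd.1
#       rw_mgmt_ip: 10.66.0.3
#       connection_points:
#         - name: cp0
#           ip_address: 11.0.0.3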
| RIFTIO/rift.ware-descriptor-packages | 4.3/src/nsd/haproxy_waf_http_ns/scripts/waf_config.py | Python | apache-2.0 | 9,663 |
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
try:
from unittest import mock
except ImportError:
import mock
import testtools
import threading
import six
from six.moves.queue import Queue, Empty
from swiftclient import multithreading as mt
from swiftclient.exceptions import ClientException
class ThreadTestCase(testtools.TestCase):
def setUp(self):
super(ThreadTestCase, self).setUp()
self.got_args_kwargs = Queue()
self.starting_thread_count = threading.active_count()
def _func(self, q_item, *args, **kwargs):
self.got_items.put(q_item)
self.got_args_kwargs.put((args, kwargs))
if q_item == 'go boom':
raise Exception('I went boom!')
if q_item == 'c boom':
raise ClientException(
'Client Boom', http_scheme='http', http_host='192.168.22.1',
http_port=80, http_path='/booze', http_status=404,
http_reason='to much', http_response_content='no sir!')
return 'best result EVAR!'
def assertQueueContains(self, queue, expected_contents):
got_contents = []
try:
while True:
got_contents.append(queue.get(timeout=0.1))
except Empty:
pass
if isinstance(expected_contents, set):
got_contents = set(got_contents)
self.assertEqual(expected_contents, got_contents)
class TestQueueFunctionThread(ThreadTestCase):
def setUp(self):
super(TestQueueFunctionThread, self).setUp()
self.input_queue = Queue()
self.got_items = Queue()
self.stored_results = []
self.qft = mt.QueueFunctionThread(self.input_queue, self._func,
'one_arg', 'two_arg',
red_fish='blue_arg',
store_results=self.stored_results)
self.qft.start()
def tearDown(self):
if self.qft.is_alive():
self.finish_up_thread()
super(TestQueueFunctionThread, self).tearDown()
def finish_up_thread(self):
self.input_queue.put(mt.StopWorkerThreadSignal())
while self.qft.is_alive():
time.sleep(0.05)
def test_plumbing_and_store_results(self):
self.input_queue.put('abc')
self.input_queue.put(123)
self.finish_up_thread()
self.assertQueueContains(self.got_items, ['abc', 123])
self.assertQueueContains(self.got_args_kwargs, [
(('one_arg', 'two_arg'), {'red_fish': 'blue_arg'}),
(('one_arg', 'two_arg'), {'red_fish': 'blue_arg'})])
self.assertEqual(self.stored_results,
['best result EVAR!', 'best result EVAR!'])
def test_exception_handling(self):
self.input_queue.put('go boom')
self.input_queue.put('ok')
self.input_queue.put('go boom')
self.finish_up_thread()
self.assertQueueContains(self.got_items,
['go boom', 'ok', 'go boom'])
self.assertEqual(len(self.qft.exc_infos), 2)
self.assertEqual(Exception, self.qft.exc_infos[0][0])
self.assertEqual(Exception, self.qft.exc_infos[1][0])
self.assertEqual(('I went boom!',), self.qft.exc_infos[0][1].args)
self.assertEqual(('I went boom!',), self.qft.exc_infos[1][1].args)
class TestQueueFunctionManager(ThreadTestCase):
def setUp(self):
super(TestQueueFunctionManager, self).setUp()
self.thread_manager = mock.create_autospec(
mt.MultiThreadingManager, spec_set=True, instance=True)
self.thread_count = 4
self.error_counter = [0]
self.got_items = Queue()
self.stored_results = []
self.qfq = mt.QueueFunctionManager(
self._func, self.thread_count, self.thread_manager,
thread_args=('1arg', '2arg'),
thread_kwargs={'a': 'b', 'store_results': self.stored_results},
error_counter=self.error_counter,
connection_maker=self.connection_maker)
def connection_maker(self):
return 'yup, I made a connection'
def test_context_manager_without_error_counter(self):
self.qfq = mt.QueueFunctionManager(
self._func, self.thread_count, self.thread_manager,
thread_args=('1arg', '2arg'),
thread_kwargs={'a': 'b', 'store_results': self.stored_results},
connection_maker=self.connection_maker)
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
input_queue.put('go boom')
self.assertEqual(self.starting_thread_count, threading.active_count())
error_strs = list(map(str, self.thread_manager.error.call_args_list))
self.assertEqual(1, len(error_strs))
self.assertTrue('Exception: I went boom!' in error_strs[0])
def test_context_manager_without_conn_maker_or_error_counter(self):
self.qfq = mt.QueueFunctionManager(
self._func, self.thread_count, self.thread_manager,
thread_args=('1arg', '2arg'), thread_kwargs={'a': 'b'})
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
for i in range(20):
input_queue.put('slap%d' % i)
self.assertEqual(self.starting_thread_count, threading.active_count())
self.assertEqual([], self.thread_manager.error.call_args_list)
self.assertEqual(0, self.error_counter[0])
self.assertQueueContains(self.got_items,
set(['slap%d' % i for i in range(20)]))
self.assertQueueContains(
self.got_args_kwargs,
[(('1arg', '2arg'), {'a': 'b'})] * 20)
self.assertEqual(self.stored_results, [])
def test_context_manager_with_exceptions(self):
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
for i in range(20):
input_queue.put('item%d' % i if i % 2 == 0 else 'go boom')
self.assertEqual(self.starting_thread_count, threading.active_count())
error_strs = list(map(str, self.thread_manager.error.call_args_list))
self.assertEqual(10, len(error_strs))
self.assertTrue(all(['Exception: I went boom!' in s for s in
error_strs]))
self.assertEqual(10, self.error_counter[0])
expected_items = set(['go boom'] +
['item%d' % i for i in range(20)
if i % 2 == 0])
self.assertQueueContains(self.got_items, expected_items)
self.assertQueueContains(
self.got_args_kwargs,
[(('yup, I made a connection', '1arg', '2arg'), {'a': 'b'})] * 20)
self.assertEqual(self.stored_results, ['best result EVAR!'] * 10)
def test_context_manager_with_client_exceptions(self):
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
for i in range(20):
input_queue.put('item%d' % i if i % 2 == 0 else 'c boom')
self.assertEqual(self.starting_thread_count, threading.active_count())
error_strs = list(map(str, self.thread_manager.error.call_args_list))
self.assertEqual(10, len(error_strs))
stringification = 'Client Boom: ' \
'http://192.168.22.1:80/booze 404 too much no sir!'
self.assertTrue(all([stringification in s for s in error_strs]))
self.assertEqual(10, self.error_counter[0])
expected_items = set(['c boom'] +
['item%d' % i for i in range(20)
if i % 2 == 0])
self.assertQueueContains(self.got_items, expected_items)
self.assertQueueContains(
self.got_args_kwargs,
[(('yup, I made a connection', '1arg', '2arg'), {'a': 'b'})] * 20)
self.assertEqual(self.stored_results, ['best result EVAR!'] * 10)
def test_context_manager_with_connection_maker(self):
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
for i in range(20):
input_queue.put('item%d' % i)
self.assertEqual(self.starting_thread_count, threading.active_count())
self.assertEqual([], self.thread_manager.error.call_args_list)
self.assertEqual(0, self.error_counter[0])
self.assertQueueContains(self.got_items,
set(['item%d' % i for i in range(20)]))
self.assertQueueContains(
self.got_args_kwargs,
[(('yup, I made a connection', '1arg', '2arg'), {'a': 'b'})] * 20)
self.assertEqual(self.stored_results, ['best result EVAR!'] * 20)
class TestMultiThreadingManager(ThreadTestCase):
@mock.patch('swiftclient.multithreading.QueueFunctionManager')
def test_instantiation(self, mock_qfq):
thread_manager = mt.MultiThreadingManager()
self.assertEqual([
mock.call(thread_manager._print, 1, thread_manager),
mock.call(thread_manager._print_error, 1, thread_manager),
], mock_qfq.call_args_list)
# These contexts are not entered until the MultiThreadingManager's own
# context is entered.
self.assertEqual([], thread_manager.printer.__enter__.call_args_list)
self.assertEqual([],
thread_manager.error_printer.__enter__.call_args_list)
# Test default values for the streams.
self.assertEqual(sys.stdout, thread_manager.print_stream)
self.assertEqual(sys.stderr, thread_manager.error_stream)
@mock.patch('swiftclient.multithreading.QueueFunctionManager')
def test_queue_manager_no_args(self, mock_qfq):
thread_manager = mt.MultiThreadingManager()
mock_qfq.reset_mock()
mock_qfq.return_value = 'slap happy!'
self.assertEqual(
'slap happy!',
thread_manager.queue_manager(self._func, 88))
self.assertEqual([
mock.call(self._func, 88, thread_manager, thread_args=(),
thread_kwargs={}, connection_maker=None,
error_counter=None)
], mock_qfq.call_args_list)
@mock.patch('swiftclient.multithreading.QueueFunctionManager')
def test_queue_manager_with_args(self, mock_qfq):
thread_manager = mt.MultiThreadingManager()
mock_qfq.reset_mock()
mock_qfq.return_value = 'do run run'
self.assertEqual(
'do run run',
thread_manager.queue_manager(self._func, 88, 'fun', times='are',
connection_maker='abc', to='be had',
error_counter='def'))
self.assertEqual([
mock.call(self._func, 88, thread_manager, thread_args=('fun',),
thread_kwargs={'times': 'are', 'to': 'be had'},
connection_maker='abc', error_counter='def')
], mock_qfq.call_args_list)
def test_printers(self):
out_stream = six.StringIO()
err_stream = six.StringIO()
with mt.MultiThreadingManager(
print_stream=out_stream,
error_stream=err_stream) as thread_manager:
# Sanity-checking these also backs up the previous test, which relied on
# the default values of thread_manager.print_stream/error_stream.
self.assertEqual(out_stream, thread_manager.print_stream)
self.assertEqual(err_stream, thread_manager.error_stream)
self.assertEqual(self.starting_thread_count + 2,
threading.active_count())
thread_manager.print_msg('one-argument')
thread_manager.print_msg('one %s, %d fish', 'fish', 88)
thread_manager.error('I have %d problems, but a %s is not one',
99, u'\u062A\u062A')
thread_manager.print_msg('some\n%s\nover the %r', 'where',
u'\u062A\u062A')
thread_manager.error('one-error-argument')
thread_manager.error('Sometimes\n%.1f%% just\ndoes not\nwork!',
3.14159)
self.assertEqual(self.starting_thread_count, threading.active_count())
out_stream.seek(0)
if six.PY3:
over_the = "over the '\u062a\u062a'\n"
else:
over_the = "over the u'\\u062a\\u062a'\n"
self.assertEqual([
'one-argument\n',
'one fish, 88 fish\n',
'some\n', 'where\n', over_the,
], list(out_stream.readlines()))
err_stream.seek(0)
first_item = u'I have 99 problems, but a \u062A\u062A is not one\n'
if six.PY2:
first_item = first_item.encode('utf8')
self.assertEqual([
first_item,
'one-error-argument\n',
'Sometimes\n', '3.1% just\n', 'does not\n', 'work!\n',
], list(err_stream.readlines()))
self.assertEqual(3, thread_manager.error_count)
if __name__ == '__main__':
testtools.main()
| zackmdavis/python-swiftclient | tests/test_multithreading.py | Python | apache-2.0 | 14,131 |
import concurrent
from concurrent.futures._base import Future
import json
from threading import Barrier
import time
import unittest
import requests_mock
from rpcclient.client import RpcClient
from rpcclient.deserialize import DictDeserializer
from rpcclient.exceptions import RemoteFailedError
from rpcclient.handlers import RequestHandler
from rpcclient.test.testutils import insert_id, create_mock_rpc_client
UNMAPPED_BEHAVIOUR = DictDeserializer.UnmappedBehaviour
__author__ = '[email protected]'
class ClientTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.client = create_mock_rpc_client()
def test_login(self):
self.assertEqual(self.client.token, "yea")
@requests_mock.mock()
def test_get_first_level_method(self, mock):
mock.register_uri('POST', "http://server/api/", status_code=200, json=insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}}),
)
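# Note: insert_id (from rpcclient.test.testutils) presumably echoes each request's
# JSON-RPC id back into the mocked response, since JSON-RPC clients match replies
# to calls by id; the {} placeholder above is overwritten per request.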
self.client.test(arg1="arg")
request = mock.request_history[-1].json()
self.assertRegex(request['jsonrpc'], '2.0')
self.assertRegex(request['method'], 'test')
self.assertIn('token', request['params'])
self.assertRegex(request['params']['token'], 'yea')
self.assertIn('arg1', request['params'])
self.assertRegex(request['params']['arg1'], 'arg')
@requests_mock.mock()
def test_get_second_level_method(self, mock):
mock.register_uri('POST', "http://server/api/", status_code=200, json=insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}}),
)
self.client.test.level2(arg1="arg")
request = mock.request_history[-1].json()
self.assertRegex(request['jsonrpc'], '2.0')
self.assertRegex(request['method'], 'test.level2')
self.assertIn('token', request['params'])
self.assertRegex(request['params']['token'], 'yea')
self.assertIn('arg1', request['params'])
self.assertRegex(request['params']['arg1'], 'arg')
@requests_mock.mock()
def test_async_request(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
start_time = time.time()
interval_time = 2
response = self.client.test.task(_sleep_interval=interval_time)
self.assertEqual(response, {"report": "success"})
self.assertGreater(time.time() - start_time, interval_time, "Expected request to wait between calls")
last_request = mock.request_history[-1].json()
self.assertIn('method', last_request)
self.assertRegex(last_request['method'], 'report.data.get')
self.assertIn('params', last_request)
self.assertIn('report_token', last_request['params'])
self.assertRegex(last_request['params']['report_token'], "08d7d7bc608848668b3afa6b528a45d8")
@requests_mock.mock()
def test_async_timeout(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
self.assertRaises(TimeoutError, self.client.test.task, _timeout=3, _sleep_interval=2)
@requests_mock.mock()
def test_async_timeout_from_configuration(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
self.client.configuration['timeout'] = 3
self.client.configuration['sleep_interval'] = 2
self.assertRaises(TimeoutError, self.client.test.task)
@requests_mock.mock()
def test_async_handler_ignores_single_failure_for_status(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
interval_time = 1
response = self.client.test.task(_sleep_interval=interval_time)
self.assertEqual(response, {"report": "success"})
def test_override_handlers(self):
called_with_params = {}
class MockHandler(RequestHandler):
def __init__(self, method, url, headers, token, configuration=None, **kwargs):
super().__init__(method, url, headers, token, configuration, **kwargs)
called_with_params['method'] = method
def handle(self, **kwargs):
return 'Mock value'
client = RpcClient(configuration={
'host': 'http://mockhost',
'handlers': [
(lambda *args, **kwargs: True, MockHandler)
],
'login': 'False token',
'username': '',
'password': '',
})
self.assertEqual(client.some.method(arg1='Argument'), 'Mock value')
self.assertEqual(called_with_params['method'], 'some.method')
self.assertEqual(client.token, 'False token')
@requests_mock.mock()
def test_async_can_run_in_different_thread(self, mock):
b = Barrier(2, timeout=5)
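# A two-party Barrier: the mocked "processing" response below blocks in its
# callback until the main thread also reaches b.wait(), which appears to let the
# test assert that the Future is still running while the poll is in flight.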
def block_response(response_dict):
def callback(request, context):
b.wait()
body = request.body
request_json = json.loads(body)
response_dict['id'] = request_json['id']
context.status_code = 200
return response_dict
return callback
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': block_response(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
response = self.client.test.task(_sleep_interval=0.5, _async=True)
b.wait()
self.assertIsInstance(response, Future)
self.assertTrue(response.running())
done, not_done = concurrent.futures.wait([response], timeout=5)
self.assertGreater(len(done), 0)
self.assertIsInstance(response.result(), dict)
@requests_mock.mock()
def test_return_result(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
response = self.client.test(arg1="arg")
self.assertEqual(response, {"report": "success"})
@requests_mock.mock()
def test_return_list_result(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": [1, 2, 3]})},
])
response = self.client.test(arg1="arg")
self.assertListEqual(response, [1, 2, 3])
@requests_mock.mock()
def test_raises_error_on_none_200(self, mock):
mock.register_uri('POST', "http://server/api/", json=insert_id({
"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}
}, status_code=500))
self.assertRaises(RemoteFailedError, self.client.test, arg1="arg1")
@requests_mock.mock()
def test_raises_error_on_response_error(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id({
"error": 1, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}
})}
])
self.assertRaises(RemoteFailedError, self.client.test, arg1="arg1")
@requests_mock.mock()
def test_raises_error_on_result_error(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id({
"error": None, "jsonrpc": "2.0", "id": {},
"result": {"error": "true"}
})}
])
self.assertRaises(RemoteFailedError, self.client.test, arg1="arg1")
class AutoDeserializationTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.client = create_mock_rpc_client()
@requests_mock.mock()
def test_deserializer_passed_in_method(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
response = self.client.test(_deserializer=result_deserializer)
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
@requests_mock.mock()
def test_deserializer_given_in_dictionary(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': {
'test': result_deserializer,
}
})
response = client.test()
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
@requests_mock.mock()
def test_deserializer_given_in_dictionary_used_just_for_method(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': {
'test': result_deserializer,
}
})
response = client.test2()
self.assertNotIsInstance(response, Result)
self.assertEqual(response, {"report": "success"})
@requests_mock.mock()
def test_deserializer_from_factory(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': lambda method: result_deserializer if method == 'test' else None
})
response = client.test2()
self.assertNotIsInstance(response, Result)
self.assertEqual(response, {"report": "success"})
response = client.test()
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
@requests_mock.mock()
def test_deserializer_global_from_conf(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': result_deserializer
})
response = client.test()
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
| ajillion-by-crossrider/ajillion-rpc-client | rpcclient/test/test_with_httpretty.py | Python | apache-2.0 | 17,368 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.libraries.functions.version import compare_versions, format_stack_version
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
from resource_management.core.source import Template
from resource_management.core.logger import Logger
from yarn import yarn
from service import service
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class HistoryServer(Script):
def get_component_name(self):
return "hadoop-mapreduce-historyserver"
def install(self, env):
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
yarn(name="historyserver")
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
conf_select.select(params.stack_name, "hadoop", params.version)
stack_select.select("hadoop-mapreduce-historyserver", params.version)
#Execute(format("iop-select set hadoop-mapreduce-historyserver {version}"))
#copy_tarballs_to_hdfs('mapreduce', 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
# MC Hammer said, "Can't touch this"
copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.host_sys_prepped)
copy_to_hdfs("slider", params.user_group, params.hdfs_user, skip=params.host_sys_prepped)
params.HdfsResource(None, action="execute")
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
# MC Hammer said, "Can't touch this"
resource_created = copy_to_hdfs(
"mapreduce",
params.user_group,
params.hdfs_user,
skip=params.host_sys_prepped)
resource_created = copy_to_hdfs(
"slider",
params.user_group,
params.hdfs_user,
skip=params.host_sys_prepped) or resource_created
if resource_created:
params.HdfsResource(None, action="execute")
service('historyserver', action='start', serviceName='mapreduce')
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
service('historyserver', action='stop', serviceName='mapreduce')
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.mapred_historyserver_pid_file)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
expectations = {}
expectations.update(build_expectations('mapred-site',
None,
[
'mapreduce.jobhistory.keytab',
'mapreduce.jobhistory.principal',
'mapreduce.jobhistory.webapp.spnego-keytab-file',
'mapreduce.jobhistory.webapp.spnego-principal'
],
None))
security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
{'mapred-site.xml': FILE_TYPE_XML})
result_issues = validate_security_config_properties(security_params, expectations)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if ( 'mapred-site' not in security_params or
'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out(
{"securityIssuesFound": "Keytab file or principal not set."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.mapred_user,
security_params['mapred-site']['mapreduce.jobhistory.keytab'],
security_params['mapred-site']['mapreduce.jobhistory.principal'],
status_params.hostname,
status_params.tmp_dir)
cached_kinit_executor(status_params.kinit_path_local,
status_params.mapred_user,
security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
if __name__ == "__main__":
HistoryServer().execute()
| alexryndin/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/package/scripts/historyserver.py | Python | apache-2.0 | 7,308 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for binary coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
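# Sketch of the behaviour (hypothetical values): with thresh=0.5 and
# x = [[0.2, 0.9], [0.6, 0.1]], entries below 0.5 are zeroed, leaving a
# SparseTensor with indices [[0, 1], [1, 0]], values [0.9, 0.6] and
# dense_shape (2, 2); the surviving raw values are also returned for comparison.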
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
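# Illustrative usage (not part of the original tests): a caller could compute
# tol = _default_tolerance(x.dtype) and pass it as rtol/atol to assertAllClose;
# dtypes not listed above simply get None.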
class BinaryOpTest(test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
np_ans = np_func(x, y)
with test_util.force_cpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = self.evaluate(out)
# Test that the op takes precedence over numpy operators.
np_left = self.evaluate(tf_func(x, iny))
np_right = self.evaluate(tf_func(inx, y))
if also_compare_variables:
var_x = variables.Variable(x)
var_y = variables.Variable(y)
self.evaluate(variables.global_variables_initializer())
print(type(x), type(y), type(var_x), type(var_y))
print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
np_var_left = self.evaluate(tf_func(x, var_y))
np_var_right = self.evaluate(tf_func(var_x, y))
if np_ans.dtype != np.object:
self.assertAllClose(np_ans, tf_cpu)
self.assertAllClose(np_ans, np_left)
self.assertAllClose(np_ans, np_right)
if also_compare_variables:
self.assertAllClose(np_ans, np_var_left)
self.assertAllClose(np_ans, np_var_right)
self.assertShapeEqual(np_ans, out)
_GRAD_TOL = {
dtypes_lib.float16: 1e-3,
dtypes_lib.float32: 1e-3,
dtypes_lib.complex64: 1e-2,
dtypes_lib.float64: 1e-5,
dtypes_lib.complex128: 1e-4
}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, xs, outf, zs, x_init_value=xf, delta=1e-3)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, ys, outf, zs, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
if x.dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
if tf_func not in (_FLOORDIV, math_ops.floordiv, math_ops.zeta,
math_ops.polygamma):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
if tf_func in (math_ops.zeta, math_ops.polygamma):
# These methods only support gradients in the second parameter
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
@test_util.run_deprecated_v1
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(x, y, np.add, math_ops.add, also_compare_variables=True)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, math_ops.atan2)
x1 = np.random.randn(5, 6).astype(np.float32)
x2 = np.random.randn(5, 6).astype(np.float32)
# Remove tiny values--atan2 gradients are flaky near the origin.
x1[np.abs(x1) < 0.05] = 0.05 * np.sign(x1[np.abs(x1) < 0.05])
x2[np.abs(x2) < 0.05] = 0.05 * np.sign(x2[np.abs(x2) < 0.05])
self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
math_ops.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
math_ops.igammac)
# Need x > 1: the Hurwitz zeta function zeta(x, q) only converges for x > 1.
self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta,
math_ops.zeta)
n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(n_small, x_pos_small, special.polygamma,
math_ops.polygamma)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
@test_util.run_deprecated_v1
def testFloatDifferentShapes(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
with self.cached_session() as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
s = math_ops.reduce_sum(inx * iny)
gx, gy = sess.run(gradients_impl.gradients(s, [inx, iny]))
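# Worked numbers for this case: x = [[1, 2], [3, 4]], y = [[1], [2]], so
# s = sum(x * y) with y broadcast; d s/d x is y broadcast to [[1, 1], [2, 2]],
# and d s/d y sums each row of x, giving [[3], [7]].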
# gx is simply the broadcasted y
self.assertAllEqual(gx,
np.array([1, 1, 2, 2]).reshape(2, 2).astype(np.float32))
# gy is the per-row sum of x (reduction over axis 1)
self.assertAllEqual(gy, np.array([3, 7]).reshape(2, 1).astype(np.float32))
def testFloatVariableOverload(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
var_x = variables.Variable(x)
var_y = variables.Variable(y)
self.evaluate([var_x.initializer, var_y.initializer])
left_result = self.evaluate(var_x * y)
right_result = self.evaluate(x * var_y)
np_result = x * y
self.assertAllEqual(np_result, left_result)
self.assertAllEqual(np_result, right_result)
@test_util.run_deprecated_v1
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, math_ops.atan2)
x1 = np.random.randn(7, 4).astype(np.float64)
x2 = np.random.randn(7, 4).astype(np.float64)
# Remove tiny values--atan2 gradients are flaky near the origin.
x1[np.abs(x1) < 0.5] = 0.5 * np.sign(x1[np.abs(x1) < 0.5])
x2[np.abs(x2) < 0.5] = 0.5 * np.sign(x2[np.abs(x2) < 0.5])
self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
math_ops.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
math_ops.igammac)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
def testUint8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint8)
self._compareBoth(x, y, np.add, math_ops.add)
def testInt8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
def testUint16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint16)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
def testInt32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
# _compareBoth tests on GPU only for floating point types, so test
# _MOD for int32 on GPU by calling _compareGpu
self._compareGpu(x, y, np.mod, _MOD)
def testUint32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint32)
self._compareBoth(x, y, np.add, math_ops.add_v2)
def testInt64Basic(self):
x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
@test_util.run_deprecated_v1
def testComplex64Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex64)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
@test_util.run_deprecated_v1
def testComplex128Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex128)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex128)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
def testStringComparison(self):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with test_util.force_cpu():
cmp_eq = math_ops.equal(x, y)
cmp_not_eq = math_ops.not_equal(x, y)
values = self.evaluate([cmp_eq, cmp_not_eq])
self.assertAllEqual([[True, True], [False, False]], values[0])
self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
x = np.array([["x_0_0", "x_0_1", "x_0_2"], ["x_1_0", "x_1_1", "x_1_2"],
["x_2_0", "x_2_1", "x_2_2"]],
dtype=np.object)
y = np.array([["y_0_0", "y_0_1", "y_0_2"], ["y_1_0", "y_1_1", "y_1_2"],
["y_2_0", "y_2_1", "y_2_2"]],
dtype=np.object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
w = np.array("w", dtype=np.object)
self._compareCpu(x, y, _ADD, _ADD)
self._compareCpu(x, z, _ADD, _ADD)
self._compareCpu(x, w, _ADD, _ADD)
self._compareCpu(z, w, _ADD, _ADD)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
if dtype in (np.complex64, np.complex128):
x = (1 + np.linspace(0, 2 + 3j, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 2 - 2j, np.prod(ys))).astype(dtype).reshape(ys)
else:
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float16, np.float32, np.float64):
# TODO(aselle): Make the test work for dtypes:
# (np.complex64, np.complex128).
if tf_func not in (_FLOORDIV, math_ops.floordiv):
if x.dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(x, y, np_func, tf_func, np.float)
self._compareGradientY(x, y, np_func, tf_func, np.float)
else:
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
# TODO(josh11b,vrv): Refactor this to use parameterized tests.
def _testBCastByFunc(self, funcs, xs, ys):
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
np.complex64,
np.complex128,
]
for dtype in dtypes:
for (np_func, tf_func) in funcs:
if (dtype in (np.complex64, np.complex128) and
tf_func in (_FLOORDIV, math_ops.floordiv)):
continue # floordiv makes no sense for complex numbers
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
(np.add, math_ops.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
(np.subtract, math_ops.subtract),
(np.subtract, _SUB),
(np.power, math_ops.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
(np.multiply, math_ops.multiply),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
(np.true_divide, math_ops.truediv),
(np.floor_divide, math_ops.floordiv),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
self._testBCastByFunc(funcs, xs, ys)
@test_util.run_deprecated_v1
def testBCast_0A(self):
self._testBCastA([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_0B(self):
self._testBCastB([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_0C(self):
self._testBCastC([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_0D(self):
self._testBCastD([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_1A(self):
self._testBCastA([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_1B(self):
self._testBCastB([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_1C(self):
self._testBCastC([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_1D(self):
self._testBCastD([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_2A(self):
self._testBCastA([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_2B(self):
self._testBCastB([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_2C(self):
self._testBCastC([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_2D(self):
self._testBCastD([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_3A(self):
self._testBCastA([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_3B(self):
self._testBCastB([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_3C(self):
self._testBCastC([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_3D(self):
self._testBCastD([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_4A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_4B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_4C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_4D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_5A(self):
self._testBCastA([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_5B(self):
self._testBCastB([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_5C(self):
self._testBCastC([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_5D(self):
self._testBCastD([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_6A(self):
self._testBCastA([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_6B(self):
self._testBCastB([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_6C(self):
self._testBCastC([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_6D(self):
self._testBCastD([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_7A(self):
self._testBCastA([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_7B(self):
self._testBCastB([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_7C(self):
self._testBCastC([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_7D(self):
self._testBCastD([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8A(self):
self._testBCastA([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8B(self):
self._testBCastB([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8C(self):
self._testBCastC([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8D(self):
self._testBCastD([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_9A(self):
self._testBCastA([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_9B(self):
self._testBCastB([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_9C(self):
self._testBCastC([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_9D(self):
self._testBCastD([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_10A(self):
self._testBCastA([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_10B(self):
self._testBCastB([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_10C(self):
self._testBCastC([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_10D(self):
self._testBCastD([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_11A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_11B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_11C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_11D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12A(self):
self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12B(self):
self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12C(self):
self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12D(self):
self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_13A(self):
self._testBCastA([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_13B(self):
self._testBCastB([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_13C(self):
self._testBCastC([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_13D(self):
self._testBCastD([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_14A(self):
self._testBCastA([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_14B(self):
self._testBCastB([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_14C(self):
self._testBCastC([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_14D(self):
self._testBCastD([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_15A(self):
self._testBCastA([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testBCast_15B(self):
self._testBCastB([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testBCast_15C(self):
self._testBCastC([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testBCast_15D(self):
self._testBCastD([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testMismatchedDimensions(self):
for func in [
math_ops.add, math_ops.subtract, math_ops.multiply, math_ops.div, _ADD,
_SUB, _MUL, _TRUEDIV, _FLOORDIV
]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
func(
ops.convert_to_tensor([10.0, 20.0, 30.0]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
@test_util.run_deprecated_v1
def testZeroPowGrad(self):
with self.cached_session():
for dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
x = constant_op.constant(0.0, dtype=dtype)
y = constant_op.constant(2.0, dtype=dtype)
z = math_ops.pow(x, y)
error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testComplexPowGrad(self):
with self.cached_session():
for dtype in np.complex64, np.complex128:
for base in 2.0, -2.0:
x = constant_op.constant(base, dtype=dtype)
y = constant_op.constant(2.0, dtype=dtype)
z = math_ops.pow(x, y)
error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertLess(error, 2e-4)
def testAtan2SpecialValues(self):
x1l, x2l = zip((+0.0, +0.0), (+0.0, -0.0), (-0.0, +0.0), (-0.0, -0.0),
(1.2345, float("inf")), (1.2345, -float("inf")),
(-4.321, float("inf")), (-4.125, -float("inf")),
(float("inf"), float("inf")), (float("inf"), -float("inf")),
(-float("inf"), float("inf")),
(-float("inf"), -float("inf")))
for dtype in np.float32, np.float64:
x1 = np.array(x1l).astype(dtype)
x2 = np.array(x2l).astype(dtype)
self._compareCpu(x1, x2, np.arctan2, math_ops.atan2)
self._compareGpu(x1, x2, np.arctan2, math_ops.atan2)
def testPowNegativeExponent(self):
for dtype in [np.int32, np.int64]:
with test_util.force_cpu():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([-2, 3]).astype(dtype)
self.evaluate(math_ops.pow(x, y))
with test_util.force_cpu():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([2, -3]).astype(dtype)
self.evaluate(math_ops.pow(x, y))
with test_util.force_cpu():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = -3
self.evaluate(math_ops.pow(x, y))
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
self.assertEqual(
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
yt -= 1j * yt
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
if dtype in (np.complex64, np.complex128):
x -= 1j * x
y -= 1j * y
self._compare(x, y, np_func, tf_func)
self._compare(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
if include_complex:
dtypes.extend([np.complex64, np.complex128])
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, math_ops.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, math_ops.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, math_ops.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)
def testBCastNotEqual(self):
self._testBCastByFunc(
np.not_equal, math_ops.not_equal, include_complex=True)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [
math_ops.less, math_ops.less_equal, math_ops.greater,
math_ops.greater_equal, math_ops.equal, math_ops.not_equal
]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Incompatible shapes|Dimensions must be equal"):
f(x.astype(t), y.astype(t))
def testEqualDType(self):
dtypes = [
np.float16,
np.float32,
np.float64,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.bool,
]
x = np.asarray([0, 1, 2, 3, 4])
y = np.asarray([0, 1, 2, 3, 4])
for dtype in dtypes:
xt = x.astype(dtype)
yt = y.astype(dtype)
cmp_eq = math_ops.equal(xt, yt)
cmp_ne = math_ops.not_equal(xt, yt)
values = self.evaluate([cmp_eq, cmp_ne])
self.assertAllEqual(
[[True, True, True, True, True], [False, False, False, False, False]],
values)
for dtype in [np.complex64, np.complex128]:
xt = x.astype(dtype)
xt -= 1j * xt
yt = y.astype(dtype)
yt -= 1j * yt
cmp_eq = math_ops.equal(xt, yt)
cmp_ne = math_ops.not_equal(xt, yt)
values = self.evaluate([cmp_eq, cmp_ne])
self.assertAllEqual(
[[True, True, True, True, True], [False, False, False, False, False]],
values)
def testEqualQuantizeDType(self):
dtypes = [
dtypes_lib.qint8,
dtypes_lib.qint16,
dtypes_lib.quint8,
dtypes_lib.quint16,
]
x = np.asarray([0, 1, 2, 3, 4])
y = np.asarray([0, 1, 2, 3, 4])
for dtype in dtypes:
xt = x.astype(dtype.as_numpy_dtype)
yt = y.astype(dtype.as_numpy_dtype)
cmp_eq = math_ops.equal(xt, yt)
cmp_ne = math_ops.not_equal(xt, yt)
values = self.evaluate([cmp_eq, cmp_ne])
self.assertAllEqual(
[[True, True, True, True, True], [False, False, False, False, False]],
values)
if __name__ == "__main__":
test.main()
| davidzchen/tensorflow | tensorflow/python/kernel_tests/cwise_ops_binary_test.py | Python | apache-2.0 | 36,605 |
#!/usr/bin/env python
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
#
# Helper module
#
# =============================================================================
import argparse, sys
from confluent_kafka import avro, KafkaError
from confluent_kafka.admin import AdminClient, NewTopic
from uuid import uuid4
#import certifi
name_schema = """
{
"namespace": "io.confluent.examples.clients.cloud",
"name": "Name",
"type": "record",
"fields": [
{"name": "name", "type": "string"}
]
}
"""
class Name(object):
"""
Name stores the deserialized Avro record for the Kafka key.
"""
# Use __slots__ to explicitly declare all data members.
__slots__ = ["name", "id"]
def __init__(self, name=None):
self.name = name
# Unique id used to track produce request success/failures.
# Do *not* include in the serialized object.
self.id = uuid4()
@staticmethod
def dict_to_name(obj, ctx):
return Name(obj['name'])
@staticmethod
def name_to_dict(name, ctx):
return Name.to_dict(name)
def to_dict(self):
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
"""
return dict(name=self.name)
# Schema used for serializing Count class, passed in as the Kafka value
count_schema = """
{
"namespace": "io.confluent.examples.clients.cloud",
"name": "Count",
"type": "record",
"fields": [
{"name": "count", "type": "int"}
]
}
"""
class Count(object):
"""
Count stores the deserialized Avro record for the Kafka value.
"""
# Use __slots__ to explicitly declare all data members.
__slots__ = ["count", "id"]
def __init__(self, count=None):
self.count = count
# Unique id used to track produce request success/failures.
# Do *not* include in the serialized object.
self.id = uuid4()
@staticmethod
def dict_to_count(obj, ctx):
return Count(obj['count'])
@staticmethod
def count_to_dict(count, ctx):
return Count.to_dict(count)
def to_dict(self):
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
"""
return dict(count=self.count)
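# Illustrative wiring (a hedged sketch, not part of this helper module): the
# *_to_dict / dict_to_* callables above are typically handed to the Avro
# (de)serializers from confluent_kafka.schema_registry.avro, roughly:
#
#   from confluent_kafka.schema_registry import SchemaRegistryClient
#   from confluent_kafka.schema_registry.avro import AvroSerializer
#
#   sr_conf = {'url': conf['schema.registry.url']}  # conf from read_ccloud_config()
#   sr_client = SchemaRegistryClient(sr_conf)
#   name_serializer = AvroSerializer(sr_client, name_schema, Name.name_to_dict)
#   count_serializer = AvroSerializer(sr_client, count_schema, Count.count_to_dict)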
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
description="Confluent Python Client example to produce messages \
to Confluent Cloud")
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-f',
dest="config_file",
help="path to Confluent Cloud configuration file",
required=True)
required.add_argument('-t',
dest="topic",
help="topic name",
required=True)
args = parser.parse_args()
return args
def read_ccloud_config(config_file):
"""Read Confluent Cloud configuration for librdkafka clients"""
conf = {}
with open(config_file) as fh:
for line in fh:
line = line.strip()
if len(line) != 0 and line[0] != "#":
parameter, value = line.strip().split('=', 1)
conf[parameter] = value.strip()
#conf['ssl.ca.location'] = certifi.where()
return conf
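# Illustrative input/usage (hedged; the file name and property values below are
# placeholders, not defaults of this module). The expected input is a plain
# librdkafka-style properties file with one key=value pair per line, e.g.:
#
#   bootstrap.servers=<broker host:port>
#   security.protocol=SASL_SSL
#   sasl.mechanisms=PLAIN
#   sasl.username=<API key>
#   sasl.password=<API secret>
#
# which would be loaded as:
#
#   conf = read_ccloud_config('librdkafka.config')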
def pop_schema_registry_params_from_config(conf):
"""Remove potential Schema Registry related configurations from dictionary"""
conf.pop('schema.registry.url', None)
conf.pop('basic.auth.user.info', None)
conf.pop('basic.auth.credentials.source', None)
return conf
def create_topic(conf, topic):
"""
Create a topic if needed
Examples of additional admin API functionality:
https://github.com/confluentinc/confluent-kafka-python/blob/master/examples/adminapi.py
"""
admin_client_conf = pop_schema_registry_params_from_config(conf.copy())
a = AdminClient(admin_client_conf)
fs = a.create_topics([NewTopic(
topic,
num_partitions=1,
replication_factor=3
)])
for topic, f in fs.items():
try:
f.result() # The result itself is None
print("Topic {} created".format(topic))
except Exception as e:
            # Continue if the topic already exists (error code TOPIC_ALREADY_EXISTS);
            # otherwise fail fast
if e.args[0].code() != KafkaError.TOPIC_ALREADY_EXISTS:
print("Failed to create topic {}: {}".format(topic, e))
sys.exit(1)
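# Illustrative end-to-end use of the helpers above (hedged sketch; the calling
# producer/consumer scripts typically own the real wiring, and argument values
# here are placeholders):
#
#   args = parse_args()                      # -f <config file> -t <topic>
#   conf = read_ccloud_config(args.config_file)
#   create_topic(conf, args.topic)
#   producer_conf = pop_schema_registry_params_from_config(conf.copy())
#   # producer_conf is now suitable for confluent_kafka.Producer(producer_conf)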
| confluentinc/examples | clients/cloud/python/ccloud_lib.py | Python | apache-2.0 | 5,500 |
from django.shortcuts import redirect
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.decorators import login_required
from registration.backends import get_backend
def register(request, backend='default', template_name='registration/registration_form.html'):
backend = get_backend(backend)
    # determine if registration is currently allowed. The ``request`` object
    # is passed, which can be used to selectively disallow registration based
    # on the user-agent.
if not backend.registration_allowed(request):
return redirect(*backend.registration_closed_redirect(request))
form_class = backend.get_registration_form_class(request)
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
user = backend.register(request, form)
return redirect(backend.post_registration_redirect(request, user))
else:
form = form_class()
return render(request, template_name, {'form': form})
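# Illustrative URL wiring for the view above (a hedged sketch; the regex, URL
# name and backend label are assumptions to adapt to the project's urls.py):
#
#   from django.conf.urls import url
#   from registration import views
#
#   urlpatterns = [
#       url(r'^register/$', views.register, {'backend': 'default'},
#           name='registration_register'),
#   ]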
@never_cache
def verify(request, backend='default', template_name='registration/registration_verify.html', **kwargs):
backend = get_backend(backend)
profile = backend.get_profile(request, **kwargs)
if profile:
# check to see if moderation for this profile is required and whether or
# not it is a verified account.
if backend.moderation_required(request, profile):
moderation_required = True
backend.verify(request, profile, **kwargs)
else:
moderation_required = False
# attempt to activate this user
backend.activate(request, profile, **kwargs)
else:
moderation_required = None
return render(request, template_name, {
'profile': profile,
'moderation_required': moderation_required,
})
@never_cache
@login_required()
def moderate(request, backend='default', template_name='registration/registration_moderate.html', **kwargs):
backend = get_backend(backend)
profile = backend.get_profile(request, **kwargs)
form_class = backend.get_moderation_form_class(request)
if request.method == 'POST':
form = form_class(request.POST)
if form.is_valid():
backend.moderate(request, form, profile, **kwargs)
return redirect(backend.post_moderation_redirect(request, profile))
else:
form = form_class()
return render(request, template_name, {
'form': form,
'profile': profile,
})
@permission_required('registration.change_registrationprofile')
@login_required()
def moderate_list(request, backend='default', template_name='registration/registration_moderate_list.html'):
backend = get_backend(backend)
profiles = backend.get_unmoderated_profiles(request)
return render(request, template_name, {
'profiles': profiles,
})
| chop-dbhi/biorepo-portal | registration/views.py | Python | bsd-2-clause | 3,005 |
import happybase
from StringIO import StringIO
from PIL import Image
def decode_image_PIL(binary_data):
""" Returns PIL image from binary buffer.
"""
f = StringIO(binary_data)
img = Image.open(f)
return img
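# Portability note (hedged): this helper assumes Python 2's StringIO; a Python 3
# equivalent would typically wrap the bytes in io.BytesIO instead, e.g.:
#
#   from io import BytesIO
#   img = Image.open(BytesIO(binary_data))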
if __name__=="__main__":
tab_image = 'image_cache'
col_image = dict()
col_image['image_cache'] = 'image:binary'
conn = happybase.Connection(host='10.1.94.57')
image_rows = dict()
image_rows['image_cache'] = ['0000007031E3FA80C97940017253BEAB542EA334', '000001EC5DD154E58B72326EFC26A41A4C8E9586',
'0000081A1D6D1A2023DAE07547C242ED3106E7FE']
table = conn.table(tab_image)
for row in table.rows(image_rows[tab_image]):
binary_data = row[1][col_image[tab_image]]
img = decode_image_PIL(binary_data)
print("Saving image to: {}".format(row[0]+'.jpeg'))
img.save(row[0]+'.jpeg',"JPEG")
| svebk/DeepSentiBank_memex | scripts/tests/deprecated/read_image_from_hbase.py | Python | bsd-2-clause | 875 |
import os
import json
import tempfile
import urllib, urllib2
import requests
from indra.java_vm import autoclass, JavaException
import indra.databases.pmc_client as pmc_client
from processor import ReachProcessor
def process_pmc(pmc_id):
xml_str = pmc_client.get_xml(pmc_id)
with tempfile.NamedTemporaryFile() as fh:
fh.write(xml_str)
fh.flush()
rp = process_nxml(fh.name)
return rp
def process_text(txt, use_tempdir=False, offline=False):
if offline:
nxml_txt = '<article><body><sec><p>%s</p></sec></body></article>' % txt
tmp_file = tempfile.NamedTemporaryFile()
tmp_file.file.write(nxml_txt)
tmp_file.file.flush()
return process_nxml(tmp_file.name)
else:
url = 'http://agathon.sista.arizona.edu:8080/odinweb/api/text'
req = urllib2.Request(url, data=urllib.urlencode({'text': txt}))
res = urllib2.urlopen(req)
json_str = res.read()
json_dict = json.loads(json_str)
events_dict = json_dict['events']
events_json_str = json.dumps(events_dict, indent=1)
with open('reach_output.json', 'wt') as fh:
fh.write(json_str)
return process_json_str(events_json_str)
def process_nxml(file_name, use_tempdir=False, offline=False):
if offline:
base = os.path.basename(file_name)
file_id = os.path.splitext(base)[0]
if use_tempdir:
tmp_dir = tempfile.mkdtemp()
else:
tmp_dir = '.'
try:
paper_reader = autoclass('edu.arizona.sista.reach.ReadPaper')
paper_reader.main([file_name, tmp_dir])
except JavaException:
print 'Could not process file %s.' % file_name
return None
json_file_name = os.path.join(tmp_dir, file_id + '.uaz.events.json')
return process_json_file(json_file_name)
else:
url = 'http://agathon.sista.arizona.edu:8080/odinweb/api/nxml'
txt = open(file_name, 'rt').read()
req = urllib2.Request(url, data=urllib.urlencode({'nxml': txt}))
res = urllib2.urlopen(req)
json_str = res.read()
json_dict = json.loads(json_str)
return process_json_str(json_str, events_only=False)
def process_json_file(file_name):
try:
with open(file_name, 'rt') as fh:
json_str = fh.read()
return process_json_str(json_str)
except IOError:
print 'Could not read file %s.' % file_name
def process_json_str(json_str, events_only=True):
if not events_only:
json_dict = json.loads(json_str)
events_dict = json_dict['events']
events_json_str = json.dumps(events_dict, indent=1)
else:
events_json_str = json_str
events_json_str = events_json_str.replace('frame-id','frame_id')
events_json_str = events_json_str.replace('argument-label','argument_label')
events_json_str = events_json_str.replace('object-meta','object_meta')
events_json_str = events_json_str.replace('doc-id','doc_id')
json_dict = json.loads(events_json_str)
rp = ReachProcessor(json_dict)
rp.get_phosphorylation()
rp.get_complexes()
return rp
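# Illustrative usage (hedged; the sentence is an arbitrary example and the
# default, non-offline path POSTs it to the REACH web service above, so it
# needs network access):
#
#   rp = process_text('MEK1 phosphorylates ERK2.')
#   # rp is a ReachProcessor populated via get_phosphorylation()/get_complexes()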
if __name__ == '__main__':
rp = process_json_file('PMC0000001.uaz.events.json')
| decarlin/indra | indra/reach/reach_api.py | Python | bsd-2-clause | 3,264 |
import copy
import re
import sys
import tempfile
import unittest
from mock.tests.support import ALWAYS_EQ
from mock.tests.support import is_instance
from mock import (
call, DEFAULT, patch, sentinel,
MagicMock, Mock, NonCallableMock,
NonCallableMagicMock, AsyncMock,
create_autospec, mock
)
from mock.mock import _Call, _CallList
import mock.mock as mock_module
class Iter(object):
def __init__(self):
self.thing = iter(['this', 'is', 'an', 'iter'])
def __iter__(self):
return self
def next(self):
return next(self.thing)
__next__ = next
class Something(object):
def meth(self, a, b, c, d=None): pass
@classmethod
def cmeth(cls, a, b, c, d=None): pass
@staticmethod
def smeth(a, b, c, d=None): pass
def something(a): pass
class MockTest(unittest.TestCase):
def test_all(self):
# if __all__ is badly defined then import * will raise an error
# We have to exec it because you can't import * inside a method
# in Python 3
exec("from mock.mock import *")
def test_constructor(self):
mock = Mock()
self.assertFalse(mock.called, "called not initialised correctly")
self.assertEqual(mock.call_count, 0,
"call_count not initialised correctly")
self.assertTrue(is_instance(mock.return_value, Mock),
"return_value not initialised correctly")
self.assertEqual(mock.call_args, None,
"call_args not initialised correctly")
self.assertEqual(mock.call_args_list, [],
"call_args_list not initialised correctly")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly")
# Can't use hasattr for this test as it always returns True on a mock
self.assertNotIn('_items', mock.__dict__,
"default mock should not have '_items' attribute")
self.assertIsNone(mock._mock_parent,
"parent not initialised correctly")
self.assertIsNone(mock._mock_methods,
"methods not initialised correctly")
self.assertEqual(mock._mock_children, {},
"children not initialised incorrectly")
def test_return_value_in_constructor(self):
mock = Mock(return_value=None)
self.assertIsNone(mock.return_value,
"return value in constructor not honoured")
def test_change_return_value_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.return_value = 1
self.assertEqual(mock(), 1)
def test_change_side_effect_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.side_effect = TypeError()
with self.assertRaises(TypeError):
mock()
def test_repr(self):
mock = Mock(name='foo')
self.assertIn('foo', repr(mock))
self.assertIn("'%s'" % id(mock), repr(mock))
mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')]
for mock, name in mocks:
self.assertIn('%s.bar' % name, repr(mock.bar))
self.assertIn('%s.foo()' % name, repr(mock.foo()))
self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing))
self.assertIn('%s()' % name, repr(mock()))
self.assertIn('%s()()' % name, repr(mock()()))
self.assertIn('%s()().foo.bar.baz().bing' % name,
repr(mock()().foo.bar.baz().bing))
def test_repr_with_spec(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec=X())
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec_set=X)
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec_set=X())
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec=X, name='foo')
self.assertIn(" spec='X' ", repr(mock))
self.assertIn(" name='foo' ", repr(mock))
mock = Mock(name='foo')
self.assertNotIn("spec", repr(mock))
mock = Mock()
self.assertNotIn("spec", repr(mock))
mock = Mock(spec=['foo'])
self.assertNotIn("spec", repr(mock))
def test_side_effect(self):
mock = Mock()
def effect(*args, **kwargs):
raise SystemError('kablooie')
mock.side_effect = effect
self.assertRaises(SystemError, mock, 1, 2, fish=3)
mock.assert_called_with(1, 2, fish=3)
results = [1, 2, 3]
def effect():
return results.pop()
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"side effect not used correctly")
mock = Mock(side_effect=sentinel.SideEffect)
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side effect in constructor not used")
def side_effect():
return DEFAULT
mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
self.assertEqual(mock(), sentinel.RETURN)
def test_autospec_side_effect(self):
# Test for issue17826
results = [1, 2, 3]
def effect():
return results.pop()
def f(): pass
mock = create_autospec(f)
mock.side_effect = [1, 2, 3]
self.assertEqual([mock(), mock(), mock()], [1, 2, 3],
"side effect not used correctly in create_autospec")
# Test where side effect is a callable
results = [1, 2, 3]
mock = create_autospec(f)
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"callable side effect not used correctly")
def test_autospec_side_effect_exception(self):
# Test for issue 23661
def f(): pass
mock = create_autospec(f)
mock.side_effect = ValueError('Bazinga!')
self.assertRaisesRegex(ValueError, 'Bazinga!', mock)
def test_reset_mock(self):
parent = Mock()
spec = ["something"]
mock = Mock(name="child", parent=parent, spec=spec)
mock(sentinel.Something, something=sentinel.SomethingElse)
something = mock.something
mock.something()
mock.side_effect = sentinel.SideEffect
return_value = mock.return_value
return_value()
mock.reset_mock()
self.assertEqual(mock._mock_name, "child",
"name incorrectly reset")
self.assertEqual(mock._mock_parent, parent,
"parent incorrectly reset")
self.assertEqual(mock._mock_methods, spec,
"methods incorrectly reset")
self.assertFalse(mock.called, "called not reset")
self.assertEqual(mock.call_count, 0, "call_count not reset")
self.assertEqual(mock.call_args, None, "call_args not reset")
self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly: %r != %r" %
(mock.method_calls, []))
self.assertEqual(mock.mock_calls, [])
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side_effect incorrectly reset")
self.assertEqual(mock.return_value, return_value,
"return_value incorrectly reset")
self.assertFalse(return_value.called, "return value mock not reset")
self.assertEqual(mock._mock_children, {'something': something},
"children reset incorrectly")
self.assertEqual(mock.something, something,
"children incorrectly cleared")
self.assertFalse(mock.something.called, "child not reset")
def test_reset_mock_recursion(self):
mock = Mock()
mock.return_value = mock
# used to cause recursion
mock.reset_mock()
def test_reset_mock_on_mock_open_issue_18622(self):
a = mock.mock_open()
a.reset_mock()
def test_call(self):
mock = Mock()
self.assertTrue(is_instance(mock.return_value, Mock),
"Default return_value should be a Mock")
result = mock()
self.assertEqual(mock(), result,
"different result from consecutive calls")
mock.reset_mock()
ret_val = mock(sentinel.Arg)
self.assertTrue(mock.called, "called not set")
self.assertEqual(mock.call_count, 1, "call_count incorrect")
self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
"call_args not set")
self.assertEqual(mock.call_args.args, (sentinel.Arg,),
"call_args not set")
self.assertEqual(mock.call_args.kwargs, {},
"call_args not set")
self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
"call_args_list not initialised correctly")
mock.return_value = sentinel.ReturnValue
ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
self.assertEqual(ret_val, sentinel.ReturnValue,
"incorrect return value")
self.assertEqual(mock.call_count, 2, "call_count incorrect")
self.assertEqual(mock.call_args,
((sentinel.Arg,), {'key': sentinel.KeyArg}),
"call_args not set")
self.assertEqual(mock.call_args_list, [
((sentinel.Arg,), {}),
((sentinel.Arg,), {'key': sentinel.KeyArg})
],
"call_args_list not set")
def test_call_args_comparison(self):
mock = Mock()
mock()
mock(sentinel.Arg)
mock(kw=sentinel.Kwarg)
mock(sentinel.Arg, kw=sentinel.Kwarg)
self.assertEqual(mock.call_args_list, [
(),
((sentinel.Arg,),),
({"kw": sentinel.Kwarg},),
((sentinel.Arg,), {"kw": sentinel.Kwarg})
])
self.assertEqual(mock.call_args,
((sentinel.Arg,), {"kw": sentinel.Kwarg}))
self.assertEqual(mock.call_args.args, (sentinel.Arg,))
self.assertEqual(mock.call_args.kwargs, {"kw": sentinel.Kwarg})
# Comparing call_args to a long sequence should not raise
# an exception. See issue 24857.
self.assertFalse(mock.call_args == "a long sequence")
def test_calls_equal_with_any(self):
# Check that equality and non-equality is consistent even when
# comparing with mock.ANY
mm = mock.MagicMock()
self.assertTrue(mm == mm)
self.assertFalse(mm != mm)
self.assertFalse(mm == mock.MagicMock())
self.assertTrue(mm != mock.MagicMock())
self.assertTrue(mm == mock.ANY)
self.assertFalse(mm != mock.ANY)
self.assertTrue(mock.ANY == mm)
self.assertFalse(mock.ANY != mm)
self.assertTrue(mm == ALWAYS_EQ)
self.assertFalse(mm != ALWAYS_EQ)
call1 = mock.call(mock.MagicMock())
call2 = mock.call(mock.ANY)
self.assertTrue(call1 == call2)
self.assertFalse(call1 != call2)
self.assertTrue(call2 == call1)
self.assertFalse(call2 != call1)
self.assertTrue(call1 == ALWAYS_EQ)
self.assertFalse(call1 != ALWAYS_EQ)
self.assertFalse(call1 == 1)
self.assertTrue(call1 != 1)
def test_assert_called_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_with()
self.assertRaises(AssertionError, mock.assert_called_with, 1)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_with)
mock(1, 2, 3, a='fish', b='nothing')
mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
def test_assert_called_with_any(self):
m = MagicMock()
m(MagicMock())
m.assert_called_with(mock.ANY)
def test_assert_called_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_called_with_method_spec(self):
def _check(mock):
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
mock = Mock(spec=Something().meth)
_check(mock)
mock = Mock(spec=Something.cmeth)
_check(mock)
mock = Mock(spec=Something().cmeth)
_check(mock)
mock = Mock(spec=Something.smeth)
_check(mock)
mock = Mock(spec=Something().smeth)
_check(mock)
def test_assert_called_exception_message(self):
msg = "Expected '{0}' to have been called"
with self.assertRaisesRegex(AssertionError, msg.format('mock')):
Mock().assert_called()
with self.assertRaisesRegex(AssertionError, msg.format('test_name')):
Mock(name="test_name").assert_called()
def test_assert_called_once_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_once_with()
mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock('foo', 'bar', baz=2)
mock.assert_called_once_with('foo', 'bar', baz=2)
mock.reset_mock()
mock('foo', 'bar', baz=2)
self.assertRaises(
AssertionError,
lambda: mock.assert_called_once_with('bob', 'bar', baz=2)
)
def test_assert_called_once_with_call_list(self):
m = Mock()
m(1)
m(2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1), call(2)]"),
lambda: m.assert_called_once_with(2))
def test_assert_called_once_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_once_with(1, 2, 3)
mock.assert_called_once_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_once_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
# Mock called more than once => always fails
mock(4, 5, 6)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, 2, 3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
4, 5, 6)
def test_attribute_access_returns_mocks(self):
mock = Mock()
something = mock.something
self.assertTrue(is_instance(something, Mock), "attribute isn't a mock")
self.assertEqual(mock.something, something,
"different attributes returned for same name")
# Usage example
mock = Mock()
mock.something.return_value = 3
self.assertEqual(mock.something(), 3, "method returned wrong value")
self.assertTrue(mock.something.called,
"method didn't record being called")
def test_attributes_have_name_and_parent_set(self):
mock = Mock()
something = mock.something
self.assertEqual(something._mock_name, "something",
"attribute name not set correctly")
self.assertEqual(something._mock_parent, mock,
"attribute parent not set correctly")
def test_method_calls_recorded(self):
mock = Mock()
mock.something(3, fish=None)
mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
self.assertEqual(mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
def test_method_calls_compare_easily(self):
mock = Mock()
mock.something()
self.assertEqual(mock.method_calls, [('something',)])
self.assertEqual(mock.method_calls, [('something', (), {})])
mock = Mock()
mock.something('different')
self.assertEqual(mock.method_calls, [('something', ('different',))])
self.assertEqual(mock.method_calls,
[('something', ('different',), {})])
mock = Mock()
mock.something(x=1)
self.assertEqual(mock.method_calls, [('something', {'x': 1})])
self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
mock = Mock()
mock.something('different', some='more')
self.assertEqual(mock.method_calls, [
('something', ('different',), {'some': 'more'})
])
def test_only_allowed_methods_exist(self):
for spec in ['something'], ('something',):
for arg in 'spec', 'spec_set':
mock = Mock(**{arg: spec})
# this should be allowed
mock.something
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'something_else'",
getattr, mock, 'something_else'
)
def test_from_spec(self):
class Something(object):
x = 3
__something__ = None
def y(self): pass
def test_attributes(mock):
# should work
mock.x
mock.y
mock.__something__
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'z'",
getattr, mock, 'z'
)
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute '__foobar__'",
getattr, mock, '__foobar__'
)
test_attributes(Mock(spec=Something))
test_attributes(Mock(spec=Something()))
def test_wraps_calls(self):
real = Mock()
mock = Mock(wraps=real)
self.assertEqual(mock(), real())
real.reset_mock()
mock(1, 2, fish=3)
real.assert_called_with(1, 2, fish=3)
def test_wraps_prevents_automatic_creation_of_mocks(self):
class Real(object):
pass
real = Real()
mock = Mock(wraps=real)
self.assertRaises(AttributeError, lambda: mock.new_attr())
def test_wraps_call_with_nondefault_return_value(self):
real = Mock()
mock = Mock(wraps=real)
mock.return_value = 3
self.assertEqual(mock(), 3)
self.assertFalse(real.called)
def test_wraps_attributes(self):
class Real(object):
attribute = Mock()
real = Real()
mock = Mock(wraps=real)
self.assertEqual(mock.attribute(), real.attribute())
self.assertRaises(AttributeError, lambda: mock.fish)
self.assertNotEqual(mock.attribute, real.attribute)
result = mock.attribute.frog(1, 2, fish=3)
Real.attribute.frog.assert_called_with(1, 2, fish=3)
self.assertEqual(result, Real.attribute.frog())
def test_customize_wrapped_object_with_side_effect_iterable_with_default(self):
class Real(object):
def method(self):
return sentinel.ORIGINAL_VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.ORIGINAL_VALUE)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_iterable(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_exception(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = RuntimeError
self.assertRaises(RuntimeError, mock.method)
def test_customize_wrapped_object_with_side_effect_function(self):
class Real(object):
def method(self): pass
def side_effect():
return sentinel.VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = side_effect
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect(self):
# side_effect should always take precedence over return_value.
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
mock.method.return_value = sentinel.WRONG_VALUE
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_return_value_and_side_effect2(self):
# side_effect can return DEFAULT to default to return_value
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = lambda: DEFAULT
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect_default(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
mock.method.return_value = sentinel.RETURN
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.RETURN)
self.assertRaises(StopIteration, mock.method)
def test_magic_method_wraps_dict(self):
# bpo-25597: MagicMock with wrap doesn't call wrapped object's
# method for magic methods with default values.
data = {'foo': 'bar'}
wrapped_dict = MagicMock(wraps=data)
self.assertEqual(wrapped_dict.get('foo'), 'bar')
# Accessing key gives a MagicMock
self.assertIsInstance(wrapped_dict['foo'], MagicMock)
# __contains__ method has a default value of False
self.assertFalse('foo' in wrapped_dict)
# return_value is non-sentinel and takes precedence over wrapped value.
wrapped_dict.get.return_value = 'return_value'
self.assertEqual(wrapped_dict.get('foo'), 'return_value')
# return_value is sentinel and hence wrapped value is returned.
wrapped_dict.get.return_value = sentinel.DEFAULT
self.assertEqual(wrapped_dict.get('foo'), 'bar')
self.assertEqual(wrapped_dict.get('baz'), None)
self.assertIsInstance(wrapped_dict['baz'], MagicMock)
self.assertFalse('bar' in wrapped_dict)
data['baz'] = 'spam'
self.assertEqual(wrapped_dict.get('baz'), 'spam')
self.assertIsInstance(wrapped_dict['baz'], MagicMock)
self.assertFalse('bar' in wrapped_dict)
del data['baz']
self.assertEqual(wrapped_dict.get('baz'), None)
def test_magic_method_wraps_class(self):
class Foo:
def __getitem__(self, index):
return index
def __custom_method__(self):
return "foo"
klass = MagicMock(wraps=Foo)
obj = klass()
self.assertEqual(obj.__getitem__(2), 2)
self.assertEqual(obj[2], 2)
self.assertEqual(obj.__custom_method__(), "foo")
def test_exceptional_side_effect(self):
mock = Mock(side_effect=AttributeError)
self.assertRaises(AttributeError, mock)
mock = Mock(side_effect=AttributeError('foo'))
self.assertRaises(AttributeError, mock)
def test_baseexceptional_side_effect(self):
mock = Mock(side_effect=KeyboardInterrupt)
self.assertRaises(KeyboardInterrupt, mock)
mock = Mock(side_effect=KeyboardInterrupt('foo'))
self.assertRaises(KeyboardInterrupt, mock)
def test_assert_called_with_message(self):
mock = Mock()
self.assertRaisesRegex(AssertionError, 'not called',
mock.assert_called_with)
def test_assert_called_once_with_message(self):
mock = Mock(name='geoffrey')
self.assertRaisesRegex(AssertionError,
r"Expected 'geoffrey' to be called once\.",
mock.assert_called_once_with)
def test__name__(self):
mock = Mock()
self.assertRaises(AttributeError, lambda: mock.__name__)
mock.__name__ = 'foo'
self.assertEqual(mock.__name__, 'foo')
def test_spec_list_subclass(self):
class Sub(list):
pass
mock = Mock(spec=Sub(['foo']))
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock, 'foo')
def test_spec_class(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_spec_class_no_object_base(self):
class X:
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_setting_attribute_with_spec_set(self):
class X(object):
y = 3
mock = Mock(spec=X)
mock.x = 'foo'
mock = Mock(spec_set=X)
def set_attr():
mock.x = 'foo'
mock.y = 'foo'
self.assertRaises(AttributeError, set_attr)
def test_copy(self):
current = sys.getrecursionlimit()
self.addCleanup(sys.setrecursionlimit, current)
# can't use sys.maxint as this doesn't exist in Python 3
sys.setrecursionlimit(int(10e8))
# this segfaults without the fix in place
copy.copy(Mock())
def test_subclass_with_properties(self):
class SubClass(Mock):
def _get(self):
return 3
def _set(self, value):
raise NameError('strange error')
some_attribute = property(_get, _set)
s = SubClass(spec_set=SubClass)
self.assertEqual(s.some_attribute, 3)
def test():
s.some_attribute = 3
self.assertRaises(NameError, test)
def test():
s.foo = 'bar'
self.assertRaises(AttributeError, test)
def test_setting_call(self):
mock = Mock()
def __call__(self, a):
self._increment_mock_call(a)
return self._mock_call(a)
type(mock).__call__ = __call__
mock('one')
mock.assert_called_with('one')
self.assertRaises(TypeError, mock, 'one', 'two')
def test_dir(self):
mock = Mock()
attrs = set(dir(mock))
type_attrs = set([m for m in dir(Mock) if not m.startswith('_')])
# all public attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
# creates these attributes
mock.a, mock.b
self.assertIn('a', dir(mock))
self.assertIn('b', dir(mock))
# instance attributes
mock.c = mock.d = None
self.assertIn('c', dir(mock))
self.assertIn('d', dir(mock))
# magic methods
mock.__iter__ = lambda s: iter([])
self.assertIn('__iter__', dir(mock))
def test_dir_from_spec(self):
mock = Mock(spec=unittest.TestCase)
testcase_attrs = set(dir(unittest.TestCase))
attrs = set(dir(mock))
# all attributes from the spec are included
self.assertEqual(set(), testcase_attrs - attrs)
        # setting an attribute should not make it appear twice in dir()
mock.version = 3
self.assertEqual(dir(mock).count('version'), 1)
def test_filter_dir(self):
patcher = patch.object(mock, 'FILTER_DIR', False)
patcher.start()
try:
attrs = set(dir(Mock()))
type_attrs = set(dir(Mock))
# ALL attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
finally:
patcher.stop()
def test_dir_does_not_include_deleted_attributes(self):
mock = Mock()
mock.child.return_value = 1
self.assertIn('child', dir(mock))
del mock.child
self.assertNotIn('child', dir(mock))
def test_configure_mock(self):
mock = Mock(foo='bar')
self.assertEqual(mock.foo, 'bar')
mock = MagicMock(foo='bar')
self.assertEqual(mock.foo, 'bar')
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
mock = Mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
mock = Mock()
mock.configure_mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs):
# needed because assertRaisesRegex doesn't work easily with newlines
with self.assertRaises(exception) as context:
func(*args, **kwargs)
msg = str(context.exception)
self.assertEqual(msg, message)
def test_assert_called_with_failure_message(self):
mock = NonCallableMock()
actual = 'not called.'
expected = "mock(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
mock.assert_called_with, 1, '2', 3, bar='foo'
)
mock.foo(1, '2', 3, foo='foo')
asserters = [
mock.foo.assert_called_with, mock.foo.assert_called_once_with
]
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, '2', 3, bar='foo'
)
# just kwargs
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, bar='foo'
)
# just args
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, 2, 3)"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, 2, 3
)
# empty
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo()"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual), meth
)
def test_mock_calls(self):
mock = MagicMock()
# need to do this because MagicMock.mock_calls used to just return
# a MagicMock which also returned a MagicMock when __eq__ was called
self.assertIs(mock.mock_calls == [], True)
mock = MagicMock()
mock()
expected = [('', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock.foo()
expected.append(call.foo())
self.assertEqual(mock.mock_calls, expected)
# intermediate mock_calls work too
self.assertEqual(mock.foo.mock_calls, [('', (), {})])
mock = MagicMock()
mock().foo(1, 2, 3, a=4, b=5)
expected = [
('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5))
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.return_value.foo.mock_calls,
[('', (1, 2, 3), dict(a=4, b=5))])
self.assertEqual(mock.return_value.mock_calls,
[('foo', (1, 2, 3), dict(a=4, b=5))])
mock = MagicMock()
mock().foo.bar().baz()
expected = [
('', (), {}), ('().foo.bar', (), {}),
('().foo.bar().baz', (), {})
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().call_list())
for kwargs in dict(), dict(name='bar'):
mock = MagicMock(**kwargs)
int(mock.foo)
expected = [('foo.__int__', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock = MagicMock(**kwargs)
mock.a()()
expected = [('a', (), {}), ('a()', (), {})]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.a().mock_calls, [call()])
mock = MagicMock(**kwargs)
mock(1)(2)(3)
self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list())
self.assertEqual(mock().mock_calls, call(2)(3).call_list())
self.assertEqual(mock()().mock_calls, call(3).call_list())
mock = MagicMock(**kwargs)
mock(1)(2)(3).a.b.c(4)
self.assertEqual(mock.mock_calls,
call(1)(2)(3).a.b.c(4).call_list())
self.assertEqual(mock().mock_calls,
call(2)(3).a.b.c(4).call_list())
self.assertEqual(mock()().mock_calls,
call(3).a.b.c(4).call_list())
mock = MagicMock(**kwargs)
int(mock().foo.bar().baz())
last_call = ('().foo.bar().baz().__int__', (), {})
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().__int__().call_list())
self.assertEqual(mock().foo.bar().mock_calls,
call.baz().__int__().call_list())
self.assertEqual(mock().foo.bar().baz.mock_calls,
call().__int__().call_list())
def test_child_mock_call_equal(self):
m = Mock()
result = m()
result.wibble()
# parent looks like this:
self.assertEqual(m.mock_calls, [call(), call().wibble()])
# but child should look like this:
self.assertEqual(result.mock_calls, [call.wibble()])
def test_mock_call_not_equal_leaf(self):
m = Mock()
m.foo().something()
self.assertNotEqual(m.mock_calls[1], call.foo().different())
self.assertEqual(m.mock_calls[0], call.foo())
def test_mock_call_not_equal_non_leaf(self):
m = Mock()
m.foo().bar()
self.assertNotEqual(m.mock_calls[1], call.baz().bar())
self.assertNotEqual(m.mock_calls[0], call.baz())
def test_mock_call_not_equal_non_leaf_params_different(self):
m = Mock()
m.foo(x=1).bar()
# This isn't ideal, but there's no way to fix it without breaking backwards compatibility:
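        # (the arguments of the intermediate foo(...) call are not compared, so
        # foo(x=1).bar() and foo(x=2).bar() are treated as equal here)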
self.assertEqual(m.mock_calls[1], call.foo(x=2).bar())
def test_mock_call_not_equal_non_leaf_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.baz.bar())
def test_mock_call_not_equal_non_leaf_call_versus_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.foo().bar())
def test_mock_call_repr(self):
m = Mock()
m.foo().bar().baz.bob()
self.assertEqual(repr(m.mock_calls[0]), 'call.foo()')
self.assertEqual(repr(m.mock_calls[1]), 'call.foo().bar()')
self.assertEqual(repr(m.mock_calls[2]), 'call.foo().bar().baz.bob()')
def test_mock_call_repr_loop(self):
m = Mock()
m.foo = m
repr(m.foo())
self.assertRegex(repr(m.foo()), r"<Mock name='mock\(\)' id='\d+'>")
def test_mock_calls_contains(self):
m = Mock()
self.assertFalse([call()] in m.mock_calls)
def test_subclassing(self):
class Subclass(Mock):
pass
mock = Subclass()
self.assertIsInstance(mock.foo, Subclass)
self.assertIsInstance(mock(), Subclass)
class Subclass(Mock):
def _get_child_mock(self, **kwargs):
return Mock(**kwargs)
mock = Subclass()
self.assertNotIsInstance(mock.foo, Subclass)
self.assertNotIsInstance(mock(), Subclass)
def test_arg_lists(self):
mocks = [
Mock(),
MagicMock(),
NonCallableMock(),
NonCallableMagicMock()
]
def assert_attrs(mock):
names = 'call_args_list', 'method_calls', 'mock_calls'
for name in names:
attr = getattr(mock, name)
self.assertIsInstance(attr, _CallList)
self.assertIsInstance(attr, list)
self.assertEqual(attr, [])
for mock in mocks:
assert_attrs(mock)
if callable(mock):
mock()
mock(1, 2)
mock(a=3)
mock.reset_mock()
assert_attrs(mock)
mock.foo()
mock.foo.bar(1, a=3)
mock.foo(1).bar().baz(3)
mock.reset_mock()
assert_attrs(mock)
def test_call_args_two_tuple(self):
mock = Mock()
mock(1, a=3)
mock(2, b=4)
self.assertEqual(len(mock.call_args), 2)
self.assertEqual(mock.call_args.args, (2,))
self.assertEqual(mock.call_args.kwargs, dict(b=4))
expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))]
for expected, call_args in zip(expected_list, mock.call_args_list):
self.assertEqual(len(call_args), 2)
self.assertEqual(expected[0], call_args[0])
self.assertEqual(expected[1], call_args[1])
def test_side_effect_iterator(self):
mock = Mock(side_effect=iter([1, 2, 3]))
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
mock = MagicMock(side_effect=['a', 'b', 'c'])
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
mock = Mock(side_effect='ghi')
self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i'])
self.assertRaises(StopIteration, mock)
class Foo(object):
pass
mock = MagicMock(side_effect=Foo)
self.assertIsInstance(mock(), Foo)
mock = Mock(side_effect=Iter())
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
def test_side_effect_iterator_exceptions(self):
for Klass in Mock, MagicMock:
iterable = (ValueError, 3, KeyError, 6)
m = Klass(side_effect=iterable)
self.assertRaises(ValueError, m)
self.assertEqual(m(), 3)
self.assertRaises(KeyError, m)
self.assertEqual(m(), 6)
def test_side_effect_setting_iterator(self):
mock = Mock()
mock.side_effect = iter([1, 2, 3])
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
mock.side_effect = ['a', 'b', 'c']
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
this_iter = Iter()
mock.side_effect = this_iter
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
self.assertIs(mock.side_effect, this_iter)
def test_side_effect_iterator_default(self):
mock = Mock(return_value=2)
mock.side_effect = iter([1, DEFAULT])
self.assertEqual([mock(), mock()], [1, 2])
def test_assert_has_calls_any_order(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(b=6)
kalls = [
call(1, 2), ({'a': 3},),
((3, 4),), ((), {'a': 3}),
('', (1, 2)), ('', {'a': 3}),
('', (1, 2), {}), ('', (), {'a': 3})
]
for kall in kalls:
mock.assert_has_calls([kall], any_order=True)
for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo':
self.assertRaises(
AssertionError, mock.assert_has_calls,
[kall], any_order=True
)
kall_lists = [
[call(1, 2), call(b=6)],
[call(3, 4), call(1, 2)],
[call(b=6), call(b=6)],
]
for kall_list in kall_lists:
mock.assert_has_calls(kall_list, any_order=True)
kall_lists = [
[call(b=6), call(b=6), call(b=6)],
[call(1, 2), call(1, 2)],
[call(3, 4), call(1, 2), call(5, 7)],
[call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)],
]
for kall_list in kall_lists:
self.assertRaises(
AssertionError, mock.assert_has_calls,
kall_list, any_order=True
)
def test_assert_has_calls(self):
kalls1 = [
call(1, 2), ({'a': 3},),
((3, 4),), call(b=6),
('', (1,), {'b': 6}),
]
kalls2 = [call.foo(), call.bar(1)]
kalls2.extend(call.spam().baz(a=3).call_list())
kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list())
mocks = []
for mock in Mock(), MagicMock():
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(1, b=6)
mocks.append((mock, kalls1))
mock = Mock()
mock.foo()
mock.bar(1)
mock.spam().baz(a=3)
mock.bam(set(), foo={}).fish([1])
mocks.append((mock, kalls2))
for mock, kalls in mocks:
for i in range(len(kalls)):
for step in 1, 2, 3:
these = kalls[i:i+step]
mock.assert_has_calls(these)
if len(these) > 1:
self.assertRaises(
AssertionError,
mock.assert_has_calls,
list(reversed(these))
)
def test_assert_has_calls_nested_spec(self):
class Something:
def __init__(self): pass
def meth(self, a, b, c, d=None): pass
class Foo:
def __init__(self, a): pass
def meth1(self, a, b): pass
mock_class = create_autospec(Something)
for m in [mock_class, mock_class()]:
m.meth(1, 2, 3, d=1)
m.assert_has_calls([call.meth(1, 2, 3, d=1)])
m.assert_has_calls([call.meth(1, 2, 3, 1)])
mock_class.reset_mock()
for m in [mock_class, mock_class()]:
self.assertRaises(AssertionError, m.assert_has_calls, [call.Foo()])
m.Foo(1).meth1(1, 2)
m.assert_has_calls([call.Foo(1), call.Foo(1).meth1(1, 2)])
m.Foo.assert_has_calls([call(1), call().meth1(1, 2)])
mock_class.reset_mock()
invalid_calls = [call.meth(1),
call.non_existent(1),
call.Foo().non_existent(1),
call.Foo().meth(1, 2, 3, 4)]
for kall in invalid_calls:
self.assertRaises(AssertionError,
mock_class.assert_has_calls,
[kall]
)
def test_assert_has_calls_nested_without_spec(self):
m = MagicMock()
m().foo().bar().baz()
m.one().two().three()
calls = call.one().two().three().call_list()
m.assert_has_calls(calls)
def test_assert_has_calls_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock(10, 11, c=12)
calls = [
('', (1, 2, 3), {}),
('', (4, 5, 6), {'d': 7}),
((10, 11, 12), {}),
]
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
# Reversed order
calls = list(reversed(calls))
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
def test_assert_has_calls_not_matching_spec_error(self):
def f(x=None): pass
mock = Mock(spec=f)
mock(1)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape('Calls not found.\n'
'Expected: [call()]\n'
'Actual: [call(1)]'))) as cm:
mock.assert_has_calls([call()])
self.assertIsNone(cm.exception.__cause__)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape(
'Error processing expected calls.\n'
"Errors: [None, TypeError('too many positional arguments')]\n"
"Expected: [call(), call(1, 2)]\n"
'Actual: [call(1)]').replace(
"arguments\\'", "arguments\\',?"
))) as cm:
mock.assert_has_calls([call(), call(1, 2)])
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_any_call(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(1, b=6)
mock.assert_any_call(1, 2)
mock.assert_any_call(a=3)
mock.assert_any_call(1, b=6)
self.assertRaises(
AssertionError,
mock.assert_any_call
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
1, 3
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
a=4
)
def test_assert_any_call_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock.assert_any_call(1, 2, 3)
mock.assert_any_call(a=1, b=2, c=3)
mock.assert_any_call(4, 5, 6, 7)
mock.assert_any_call(a=4, b=5, c=6, d=7)
self.assertRaises(AssertionError, mock.assert_any_call,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_any_call(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_mock_calls_create_autospec(self):
def f(a, b): pass
obj = Iter()
obj.f = f
funcs = [
create_autospec(f),
create_autospec(obj).f
]
for func in funcs:
func(1, 2)
func(3, 4)
self.assertEqual(
func.mock_calls, [call(1, 2), call(3, 4)]
)
#Issue21222
def test_create_autospec_with_name(self):
m = mock.create_autospec(object(), name='sweet_func')
self.assertIn('sweet_func', repr(m))
#Issue23078
def test_create_autospec_classmethod_and_staticmethod(self):
class TestClass:
@classmethod
def class_method(cls): pass
@staticmethod
def static_method(): pass
for method in ('class_method', 'static_method'):
with self.subTest(method=method):
mock_method = mock.create_autospec(getattr(TestClass, method))
mock_method()
mock_method.assert_called_once_with()
self.assertRaises(TypeError, mock_method, 'extra_arg')
#Issue21238
def test_mock_unsafe(self):
m = Mock()
msg = "Attributes cannot start with 'assert' or its misspellings"
with self.assertRaisesRegex(AttributeError, msg):
m.assert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.assret_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.asert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.aseert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.assrt_foo_call()
m = Mock(unsafe=True)
m.assert_foo_call()
m.assret_foo_call()
m.asert_foo_call()
m.aseert_foo_call()
m.assrt_foo_call()
#Issue21262
def test_assert_not_called(self):
m = Mock()
m.hello.assert_not_called()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_not_called()
def test_assert_not_called_message(self):
m = Mock()
m(1, 2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2)]"),
m.assert_not_called)
def test_assert_called(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called()
m.hello()
m.hello.assert_called()
m.hello()
m.hello.assert_called()
def test_assert_called_once(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
m.hello()
m.hello.assert_called_once()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
def test_assert_called_once_message(self):
m = Mock()
m(1, 2)
m(3)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2), call(3)]"),
m.assert_called_once)
def test_assert_called_once_message_not_called(self):
m = Mock()
with self.assertRaises(AssertionError) as e:
m.assert_called_once()
self.assertNotIn("Calls:", str(e.exception))
#Issue37212 printout of keyword args now preserves the original order
def test_ordered_call_signature(self):
m = Mock()
m.hello(name='hello', daddy='hero')
text = "call(name='hello', daddy='hero')"
self.assertEqual(repr(m.hello.call_args), text)
#Issue21270 overrides tuple methods for mock.call objects
def test_override_tuple_methods(self):
c = call.count()
i = call.index(132,'hello')
m = Mock()
m.count()
m.index(132,"hello")
self.assertEqual(m.method_calls[0], c)
self.assertEqual(m.method_calls[1], i)
def test_reset_return_sideeffect(self):
m = Mock(return_value=10, side_effect=[2,3])
m.reset_mock(return_value=True, side_effect=True)
self.assertIsInstance(m.return_value, Mock)
self.assertEqual(m.side_effect, None)
def test_reset_return(self):
m = Mock(return_value=10, side_effect=[2,3])
m.reset_mock(return_value=True)
self.assertIsInstance(m.return_value, Mock)
self.assertNotEqual(m.side_effect, None)
def test_reset_sideeffect(self):
m = Mock(return_value=10, side_effect=[2, 3])
m.reset_mock(side_effect=True)
self.assertEqual(m.return_value, 10)
self.assertEqual(m.side_effect, None)
def test_reset_return_with_children(self):
m = MagicMock(f=MagicMock(return_value=1))
self.assertEqual(m.f(), 1)
m.reset_mock(return_value=True)
self.assertNotEqual(m.f(), 1)
def test_reset_return_with_children_side_effect(self):
m = MagicMock(f=MagicMock(side_effect=[2, 3]))
self.assertNotEqual(m.f.side_effect, None)
m.reset_mock(side_effect=True)
self.assertEqual(m.f.side_effect, None)
def test_mock_add_spec(self):
class _One(object):
one = 1
class _Two(object):
two = 2
class Anything(object):
one = two = three = 'four'
klasses = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
]
for Klass in list(klasses):
klasses.append(lambda K=Klass: K(spec=Anything))
klasses.append(lambda K=Klass: K(spec_set=Anything))
for Klass in klasses:
for kwargs in dict(), dict(spec_set=True):
mock = Klass()
#no error
mock.one, mock.two, mock.three
for One, Two in [(_One, _Two), (['one'], ['two'])]:
for kwargs in dict(), dict(spec_set=True):
mock.mock_add_spec(One, **kwargs)
mock.one
self.assertRaises(
AttributeError, getattr, mock, 'two'
)
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
mock.mock_add_spec(Two, **kwargs)
self.assertRaises(
AttributeError, getattr, mock, 'one'
)
mock.two
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
# note that creating a mock, setting an instance attribute, and
# *then* setting a spec doesn't work. Not the intended use case
def test_mock_add_spec_magic_methods(self):
for Klass in MagicMock, NonCallableMagicMock:
mock = Klass()
int(mock)
mock.mock_add_spec(object)
self.assertRaises(TypeError, int, mock)
mock = Klass()
mock['foo']
mock.__int__.return_value =4
mock.mock_add_spec(int)
self.assertEqual(int(mock), 4)
self.assertRaises(TypeError, lambda: mock['foo'])
def test_adding_child_mock(self):
for Klass in (NonCallableMock, Mock, MagicMock, NonCallableMagicMock,
AsyncMock):
mock = Klass()
mock.foo = Mock()
mock.foo()
self.assertEqual(mock.method_calls, [call.foo()])
self.assertEqual(mock.mock_calls, [call.foo()])
mock = Klass()
mock.bar = Mock(name='name')
mock.bar()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
# mock with an existing _new_parent but no name
mock = Klass()
mock.baz = MagicMock()()
mock.baz()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
def test_adding_return_value_mock(self):
for Klass in Mock, MagicMock:
mock = Klass()
mock.return_value = MagicMock()
mock()()
self.assertEqual(mock.mock_calls, [call(), call()()])
def test_manager_mock(self):
class Foo(object):
one = 'one'
two = 'two'
manager = Mock()
p1 = patch.object(Foo, 'one')
p2 = patch.object(Foo, 'two')
mock_one = p1.start()
self.addCleanup(p1.stop)
mock_two = p2.start()
self.addCleanup(p2.stop)
manager.attach_mock(mock_one, 'one')
manager.attach_mock(mock_two, 'two')
Foo.two()
Foo.one()
self.assertEqual(manager.mock_calls, [call.two(), call.one()])
def test_magic_methods_mock_calls(self):
for Klass in Mock, MagicMock:
m = Klass()
m.__int__ = Mock(return_value=3)
m.__float__ = MagicMock(return_value=3.0)
int(m)
float(m)
self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()])
self.assertEqual(m.method_calls, [])
def test_mock_open_reuse_issue_21750(self):
mocked_open = mock.mock_open(read_data='data')
f1 = mocked_open('a-name')
f1_data = f1.read()
f2 = mocked_open('another-name')
f2_data = f2.read()
self.assertEqual(f1_data, f2_data)
def test_mock_open_dunder_iter_issue(self):
# Test dunder_iter method generates the expected result and
# consumes the iterator.
mocked_open = mock.mock_open(read_data='Remarkable\nNorwegian Blue')
f1 = mocked_open('a-name')
lines = [line for line in f1]
self.assertEqual(lines[0], 'Remarkable\n')
self.assertEqual(lines[1], 'Norwegian Blue')
self.assertEqual(list(f1), [])
def test_mock_open_using_next(self):
mocked_open = mock.mock_open(read_data='1st line\n2nd line\n3rd line')
f1 = mocked_open('a-name')
line1 = next(f1)
line2 = f1.__next__()
lines = [line for line in f1]
self.assertEqual(line1, '1st line\n')
self.assertEqual(line2, '2nd line\n')
self.assertEqual(lines[0], '3rd line')
self.assertEqual(list(f1), [])
with self.assertRaises(StopIteration):
next(f1)
def test_mock_open_next_with_readline_with_return_value(self):
mopen = mock.mock_open(read_data='foo\nbarn')
mopen.return_value.readline.return_value = 'abc'
self.assertEqual('abc', next(mopen()))
def test_mock_open_write(self):
# Test exception in file writing write()
mock_namedtemp = mock.mock_open(mock.MagicMock(name='JLV'))
with mock.patch('tempfile.NamedTemporaryFile', mock_namedtemp):
mock_filehandle = mock_namedtemp.return_value
mock_write = mock_filehandle.write
mock_write.side_effect = OSError('Test 2 Error')
def attempt():
tempfile.NamedTemporaryFile().write('asd')
self.assertRaises(OSError, attempt)
def test_mock_open_alter_readline(self):
mopen = mock.mock_open(read_data='foo\nbarn')
mopen.return_value.readline.side_effect = lambda *args:'abc'
first = mopen().readline()
second = mopen().readline()
self.assertEqual('abc', first)
self.assertEqual('abc', second)
def test_mock_open_after_eof(self):
# read, readline and readlines should work after end of file.
_open = mock.mock_open(read_data='foo')
h = _open('bar')
h.read()
self.assertEqual('', h.read())
self.assertEqual('', h.read())
self.assertEqual('', h.readline())
self.assertEqual('', h.readline())
self.assertEqual([], h.readlines())
self.assertEqual([], h.readlines())
def test_mock_parents(self):
for Klass in Mock, MagicMock:
m = Klass()
original_repr = repr(m)
m.return_value = m
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m.reset_mock()
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m = Klass()
m.b = m.a
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m.reset_mock()
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m = Klass()
original_repr = repr(m)
m.a = m()
m.a.return_value = m
self.assertEqual(repr(m), original_repr)
self.assertEqual(repr(m.a()), original_repr)
def test_attach_mock(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in classes:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'bar')
self.assertIs(m.bar, m2)
self.assertIn("name='mock.bar'", repr(m2))
m.bar.baz(1)
self.assertEqual(m.mock_calls, [call.bar.baz(1)])
self.assertEqual(m.method_calls, [call.bar.baz(1)])
def test_attach_mock_return_value(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in Mock, MagicMock:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'return_value')
self.assertIs(m(), m2)
self.assertIn("name='mock()'", repr(m2))
m2.foo()
self.assertEqual(m.mock_calls, call().foo().call_list())
def test_attach_mock_patch_autospec(self):
parent = Mock()
with mock.patch(f'{__name__}.something', autospec=True) as mock_func:
self.assertEqual(mock_func.mock._extract_mock_name(), 'something')
parent.attach_mock(mock_func, 'child')
parent.child(1)
something(2)
mock_func(3)
parent_calls = [call.child(1), call.child(2), call.child(3)]
child_calls = [call(1), call(2), call(3)]
self.assertEqual(parent.mock_calls, parent_calls)
self.assertEqual(parent.child.mock_calls, child_calls)
self.assertEqual(something.mock_calls, child_calls)
self.assertEqual(mock_func.mock_calls, child_calls)
self.assertIn('mock.child', repr(parent.child.mock))
self.assertEqual(mock_func.mock._extract_mock_name(), 'mock.child')
def test_attach_mock_patch_autospec_signature(self):
with mock.patch(f'{__name__}.Something.meth', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_meth')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_meth(mock.ANY, 1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
mocked.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
with mock.patch(f'{__name__}.something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_func')
something(1)
manager.assert_has_calls([call.attach_func(1)])
something.assert_has_calls([call(1)])
mocked.assert_has_calls([call(1)])
with mock.patch(f'{__name__}.Something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_obj')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_obj(),
call.attach_obj().meth(1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(1, 2, 3, d=4)])
mocked.assert_has_calls([call(), call().meth(1, 2, 3, d=4)])
def test_attribute_deletion(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
self.assertTrue(hasattr(mock, 'm'))
del mock.m
self.assertFalse(hasattr(mock, 'm'))
del mock.f
self.assertFalse(hasattr(mock, 'f'))
self.assertRaises(AttributeError, getattr, mock, 'f')
def test_mock_does_not_raise_on_repeated_attribute_deletion(self):
# bpo-20239: Assigning and deleting twice an attribute raises.
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
mock.foo = 3
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 3)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
mock.foo = 4
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 4)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
def test_mock_raises_when_deleting_nonexistent_attribute(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
del mock.foo
with self.assertRaises(AttributeError):
del mock.foo
def test_reset_mock_does_not_raise_on_attr_deletion(self):
# bpo-31177: reset_mock should not raise AttributeError when attributes
# were deleted in a mock instance
mock = Mock()
mock.child = True
del mock.child
mock.reset_mock()
self.assertFalse(hasattr(mock, 'child'))
def test_class_assignable(self):
for mock in Mock(), MagicMock():
self.assertNotIsInstance(mock, int)
mock.__class__ = int
self.assertIsInstance(mock, int)
mock.foo
def test_name_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".name")
self.assertIsNotNone(call.name)
self.assertEqual(type(call.name), _Call)
self.assertEqual(type(call.name().name), _Call)
def test_parent_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".parent")
self.assertIsNotNone(call.parent)
self.assertEqual(type(call.parent), _Call)
self.assertEqual(type(call.parent().parent), _Call)
def test_parent_propagation_with_create_autospec(self):
def foo(a, b): pass
mock = Mock()
mock.child = create_autospec(foo)
mock.child(1, 2)
self.assertRaises(TypeError, mock.child, 1)
self.assertEqual(mock.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(mock.child.mock))
def test_parent_propagation_with_autospec_attach_mock(self):
def foo(a, b): pass
parent = Mock()
parent.attach_mock(create_autospec(foo, name='bar'), 'child')
parent.child(1, 2)
self.assertRaises(TypeError, parent.child, 1)
self.assertEqual(parent.child.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(parent.child.mock))
def test_isinstance_under_settrace(self):
# bpo-36593 : __class__ is not set for a class that has __class__
# property defined when it's used with sys.settrace(trace) set.
# Delete the module to force reimport with tracing function set
# restore the old reference later since there are other tests that are
# dependent on unittest.mock.patch. In testpatch.PatchTest
# test_patch_dict_test_prefix and test_patch_test_prefix not restoring
# causes the objects patched to go out of sync
old_patch = mock_module.patch
# Directly using __setattr__ on unittest.mock causes current imported
# reference to be updated. Use a lambda so that during cleanup the
# re-imported new reference is updated.
self.addCleanup(lambda patch: setattr(mock_module, 'patch', patch),
old_patch)
with patch.dict('sys.modules'):
del sys.modules['mock']
# This trace will stop coverage being measured ;-)
def trace(frame, event, arg): # pragma: no cover
return trace
self.addCleanup(sys.settrace, sys.gettrace())
sys.settrace(trace)
from mock.mock import (
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
)
mocks = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock, AsyncMock
]
for mock in mocks:
obj = mock(spec=Something)
self.assertIsInstance(obj, Something)
def test_bool_not_called_when_passing_spec_arg(self):
class Something:
def __init__(self):
self.obj_with_bool_func = mock_module.MagicMock()
obj = Something()
with mock_module.patch.object(obj, 'obj_with_bool_func', autospec=True): pass
self.assertEqual(obj.obj_with_bool_func.__bool__.call_count, 0)
if __name__ == '__main__':
unittest.main()
| testing-cabal/mock | mock/tests/testmock.py | Python | bsd-2-clause | 72,280 |
import logging
from redash.query_runner import *
from redash.utils import json_dumps
logger = logging.getLogger(__name__)
try:
from influxdb import InfluxDBClusterClient
enabled = True
except ImportError:
enabled = False
def _transform_result(results):
result_columns = []
result_rows = []
for result in results:
for series in result.raw.get('series', []):
for column in series['columns']:
if column not in result_columns:
result_columns.append(column)
tags = series.get('tags', {})
for key in tags.keys():
if key not in result_columns:
result_columns.append(key)
for result in results:
for series in result.raw.get('series', []):
for point in series['values']:
result_row = {}
for column in result_columns:
tags = series.get('tags', {})
if column in tags:
result_row[column] = tags[column]
elif column in series['columns']:
index = series['columns'].index(column)
value = point[index]
result_row[column] = value
result_rows.append(result_row)
return json_dumps({
"columns": [{'name': c} for c in result_columns],
"rows": result_rows
})
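# Illustrative sketch (not part of the original module): _transform_result expects
# objects exposing the raw InfluxDB response under a ``raw`` attribute. The fake
# result class below is hypothetical and only shows the expected input/output shape.
def _example_transform_result():
    class _FakeResult(object):
        raw = {
            'series': [{
                'name': 'cpu',
                'tags': {'host': 'web-1'},
                'columns': ['time', 'value'],
                'values': [['2016-01-01T00:00:00Z', 0.5]],
            }]
        }
    # Produces JSON with columns [time, value, host] and one row combining the
    # point values with the series tags.
    return _transform_result([_FakeResult()])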
class InfluxDB(BaseQueryRunner):
noop_query = "show measurements limit 1"
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string'
}
},
'required': ['url']
}
@classmethod
def enabled(cls):
return enabled
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "influxdb"
def run_query(self, query, user):
client = InfluxDBClusterClient.from_DSN(self.configuration['url'])
logger.debug("influxdb url: %s", self.configuration['url'])
logger.debug("influxdb got query: %s", query)
try:
results = client.query(query)
if not isinstance(results, list):
results = [results]
json_data = _transform_result(results)
error = None
except Exception as ex:
json_data = None
            error = str(ex)
return json_data, error
register(InfluxDB)
| chriszs/redash | redash/query_runner/influx_db.py | Python | bsd-2-clause | 2,575 |
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_my_view(self):
from formhelpers2.views import comment
request = testing.DummyRequest()
info = comment(request)
self.assertTrue(hasattr(info['forms'], 'comment_form'))
| tholo/formhelpers2 | formhelpers2/tests.py | Python | bsd-2-clause | 406 |
"""
Python module presenting an API to an ELM327 serial interface
(C) 2015 Jamie Fraser <[email protected]>
http://github.com/fwaggle/pyELM327
Please see License.txt and Readme.md.
"""
# Pretty much taken from https://en.wikipedia.org/wiki/OBD-II_PIDs
__pids ={
0x01: {
# TODO: ignoring fuel system #2 atm
0x03: {
'Name': 'Fuel system status',
'Units': '',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: decode_0x03(int(m.group(1),16))},
0x04: {
'Name': 'Calculated engine load value',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x05: {
'Name': 'Engine coolant temperature',
'Units': '*C',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) - 40 },
0x06: {
'Name': 'Short term fuel % trim - Bank 1',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16)-128) * 100.0 / 128 },
0x07: {
'Name': 'Long term fuel % trim - Bank 1',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16)-128) * 100.0 / 128 },
0x08: {
'Name': 'Short term fuel % trim - Bank 2',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16)-128) * 100.0 / 128 },
0x09: {
'Name': 'Long term fuel % trim - Bank 2',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16)-128) * 100.0 / 128 },
0x0A: {
'Name': 'Fuel pressure',
'Units': 'kPa (gauge)',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 3 },
0x0B: {
'Name': 'Intake manifold absolute pressure',
'Units': 'kPa (absolute)',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x0C: {
'Name': 'Engine RPM',
'Units': 'RPM',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16))/4.0},
0x0D: {
'Name': 'Vehicle speed',
'Units': 'km/h',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x0E: {
'Name': 'Timing advance',
'Units': '* rel #1 cylinder',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) - 128) / 2.0 },
0x0F: {
'Name': 'Intake air temperature',
'Units': '*C',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) - 40 },
0x10: {
'Name': 'MAF Sensor air flow rate',
'Units': 'grams/sec',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16))/100.0},
0x11: {
'Name': 'Throttle position',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 100.0) / 255 },
0x12: {
'Name': 'Commanded secondary air status',
'Units': 'Bit-encoded',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x13: {
'Name': 'Oxygen sensors present',
'Units': 'Bit-encoded',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
# NOTE: We currently throw away the fuel trim readings for these PIDs
0x14: {
'Name': 'Bank 1, Sensor 1: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x15: {
'Name': 'Bank 1, Sensor 2: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x16: {
'Name': 'Bank 1, Sensor 3: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x17: {
'Name': 'Bank 1, Sensor 4 Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x18: {
'Name': 'Bank 2, Sensor 1: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x19: {
'Name': 'Bank 2, Sensor 2: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x1A: {
'Name': 'Bank 2, Sensor 3: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x1B: {
'Name': 'Bank 2, Sensor 4 Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x1C: {
'Name': 'OBD standards this vehicle conforms to',
'Units': '',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: decode_0x1c(int(m.group(1),16)) },
0x1F: {
'Name': 'Run time since engine start',
'Units': 's',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x21: {
        'Name': 'Distance traveled with malfunction indicator lamp on',
'Units': 'km',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x22: {
'Name': 'Fuel Rail Pressure (relative to manifold vacuum)',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
        'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) * 0.079},
0x23: {
'Name': 'Fuel Rail Pressure (diesel, or gasoline direct injection)',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
        'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) * 10},
0x2C: {
'Name': 'Commanded EGR',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 100.0) / 255 },
0x2D: {
'Name': 'EGR Error',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) - 128) * 100.0) / 128 },
0x2E: {
'Name': 'Commanded evaporative purge',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 100.0) / 255 },
0x2F: {
'Name': 'Fuel level input',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 100.0) / 255 },
0x30: {
'Name': '# of warm-ups since codes cleared',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x31: {
'Name': 'Distance traveled since codes cleared',
'Units': 'km',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x33: {
'Name': 'Barometric pressure',
'Units': 'kPa (absolute)',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x42: {
'Name': 'Control module voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
        'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) / 1000.0},
0x43: {
'Name': 'Absolute load value',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
        'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) * 100.0 / 255},
0x44: {
'Name': 'Fuel/Air commanded equivalence ratio',
'Units': '',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
        'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) / 32768.0},
0x45: {
'Name': 'Relative throttle position',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x46: {
'Name': 'Ambient air temperature',
'Units': '*C',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) - 40 },
0x47: {
'Name': 'Absolute throttle position B',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x48: {
'Name': 'Absolute throttle position C',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x49: {
'Name': 'Absolute throttle position D',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x4A: {
'Name': 'Absolute throttle position E',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x4B: {
'Name': 'Absolute throttle position F',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x4C: {
'Name': 'Commanded throttle actuator',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x4D: {
'Name': 'Time run with MIL on',
'Units': 'minutes',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x4E: {
'Name': 'Time since codes cleared',
'Units': 'minutes',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x52: {
'Name': 'Fuel ethanol percentage',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x53: {
'Name': 'Absolute evaporative system vapor pressure',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) / 200.0},
0x54: {
'Name': 'Relative evaporative system vapor pressure',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) - 32767},
0x59: {
'Name': 'Absolute fuel rail pressure',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) * 10},
0x5A: {
'Name': 'Relative accelerator pedal position',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x5B: {
'Name': 'Hybrid battery pack remaining life',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x5C: {
'Name': 'Engine oil temperature ',
'Units': '*C',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) - 40 },
}
}
# Most of these strings also stolen mercilessly from Wikipedia
def decode_0x03(data):
"""
Decode the bit-encoding of Mode 01, PID 03 and return appropriate string.
This is apparently bit-encoded, but only one bit may be set at any one time.
    If you want the raw value, take int(result[0:2]) on the returned string.
"""
if data == 1:
return '01: Open loop due to insufficient engine temperature'
elif data == 2:
return '02: Closed loop, using oxygen sensor feedback to determine fuel mix'
elif data == 4:
return '04: Open loop due to engine load OR fuel cut due to deceleration'
elif data == 8:
return '08: Open loop due to system failure'
elif data == 16:
return '16: Closed loop, using at least one oxygen sensor but there is a fault in the feedback system'
else:
return 'NO DATA'
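# Illustrative sketch (not part of the original module): the __pids table is meant
# to be used by matching an ECU response line against 'Pattern' and feeding the
# match object to 'Value'. The response string below is a made-up example for
# Mode 01 PID 0x0C (engine RPM).
def _example_decode_rpm():
    import re
    entry = __pids[0x01][0x0C]
    m = re.match(entry['Pattern'], '41 0C 1A F8 ')
    # ((0x1A * 256) + 0xF8) / 4.0 == 1726.0 RPM
    return entry['Value'](m), entry['Units']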
__standards ={
1: 'OBD-II as defined by the CARB',
2: 'OBD as defined by the EPA',
3: 'OBD and OBD-II',
4: 'OBD-I',
5: 'Not OBD compliant',
6: 'EOBD (Europe)',
7: 'EOBD and OBD-II',
8: 'EOBD and OBD',
9: 'EOBD, OBD and OBD II',
10: 'JOBD (Japan)',
11: 'JOBD and OBD II',
12: 'JOBD and EOBD',
13: 'JOBD, EOBD, and OBD II',
14: 'Reserved',
15: 'Reserved',
16: 'Reserved',
17: 'Engine Manufacturer Diagnostics (EMD)',
18: 'Engine Manufacturer Diagnostics Enhanced (EMD+)',
19: 'Heavy Duty On-Board Diagnostics (Child/Partial) (HD OBD-C)',
20: 'Heavy Duty On-Board Diagnostics (HD OBD)',
21: 'World Wide Harmonized OBD (WWH OBD)',
22: 'Reserved',
23: 'Heavy Duty Euro OBD Stage I without NOx control (HD EOBD-I)',
24: 'Heavy Duty Euro OBD Stage I with NOx control (HD EOBD-I N)',
25: 'Heavy Duty Euro OBD Stage II without NOx control (HD EOBD-II)',
26: 'Heavy Duty Euro OBD Stage II with NOx control (HD EOBD-II N)',
27: 'Reserved',
28: 'Brazil OBD Phase 1 (OBDBr-1)',
29: 'Brazil OBD Phase 2 (OBDBr-2)',
30: 'Korean OBD (KOBD)',
31: 'India OBD I (IOBD I)',
32: 'India OBD II (IOBD II)',
33: 'Heavy Duty Euro OBD Stage VI (HD EOBD-IV)',
}
def decode_0x1c(data):
"""
Decode the bit-encoding of Mode 01, PID 1C.
Returns a string describing the standards adhered to by the ECU.
    If you want the raw value, use int(result[0:3]) on the result.
"""
if data in __standards:
return '%3d: %s' % (data, __standards[data])
else:
        return 'NO DATA'
| fwaggle/pyELM327 | elm327/pids.py | Python | bsd-2-clause | 14,201 |
# *****************************************************************
# Copyright (c) 2013 Massachusetts Institute of Technology
#
# Developed exclusively at US Government expense under US Air Force contract
# FA8721-05-C-002. The rights of the United States Government to use, modify,
# reproduce, release, perform, display or disclose this computer software and
# computer software documentation in whole or in part, in any manner and for
# any purpose whatsoever, and to have or authorize others to do so, are
# Unrestricted and Unlimited.
#
# Licensed for use under the BSD License as described in the BSD-LICENSE.txt
# file in the root directory of this release.
#
# Project: SPAR
# Authors: SY
# Description: IBM TA2 wire class
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 22 Oct 2012 SY Original Version
# *****************************************************************
import ibm_circuit_object as ico
class IBMInputWire(ico.IBMCircuitObject):
"""
This class represents a single IBM input wire.
"""
def __init__(self, displayname, circuit):
"""Initializes the wire with the display name and circuit specified."""
ico.IBMCircuitObject.__init__(self, displayname, 0.0, 0, circuit)
| y4n9squared/HEtest | hetest/python/circuit_generation/ibm/ibm_wire.py | Python | bsd-2-clause | 1,345 |
#!/usr/bin/env python
from __future__ import print_function
import time
import numpy as np
from numba import jit, stencil
@stencil
def jacobi_kernel(A):
return 0.25 * (A[0,1] + A[0,-1] + A[-1,0] + A[1,0])
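# The @stencil kernel uses relative indices: A[0, 1], A[0, -1], A[-1, 0] and A[1, 0]
# are the four nearest neighbours of the current point, so each output element is
# their plain average -- one Jacobi relaxation step.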
@jit(parallel=True)
def jacobi_relax_core(A, Anew):
error = 0.0
n = A.shape[0]
m = A.shape[1]
Anew = jacobi_kernel(A)
error = np.max(np.abs(Anew - A))
return error
def main():
NN = 3000
NM = 3000
A = np.zeros((NN, NM), dtype=np.float64)
Anew = np.zeros((NN, NM), dtype=np.float64)
n = NN
m = NM
iter_max = 1000
tol = 1.0e-6
error = 1.0
for j in range(n):
A[j, 0] = 1.0
Anew[j, 0] = 1.0
print("Jacobi relaxation Calculation: %d x %d mesh" % (n, m))
timer = time.time()
iter = 0
while error > tol and iter < iter_max:
error = jacobi_relax_core(A, Anew)
# swap A and Anew
tmp = A
A = Anew
Anew = tmp
if iter % 100 == 0:
print("%5d, %0.6f (elapsed: %f s)" % (iter, error, time.time()-timer))
iter += 1
runtime = time.time() - timer
print(" total: %f s" % runtime)
if __name__ == '__main__':
main()
| jriehl/numba | examples/laplace2d/laplace2d-pa.py | Python | bsd-2-clause | 1,199 |
# -*- coding: utf-8 -*-
"""
Custom module logger
"""
import logging
module_name = 'moflow'
logger = logging.getLogger(module_name)
logger.addHandler(logging.NullHandler()) # best practice to not show anything
def use_basic_config(level=logging.INFO, format=logging.BASIC_FORMAT):
"""Add basic configuration and formatting to the logger
By default, the logger should not be configured in any way. However
users and developers may prefer to see the logger messages.
"""
logger.level = level
if module_name not in [_.name for _ in logger.handlers]:
formatter = logging.Formatter(format)
handler = logging.StreamHandler()
handler.name = module_name
handler.setFormatter(formatter)
logger.addHandler(handler)
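# Illustrative usage sketch (not part of the original module; the import path below
# is assumed from the file location): opt in to console output, then log through the
# module logger as usual.
#
#     from moflow._logger import logger, use_basic_config
#     use_basic_config(logging.DEBUG)
#     logger.debug('reading MODFLOW name file')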
| mwtoews/moflow | moflow/_logger.py | Python | bsd-2-clause | 773 |
from airy.core.conf import settings
from mongoengine import *
connect(getattr(settings, 'database_name', 'airy'))
| letolab/airy | airy/core/db.py | Python | bsd-2-clause | 116 |
from .sizedist import *
from .WD01 import make_WD01_DustSpectrum
| eblur/dust | astrodust/distlib/__init__.py | Python | bsd-2-clause | 67 |
import ana
import weakref
default_plugins = { }
# This is a base class for SimState plugins. A SimState plugin will be copied along with the state when the state is branched. They
# are intended to be used for things such as tracking open files, tracking heap details, and providing storage and persistence for SimProcedures.
class SimStatePlugin(ana.Storable):
#__slots__ = [ 'state' ]
def __init__(self):
self.state = None
# Sets a new state (for example, if the state has been branched)
def set_state(self, state):
if state is None or type(state).__name__ == 'weakproxy':
self.state = state
else:
self.state = weakref.proxy(state)
# Should return a copy of the state plugin.
def copy(self):
raise Exception("copy() not implement for %s", self.__class__.__name__)
def merge(self, others, merge_flag, flag_values): # pylint: disable=W0613
'''
Should merge the state plugin with the provided others.
        others - the other state plugins to merge with
merge_flag - a symbolic expression for the merge flag
flag_values - the values to compare against to check which content should be used.
self.symbolic_content = self.state.se.If(merge_flag == flag_values[0], self.symbolic_content, other.se.symbolic_content)
Can return a sequence of constraints to be added to the state.
'''
raise Exception("merge() not implement for %s", self.__class__.__name__)
def widen(self, others, merge_flag, flag_values):
"""
The widening operation for plugins.
"""
        raise Exception('widen() not implemented for %s' % self.__class__.__name__)
@staticmethod
def register_default(name, cls):
if name in default_plugins:
raise Exception("%s is already set as the default for %s" % (default_plugins[name], name))
default_plugins[name] = cls
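# Illustrative sketch (not part of simuvex): a minimal plugin only has to return a
# detached copy of its own data from copy() and combine the others' data in merge().
# The class below is hypothetical and exists purely to show the shape of that contract.
class _ExampleCounterPlugin(SimStatePlugin):
    def __init__(self, count=0):
        SimStatePlugin.__init__(self)
        self.count = count

    def copy(self):
        # a fresh, state-independent copy travels with the branched state
        return _ExampleCounterPlugin(self.count)

    def merge(self, others, merge_flag, flag_values):
        # keep the largest counter seen across the merged states; no extra constraints
        self.count = max([self.count] + [o.count for o in others])
        return []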
| zhuyue1314/simuvex | simuvex/plugins/plugin.py | Python | bsd-2-clause | 1,953 |
from collections import Counter
from django.contrib import admin
from django.contrib.auth.models import User
from gem.models import GemCommentReport, Invite
from gem.rules import ProfileDataRule, CommentCountRule
from molo.commenting.admin import MoloCommentAdmin, MoloCommentsModelAdmin
from molo.commenting.models import MoloComment
from molo.profiles.models import UserProfile
from molo.forms.models import FormsSegmentUserGroup
from wagtail.contrib.modeladmin.helpers import PermissionHelper
from wagtail.contrib.modeladmin.options import (
ModelAdmin as WagtailModelAdmin, modeladmin_register)
from wagtail.contrib.modeladmin.views import CreateView
class InviteAdmin(WagtailModelAdmin):
model = Invite
menu_order = 600
menu_icon = 'mail'
menu_label = 'Invites'
add_to_settings_menu = True
search_fields = ['email']
list_filter = ['is_accepted', 'created_at']
list_display = [
'email', 'created_at', 'modified_at', 'is_accepted', 'user',
]
class InviteCreateView(CreateView):
def form_valid(self, form):
site = self.request._wagtail_site
if not form.instance.user:
form.instance.user = self.request.user
if not form.instance.site:
form.instance.site = site
return super().form_valid(form)
create_view_class = InviteCreateView
modeladmin_register(InviteAdmin)
class UserProfileInlineModelAdmin(admin.StackedInline):
model = UserProfile
can_delete = False
class GemCommentReportModelAdmin(admin.StackedInline):
model = GemCommentReport
can_delete = True
max_num = 0
actions = None
readonly_fields = ["user", "reported_reason", ]
class FormsSegementUserPermissionHelper(PermissionHelper):
def __init__(self, model, inspect_view_enabled=False):
model = FormsSegmentUserGroup
super(FormsSegementUserPermissionHelper, self).__init__(
model, inspect_view_enabled
)
class GemCommentModelAdmin(MoloCommentsModelAdmin):
list_display = (
'comment', 'parent_comment', 'moderator_reply', 'content', '_user',
'is_removed', 'is_reported', 'reported_count', 'reported_reason',
'submit_date', 'country')
def reported_reason(self, obj):
all_reported_reasons = list(
GemCommentReport.objects.filter(comment=obj.pk).values_list(
'reported_reason', flat=True))
breakdown_of_reasons = []
for value, count in Counter(all_reported_reasons).most_common():
reason = '%s, (%s)' % (value, count)
breakdown_of_reasons.append(reason)
return breakdown_of_reasons
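    # Example of the breakdown format returned above (values are illustrative only):
    #   ['Offensive language, (3)', 'Spam, (1)']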
def reported_count(self, obj):
return GemCommentReport.objects.filter(comment=obj.pk).count()
class GemCommentReportAdmin(MoloCommentAdmin):
inlines = (GemCommentReportModelAdmin,)
class ProfileDataRuleAdminInline(admin.TabularInline):
"""
Inline the ProfileDataRule into the administration
interface for segments.
"""
model = ProfileDataRule
class CommentCountRuleAdminInline(admin.TabularInline):
"""
Inline the CommentCountRule into the administration
interface for segments.
"""
model = CommentCountRule
admin.site.unregister(User)
admin.site.unregister(MoloComment)
admin.site.register(MoloComment, GemCommentReportAdmin)
| praekelt/molo-gem | gem/admin.py | Python | bsd-2-clause | 3,379 |
# Copyright 2014-2016 Morgan Delahaye-Prat. All Rights Reserved.
#
# Licensed under the Simplified BSD License (the "License");
# you may not use this file except in compliance with the License.
import pytest
from hypr.helpers.mini_dsl import Range
@pytest.mark.populate(10)
class TestIntervalTypes:
models = 'SQLiteModel',
# interval notation
def test_closed_interval(self, model):
"""Test explicit bound interval."""
ref = [model.one(i) for i in range(2, 7) if model.one(i)]
rv = sorted(model.get(id=Range(2, 7)))
assert rv == ref
def test_right_open(self, model):
"""Interval with a minimum value only."""
ref = [model.one(i) for i in range(7, 100) if model.one(i)]
rv = sorted(model.get(id=Range(start=7)))
assert rv == ref
def test_left_open(self, model):
"""Interval with a maximum value only."""
ref = [model.one(i) for i in range(0, 3) if model.one(i)]
rv = sorted(model.get(id=Range(stop=3)))
assert rv == ref
def test_negation(self, model):
"""Test negation of an interval."""
ref = sorted(model.get(id=Range(stop=2)) +
model.get(id=Range(start=7)))
rv = sorted(model.get(id=(False, Range(2, 7))))
assert rv == ref
A = Range(10, 20)
B = Range(15, 25)
A_and_B = Range(15, 20)
A_or_B = Range(10, 25)
@pytest.mark.populate(30)
class TestIntervalCombination:
"""Test logical operators."""
models = 'SQLiteModel',
def test_false(self, model):
"""Test an interval always false."""
assert model.get(id=(False, Range(0, 100))) == []
def test_true(self, model):
"""Test an interval always true."""
ref = sorted(model.get())
rv = sorted(model.get(id=Range(0, 100)))
assert rv == ref
def test_conjunction(self, model):
"""A ∧ B."""
ref = model.get(id=A_and_B)
rv = model.get(id=((True, A, 0), (True, B, 1)))
assert sorted(rv) == sorted(ref)
def test_disjunction(self, model):
"""A ∨ B."""
ref = model.get(id=A_or_B)
rv = model.get(id=(A, B))
assert sorted(rv) == sorted(ref)
def test_nand(self, model):
"""A ⊼ B encoded as ¬A ∨ ¬B."""
ref = model.get(id=(False, A_and_B))
rv = model.get(id=((False, A), (False, B)))
assert sorted(rv) == sorted(ref)
def test_nor(self, model):
"""A ⊽ B encoded as ¬A ∧ ¬B."""
ref = model.get(id=(False, A_or_B))
rv = model.get(id=(
(False, A, 0),
(False, B, 1)
))
assert sorted(rv) == sorted(ref)
def test_implication(self, model):
"""A → B encoded as ¬A ∨ B."""
ref = model.get(id=(False, Range(10, 15)))
rv = model.get(id=((False, A), B))
assert sorted(rv) == sorted(ref)
def test_converse_implication(self, model):
"""A ← B encoded as A ∨ ¬B."""
ref = model.get(id=(False, Range(20, 25)))
rv = model.get(id=(A, (False, B)))
assert sorted(rv) == sorted(ref)
def test_xor(self, model):
"""A ⊕ B encoded as (¬A ∨ ¬B) ∧ (A ∨ B)."""
ref = model.get(id=Range(10, 15)) + model.get(id=Range(20, 25))
rv = model.get(id=(
(False, A, 0), (False, B, 0),
(True, A, 1), (True, B, 1),
))
assert sorted(rv) == sorted(ref)
def test_biconditional(self, model):
"""A ↔ B encoded as (¬A ∨ B) ∧ (A ∨ ¬B)."""
ref = model.get(id=(False, A_or_B)) + model.get(id=A_and_B)
rv = model.get(id=(
(False, A, 0), (True, B, 0),
(True, A, 1), (False, B, 1),
))
assert sorted(rv) == sorted(ref)
def test_non_implication(self, model):
"""A ↛ B encoded as A ∨ ¬B."""
ref = model.get(id=Range(10, 15))
rv = model.get(id=(
(True, A, 0),
(False, B, 1)
))
assert sorted(rv) == sorted(ref)
def test_converse_non_implication(self, model):
"""A ↚ B encoded as ¬A ∨ B."""
ref = model.get(id=Range(20, 25))
rv = model.get(id=(
(False, A, 0),
(True, B, 1)
))
assert sorted(rv) == sorted(ref)
@pytest.mark.populate(10)
class TestIntervalIntersection:
"""Test some intersections."""
models = 'SQLiteModel',
def test_empty_intersection(self, model):
"""Empty intersection."""
rv = model.get(id=((True, Range(2, 4), 0), (True, Range(7, 9), 1)))
assert sorted(rv) == []
def test_union_without_intersection(self, model):
"""Union without intersection."""
ref = model.get(id=Range(2, 4)) + model.get(id=Range(7, 9))
rv = model.get(id=(Range(2, 4), Range(7, 9)))
assert sorted(rv) == sorted(ref)
| project-hypr/hypr2 | tests/models/test_range_filter.py | Python | bsd-2-clause | 4,895 |
#!/usr/bin/env python
'''Tool to generate computationally-rarefied graphs of kmer spectra'''
import sys
import os
import scipy.stats
from optparse import OptionParser
import numpy as np
import ksatools
def fract(aa, epsilon, threshold):
'''Evaluates the fraction of theoretically-subsampled spectra
above a specified threshold. Dataset abundance is attenuated by
    the factor epsilon. Returns a float between 0 and 1. aa is a
    two-column abundance table, epsilon and threshold are floats.
'''
sys.stderr.write("E %f T %f\n" % (epsilon, threshold))
xr = aa[:, 0]
xn = aa[:, 1]
NO = np.sum(xn * xr)
p = 0.0
for i in range(len(xr)):
# this is the expected number of nonzero categories after hypergeometric sampling
# nonzero = (1.-scipy.stats.hypergeom.cdf(0.5, NO, xr[i], epsilon*NO))
nonzero = (1. - scipy.stats.hypergeom.pmf(0, NO, xr[i], epsilon * NO))
# For efficiency, don't evaluate if numerator is too small
# For numerical stability, don't evaluate term if denominator (nonzero) is too small
# note: second threshold (on nonzero) here creates kinks in the graph, but is important
if nonzero * xr[i] * xn[i] > 10E-0 and nonzero > 1E-2:
# and this is the expected number of above-threshold survivors
gt_thresh = 1. - \
scipy.stats.hypergeom.cdf(
threshold + 0.5, NO, xr[i], epsilon * NO)
interim = float(xn[i] * xr[i]) * (gt_thresh / nonzero)
if (not np.isnan(interim)) and (interim > 0):
p += interim
return p / NO
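# Illustrative sketch (not part of the original tool): fract() takes a two-column
# abundance table (column 0 = coverage, column 1 = number of categories at that
# coverage) and returns the fraction of the data expected to stay above `threshold`
# after attenuating the sampling effort by `epsilon`. The toy numbers are made up.
def _example_fract():
    toy = np.array([[1.0, 100.0],   # 100 categories seen once
                    [10.0, 10.0]])  # 10 categories seen ten times
    return fract(toy, 0.5, 1)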
def rich(aa, epsilon, threshold):
sys.stderr.write("richness E %f T %f\n" % (epsilon, threshold))
xr = aa[:, 0]
xn = aa[:, 1]
NO = np.sum(xn * xr)
interim = 0
for i in range(len(xr)):
# this is the expected number of nonzero categories after hypergeometric sampling
# nonzero = (1.-scipy.stats.hypergeom.cdf(0.5, NO, xr[i], epsilon*NO))
nonzero = (1. - scipy.stats.hypergeom.pmf(0, NO, xr[i], epsilon * NO))
interim += nonzero * xn[i]
return interim
def calc_resampled_fraction(aa, samplefracs, thresholds):
'''calculate 2D array of return value of fract by evaluating it
for each fraction in samplefracs and each threshold in thresholds.
    Returns 2d matrix with shape = len(samplefracs), len(thresholds)
aa must be 2d ndarray
'''
assert aa.shape[1] == 2
matrix = np.zeros((len(samplefracs), len(thresholds)))
for i, frac in enumerate(samplefracs):
for j, threshold in enumerate(thresholds):
dummy = fract(aa, frac, threshold)
matrix[i][j] = dummy
return matrix
def calc_resampled_richness(aa, samplefracs, thresholds):
    '''calculate 2D array, like calc_resampled_fraction, of
    subsampled richness for each fraction in samplefracs
    and each threshold in thresholds.
    Returns 2d matrix with shape = len(samplefracs), len(thresholds)
aa must be 2d ndarray
'''
assert aa.shape[1] == 2
matrix = np.zeros((len(samplefracs), len(thresholds)))
for i, frac in enumerate(samplefracs):
for j, threshold in enumerate(thresholds):
dummy = rich(aa, frac, threshold)
matrix[i][j] = dummy
return matrix
def plotme(b, label, color=None, thresholdlist=None, numplots=4,
suppress=False, dump=False, shaded=0, n=1):
'''Performs calculations and calls graphing routines,
given spectra
'''
# define range of subsamples
import matplotlib.pyplot as plt
N = np.sum(b[:, 0] * b[:, 1])
samplefractions = 10**np.arange(2, 11, .5) / N # CHEAP
samplefractions = 10**np.arange(2, 11, .1) / N
    # Throw away unnecessary samples
samplefractions = np.hstack((samplefractions[samplefractions < 1], 1))
SHADED = shaded
if thresholdlist is None:
thresholdlist = [1]
if SHADED != 3:
matrix = calc_resampled_fraction(b, samplefractions, thresholdlist)
else:
matrix = calc_resampled_richness(b, samplefractions, thresholdlist)
effort = N * samplefractions
data = np.hstack([np.atleast_2d(effort).T, matrix])
# np.savetxt(sys.stdout, data, fmt="%.3f") # Numpy can't write to standard out in python3
headertext = "subsetsize\t" + "\t".join(map(str, thresholdlist))
with open(label + ".rare.csv", 'wb') as fp:
np.savetxt(fp, data, header=headertext, delimiter="\t")
if dump:
with open(label + ".rare.csv") as f:
for l in f:
print(l)
pex2 = np.hstack((effort[0], effort, effort[-1]))
pex = effort
for i in range(matrix.shape[1]):
aug2 = np.hstack((0, matrix[:, i], 0))
aug = matrix[:, i]
# lab = label + " " + str(thresholdlist[i])
lab = str(thresholdlist[i]) + "x"
plt.grid(axis='both')
if SHADED == 0:
plt.title(label)
plt.semilogx(pex, aug, "-o", label=lab)
elif SHADED == 2:
lab = label + str(thresholdlist[i]) + "x"
lab = label
plt.semilogx(pex, aug, "-", label=lab, color=color)
plt.ylabel("Nonunique fraction of data")
elif SHADED == 3:
plt.semilogy(pex, aug, "-", label=lab, color=color)
plt.ylabel("Number of unique categories ")
plt.xlabel("Sampling effort")
elif SHADED == 1:
plt.subplot(numplots, 1, n + 1)
plt.semilogx(pex, aug, "-", label=lab, color=color)
plt.fill(pex2, aug2, "k", alpha=0.2)
plt.title(label)
plt.ylabel("Fraction of data")
else:
plt.semilogx(pex, aug, "-", label=lab)
plt.fill(pex2, aug2, "k", alpha=0.2)
plt.title(label)
plt.ylabel("Fraction of data")
# label=str(thresholdlist[i]))
# plt.fill(pex, aug, "k", alpha=0.2)
plt.ylim((0, 1))
plt.xlim((1E4, 1E11))
if SHADED == 0 or n + 1 == numplots:
plt.xlabel("Sequencing effort (bp)")
else: # suppress drawing of x-axis labels for all but last plot
frame1 = plt.gca()
frame1.axes.get_xaxis().set_ticks([])
plt.tight_layout()
return()
| MG-RAST/kmerspectrumanalyzer | ksatools/rare.py | Python | bsd-2-clause | 6,250 |
from django.contrib.auth import get_user_model
from django.db import models
from imagekit.cachefiles import ImageCacheFile
from imagekit.registry import generator_registry
from imagekit.templatetags.imagekit import DEFAULT_THUMBNAIL_GENERATOR
from rest_framework import serializers
User = get_user_model()
class ThumbnailField(serializers.ImageField):
"""
Image field that returns an images url.
Pass get parameters to thumbnail the image.
Options are:
width: Specify the width (in pixels) to resize / crop to.
height: Specify the height (in pixels) to resize / crop to.
crop: Whether to crop or not [1,0]
anchor: Where to anchor the crop [t,r,b,l]
upscale: Whether to upscale or not [1,0]
If no options are specified the users avatar is returned.
To crop to 100x100 anchored to the top right:
?width=100&height=100&crop=1&anchor=tr
"""
def __init__(self, *args, **kwargs):
self.generator_id = kwargs.pop('generator_id', DEFAULT_THUMBNAIL_GENERATOR)
super(ThumbnailField, self).__init__(*args, **kwargs)
def get_generator_kwargs(self, query_params):
width = int(query_params.get('width', 0)) or None
height = int(query_params.get('height', 0)) or None
return {
'width': width,
'height': height,
'anchor': query_params.get('anchor', None),
'crop': query_params.get('crop', None),
'upscale': query_params.get('upscale', None)
}
def generate_thumbnail(self, source, **kwargs):
generator = generator_registry.get(
self.generator_id,
source=source,
**kwargs)
return ImageCacheFile(generator)
def to_native(self, image):
if not image.name:
return None
request = self.context.get('request', None)
if request is None:
return image.url
kwargs = self.get_generator_kwargs(request.query_params)
if kwargs.get('width') or kwargs.get('height'):
image = self.generate_thumbnail(image, **kwargs)
return request.build_absolute_uri(image.url)
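# Illustrative sketch (not part of the original module): get_generator_kwargs() only
# reads its argument with .get(), so a plain dict stands in for request.query_params
# here; the numbers are made up.
def _example_thumbnail_kwargs():
    field = ThumbnailField()
    # -> {'width': 100, 'height': 100, 'anchor': 'tr', 'crop': '1', 'upscale': None}
    return field.get_generator_kwargs(
        {'width': '100', 'height': '100', 'anchor': 'tr', 'crop': '1'})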
class AvatarSerializer(serializers.ModelSerializer):
# Override default field_mapping to map ImageField to HyperlinkedImageField.
# As there is only one field this is the only mapping needed.
field_mapping = {
models.ImageField: ThumbnailField,
}
class Meta:
model = User
fields = ('avatar',)
| incuna/django-user-management | user_management/api/avatar/serializers.py | Python | bsd-2-clause | 2,513 |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD License.
"""This script is some boilerplate needed by Alembic to do its fancy database
migration stuff.
"""
# A hack so that we can get the librarian_server module.
import sys
sys.path.insert(0, '.')
from alembic import context
from logging.config import fileConfig
config = context.config
fileConfig(config.config_file_name)
from librarian_server import app, db
target_metadata = db.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode -- all we need is a URL.
"""
url = app.config['SQLALCHEMY_DATABASE_URI']
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode -- using the actual Librarian database
connection.
"""
with db.engine.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
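# Typical invocation (illustrative): from the directory containing alembic.ini run
#     alembic upgrade head
# Alembic then imports this env.py and takes the offline or online path chosen below.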
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| HERA-Team/librarian | alembic/env.py | Python | bsd-2-clause | 1,321 |
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound, dragonfly.scene.bound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import Spyder
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = Spyder.AxisSystem()
a.rotateZ(360 * random())
a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "AxisSystem")
def id_generator():
n = 0
while 1:
n += 1
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d
from bee.mstr import mstr
class parameters: pass
class myscene(dragonfly.pandahive.spyderframe):
a = Spyder.AxisSystem()
a *= 0.25
a.origin += (-8, 42, 0)
env = Spyder.Model3D("models/environment", "egg", a)
a = Spyder.AxisSystem()
a *= 0.005
pandaclass = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
actorclassname="pandaclass")
box = Spyder.Box2D(50, 470, 96, 96)
icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True)
camcenter = Spyder.Entity3D(
"camcenter",
(
Spyder.NewMaterial("white", color=(255, 255, 255)),
Spyder.Block3D((1, 1, 1), material="white"),
)
)
del a, box
class pandawalkhive(bee.inithive):
animation = dragonfly.scene.bound.animation()
walk = dragonfly.std.variable("str")("walk")
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
setPos = dragonfly.scene.bound.setPos()
setHpr = dragonfly.scene.bound.setHpr()
interval = dragonfly.time.interval_time(18)
connect(key_w, interval.start)
connect(key_s, interval.pause)
sequence = dragonfly.time.sequence(4)(8, 1, 8, 1)
connect(interval.value, sequence.inp)
ip1 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (0, -10, 0))
connect(sequence.outp1, ip1)
connect(ip1, setPos)
connect(key_w, ip1.start)
connect(key_s, ip1.stop)
ip2 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (180, 0, 0))
connect(sequence.outp2, ip2)
connect(ip2, setHpr)
connect(key_w, ip2.start)
connect(key_s, ip2.stop)
ip3 = dragonfly.time.interpolation("Coordinate")((0, -10, 0), (0, 0, 0))
connect(sequence.outp3, ip3)
connect(ip3, setPos)
connect(key_w, ip3.start)
connect(key_s, ip3.stop)
ip4 = dragonfly.time.interpolation("Coordinate")((180, 0, 0), (0, 0, 0))
connect(sequence.outp4, ip4)
connect(ip4, setHpr)
connect(key_w, ip4.start)
connect(key_s, ip4.stop)
connect(ip4.reach_end, interval.start)
from bee.staticbind import staticbind_baseclass
class pandawalkbind(dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = pandawalkhive
bind_entity = "relative"
bind_keyboard = "indirect"
class camerabindhive(bee.inithive):
interval = dragonfly.time.interval_time(30)
sequence = dragonfly.time.sequence(2)(1, 1)
connect(interval.value, sequence.inp)
startsensor = dragonfly.sys.startsensor()
ip1 = dragonfly.time.interpolation("Coordinate")((180, -20, 0), (360, -20, 0))
ip2 = dragonfly.time.interpolation("Coordinate")((0, -20, 0), (180, -20, 0))
connect(sequence.outp1, ip1.inp)
connect(sequence.outp2, ip2.inp)
connect(startsensor, interval.start)
connect(startsensor, ip1.start)
connect(ip1.reach_end, ip1.stop)
connect(ip1.reach_end, ip2.start)
connect(ip2.reach_end, ip2.stop)
connect(ip2.reach_end, ip1.start)
connect(ip2.reach_end, interval.start)
sethpr = dragonfly.scene.bound.setHpr()
connect(ip1, sethpr)
connect(ip2, sethpr)
class camerabind(staticbind_baseclass,
dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = camerabindhive
class myhive(dragonfly.pandahive.pandahive):
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
camerabind = camerabind().worker()
camcenter = dragonfly.std.variable("id")("camcenter")
connect(camcenter, camerabind.bindname)
startsensor = dragonfly.sys.startsensor()
cam = dragonfly.scene.get_camera()
camparent = dragonfly.scene.unbound.parent()
connect(cam, camparent.entityname)
connect(camcenter, camparent.entityparentname)
connect(startsensor, camparent)
cphide = dragonfly.scene.unbound.hide()
connect(camcenter, cphide)
connect(startsensor, cphide)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id_gen = dragonfly.std.generator("id", id_generator)()
panda_id = dragonfly.std.variable("id")("")
t_panda_id_gen = dragonfly.std.transistor("id")()
connect(panda_id_gen, t_panda_id_gen)
connect(t_panda_id_gen, panda_id)
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
z_pandawalk = pandawalkbind().worker()
t_bind = dragonfly.std.transistor("id")()
connect(panda_id, t_bind)
connect(t_bind, z_pandawalk.bind)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
trig_spawn = dragonfly.std.pushconnector("trigger")()
connect(trig_spawn, t_panda_id_gen)
connect(trig_spawn, do_spawn)
connect(trig_spawn, t_bind)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, trig_spawn)
pandaicon_click = dragonfly.io.mouseareasensor("pandaicon")
connect(pandaicon_click, trig_spawn)
myscene = myscene(
scene="scene",
canvas=canvas,
mousearea=mousearea,
)
wininit = bee.init("window")
wininit.camera.setPos(0, 45, 25)
wininit.camera.setHpr(180, -20, 0)
keyboardevents = dragonfly.event.sensor_match_leader("keyboard")
add_head = dragonfly.event.add_head()
head = dragonfly.std.variable("event")("spawnedpanda3")
connect(keyboardevents, add_head)
connect(head, add_head)
connect(add_head, z_pandawalk.event)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
main.run()
| agoose77/hivesystem | manual/movingpanda/panda-12b.py | Python | bsd-2-clause | 7,088 |
import sys, pygame, math, random, time
from Level import *
from Player import *
from Enemy import *
from NPC import *
from Menu import *
from Item import *
pygame.init()
clock = pygame.time.Clock()
width = 1000
height = 700
size = width, height
bgColor = r, g, b = 255, 255, 255
screen = pygame.display.set_mode(size)
mode = "menu"
enemies = pygame.sprite.Group()
boundries = pygame.sprite.Group()
backGrounds = pygame.sprite.Group()
people = pygame.sprite.Group()
items = pygame.sprite.Group()
players = pygame.sprite.Group()
all = pygame.sprite.OrderedUpdates()
Enemy.containers = (enemies, all)
SoftBlock.containers = (backGrounds, all)
HardBlock.containers = (boundries, all)
NPC.containers = (people, all)
Item.containers = (items, all)
Player.containers = (people, players, all)
levLayer =0
levx = 3
levy = 3
start = time.time()
def loadNewLev(direction, levx, levy):
if direction == "up":
if levy >1:
levy-=1
elif direction == "down":
if levy <3:
levy+=1
elif direction == "left":
if levx >1:
levx-=1
elif direction == "right":
if levx <3:
levx+=1
for s in all.sprites():
s.kill()
levFile = "Levels/map" + str(levLayer) + str(levy) + str(levx)
level=Level(levFile)
return levx, levy
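# Main loop: a simple state machine driven by `mode` that switches between the
# menu, the how-to-play screen, the quit prompt, a map-navigation test mode and
# the game itself.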
while True:
while mode == "menu":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_1:
mode = "game"
if event.key == pygame.K_2:
mode = "how to play"
if event.key == pygame.K_q:
mode = "quit"
bg = pygame.image.load("Resources/mainmenu.png")
bgrect = bg.get_rect(center = [width/2,height/2])
screen.fill(bgColor)
screen.blit(bg, bgrect)
pygame.display.flip()
clock.tick(60)
while mode == "how to play":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
mode = "menu"
bg = pygame.image.load("Resources/howtoplay.png")
bgrect = bg.get_rect(center = [width/2,height/1.9])
screen.fill(bgColor)
screen.blit(bg, bgrect)
pygame.display.flip()
clock.tick(60)
while mode == "quit":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
sys.exit()
levFile = "Levels/map" + str(levLayer) + str(levy) + str(levx)
level=Level(levFile)
player = Player([5,5], [900,500])
while mode == "test":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
levx, levy = loadNewLev("up", levx, levy)
elif event.key == pygame.K_s:
levx, levy = loadNewLev("down", levx, levy)
elif event.key == pygame.K_a:
levx, levy = loadNewLev("left", levx, levy)
elif event.key == pygame.K_d:
levx, levy = loadNewLev("right", levx, levy)
#print len(all.sprites())
bgColor = r,g,b
screen.fill(bgColor)
dirty = all.draw(screen)
pygame.display.update(dirty)
pygame.display.flip()
clock.tick(60)
while mode == "game":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w or event.key == pygame.K_UP:
player.go("up")
elif event.key == pygame.K_s or event.key == pygame.K_DOWN:
player.go("down")
elif event.key == pygame.K_a or event.key == pygame.K_LEFT:
player.go("left")
elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:
player.go("right")
elif event.type == pygame.KEYUP:
if event.key == pygame.K_w or event.key == pygame.K_UP:
player.go("stop up")
elif event.key == pygame.K_s or event.key == pygame.K_DOWN:
player.go("stop down")
elif event.key == pygame.K_a or event.key == pygame.K_LEFT:
player.go("stop left")
elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:
player.go("stop right")
all.update(size)
#print len(all.sprites())
#From Manpac V2
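        # Crossing a screen edge loads the neighbouring map and respawns the
        # player on the opposite side.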
if player.rect.center[0] > size[0]:
levx, levy = loadNewLev("right", levx, levy)
player = Player([5,5], [0, player.rect.center[1]])
elif player.rect.center[0] < 0:
levx, levy = loadNewLev("left", levx, levy)
player = Player([5,5], [size[0], player.rect.center[1]])
elif player.rect.center[1] > size[1]:
levx, levy = loadNewLev("down", levx, levy)
player = Player([5,5], [player.rect.center[0], 0])
elif player.rect.center[1] < 0:
levx, levy = loadNewLev("up", levx, levy)
player = Player([5,5], [player.rect.center[0], size[1]])
playersHitsBoundries = pygame.sprite.groupcollide(players, boundries, False, False)
for p in playersHitsBoundries:
for boundry in playersHitsBoundries[p]:
p.collideHardblock(boundry)
#playersHitsItems = pygame.sprite.groupcollide(players, items, False, False)
#for p in playersHitsitems:
#for item in playersHitsitems[p]:
enemiesHitsBoundries = pygame.sprite.groupcollide(enemies, boundries, False, False)
for e in enemiesHitsBoundries:
for boundry in enemiesHitsBoundries[e]:
e.collideHardblock(boundry)
bgColor = r,g,b
screen.fill(bgColor)
dirty = all.draw(screen)
pygame.display.update(dirty)
pygame.display.flip()
clock.tick(60)
| KRHS-GameProgramming-2015/Adlez | Adlez.py | Python | bsd-2-clause | 6,657 |
# coding: utf-8
import flask
from flask import url_for
from .base import BaseTestCase
from . import utils
class TOCTestCase(BaseTestCase):
# TOC
def test_the_title_of_the_article_list_when_language_pt(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma Português.
"""
journal = utils.makeOneJournal()
with self.client as c:
            # Create a collection so that the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='pt_BR')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'pt_BR')
self.assertIn("Artigo Com Título Em Português", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_es(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma Espanhol.
"""
journal = utils.makeOneJournal()
with self.client as c:
            # Create a collection so that the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Título Del Artículo En Portugués",
response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_en(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma Inglês.
"""
journal = utils.makeOneJournal()
with self.client as c:
            # Create a collection so that the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Title In Portuguese", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_without_translated(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma original quando não tem idioma.
"""
journal = utils.makeOneJournal()
with self.client as c:
            # Create a collection so that the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_without_unknow_language_for_article(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma original quando não conhece o idioma.
"""
journal = utils.makeOneJournal()
with self.client as c:
            # Create a collection so that the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_with_and_without_translated(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma original para artigos que não tem tradução e o título traduzido
quando tem tradução do título.
"""
journal = utils.makeOneJournal()
with self.client as c:
            # Create a collection so that the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': []
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Article Y", response.data.decode('utf-8'))
self.assertIn("Título Del Artículo En Portugués", response.data.decode('utf-8'))
def test_ahead_of_print_is_displayed_at_table_of_contents(self):
"""
Teste para verificar se caso o issue for um ahead o valor da legenda bibliográfica é alterada para 'ahead of print'.
"""
journal = utils.makeOneJournal()
with self.client as c:
            # Create a collection so that the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal, 'type': 'ahead'})
response = c.get(url_for('main.aop_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment))
self.assertIn("ahead of print", response.data.decode('utf-8'))
def test_abstract_links_are_displayed(self):
"""
Teste para verificar se caso o issue for um ahead o valor da
legenda bibliográfica é alterada para 'ahead of print'.
"""
journal = utils.makeOneJournal()
with self.client as c:
            # Create a collection so that the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
_article_data = {
'title': 'Article Y',
'original_language': 'en',
'languages': ['es', 'pt', 'en'],
'issue': issue,
'journal': journal,
'abstract_languages': ["en", "es", "pt"],
'url_segment': '10-11',
'translated_titles': [
{'language': 'es', 'name': u'Artículo en español'},
{'language': 'pt', 'name': u'Artigo en Português'},
],
'pid': 'pidv2',
}
article = utils.makeOneArticle(_article_data)
response = c.get(url_for('main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment))
uris = [
url_for(
'main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
part='abstract',
lang=abstract_lang,
)
for abstract_lang in ["en", "es", "pt"]
]
for uri in uris:
with self.subTest(uri):
self.assertIn(uri, response.data.decode('utf-8'))
| scieloorg/opac | opac/tests/test_interface_TOC.py | Python | bsd-2-clause | 11,514 |
__author__ = 'Mark Worden'
from mi.core.log import get_logger
log = get_logger()
from mi.idk.config import Config
import unittest
import os
from mi.dataset.driver.adcps_jln.stc.adcps_jln_stc_recovered_driver import parse
from mi.dataset.dataset_driver import ParticleDataHandler
class SampleTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_one(self):
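        # Parse a recovered ADCPS .DAT sample file and verify that the driver
        # reports no parser failures.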
sourceFilePath = os.path.join('mi', 'dataset', 'driver',
'adcps_jln', 'stc', 'resource',
'adcpt_20130929_091817.DAT')
particle_data_hdlr_obj = ParticleDataHandler()
particle_data_hdlr_obj = parse(Config().base_dir(), sourceFilePath, particle_data_hdlr_obj)
log.debug("SAMPLES: %s", particle_data_hdlr_obj._samples)
log.debug("FAILURE: %s", particle_data_hdlr_obj._failure)
self.assertEquals(particle_data_hdlr_obj._failure, False)
if __name__ == '__main__':
test = SampleTest('test_one')
test.test_one() | JeffRoy/mi-dataset | mi/dataset/driver/adcps_jln/stc/test/test_adcps_jln_stc_recovered_driver.py | Python | bsd-2-clause | 1,063 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_bar10.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar', 'subtype': 'percent_stacked'})
chart.axis_ids = [40274560, 40295040]
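        # Pin the axis ids so the generated chart XML matches the reference
        # file created in Excel.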
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_bar10.py | Python | bsd-2-clause | 1,580 |
from envs.common import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# EMAIL_BACKEND = 'django_ses.SESBackend'
STATIC_URL = 'http://%s.s3.amazonaws.com/' % AWS_STORAGE_BUCKET_NAME
COMPRESS_URL = STATIC_URL
FAVICON_URL = "%sfavicon.ico" % STATIC_URL
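# Static files and compressed assets are served from the S3 bucket; compression
# is generated offline (at deploy time) rather than on first request.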
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = "backends.CachedS3BotoStorage"
COMPRESS_STORAGE = STATICFILES_STORAGE
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True | skoczen/weexist | project/envs/live.py | Python | bsd-2-clause | 451 |
#!/usr/bin/python
#
# Copyright (C) 2009, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the mcpu module"""
import unittest
import itertools
from ganeti import compat
from ganeti import mcpu
from ganeti import opcodes
from ganeti import cmdlib
from ganeti import locking
from ganeti import constants
from ganeti.constants import \
LOCK_ATTEMPTS_TIMEOUT, \
LOCK_ATTEMPTS_MAXWAIT, \
LOCK_ATTEMPTS_MINWAIT
import testutils
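# Opcodes whose LUs are allowed to require the Big Ganeti Lock (BGL); the
# dispatch-table test below enforces that exactly these declare REQ_BGL.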
REQ_BGL_WHITELIST = compat.UniqueFrozenset([
opcodes.OpClusterActivateMasterIp,
opcodes.OpClusterDeactivateMasterIp,
opcodes.OpClusterDestroy,
opcodes.OpClusterPostInit,
opcodes.OpClusterRename,
opcodes.OpInstanceRename,
opcodes.OpNodeAdd,
opcodes.OpNodeRemove,
opcodes.OpTestAllocator,
])
class TestLockAttemptTimeoutStrategy(unittest.TestCase):
def testConstants(self):
tpa = mcpu.LockAttemptTimeoutStrategy._TIMEOUT_PER_ATTEMPT
self.assert_(len(tpa) > LOCK_ATTEMPTS_TIMEOUT / LOCK_ATTEMPTS_MAXWAIT)
self.assert_(sum(tpa) >= LOCK_ATTEMPTS_TIMEOUT)
self.assertTrue(LOCK_ATTEMPTS_TIMEOUT >= 1800,
msg="Waiting less than half an hour per priority")
self.assertTrue(LOCK_ATTEMPTS_TIMEOUT <= 3600,
msg="Waiting more than an hour per priority")
def testSimple(self):
strat = mcpu.LockAttemptTimeoutStrategy(_random_fn=lambda: 0.5,
_time_fn=lambda: 0.0)
prev = None
for i in range(len(strat._TIMEOUT_PER_ATTEMPT)):
timeout = strat.NextAttempt()
self.assert_(timeout is not None)
self.assert_(timeout <= LOCK_ATTEMPTS_MAXWAIT)
self.assert_(timeout >= LOCK_ATTEMPTS_MINWAIT)
self.assert_(prev is None or timeout >= prev)
prev = timeout
for _ in range(10):
self.assert_(strat.NextAttempt() is None)
class TestDispatchTable(unittest.TestCase):
def test(self):
for opcls in opcodes.OP_MAPPING.values():
if not opcls.WITH_LU:
continue
self.assertTrue(opcls in mcpu.Processor.DISPATCH_TABLE,
msg="%s missing handler class" % opcls)
# Check against BGL whitelist
lucls = mcpu.Processor.DISPATCH_TABLE[opcls]
if lucls.REQ_BGL:
self.assertTrue(opcls in REQ_BGL_WHITELIST,
msg=("%s not whitelisted for BGL" % opcls.OP_ID))
else:
self.assertFalse(opcls in REQ_BGL_WHITELIST,
msg=("%s whitelisted for BGL, but doesn't use it" %
opcls.OP_ID))
class TestProcessResult(unittest.TestCase):
def setUp(self):
self._submitted = []
self._count = itertools.count(200)
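  # Fake job submission callback: hands out increasing job ids starting at 200
  # and records what was submitted.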
def _Submit(self, jobs):
job_ids = [self._count.next() for _ in jobs]
self._submitted.extend(zip(job_ids, jobs))
return job_ids
def testNoJobs(self):
for i in [object(), [], False, True, None, 1, 929, {}]:
self.assertEqual(mcpu._ProcessResult(NotImplemented, NotImplemented, i),
i)
def testDefaults(self):
src = opcodes.OpTestDummy()
res = mcpu._ProcessResult(self._Submit, src, cmdlib.ResultWithJobs([[
opcodes.OpTestDelay(),
opcodes.OpTestDelay(),
], [
opcodes.OpTestDelay(),
]]))
self.assertEqual(res, {
constants.JOB_IDS_KEY: [200, 201],
})
(_, (op1, op2)) = self._submitted.pop(0)
(_, (op3, )) = self._submitted.pop(0)
self.assertRaises(IndexError, self._submitted.pop)
for op in [op1, op2, op3]:
self.assertTrue("OP_TEST_DUMMY" in op.comment)
self.assertFalse(hasattr(op, "priority"))
self.assertFalse(hasattr(op, "debug_level"))
def testParams(self):
src = opcodes.OpTestDummy(priority=constants.OP_PRIO_HIGH,
debug_level=3)
res = mcpu._ProcessResult(self._Submit, src, cmdlib.ResultWithJobs([[
opcodes.OpTestDelay(priority=constants.OP_PRIO_LOW),
], [
opcodes.OpTestDelay(comment="foobar", debug_level=10),
]], other=True, value=range(10)))
self.assertEqual(res, {
constants.JOB_IDS_KEY: [200, 201],
"other": True,
"value": range(10),
})
(_, (op1, )) = self._submitted.pop(0)
(_, (op2, )) = self._submitted.pop(0)
self.assertRaises(IndexError, self._submitted.pop)
self.assertEqual(op1.priority, constants.OP_PRIO_LOW)
self.assertTrue("OP_TEST_DUMMY" in op1.comment)
self.assertEqual(op1.debug_level, 3)
self.assertEqual(op2.priority, constants.OP_PRIO_HIGH)
self.assertEqual(op2.comment, "foobar")
self.assertEqual(op2.debug_level, 3)
class _FakeLuWithLocks:
def __init__(self, needed_locks, share_locks):
self.needed_locks = needed_locks
self.share_locks = share_locks
class _FakeGlm:
def __init__(self, owning_nal):
self._owning_nal = owning_nal
def check_owned(self, level, names):
assert level == locking.LEVEL_NODE_ALLOC
assert names == locking.NAL
return self._owning_nal
def owning_all(self, level):
return False
class TestVerifyLocks(unittest.TestCase):
def testNoLocks(self):
lu = _FakeLuWithLocks({}, {})
glm = _FakeGlm(False)
mcpu._VerifyLocks(lu, glm,
_mode_whitelist=NotImplemented,
_nal_whitelist=NotImplemented)
def testNotAllSameMode(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: ["foo"],
}, {
level: 0,
locking.LEVEL_NODE_ALLOC: 0,
})
glm = _FakeGlm(False)
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
def testDifferentMode(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: ["foo"],
}, {
level: 0,
locking.LEVEL_NODE_ALLOC: 1,
})
glm = _FakeGlm(False)
try:
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
except AssertionError, err:
self.assertTrue("using the same mode as nodes" in str(err))
else:
self.fail("Exception not raised")
# Once more with the whitelist
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[_FakeLuWithLocks],
_nal_whitelist=[])
def testSameMode(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: ["foo"],
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}, {
level: 1,
locking.LEVEL_NODE_ALLOC: 1,
})
glm = _FakeGlm(True)
try:
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[_FakeLuWithLocks],
_nal_whitelist=[])
except AssertionError, err:
self.assertTrue("whitelisted to use different modes" in str(err))
else:
self.fail("Exception not raised")
# Once more without the whitelist
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
def testAllWithoutAllocLock(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: locking.ALL_SET,
}, {
level: 0,
locking.LEVEL_NODE_ALLOC: 0,
})
glm = _FakeGlm(False)
try:
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
except AssertionError, err:
self.assertTrue("allocation lock must be used if" in str(err))
else:
self.fail("Exception not raised")
# Once more with the whitelist
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[],
_nal_whitelist=[_FakeLuWithLocks])
def testAllWithAllocLock(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: locking.ALL_SET,
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}, {
level: 0,
locking.LEVEL_NODE_ALLOC: 0,
})
glm = _FakeGlm(True)
try:
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[],
_nal_whitelist=[_FakeLuWithLocks])
except AssertionError, err:
self.assertTrue("whitelisted for not acquiring" in str(err))
else:
self.fail("Exception not raised")
# Once more without the whitelist
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
if __name__ == "__main__":
testutils.GanetiTestProgram()
| apyrgio/snf-ganeti | test/py/ganeti.mcpu_unittest.py | Python | bsd-2-clause | 9,694 |
from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.base import (clone, TransformerMixin, ClusterMixin,
BaseEstimator, is_classifier, is_regressor)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.linear_model.stochastic_gradient import BaseSGD
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter, _num_samples
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_estimators_fit_returns_self
yield check_complex_data
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in ["MultinomialNB", "ComplementNB", "LabelPropagation",
"LabelSpreading"] and
# TODO some complication with -1 label
name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
yield check_supervised_y_no_nan
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in classifier.get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_no_nan(name, estimator_orig):
# Checks that the Estimator targets are not NaN.
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(estimator, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
estimator.fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised error as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, estimator):
for check in _yield_non_meta_checks(name, estimator):
yield check
if is_classifier(estimator):
for check in _yield_classifier_checks(name, estimator):
yield check
if is_regressor(estimator):
for check in _yield_regressor_checks(name, estimator):
yield check
if isinstance(estimator, TransformerMixin):
for check in _yield_transformer_checks(name, estimator):
yield check
if isinstance(estimator, ClusterMixin):
for check in _yield_clustering_checks(name, estimator):
yield check
yield check_fit2d_predict1d
if name != 'GaussianProcess': # FIXME
# XXX GaussianProcess deprecated in 0.20
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d
yield check_get_params_invariance
yield check_dict_unchanged
yield check_dont_overwrite_parameters
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
This test can be applied to classes or instances.
Classes currently have some additional tests that related to construction,
while passing instances allows the testing of multiple options.
Parameters
----------
estimator : estimator object or class
Estimator to check. Estimator is a class object or instance.
"""
if isinstance(Estimator, type):
# got a class
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
check_no_fit_attributes_set_in_init(name, Estimator)
estimator = Estimator()
else:
# got an instance
estimator = Estimator
name = type(estimator).__name__
for check in _yield_all_checks(name, estimator):
try:
check(name, estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_checking_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"
and not isinstance(estimator, BaseSGD)):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR, LinearSVC
if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']:
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=2)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, estimator_orig):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
if name in ['Scaler', 'StandardScaler']:
estimator = clone(estimator).set_params(with_mean=False)
else:
estimator = clone(estimator)
# fit and predict
try:
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_pandas_series(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = clone(estimator_orig)
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_list(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
if has_fit_parameter(estimator_orig, "sample_weight"):
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(estimator, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_complex_data(name, estimator_orig):
# check that estimators raise an exception on providing complex data
X = np.random.sample(10) + 1j * np.random.sample(10)
X = X.reshape(-1, 1)
y = np.random.sample(10) + 1j * np.random.sample(10)
estimator = clone(estimator_orig)
assert_raises_regex(ValueError, "Complex data not supported",
estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
# this estimator raises
# ValueError: Found array with 0 feature(s) (shape=(23, 0))
# while a minimum of 1 is required.
# error
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
return not (attr.startswith('_') or attr.endswith('_'))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_dont_overwrite_parameters(name, estimator_orig):
# check that fit method only changes or sets private attributes
if hasattr(estimator_orig.__init__, "deprecated_original"):
# to not check deprecated classes
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert_true(not attrs_added_by_fit,
('Estimator adds public attribute(s) during'
' the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but %s added' % ', '.join(attrs_added_by_fit)))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert_true(not attrs_changed_by_fit,
('Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes started'
' or ended with _, but'
' %s changed' % ', '.join(attrs_changed_by_fit)))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_fit2d_predict1d(name, estimator_orig):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
# Check that fitting a 2d array with only one sample either works or
# returns an informative message. The error message should either mention
# the number of samples or the number of classes.
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
msgs = ["1 sample", "n_samples = 1", "n_samples=1", "one sample",
"1 class", "one class"]
try:
estimator.fit(X, y)
except ValueError as e:
if all(msg not in repr(e) for msg in msgs):
raise e
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
# check fitting a 2d array with only 1 feature either works or returns
# informative message
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
# ensure two labels in subsample for RandomizedLogisticRegression
if name == 'RandomizedLogisticRegression':
estimator.sample_fraction = 1
# ensure non skipped trials for RANSACRegressor
if name == 'RANSACRegressor':
estimator.residual_threshold = 0.5
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator, 1)
msgs = ["1 feature(s)", "n_features = 1", "n_features=1"]
try:
estimator.fit(X, y)
except ValueError as e:
if all(msg not in repr(e) for msg in msgs):
raise e
@ignore_warnings
def check_fit1d(name, estimator_orig):
# check fitting 1d X array raises a ValueError
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
assert_raises(ValueError, estimator.fit, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_general(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, transformer, X, y)
_check_transformer(name, transformer, X.tolist(), y.tolist())
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_data_not_an_array(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, transformer, this_X, this_y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformers_unfitted(name, transformer):
X, y = _boston_subset()
transformer = clone(transformer)
with assert_raises((AttributeError, ValueError), msg="The unfitted "
"transformer {} does not raise an error when "
"transform is called. Perhaps use "
"check_is_fitted in transform.".format(name)):
transformer.transform(X)
def _check_transformer(name, transformer_orig, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
transformer = clone(transformer_orig)
set_random_state(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(
x_pred, x_pred2, atol=1e-2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer)
assert_allclose_dense_sparse(
x_pred, x_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
else:
assert_allclose_dense_sparse(
X_pred, X_pred2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer, atol=1e-2)
assert_allclose_dense_sparse(
X_pred, X_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
assert_equal(_num_samples(X_pred2), n_samples)
assert_equal(_num_samples(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
with assert_raises(ValueError, msg="The transformer {} does "
"not raise an error when the number of "
"features in transform is different from"
" the number of features in "
"fit.".format(name)):
transformer.transform(X.T)
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
if args[0] == "self":
# if_delegate_has_method makes methods into functions
# with an explicit "self", so need to shift arguments
args = args[1:]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, type(estimator).__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_empty_data_messages(name, estimator_orig):
e = clone(estimator_orig)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
with assert_raises(ValueError, msg="The estimator {} does not"
" raise an error when an empty data is used "
"to train. Perhaps use "
"check_array in train.".format(name)):
e.fit(X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(e, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_nan_inf(name, estimator_orig):
# Checks that Estimator X's do not contain NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, estimator)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
estimator = clone(estimator_orig)
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_partial_fit_n_features(name, estimator_orig):
# check if number of features changes between calls to partial_fit.
if not hasattr(estimator_orig, 'partial_fit'):
return
estimator = clone(estimator_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
try:
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
except NotImplementedError:
return
with assert_raises(ValueError,
msg="The estimator {} does not raise an"
" error when the number of features"
" changes between calls to "
"partial_fit.".format(name)):
estimator.partial_fit(X[:, :-1], y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_clustering(name, clusterer_orig):
clusterer = clone(clusterer_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
if hasattr(clusterer, "n_clusters"):
clusterer.set_params(n_clusters=3)
set_random_state(clusterer)
if name == 'AffinityPropagation':
clusterer.set_params(preference=-100)
clusterer.set_params(max_iter=100)
# fit
clusterer.fit(X)
# with lists
clusterer.fit(X.tolist())
pred = clusterer.labels_
assert_equal(pred.shape, (n_samples,))
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(clusterer)
with warnings.catch_warnings(record=True):
pred2 = clusterer.fit_predict(X)
assert_array_equal(pred, pred2)
# fit_predict(X) and labels_ should be of type int
assert_in(pred.dtype, [np.dtype('int32'), np.dtype('int64')])
assert_in(pred2.dtype, [np.dtype('int32'), np.dtype('int64')])
# There should be at least one sample in every cluster. Equivalently
# labels_ should contain all the consecutive values between its
# min and its max.
pred_sorted = np.unique(pred)
assert_array_equal(pred_sorted, np.arange(pred_sorted[0],
pred_sorted[-1] + 1))
    # labels_ should be at least -1
assert_greater_equal(pred_sorted[0], -1)
    # labels_ should be at most n_clusters - 1
if hasattr(clusterer, 'n_clusters'):
n_clusters = getattr(clusterer, 'n_clusters')
assert_greater_equal(n_clusters - 1, pred_sorted[-1])
# else labels_ should be less than max(labels_) which is necessarily true
@ignore_warnings(category=DeprecationWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
@ignore_warnings(category=DeprecationWarning)
def check_classifiers_one_label(name, classifier_orig):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
classifier = clone(classifier_orig)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, classifier_orig):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = clone(classifier_orig)
if name in ['BernoulliNB', 'MultinomialNB', 'ComplementNB']:
X -= X.min()
set_random_state(classifier)
# raises error on malformed input for fit
with assert_raises(ValueError, msg="The classifer {} does not"
" raise an error when incorrect/malformed input "
"data for fit is passed. The number of training "
"examples is not the same as the number of labels."
" Perhaps use check_X_y in fit.".format(name)):
classifier.fit(X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB', 'ComplementNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
with assert_raises(ValueError, msg="The classifier {} does not"
" raise an error when the number of features "
"in predict is different from the number of"
" features in fit.".format(name)):
classifier.predict(X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3 and
# 1on1 of LibSVM works differently
not isinstance(classifier, BaseLibSVM)):
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input for decision_function
with assert_raises(ValueError, msg="The classifier {} does"
" not raise an error when the number of "
"features in decision_function is "
"different from the number of features"
" in fit.".format(name)):
classifier.decision_function(X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_allclose(np.sum(y_prob, axis=1), np.ones(n_samples))
# raises error on malformed input for predict_proba
with assert_raises(ValueError, msg="The classifier {} does not"
" raise an error when the number of features "
"in predict_proba is different from the number "
"of features in fit.".format(name)):
classifier.predict_proba(X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_fit_returns_self(name, estimator_orig):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
# some want non-negative input
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = clone(estimator_orig)
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_2d(name, estimator_orig):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_classifiers_classes(name, classifier_orig):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
classifier = clone(classifier_orig)
if name == 'BernoulliNB':
X = X > X.mean()
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
if name != "ComplementNB":
# This is a pathological data set for ComplementNB.
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_int(name, regressor_orig):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(regressor_orig, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = clone(regressor_orig)
regressor_2 = clone(regressor_orig)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_train(name, regressor_orig):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
regressor = clone(regressor_orig)
y = multioutput_estimator_convert_y_2d(regressor, y)
rnd = np.random.RandomState(0)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
with assert_raises(ValueError, msg="The classifer {} does not"
" raise an error when incorrect/malformed input "
"data for fit is passed. The number of training "
"examples is not the same as the number of "
"labels. Perhaps use check_X_y in fit.".format(name)):
regressor.fit(X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
regressor = clone(regressor_orig)
y = multioutput_estimator_convert_y_2d(regressor, X[:, 0])
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_classifiers(name, classifier_orig):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest("Not testing NuSVC class weight as it is ignored.")
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
classifier = clone(classifier_orig).set_params(
class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# XXX: Generally can use 0.89 here. On Windows, LinearSVC gets
# 0.88 (Issue #9111)
assert_greater(np.mean(y_pred == 0), 0.87)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_classifiers(name, classifier_orig, X_train,
y_train, X_test, y_test, weights):
classifier = clone(classifier_orig)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
# this is run on classes, not instances, though this should be changed
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
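    # Worked example for the data above: n_samples = 5, n_classes = 2, with
    # three samples of class 1 and two of class -1, so the manual weights are
    # {1: 5 / (3 * 2) ~= 0.833, -1: 5 / (2 * 2) = 1.25}, which is what
    # class_weight='balanced' computes internally.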
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_allclose(coef_balanced, coef_manual)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_overwrite_params(name, estimator_orig):
X, y = make_blobs(random_state=0, n_samples=9)
# some want non-negative input
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
# this check works on classes, not instances
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties, they can be listed in dir
# while at the same time have hasattr return False as long
# as the property getter raises an AttributeError
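            # Illustrative sketch (not part of the original check): a property
            # such as
            #     @property
            #     def coef_(self): raise AttributeError("call fit first")
            # appears as 'coef_' in dir(estimator) even though
            # hasattr(estimator, 'coef_') evaluates to False.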
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sparsify_coefficients(name, estimator_orig):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
@ignore_warnings(category=DeprecationWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_data_not_an_array(name, estimator_orig, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest("Skipping check_estimators_data_not_an_array "
"for cross decomposition module as estimators "
"are not deterministic.")
# separate estimators to control random seeds
estimator_1 = clone(estimator_orig)
estimator_2 = clone(estimator_orig)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
def check_parameters_default_constructible(name, Estimator):
# this check works on classes, not instances
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
if (issubclass(Estimator, BaseSGD) and
init_param.name in ['tol', 'max_iter']):
# To remove in 0.21, when they get their future default values
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default, init_param.name)
def multioutput_estimator_convert_y_2d(estimator, y):
# Estimators in mono_output_task_error raise ValueError if y is of 1-D
# Convert into a 2-D y for those estimators.
if "MultiTask" in estimator.__class__.__name__:
return np.reshape(y, (-1, 1))
return y
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_non_transformer_estimators_n_iter(name, estimator_orig):
    # Test that estimators that are not transformers but that have a max_iter
    # parameter expose an n_iter_ attribute of at least 1 after fitting.
    # The estimators listed below depend on external solvers (e.g. libsvm)
    # where accessing the iteration count is non-trivial, so they are skipped.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = clone(estimator_orig).set_params(alpha=0.)
else:
estimator = clone(estimator_orig)
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(estimator, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
        # which doesn't return an n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_n_iter(name, estimator_orig):
# Test that transformers with a parameter max_iter, return the
# attribute of n_iter_ at least 1.
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_get_params_invariance(name, estimator_orig):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
e = clone(estimator_orig)
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_classifiers_regression_target(name, estimator_orig):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = clone(estimator_orig)
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_decision_proba_consistency(name, estimator_orig):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
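    # The intuition: predict_proba is typically a monotonically increasing
    # transform of decision_function (e.g. a sigmoid/softmax or Platt scaling),
    # so ranking the test samples by either output should give the same order.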
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_test = np.random.randn(20, 2) + 4
estimator = clone(estimator_orig)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X, y)
a = estimator.predict_proba(X_test)[:, 1]
b = estimator.decision_function(X_test)
assert_array_equal(rankdata(a), rankdata(b))
| zorroblue/scikit-learn | sklearn/utils/estimator_checks.py | Python | bsd-3-clause | 71,139 |
#!/bin/env python
#==========================================================================
# (c) 2004 Total Phase, Inc.
#--------------------------------------------------------------------------
# Project : Aardvark Sample Code
# File    : aaspi_file.py
#--------------------------------------------------------------------------
# Configure the device as an SPI master and send data.
#--------------------------------------------------------------------------
# Redistribution and use of this file in source and binary forms, with
# or without modification, are permitted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==========================================================================
#==========================================================================
# IMPORTS
#==========================================================================
import sys
from aardvark_py import *
#==========================================================================
# CONSTANTS
#==========================================================================
BUFFER_SIZE = 2048
SPI_BITRATE = 1000
#==========================================================================
# FUNCTIONS
#==========================================================================
def blast_bytes (handle, filename):
# Open the file
try:
f=open(filename, 'rb')
except:
print "Unable to open file '" + filename + "'"
return
trans_num = 0
while 1:
# Read from the file
filedata = f.read(BUFFER_SIZE)
if (len(filedata) == 0):
break
# Write the data to the bus
data_out = array('B', filedata)
data_in = array_u08(len(data_out))
(count, data_in) = aa_spi_write(handle, data_out, data_in)
if (count < 0):
print "error: %s" % aa_status_string(count)
break
elif (count != len(data_out)):
print "error: only a partial number of bytes written"
print " (%d) instead of full (%d)" % (count, num_write)
sys.stdout.write("*** Transaction #%02d\n" % trans_num)
sys.stdout.write("Data written to device:")
for i in range(count):
if ((i&0x0f) == 0):
sys.stdout.write("\n%04x: " % i)
sys.stdout.write("%02x " % (data_out[i] & 0xff))
if (((i+1)&0x07) == 0):
sys.stdout.write(" ")
sys.stdout.write("\n\n")
sys.stdout.write("Data read from device:")
for i in range(count):
if ((i&0x0f) == 0):
sys.stdout.write("\n%04x: " % i)
sys.stdout.write("%02x " % (data_in[i] & 0xff))
if (((i+1)&0x07) == 0):
sys.stdout.write(" ")
sys.stdout.write("\n\n")
trans_num = trans_num + 1
# Sleep a tad to make sure slave has time to process this request
aa_sleep_ms(10)
f.close()
#==========================================================================
# MAIN PROGRAM
#==========================================================================
if (len(sys.argv) < 4):
print "usage: aaspi_file PORT MODE filename"
print " mode 0 : pol = 0, phase = 0"
print " mode 1 : pol = 0, phase = 1"
print " mode 2 : pol = 1, phase = 0"
print " mode 3 : pol = 1, phase = 1"
print ""
print " 'filename' should contain data to be sent"
print " to the downstream spi device"
sys.exit()
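# Example invocation (hypothetical port number and file name):
#   python aaspi_file.py 0 3 data_to_send.bin
# opens Aardvark port 0 in SPI mode 3 (pol = 1, phase = 1) and streams the
# contents of data_to_send.bin to the downstream device.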
port = int(sys.argv[1])
mode = int(sys.argv[2])
filename = sys.argv[3]
handle = aa_open(port)
if (handle <= 0):
print "Unable to open Aardvark device on port %d" % port
print "Error code = %d" % handle
sys.exit()
# Ensure that the SPI subsystem is enabled
aa_configure(handle, AA_CONFIG_SPI_I2C)
# Enable the Aardvark adapter's power supply.
# This command is only effective on v2.0 hardware or greater.
# The power pins on the v1.02 hardware are not enabled by default.
aa_target_power(handle, AA_TARGET_POWER_BOTH)
# Setup the clock phase
aa_spi_configure(handle, mode >> 1, mode & 1, AA_SPI_BITORDER_MSB)
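# mode >> 1 selects the clock polarity bit and mode & 1 the clock phase bit,
# matching the mode table shown in the usage message above.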
# Set the bitrate
bitrate = aa_spi_bitrate(handle, SPI_BITRATE)
print "Bitrate set to %d kHz" % bitrate
blast_bytes(handle, filename)
# Close the device
aa_close(handle)
| oceansystemslab/thrusters_controller | docs/examples/aaspi_file.py | Python | bsd-3-clause | 5,024 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-13 18:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myblog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message_from_me', models.TextField()),
('subject', models.CharField(max_length=33)),
('message_from_user', models.TextField()),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('link', models.URLField()),
('image', models.ImageField(default=None, upload_to='myblog/image/project')),
('detail', models.TextField()),
('created_on', models.DateTimeField()),
],
),
migrations.CreateModel(
name='SocialSite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site_name', models.CharField(max_length=10)),
('link', models.URLField()),
],
options={
'verbose_name_plural': 'Social Sites',
},
),
]
| NischalLal/Humpty-Dumpty-SriGanesh | myblog/migrations/0002_contact_project_socialsite.py | Python | bsd-3-clause | 1,637 |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from ...territori.models import Territorio
class Command(BaseCommand):
help = 'Fix for provincie autonome'
def handle(self, *args, **options):
Territorio.objects.regioni().get(denominazione='TRENTINO-ALTO ADIGE/SUDTIROL').delete()
for name in ['BOLZANO', 'TRENTO']:
territorio = Territorio.objects.provincie().get(denominazione__istartswith=name)
territorio.pk = None
territorio.tipo = Territorio.TIPO.R
territorio.cod_reg = territorio.cod_prov
territorio.cod_prov = None
territorio.denominazione = 'P.A. DI {}'.format(name)
territorio.slug = None
territorio.save()
Territorio.objects.provincie().filter(cod_prov=territorio.cod_reg).update(cod_reg=territorio.cod_reg)
Territorio.objects.comuni().filter(cod_prov=territorio.cod_reg).update(cod_reg=territorio.cod_reg)
| DeppSRL/open-partecipate | project/open_partecipate/management/commands/fix_provincie_autonome.py | Python | bsd-3-clause | 995 |
from django.contrib import admin
from .models import dynamic_models
# Register your models here.
admin.site.register(dynamic_models.values())
| potar/django_test | django_test/dynamic_models/admin.py | Python | bsd-3-clause | 144 |
from __future__ import print_function
import numpy as np
import sys
import mesh.patch as patch
import compressible_sr.eos as eos
from util import msg
def init_data(my_data, rp):
""" initialize the bubble problem """
msg.bold("initializing the bubble problem...")
# make sure that we are passed a valid patch object
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in bubble.py")
print(my_data.__class__)
sys.exit()
# get the density, momenta, and energy as separate variables
dens = my_data.get_var("density")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
ener = my_data.get_var("energy")
gamma = rp.get_param("eos.gamma")
grav = rp.get_param("compressible.grav")
scale_height = rp.get_param("bubble.scale_height")
dens_base = rp.get_param("bubble.dens_base")
dens_cutoff = rp.get_param("bubble.dens_cutoff")
x_pert = rp.get_param("bubble.x_pert")
y_pert = rp.get_param("bubble.y_pert")
r_pert = rp.get_param("bubble.r_pert")
pert_amplitude_factor = rp.get_param("bubble.pert_amplitude_factor")
# initialize the components, remember, that ener here is
# rho*eint + 0.5*rho*v**2, where eint is the specific
# internal energy (erg/g)
xmom[:, :] = 0.0
ymom[:, :] = 0.0
dens[:, :] = dens_cutoff
# set the density to be stratified in the y-direction
myg = my_data.grid
p = myg.scratch_array()
cs2 = scale_height*abs(grav)
for j in range(myg.jlo, myg.jhi+1):
dens[:, j] = max(dens_base*np.exp(-myg.y[j]/scale_height),
dens_cutoff)
if j == myg.jlo:
p[:, j] = dens[:, j]*cs2
else:
p[:, j] = p[:, j-1] + 0.5*myg.dy*(dens[:, j] + dens[:, j-1])*grav
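    # The loop above integrates hydrostatic equilibrium, dp/dy = rho*grav,
    # with a trapezoid rule in y, starting from p = rho*cs2 at the base; for an
    # isothermal atmosphere this corresponds to a pressure scale height of
    # cs2/abs(grav).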
# set the energy (P = cs2*dens)
ener[:, :] = p[:, :]/(gamma - 1.0) + \
0.5*(xmom[:, :]**2 + ymom[:, :]**2)/dens[:, :]
r = np.sqrt((myg.x2d - x_pert)**2 + (myg.y2d - y_pert)**2)
idx = r <= r_pert
# boost the specific internal energy, keeping the pressure
# constant, by dropping the density
eint = (ener[idx] - 0.5*(xmom[idx]**2 - ymom[idx]**2)/dens[idx])/dens[idx]
pres = dens[idx]*eint*(gamma - 1.0)
eint = eint*pert_amplitude_factor
dens[idx] = pres/(eint*(gamma - 1.0))
ener[idx] = dens[idx]*eint + 0.5*(xmom[idx]**2 + ymom[idx]**2)/dens[idx]
# p[idx] = pres
rhoh = eos.rhoh_from_rho_p(gamma, dens, p)
W = 1 / (np.sqrt(1-(xmom**2-ymom**2)/dens))
dens[:, :] *= W
xmom[:, :] *= rhoh[:, :]/dens*W**2
ymom[:, :] *= rhoh[:, :]/dens*W**2
# HACK: didn't work but W = 1 so shall cheat
ener[:, :] = rhoh[:, :]*W**2 - p - dens[:, :]
# ener[:, :] = p / (gamma-1)
# print(ener[:,myg.jlo:myg.jhi])#*W[:,myg.jlo:myg.jhi]**2)
# exit()
def finalize():
""" print out any information to the user at the end of the run """
pass
| zingale/pyro2 | compressible_sr/problems/bubble.py | Python | bsd-3-clause | 2,964 |
import os
import pygame
import sys
import threading, time
from pygame.locals import *
import logging
log = logging.getLogger('pytality.term.pygame')
log.debug("pygame version: %r", pygame.version.ver)
"""
A mapping of special keycodes into representative strings.
Based off the keymap in WConio, but with 'alt', 'ctrl', and 'shift'
stripped in order to be portable with the other pytality backends.
"""
key_map = {
K_RETURN: 'enter',
K_F1 : 'f1',
K_F2 : 'f2',
K_F3 : 'f3',
K_F4 : 'f4',
K_F5 : 'f5',
K_F6 : 'f6',
K_F7 : 'f7',
K_F8 : 'f8',
K_F9 : 'f9',
K_F10 : 'f10',
K_INSERT : 'ins',
K_DELETE : 'del',
K_HOME : 'home',
K_END : 'end',
K_PAGEDOWN : 'pgdn',
K_PAGEUP : 'pgup',
K_DOWN : 'down',
K_LEFT : 'left',
K_RIGHT : 'right',
K_UP : 'up',
}
#image path
#todo: figure out how I want to make this configurable
if hasattr(sys, 'frozen'):
base_path = os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), 'data')
else:
base_path = os.path.join(os.path.dirname(__file__), 'silverlight_html', 'images')
#pixel dimensions of each cell
W = 8
H = 12
#loaded sprite data
sprites = {}
#have we quit?
quit = False
#blinky cursor stuff
cursor_thread = None
replaced_character = None
cursor_x = 0
cursor_y = 0
cursor_type = None
class CursorThread(threading.Thread):
def __init__(self, *args, **kwargs):
super(CursorThread, self).__init__(*args, **kwargs)
self.quitEvent = threading.Event()
def run(self):
blink = True
while True:
blink = not blink
try:
pygame.event.post(pygame.event.Event(USEREVENT, blink=blink))
except pygame.error:
return
if self.quitEvent.wait(timeout=0.5):
break
def init(use_cp437=True, blink=False):
pygame.init()
#There are several kinds of event we are patently not interested in
pygame.event.set_blocked([
MOUSEBUTTONUP,
JOYAXISMOTION, JOYBALLMOTION, JOYHATMOTION, JOYBUTTONDOWN, JOYBUTTONUP,
#we only need KEYDOWN
KEYUP
])
pygame.mouse.set_visible(False)
#prepare the raw_getkey generator
prepare_raw_getkey()
global quit
quit = False
#spawn a blinky-cursor manager
global cursor_thread, replaced_character, cursor_x, cursor_y, cursor_type
cursor_x = 0
cursor_y = 0
replaced_character = None
cursor_type = None
if blink:
cursor_thread = CursorThread()
cursor_thread.daemon = True
cursor_thread.start()
def load_sprites():
if 'bg' in sprites:
#we only need to load once
return
def load_image(key_name, *filepath):
full_path = os.path.join(base_path, *filepath)
surface = pygame.image.load(full_path).convert_alpha()
sprites[key_name] = surface
load_image('bg', 'colors.png')
for color_id in range(16):
load_image(color_id, 'char', '%s.png' % color_id)
def blink_cursor(event):
global replaced_character
if event.blink:
replace_character()
else:
restore_character()
def replace_character():
global replaced_character
if not cursor_type:
return
fg, bg, ch = get_at(cursor_x, cursor_y)
replaced_character = (cursor_x, cursor_y, fg, bg, ch)
new_fg = 15
if bg == 15:
new_fg = 7
blit_at(cursor_x, cursor_y, new_fg, bg, cursor_type)
pygame.display.flip()
def restore_character():
global replaced_character
if not replaced_character:
return
x, y, fg, bg, ch = replaced_character
blit_at(x, y, fg, bg, ch)
pygame.display.flip()
replaced_character = None
#----------------------------------------------------------------------------
#Actual functions
def flip():
#keep the event queue happy
for event in pygame.event.get([
#this should be all the event types we aren't blocking
#and aren't about keyboard input
QUIT,
ACTIVEEVENT,
VIDEORESIZE,
VIDEOEXPOSE,
USEREVENT
]):
if event.type == QUIT:
raise KeyboardInterrupt()
elif event.type == USEREVENT:
blink_cursor(event)
else:
#we don't actually care
pass
#flip the screen
pygame.display.flip()
def clear():
if quit:
return
screen.fill((0, 0, 0))
global cell_data
cell_data = [
[
[0, 0, ' ']
for cell in range(max_x)
]
for row in range(max_y)
]
def resize(width, height):
global screen
screen = pygame.display.set_mode((width*W, height*H))
#we don't use alpha, and turning it off makes it a tad faster
screen.set_alpha(None)
#load the console images to blit later
load_sprites()
#set our max dimensions
global max_x, max_y
max_x, max_y = width, height
clear()
flip()
def reset():
pygame.display.quit()
global quit
quit = True
if cursor_thread:
cursor_thread.quitEvent.set()
cursor_thread.join()
def move_cursor(x, y):
global cursor_x, cursor_y
restore_character()
cursor_x = x
cursor_y = y
replace_character()
def set_title(title):
pygame.display.set_caption(title)
def set_cursor_type(i):
global cursor_type
cursor_map = {
0: None,
1: '_',
2: chr(0xDB)
}
restore_character()
cursor_type = cursor_map[i]
def cache_sprite(fg, bg, ch):
bg_sprite = sprites['bg']
fg_sprite = sprites[fg]
index = ord(ch)
#coordinates on the bg sprite map
bg_x = bg * W
#coordinates on the fg sprite map
fg_x = (index % 16) * W
fg_y = int(index / 16) * H
cell_sprite = pygame.Surface((W, H))
#voodoo: this helps a little bit.
cell_sprite.set_alpha(None)
#blit the background and foreground to the cell
cell_sprite.blit(bg_sprite, dest=(0, 0), area=pygame.Rect(bg_x, 0, W, H))
cell_sprite.blit(fg_sprite, dest=(0, 0), area=pygame.Rect(fg_x, fg_y, W, H))
sprites[(fg, bg, ch)] = cell_sprite
return cell_sprite
def blit_at(x, y, fg, bg, ch):
#blit one character to the screen.
#because function calls are pricey, this is also inlined (ew) in draw_buffer, so the contents are kept short.
#coordinates on the screen
screen_x = x * W
screen_y = y * H
#cache each (bg, fg, index) cell we draw into a surface so it's easier to redraw.
#it's a little bit of a memory waste, and takes longer on the first draw, but we're dealing with ascii here
#so there's probably a lot of reuse.
try:
cell_sprite = sprites[(fg, bg, ch)]
except KeyError:
#make a new one
cell_sprite = cache_sprite(fg, bg, ch)
#blit the cell to the screen
screen.blit(cell_sprite, dest=(screen_x, screen_y))
def draw_buffer(source, start_x, start_y):
"""
render the buffer to our backing.
This is a hotpath, and there's more microoptimization here than i'd like, but FPS is kindof important.
"""
y = start_y
#lookups we can cache into locals
#i know, it's such a microoptimization, but this path qualifies as hot
local_cell_data, local_sprites, local_screen_blit = cell_data, sprites, screen.blit
local_W, local_H = W, H
screen_width, screen_height = max_x, max_y
source_width = source.width
is_overlay = source.is_overlay
for row in source._data:
if y < 0:
y += 1
continue
if y >= screen_height:
break
x = start_x
#do something analogous to row[:source.width]
#but without the pointless copy that requires
w = 0
for fg, bg, ch in row:
if x >= screen_width or w >= source_width:
break
if x >= 0:
#no need to blit if it's already identical
old_data = local_cell_data[y][x]
new_data = [fg, bg, ch]
if new_data != old_data and not (is_overlay and ch == ' '):
#draw it and remember the info for our cache
#this used to call blit_at but now it's inline.
try:
cell_sprite = sprites[(fg, bg, ch)]
except KeyError:
#make a new one
cell_sprite = cache_sprite(fg, bg, ch)
#blit the cell to the screen
local_screen_blit(cell_sprite, dest=(x*local_W, y*local_H))
#remember the info for the cache
local_cell_data[y][x] = new_data
x += 1
w += 1
y += 1
source.dirty = False
return
def get_at(x, y):
if x < 0 or x >= max_x or y < 0 or y >= max_y:
raise ValueError("get_at: Invalid coordinate (%r, %r)" % (x,y))
global cell_data
return cell_data[y][x]
def prepare_raw_getkey():
"""
It looks like pygame fully intends for you to process _all_ keyboard input at the moment you
look at the event queue.
That won't do here. so we turn raw_getkey into a generator.
Worse, pygame.event.wait() can't filter by type and removes the event from the queue,
so we have to keep re-adding events we didn't want in the first place. Ugh.
"""
#this is weird - pygame turns off keyboard repeat by default, which you can re-enable
#by setting a delay in ms, but "what the system normally does" is not an option.
#it seems like 150ms delay and 15 keys-per-second is normalish.
pygame.key.set_repeat(150, 1000 / 15)
global raw_getkey
def translate(event):
if event.type == MOUSEMOTION:
x, y = event.pos
return ("mouse_motion", x / W, y / H)
if event.type == KEYDOWN:
log.debug("key event: %r", event.dict)
if event.key in key_map:
return key_map[event.key]
return event.unicode
if event.type == MOUSEBUTTONDOWN:
x, y = event.pos
return ("mouse_down", x / W, y / H)
def keypump():
items = []
event_types = [MOUSEMOTION, KEYDOWN, MOUSEBUTTONDOWN]
while True:
if not items:
if pygame.event.peek(event_types):
#there's keyboard input pending! great!
items.extend(pygame.event.get(event_types))
else:
#there's no keyboard input pending, so we need to take a nap until there is.
#if we get an event we dont care about, we have to put it back
#but if we put it back, .wait() will give it right back to us
#so we have to keep it around until we find what we want, then re-add it.
#ugh.
ignored_items = []
while True:
item = pygame.event.wait()
if item.type == USEREVENT:
blink_cursor(item)
elif item.type not in event_types:
ignored_items.append(item)
else:
items.append(item)
break
for ignored_item in ignored_items:
pygame.event.post(ignored_item)
yield translate(items.pop(0))
#assign the generator's next() method as raw_getkey
raw_getkey = keypump().next
| jtruscott/ld27 | pytality/term_pygame.py | Python | bsd-3-clause | 11,584 |
"""Benchmark Walk algorithm"""
import numpy as np
import bench
import obsoper.walk
class BenchmarkWalk(bench.Suite):
def setUp(self):
longitudes, latitudes = np.meshgrid([1, 2, 3],
[1, 2, 3],
indexing="ij")
self.fixture = obsoper.walk.Walk.from_lonlats(longitudes,
latitudes)
def bench_detect(self):
for _ in range(10):
self.fixture.detect((2.9, 2.9), i=0, j=0)
| met-office-ocean/obsoper | benchmarks/bench_walk.py | Python | bsd-3-clause | 551 |
"""flatty - marshaller/unmarshaller for light-schema python objects"""
VERSION = (0, 1, 2)
__version__ = ".".join(map(str, VERSION))
__author__ = "Christian Haintz"
__contact__ = "[email protected]"
__homepage__ = "http://packages.python.org/flatty"
__docformat__ = "restructuredtext"
from flatty import *
try:
import mongo
except ImportError:
pass
try:
import couch
except ImportError:
pass
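# The mongo and couch submodules appear to be optional backends; the
# ImportError guards above simply skip them when their third-party
# dependencies are not installed.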
| ceelian/Flatty | src/flatty/__init__.py | Python | bsd-3-clause | 423 |
from django.db import models
SETTING_NAME = (
('conf_space', 'Confluence Space Key'),
('conf_page', 'Confluence Page'),
('jira_project', 'JIRA Project Code Name'),
('github_project', 'GitHub Project'),
)
class AppSettings(models.Model):
name = models.CharField(max_length=50,
primary_key=True,
choices=SETTING_NAME)
content = models.CharField(max_length=255)
class Meta:
verbose_name_plural = "settings"
| pbs/powwow | powwow/apps/models.py | Python | bsd-3-clause | 467 |
#!/usr/bin/env python
import sys
from . import main
if __name__ == '__main__':
sys.exit(main.main())
| jfining/mincss | mincss/__main__.py | Python | bsd-3-clause | 109 |
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Principal search specific constants.
"""
__version__ = "$Revision-Id:$"
# principal search constants
SEARCH_MODE_USER_ONLY = 0
SEARCH_MODE_GROUP_ONLY = 1
SEARCH_MODE_USER_AND_GROUP = 2
# special principals
ALL_PRINCIPAL = "____allprincipal____"
AUTHENTICATED_PRINCIPAL = "____authenticatedprincipal____"
UNAUTHENTICATED_PRINCIPAL = "____unauthenticatedprincipal____"
OWNER_PRINCIPAL = "____ownerprincipal____"
# principal types
USER_PRINCIPAL_TYPE = "____user____"
GROUP_PRINCIPAL_TYPE = "____group____"
| DLR-SC/DataFinder | src/datafinder/persistence/principal_search/constants.py | Python | bsd-3-clause | 2,236 |
<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
<class>Dialog</class>
<widget class="QDialog" name="Dialog">
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>400</width>
<height>300</height>
</rect>
</property>
<property name="windowTitle">
<string>Dialog</string>
</property>
<widget class="QDialogButtonBox" name="buttonBox">
<property name="geometry">
<rect>
<x>30</x>
<y>240</y>
<width>341</width>
<height>32</height>
</rect>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="standardButtons">
<set>QDialogButtonBox::Cancel|QDialogButtonBox::Ok</set>
</property>
</widget>
<widget class="QListWidget" name="lMetaData">
<property name="geometry">
<rect>
<x>10</x>
<y>10</y>
<width>256</width>
<height>192</height>
</rect>
</property>
</widget>
<widget class="QPushButton" name="bAdd">
<property name="geometry">
<rect>
<x>280</x>
<y>10</y>
<width>115</width>
<height>32</height>
</rect>
</property>
<property name="text">
<string>Add</string>
</property>
</widget>
</widget>
<resources/>
<connections>
<connection>
<sender>buttonBox</sender>
<signal>accepted()</signal>
<receiver>Dialog</receiver>
<slot>accept()</slot>
<hints>
<hint type="sourcelabel">
<x>248</x>
<y>254</y>
</hint>
<hint type="destinationlabel">
<x>157</x>
<y>274</y>
</hint>
</hints>
</connection>
<connection>
<sender>buttonBox</sender>
<signal>rejected()</signal>
<receiver>Dialog</receiver>
<slot>reject()</slot>
<hints>
<hint type="sourcelabel">
<x>316</x>
<y>260</y>
</hint>
<hint type="destinationlabel">
<x>286</x>
<y>274</y>
</hint>
</hints>
</connection>
</connections>
</ui>
| amnona/heatsequer | ui/plotmetadata.py | Python | bsd-3-clause | 1,925 |
import os
import shutil
class BasicOperations_TestClass:
    TEST_ROOT = '__test_root__'
def setUp(self):
        self.regenerate_root()
print(self.TEST_ROOT)
assert os.path.isdir(self.TEST_ROOT)
def tearDown(self):
return True
def test_test(self):
assert self.bar == 1
def regenerate_root(self):
if os.path.isdir(self.TEST_ROOT):
            shutil.rmtree(self.TEST_ROOT)
os.makedirs(self.TEST_ROOT)
| aleksclark/replfs | nosetests/basic_operations_tests.py | Python | bsd-3-clause | 469 |
import Adafruit_BBIO.GPIO as GPIO
import time
a=0
b=0
def derecha(channel):
global a
a+=1
    print 'right count is {0}'.format(a)
def izquierda(channel):
global b
b+=1
    print 'left count is {0}'.format(b)
GPIO.setup("P9_11", GPIO.IN)
GPIO.setup("P9_13", GPIO.IN)
GPIO.add_event_detect("P9_11", GPIO.BOTH)
GPIO.add_event_detect("P9_13", GPIO.BOTH)
GPIO.add_event_callback("P9_11",derecha)
GPIO.add_event_callback("P9_13",izquierda)
#if GPIO.event_detected("GPIO_31"):
# print "event detected"
while True:
print "cosas pasan"
time.sleep(1)
| edwarod/quickbot_bbb | test.py | Python | bsd-3-clause | 571 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from django.core.cache import cache
from django.test import TestCase
from django.contrib import admin
from physical.tests.factory import DiskOfferingFactory, EnvironmentFactory
from physical.errors import NoDiskOfferingGreaterError, NoDiskOfferingLesserError
from system.models import Configuration
from ..admin.disk_offering import DiskOfferingAdmin
from ..forms.disk_offerring import DiskOfferingForm
from ..models import DiskOffering
LOG = logging.getLogger(__name__)
SEARCH_FIELDS = ('name', )
LIST_FIELDS = ('name', 'size_gb', 'selected_environments')
SAVE_ON_TOP = True
UNICODE_FORMAT = '{}'
class DiskOfferingTestCase(TestCase):
def create_basic_disks(self):
for disk_offering in DiskOffering.objects.all():
for plan in disk_offering.plans.all():
plan.databaseinfras.all().delete()
disk_offering.plans.all().delete()
disk_offering.delete()
cache.clear()
self.bigger = DiskOfferingFactory()
self.bigger.size_kb *= 30
self.bigger.environments.add(self.environment)
self.bigger.save()
self.medium = DiskOfferingFactory()
self.medium.size_kb *= 20
self.medium.environments.add(self.environment)
self.medium.save()
self.smaller = DiskOfferingFactory()
self.smaller.size_kb *= 10
self.smaller.environments.add(self.environment)
self.smaller.save()
def setUp(self):
self.admin = DiskOfferingAdmin(DiskOffering, admin.sites.AdminSite())
self.auto_resize_max_size_in_gb = Configuration(
name='auto_resize_max_size_in_gb', value=100
)
self.auto_resize_max_size_in_gb.save()
self.environment = EnvironmentFactory()
def tearDown(self):
if self.auto_resize_max_size_in_gb.id:
self.auto_resize_max_size_in_gb.delete()
def test_search_fields(self):
self.assertEqual(SEARCH_FIELDS, self.admin.search_fields)
def test_list_fields(self):
self.assertEqual(LIST_FIELDS, self.admin.list_display)
def test_save_position(self):
self.assertEqual(SAVE_ON_TOP, self.admin.save_on_top)
def test_adding_gb_to_kb(self):
disk_offering_form = DiskOfferingForm(
data={
'name': 'disk_offering_small',
'size_gb': 0.5,
'environments': [self.environment.id]
}
)
self.assertTrue(disk_offering_form.is_valid())
self.admin.save_model(
request=None, obj=disk_offering_form.instance,
form=disk_offering_form, change=None
)
disk_offering = DiskOffering.objects.get(name='disk_offering_small')
self.assertEqual(disk_offering.size_gb(), 0.5)
self.assertEqual(disk_offering.size_kb, 524288)
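        # Expected figure: sizes are stored in KB, so 0.5 GB = 0.5 * 1024 * 1024
        # = 524288 KB (the editing test below similarly uses 1.5 GB = 1572864 KB).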
def test_editing_gb_to_kb(self):
disk_factory = DiskOfferingFactory()
disk_offering = DiskOffering.objects.get(pk=disk_factory.pk)
self.assertEqual(disk_offering.size_gb(), 1)
self.assertEqual(disk_offering.size_kb, 1048576)
disk_offering_form = DiskOfferingForm(
data={
'name': disk_offering.name,
'size_gb': 1.5,
'environments': [self.environment.id]
},
instance=disk_offering
)
self.assertTrue(disk_offering_form.is_valid())
self.admin.save_model(
request=None, obj=disk_offering,
form=disk_offering_form, change=None
)
self.assertEqual(disk_offering.size_gb(), 1.5)
self.assertEqual(disk_offering.size_kb, 1572864)
def test_edit_initial_values(self):
disk_offering_form = DiskOfferingForm()
self.assertNotIn('name', disk_offering_form.initial)
self.assertIn('size_gb', disk_offering_form.initial)
self.assertIsNone(disk_offering_form.initial['size_gb'])
disk_factory = DiskOfferingFactory()
disk_offering = DiskOffering.objects.get(pk=disk_factory.pk)
disk_offering_form = DiskOfferingForm(instance=disk_offering)
self.assertEqual(
disk_offering_form.initial['name'], disk_offering.name
)
self.assertEqual(
disk_offering_form.initial['size_gb'], disk_offering.size_gb()
)
def test_model_sizes(self):
disk_factory = DiskOfferingFactory()
self.assertEqual(disk_factory.size_kb, 1048576)
self.assertEqual(disk_factory.size_gb(), 1.0)
self.assertEqual(disk_factory.size_bytes(), 1073741824)
disk_offering = DiskOffering()
self.assertIsNone(disk_offering.size_kb)
self.assertIsNone(disk_offering.size_gb())
self.assertIsNone(disk_offering.size_bytes())
def test_model_converter(self):
disk_factory = DiskOfferingFactory()
self.assertEqual(disk_factory.converter_kb_to_gb(1572864), 1.5)
self.assertEqual(disk_factory.converter_kb_to_bytes(524288), 536870912)
self.assertEqual(disk_factory.converter_gb_to_kb(0.75), 786432)
self.assertIsNone(disk_factory.converter_kb_to_gb(0))
self.assertIsNone(disk_factory.converter_kb_to_bytes(0))
self.assertIsNone(disk_factory.converter_gb_to_kb(0))
def test_unicode(self):
disk_offering = DiskOffering()
expected_unicode = UNICODE_FORMAT.format(disk_offering.name)
self.assertEqual(expected_unicode, str(disk_offering))
def test_disk_offering_is_in_admin(self):
self.assertIn(DiskOffering, admin.site._registry)
admin_class = admin.site._registry[DiskOffering]
self.assertIsInstance(admin_class, DiskOfferingAdmin)
def test_can_found_greater_disk(self):
self.create_basic_disks()
found = DiskOffering.first_greater_than(
self.smaller.size_kb, self.environment)
self.assertEqual(self.medium, found)
found = DiskOffering.first_greater_than(
self.medium.size_kb, self.environment)
self.assertEqual(self.bigger, found)
def test_cannot_found_greater_disk(self):
self.create_basic_disks()
self.assertRaises(
NoDiskOfferingGreaterError,
DiskOffering.first_greater_than, self.bigger.size_kb, self.environment
)
def test_can_found_greater_disk_with_exclude(self):
self.create_basic_disks()
found = DiskOffering.first_greater_than(
self.smaller.size_kb, self.environment, exclude_id=self.medium.id
)
self.assertEqual(self.bigger, found)
def test_can_found_disk_for_auto_resize(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.bigger.size_gb())
self.auto_resize_max_size_in_gb.save()
found = DiskOffering.last_offering_available_for_auto_resize(
self.environment)
self.assertEqual(self.bigger, found)
self.auto_resize_max_size_in_gb.value = int(self.bigger.size_gb()) - 1
self.auto_resize_max_size_in_gb.save()
found = DiskOffering.last_offering_available_for_auto_resize(
self.environment)
self.assertEqual(self.medium, found)
def test_cannot_found_disk_for_auto_resize(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.smaller.size_gb()) - 1
self.auto_resize_max_size_in_gb.save()
self.assertRaises(
NoDiskOfferingLesserError,
DiskOffering.last_offering_available_for_auto_resize, self.environment
)
def test_compare_disks(self):
self.create_basic_disks()
self.assertGreater(self.bigger, self.smaller)
self.assertLess(self.smaller, self.bigger)
self.medium_twice = DiskOfferingFactory()
self.medium_twice.size_kb *= 20
self.medium_twice.save()
self.assertEqual(self.medium, self.medium)
self.assertNotEqual(self.medium, self.medium_twice)
self.medium_twice.delete()
def test_disk_is_last_offering(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.medium.size_gb()) + 1
self.auto_resize_max_size_in_gb.save()
self.assertFalse(
self.smaller.is_last_auto_resize_offering(self.environment)
)
self.assertTrue(
self.medium.is_last_auto_resize_offering(self.environment)
)
self.assertFalse(
self.bigger.is_last_auto_resize_offering(self.environment)
)
def test_disk_is_last_offering_without_param(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.delete()
self.assertFalse(
self.smaller.is_last_auto_resize_offering(self.environment)
)
self.assertFalse(
self.medium.is_last_auto_resize_offering(self.environment)
)
self.assertTrue(
self.bigger.is_last_auto_resize_offering(self.environment)
)
| globocom/database-as-a-service | dbaas/physical/tests/test_disk_offering.py | Python | bsd-3-clause | 9,097 |
from __future__ import print_function
from permuta import *
import permstruct
import permstruct.dag
from permstruct import *
from permstruct.dag import taylor_dag
import sys
# -- Example from Kuszmaul paper -- #
# STATUS ================================================ >
task = '1234_1243_2134_2431_4213'
patts = [ Permutation([ int(c) for c in p ]) for p in task.split('_') ]
# patts = [Permutation([5,2,3,4,1]), Permutation([5,3,2,4,1]), Permutation([5,2,4,3,1]), Permutation([3,5,1,4,2]), Permutation([4,2,5,1,3]), Permutation([3,5,1,6,2,4])]
struct(patts, size=6, perm_bound = 8, subpatts_len=4, subpatts_num=3)
# struct(patts, size = 4, verify_bound = 10, ask_verify_higher = True)
| PermutaTriangle/PermStruct | examples/classical_5x4/1234_1243_2134_2431_4213.py | Python | bsd-3-clause | 693 |
import sys
import os
import os.path as op
__version__ = '2.0.5'
from cfchecker.cfchecks import getargs, CFChecker
def cfchecks_main():
"""cfchecks_main is based on the main program block in cfchecks.py
"""
(badc,coards,uploader,useFileName,standardName,areaTypes,udunitsDat,version,files)=getargs(sys.argv)
inst = CFChecker(uploader=uploader, useFileName=useFileName, badc=badc, coards=coards, cfStandardNamesXML=standardName, cfAreaTypesXML=areaTypes, udunitsDat=udunitsDat, version=version)
for file in files:
rc = inst.checker(file)
sys.exit (rc)
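# Hedged usage sketch (added for illustration; not part of the original
# package).  CFChecker is normally constructed with the keyword arguments
# shown in cfchecks_main above and applied to each file in turn.  The file
# name below is hypothetical, and relying on constructor defaults for the
# remaining keyword arguments is an assumption.
#
#     from cfchecker.cfchecks import CFChecker
#     inst = CFChecker()
#     rc = inst.checker("example_file.nc")   # returns an exit/status code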
| RosalynHatcher/CFChecker | src/cfchecker/__init__.py | Python | bsd-3-clause | 598 |
"""fitsdiff is now a part of Astropy.
Now this module just provides a wrapper around astropy.io.fits.diff for backwards
compatibility with the old interface in case anyone uses it.
"""
import os
import sys
from astropy.io.fits.diff import FITSDiff
from astropy.io.fits.scripts.fitsdiff import log, main
def fitsdiff(input1, input2, comment_excl_list='', value_excl_list='',
field_excl_list='', maxdiff=10, delta=0.0, neglect_blanks=True,
output=None):
if isinstance(comment_excl_list, str):
comment_excl_list = list_parse(comment_excl_list)
if isinstance(value_excl_list, str):
value_excl_list = list_parse(value_excl_list)
if isinstance(field_excl_list, str):
field_excl_list = list_parse(field_excl_list)
diff = FITSDiff(input1, input2, ignore_keywords=value_excl_list,
ignore_comments=comment_excl_list,
ignore_fields=field_excl_list, numdiffs=maxdiff,
tolerance=delta, ignore_blanks=neglect_blanks)
if output is None:
output = sys.stdout
diff.report(output)
return diff.identical
def list_parse(name_list):
"""Parse a comma-separated list of values, or a filename (starting with @)
containing a list value on each line.
"""
if name_list and name_list[0] == '@':
value = name_list[1:]
if not os.path.exists(value):
log.warning('The file %s does not exist' % value)
return
try:
return [v.strip() for v in open(value, 'r').readlines()]
except IOError as e:
log.warning('reading %s failed: %s; ignoring this file' %
(value, e))
else:
return [v.strip() for v in name_list.split(',')]
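# Hedged usage sketch (added for illustration; not part of the original
# module).  The wrapper mirrors the old fitsdiff interface on top of
# astropy.io.fits.diff.FITSDiff; the file names and keyword values below are
# hypothetical.
#
#     identical = fitsdiff("old_version.fits", "new_version.fits",
#                          value_excl_list="DATE,CHECKSUM",  # keywords to skip
#                          maxdiff=20)                        # report up to 20 diffs
#     print("files match" if identical else "files differ")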
if __name__ == "__main__":
sys.exit(main())
| spacetelescope/stsci.tools | lib/stsci/tools/fitsdiff.py | Python | bsd-3-clause | 1,825 |
"""
Model class that unites theory with data.
"""
import logging
logger = logging.getLogger('Model_mod')
import copy
import scipy
import SloppyCell
import SloppyCell.Residuals as Residuals
import SloppyCell.Collections as Collections
import SloppyCell.Utility as Utility
from . import KeyedList_mod as KeyedList_mod
KeyedList = KeyedList_mod.KeyedList
_double_epsilon_ = scipy.finfo(scipy.float_).eps
class Model:
"""
A Model object connects a set of experimental data with the objects used to
model that data.
Most importantly, a Model can calculate a cost for a given set of
parameters, characterizing how well those parameters fit the data contained
within the model.
"""
imag_cutoff = 1e-8
def __init__(self, expts, calcs):
"""
expts A sequence of Experiments to be fit to.
calcs A sequence of calculation objects referred to by the
Experiments.
"""
self.calcVals = {}
self.calcSensitivityVals = {}
self.internalVars = {}
self.internalVarsDerivs = {}
self.residuals = KeyedList()
if isinstance(expts, list):
expts = Collections.ExperimentCollection(expts)
elif isinstance(expts, dict):
expts = Collections.ExperimentCollection(expts.values())
self.SetExperimentCollection(expts)
if isinstance(calcs, list):
calcs = Collections.CalculationCollection(calcs)
elif isinstance(calcs, dict):
calcs = Collections.CalculationCollection(calcs.values())
self.SetCalculationCollection(calcs)
self.observers = KeyedList()
self.parameter_bounds = {}
def compile(self):
"""
Compile all the calculations contained within the Model.
"""
for calc in self.get_calcs().values():
calc.compile()
def copy(self):
return copy.deepcopy(self)
def get_params(self):
"""
Return a copy of the current model parameters
"""
return self.calcColl.GetParameters()
def get_ICs(self):
"""
Get the initial conditions currently present in a model
for dynamic variables that are not assigned variables.
Outputs:
KeyedList with keys (calcName,varName) --> initialValue
"""
ics=KeyedList()
for calcName, calc in self.calcColl.items():
for varName in calc.dynamicVars.keys():
if varName in calc.assignedVars.keys(): continue
ics.set( (calcName,varName), calc.get_var_ic(varName))
return ics
def set_ICs(self, ics):
"""
Sets the initial conditions into the model. Uses the input
        format defined by 'get_ICs'.
Inputs:
ics -- Initial conditions to set in KeyedList form:
               keys: (calcName, varName) --> initialValue
Outputs:
None
"""
for (calcName, varName), initialValue in ics.items():
self.calcColl.get(calcName).set_var_ic(varName, initialValue)
def _evaluate(self, params, T=1):
"""
Evaluate the cost for the model, returning the intermediate residuals,
and chi-squared.
(Summing up the residuals is a negligible amount of work. This
        arrangement makes notification of observers much simpler.)
"""
self.params.update(params)
self.check_parameter_bounds(params)
self.CalculateForAllDataPoints(params)
self.ComputeInternalVariables(T)
resvals = [res.GetValue(self.calcVals, self.internalVars, self.params)
for res in self.residuals.values()]
# Occasionally it's useful to use residuals with a sqrt(-1) in them,
# to get negative squares. Then, however, we might get small imaginary
# parts in our results, which this shaves off.
chisq = scipy.real_if_close(scipy.sum(scipy.asarray(resvals)**2),
tol=self.imag_cutoff)
if scipy.isnan(chisq):
logger.warn('Chi^2 is NaN, converting to Infinity.')
chisq = scipy.inf
cost = 0.5 * chisq
entropy = 0
for expt, sf_ents in self.internalVars['scaleFactor_entropies'].items():
for group, ent in sf_ents.items():
entropy += ent
self._notify(event = 'evaluation',
resvals = resvals,
chisq = chisq,
cost = cost,
free_energy = cost-T*entropy,
entropy = entropy,
params = self.params)
return resvals, chisq, cost, entropy
def res(self, params):
"""
Return the residual values of the model fit given a set of parameters
"""
return self._evaluate(params)[0]
def res_log_params(self, log_params):
"""
Return the residual values given the logarithm of the parameters
"""
return self.res(scipy.exp(log_params))
def res_dict(self, params):
"""
Return the residual values of the model fit given a set of parameters
in dictionary form.
"""
return dict(zip(self.residuals.keys(), self.res(params)))
def chisq(self, params):
"""
Return the sum of the squares of the residuals for the model
"""
return self._evaluate(params)[1]
def redchisq(self, params):
"""
Return chi-squared divided by the number of degrees of freedom
Question: Are priors to be included in the N data points?
How do scale factors change the number of d.o.f.?
"""
return self.chisq(params)/(len(self.residuals) - len(self.params))
def cost(self, params):
"""
Return the cost (1/2 chisq) of the model
"""
return self._evaluate(params)[2]
def cost_log_params(self, log_params):
"""
Return the cost given the logarithm of the input parameters
"""
return self.cost(scipy.exp(log_params))
def free_energy(self, params, T):
temp, temp, c, entropy = self._evaluate(params, T=T)
return c - T * entropy
def _notify(self, **args):
"""
Call all observers with the given arguments.
"""
for obs in self.observers:
obs(**args)
def attach_observer(self, obs_key, observer):
"""
Add an observer to be notified by this Model.
"""
self.observers.set(obs_key, observer)
def detach_observer(self, obs_key):
"""
Remove an observer from the Model.
"""
self.observers.remove_by_key(obs_key)
def get_observers(self):
"""
Return the KeyedList of observers for this model.
"""
return self.observers
def reset_observers(self):
"""
Call reset() for all attached observers.
"""
for obs in self.observers:
if hasattr(obs, 'reset'):
obs.reset()
resDict = res_dict
# ...
def AddResidual(self, res):
self.residuals.setByKey(res.key, res)
def Force(self, params, epsf, relativeScale=False, stepSizeCutoff=None):
"""
Force(parameters, epsilon factor) -> list
Returns a list containing the numerical gradient of the cost with
respect to each parameter (in the parameter order of the
CalculationCollection). Each element of the gradient is:
        (cost(param + eps) - cost(param - eps)) / (2 * eps).
If relativeScale is False then epsf is the stepsize used (it should
already be multiplied by typicalValues before Jacobian is called)
If relativeScale is True then epsf is multiplied by params.
The two previous statements hold for both scalar and vector valued
epsf.
"""
force = []
params = scipy.array(params)
        if stepSizeCutoff is None:
stepSizeCutoff = scipy.sqrt(_double_epsilon_)
if relativeScale is True:
eps = epsf * abs(params)
else:
eps = epsf * scipy.ones(len(params),scipy.float_)
for i in range(0,len(eps)):
if eps[i] < stepSizeCutoff:
eps[i] = stepSizeCutoff
for index, param in enumerate(params):
paramsPlus = params.copy()
paramsPlus[index] = param + eps[index]
costPlus = self.cost(paramsPlus)
paramsMinus = params.copy()
paramsMinus[index] = param - eps[index]
costMinus = self.cost(paramsMinus)
force.append((costPlus-costMinus)/(2.0*eps[index]))
return force
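    # Added note: with relativeScale=True the step for parameter i is
    # epsf * |params[i]| (e.g. epsf=1e-4 and params[i]=2.0 gives a step of
    # 2e-4), floored at stepSizeCutoff; with relativeScale=False, epsf itself
    # is the absolute step size for every parameter.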
def gradient_sens(self, params):
"""
Return the gradient of the cost, d_cost/d_param as a KeyedList.
This method uses sensitivity integration, so it only applies to
ReactionNetworks.
"""
self.params.update(params)
# The cost is 0.5 * sum(res**2),
# so the gradient is sum(res * dres_dp)
jac_dict = self.jacobian_sens(params)
res_dict = self.res_dict(params)
force = scipy.zeros(len(params), scipy.float_)
for res_key, res_val in res_dict.items():
res_derivs = jac_dict.get(res_key)
force += res_val * scipy.asarray(res_derivs)
gradient = self.params.copy()
gradient.update(force)
return gradient
def gradient_log_params_sens(self, log_params):
"""
Return the gradient of the cost wrt log parameters, d_cost/d_log_param
as a KeyedList.
This method uses sensitivity integration, so it only applies to
ReactionNetworks.
"""
# We just need to multiply dcost_dp by p.
params = scipy.exp(log_params)
gradient = self.gradient_sens(params)
gradient_log = gradient.copy()
gradient_log.update(scipy.asarray(gradient) * scipy.asarray(params))
return gradient_log
def CalculateForAllDataPoints(self, params):
"""
CalculateForAllDataPoints(parameters) -> dictionary
Gets a dictionary of measured independent variables indexed by
calculation from the ExperimentCollection and passes it to the
CalculationCollection. The returned dictionary is of the form:
dictionary[experiment][calculation][dependent variable]
                   [independent variable] -> calculated value.
"""
self.params.update(params)
varsByCalc = self.GetExperimentCollection().GetVarsByCalc()
self.calcVals = self.GetCalculationCollection().Calculate(varsByCalc,
params)
return self.calcVals
def CalculateSensitivitiesForAllDataPoints(self, params):
"""
CalculateSensitivitiesForAllDataPoints(parameters) -> dictionary
Gets a dictionary of measured independent variables indexed by
calculation from the ExperimentCollection and passes it to the
CalculationCollection. The returned dictionary is of the form:
dictionary[experiment][calculation][dependent variable]
                   [independent variable][parameter] -> sensitivity.
"""
varsByCalc = self.GetExperimentCollection().GetVarsByCalc()
self.calcVals, self.calcSensitivityVals =\
self.GetCalculationCollection().CalculateSensitivity(varsByCalc,
params)
return self.calcSensitivityVals
def ComputeInternalVariables(self, T=1):
sf, sf_ents = self.compute_scale_factors(T)
self.internalVars['scaleFactors'] = sf
self.internalVars['scaleFactor_entropies'] = sf_ents
def compute_scale_factors(self, T):
"""
Compute the scale factors for the current parameters and return a dict.
The dictionary is of the form dict[exptId][varId] = scale_factor
"""
scale_factors = {}
scale_factor_entropies = {}
for exptId, expt in self.GetExperimentCollection().items():
scale_factors[exptId], scale_factor_entropies[exptId] =\
self._compute_sf_and_sfent_for_expt(expt, T)
return scale_factors, scale_factor_entropies
def _compute_sf_and_sfent_for_expt(self, expt, T):
# Compute the scale factors for a given experiment
scale_factors = {}
scale_factor_entropies = {}
exptData = expt.GetData()
expt_integral_data = expt.GetIntegralDataSets()
fixed_sf = expt.get_fixed_sf()
sf_groups = expt.get_sf_groups()
for group in sf_groups:
# Do any of the variables in this group have fixed scale factors?
fixed = set(group).intersection(set(fixed_sf.keys()))
fixedAt = set([fixed_sf[var] for var in fixed])
# We'll need to index the scale factor entropies on the *group*
# that shares a scale factor, since we only have one entropy per
# shared scale factor. So we need to index on the group of
# variables. We sort the group and make it hashable to avoid any
# double-counting.
hash_group = expt._hashable_group(group)
if len(fixedAt) == 1:
value = fixedAt.pop()
for var in group:
scale_factors[var] = value
scale_factor_entropies[hash_group] = 0
continue
elif len(fixedAt) > 1:
raise ValueError('Shared scale factors fixed at '
'inconsistent values in experiment '
'%s!' % expt.GetName())
# Finally, compute the scale factor for this group
theoryDotData, theoryDotTheory = 0, 0
# For discrete data
for calc in exptData:
# Pull out the vars we have measured for this calculation
for var in set(group).intersection(set(exptData[calc].keys())):
for indVar, (data, error) in exptData[calc][var].items():
theory = self.calcVals[calc][var][indVar]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
# Now for integral data
for dataset in expt_integral_data:
calc = dataset['calcKey']
theory_traj = self.calcVals[calc]['full trajectory']
data_traj = dataset['trajectory']
uncert_traj = dataset['uncert_traj']
interval = dataset['interval']
T = interval[1] - interval[0]
for var in group.intersection(set(dataset['vars'])):
TheorDotT = self._integral_theorytheory(var, theory_traj,
uncert_traj,
interval)
theoryDotTheory += TheorDotT/T
TheorDotD = self._integral_theorydata(var, theory_traj,
data_traj,
uncert_traj,
interval)
theoryDotData += TheorDotD/T
# Now for the extrema data
for ds in expt.scaled_extrema_data:
calc = ds['calcKey']
if ds['type'] == 'max':
var = ds['var'] + '_maximum'
elif ds['type'] == 'min':
var = ds['var'] + '_minimum'
data, error = ds['val'], ds['sigma']
theory = self.calcVals[calc][var]\
[ds['minTime'],ds['maxTime']][1]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
for var in group:
if theoryDotTheory != 0:
scale_factors[var] = theoryDotData/theoryDotTheory
else:
scale_factors[var] = 1
entropy = expt.compute_sf_entropy(hash_group, theoryDotTheory,
theoryDotData, T)
scale_factor_entropies[hash_group] = entropy
return scale_factors, scale_factor_entropies
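    # Note on the closed form used above (comment added for clarity): for
    # residuals of the form (B*theory - data)/error, the scale factor B that
    # minimizes the summed squared residuals satisfies
    #     B = sum(theory*data/error**2) / sum(theory**2/error**2)
    #       = theoryDotData / theoryDotTheory,
    # which is what the loops over the discrete, integral and extremum data
    # accumulate before the final division.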
def _integral_theorytheory(self, var, theory_traj, uncert_traj, interval):
def integrand(t):
theory = theory_traj.evaluate_interpolated_traj(var, t)
uncert = uncert_traj.evaluate_interpolated_traj(var, t)
return theory**2/uncert**2
val, error = scipy.integrate.quad(integrand, interval[0], interval[1],
limit=int(1e5))
return val
def _integral_theorydata(self, var, theory_traj, data_traj, uncert_traj,
interval):
def integrand(t):
theory = theory_traj.evaluate_interpolated_traj(var, t)
data = data_traj.evaluate_interpolated_traj(var, t)
uncert = uncert_traj.evaluate_interpolated_traj(var, t)
return theory*data/uncert**2
val, error = scipy.integrate.quad(integrand, interval[0], interval[1],
limit=int(1e5))
return val
def ComputeInternalVariableDerivs(self):
"""
        ComputeInternalVariableDerivs() -> dictionary
Returns the scale factor derivatives w.r.t. parameters
appropriate for each chemical in each
experiment, given the current parameters. The returned dictionary
is of the form: internalVarsDerivs['scaleFactors'] \
= dict[experiment][chemical][parametername] -> derivative.
"""
self.internalVarsDerivs['scaleFactors'] = {}
p = self.GetCalculationCollection().GetParameters()
for exptName, expt in self.GetExperimentCollection().items():
self.internalVarsDerivs['scaleFactors'][exptName] = {}
exptData = expt.GetData()
# Get the dependent variables measured in this experiment
exptDepVars = set()
for calc in exptData:
exptDepVars.update(set(expt.GetData()[calc].keys()))
# Now for the extrema data
for ds in expt.scaled_extrema_data:
exptDepVars.add(ds['var'])
for depVar in exptDepVars:
self.internalVarsDerivs['scaleFactors'][exptName][depVar] = {}
if depVar in expt.GetFixedScaleFactors():
for pname in p.keys():
self.internalVarsDerivs['scaleFactors'][exptName]\
[depVar][pname] = 0.0
continue
theoryDotData, theoryDotTheory = 0, 0
for calc in exptData:
if depVar in exptData[calc].keys():
for indVar, (data, error)\
in exptData[calc][depVar].items():
theory = self.calcVals[calc][depVar][indVar]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
for ds in expt.scaled_extrema_data:
if ds['type'] == 'max':
var = ds['var'] + '_maximum'
elif ds['type'] == 'min':
var = ds['var'] + '_minimum'
data, error = ds['val'], ds['sigma']
theory = self.calcVals[ds['calcKey']][var]\
[ds['minTime'],ds['maxTime']][1]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
# now get derivative of the scalefactor
for pname in p.keys():
theorysensDotData, theorysensDotTheory = 0, 0
for calc in exptData:
clc = self.calcColl.get(calc)
if depVar in exptData[calc].keys():
for indVar, (data, error)\
in exptData[calc][depVar].items():
theory = self.calcVals[calc][depVar][indVar]
# Default to 0 if sensitivity not calculated for
# that parameter (i.e. it's not in the
# Calculation)
theorysens = self.calcSensitivityVals[calc][depVar][indVar].get(pname, 0.0)
theorysensDotData += (theorysens * data) / error**2
theorysensDotTheory += (theorysens * theory) / error**2
for ds in expt.scaled_extrema_data:
if ds['type'] == 'max':
var = ds['var'] + '_maximum'
elif ds['type'] == 'min':
var = ds['var'] + '_minimum'
theory = self.calcVals[ds['calcKey']][var]\
[ds['minTime'],ds['maxTime']][1]
data, error = ds['val'], ds['sigma']
theorysens = self.calcSensitivityVals[ds['calcKey']][var][ds['minTime'],ds['maxTime']].get(pname, 0.0)
theorysensDotData += (theorysens * data) / error**2
theorysensDotTheory += (theorysens * theory) / error**2
deriv_dict = self.internalVarsDerivs['scaleFactors'][exptName][depVar]
try:
deriv_dict[pname] = theorysensDotData/theoryDotTheory\
- 2*theoryDotData*theorysensDotTheory/(theoryDotTheory)**2
except ZeroDivisionError:
deriv_dict[pname] = 0
return self.internalVarsDerivs['scaleFactors']
def jacobian_log_params_sens(self, log_params):
"""
Return a KeyedList of the derivatives of the model residuals w.r.t.
        the logarithms of the parameters.
The method uses the sensitivity integration. As such, it will only
work with ReactionNetworks.
The KeyedList is of the form:
kl.get(resId) = [dres/dlogp1, dres/dlogp2...]
"""
params = scipy.exp(log_params)
j = self.jacobian_sens(params)
j_log = j.copy()
j_log.update(scipy.asarray(j) * scipy.asarray(params))
return j_log
def jacobian_sens(self, params):
"""
Return a KeyedList of the derivatives of the model residuals w.r.t.
parameters.
The method uses the sensitivity integration. As such, it will only
work with ReactionNetworks.
The KeyedList is of the form:
kl[resId] = [dres/dp1, dres/dp2...]
"""
self.params.update(params)
# Calculate sensitivities
self.CalculateSensitivitiesForAllDataPoints(params)
self.ComputeInternalVariables()
self.ComputeInternalVariableDerivs()
# Calculate residual derivatives
deriv = [(resId, res.Dp(self.calcVals, self.calcSensitivityVals,
self.internalVars, self.internalVarsDerivs,
self.params))
for (resId, res) in self.residuals.items()]
return KeyedList(deriv)
def jacobian_fd(self, params, eps,
relativeScale=False, stepSizeCutoff=None):
"""
Return a KeyedList of the derivatives of the model residuals w.r.t.
parameters.
The method uses finite differences.
Inputs:
params -- Parameters about which to calculate the jacobian
eps -- Step size to take, may be vector or scalar.
relativeScale -- If true, the eps is taken to be the fractional
change in parameter to use in finite differences.
stepSizeCutoff -- Minimum step size to take.
"""
res = self.resDict(params)
orig_vals = scipy.array(params)
if stepSizeCutoff is None:
stepSizeCutoff = scipy.sqrt(_double_epsilon_)
if relativeScale:
eps_l = scipy.maximum(eps * abs(params), stepSizeCutoff)
else:
eps_l = scipy.maximum(eps * scipy.ones(len(params),scipy.float_),
stepSizeCutoff)
J = KeyedList() # will hold the result
for resId in res.keys():
J.set(resId, [])
# Two-sided finite difference
for ii in range(len(params)):
params[ii] = orig_vals[ii] + eps_l[ii]
resPlus = self.resDict(params)
params[ii] = orig_vals[ii] - eps_l[ii]
resMinus = self.resDict(params)
params[ii] = orig_vals[ii]
for resId in res.keys():
res_deriv = (resPlus[resId]-resMinus[resId])/(2.*eps_l[ii])
J.get(resId).append(res_deriv)
# NOTE: after call to ComputeResidualsWithScaleFactors the Model's
# parameters get updated, must reset this:
self.params.update(params)
return J
def GetJacobian(self,params):
"""
GetJacobian(parameters) -> dictionary
Gets a dictionary of the sensitivities at the time points of
the independent variables for the measured dependent variables
for each calculation and experiment.
Form:
dictionary[(experiment,calculation,dependent variable,
independent variable)] -> result
result is a vector of length number of parameters containing
the sensitivity at that time point, in the order of the ordered
parameters
"""
return self.jacobian_sens(params)
def Jacobian(self, params, epsf, relativeScale=False, stepSizeCutoff=None):
"""
Finite difference the residual dictionary to get a dictionary
for the Jacobian. It will be indexed the same as the residuals.
Note: epsf is either a scalar or an array.
If relativeScale is False then epsf is the stepsize used (it should
already be multiplied by typicalValues before Jacobian is called)
If relativeScale is True then epsf is multiplied by params.
The two previous statements hold for both scalar and vector valued
epsf.
"""
return self.jacobian_fd(params, epsf,
relativeScale, stepSizeCutoff)
def GetJandJtJ(self,params):
j = self.GetJacobian(params)
mn = scipy.zeros((len(params),len(params)),scipy.float_)
for paramind in range(0,len(params)):
for paramind1 in range(0,len(params)):
sum = 0.0
for kys in j.keys():
sum = sum + j.get(kys)[paramind]*j.get(kys)[paramind1]
mn[paramind][paramind1] = sum
return j,mn
def GetJandJtJInLogParameters(self,params):
# Formula below is exact if you have perfect data. If you don't
# have perfect data (residuals != 0) you get an extra term when you
# compute d^2(cost)/(dlogp[i]dlogp[j]) which is
# sum_resname (residual[resname] * jac[resname][j] * delta_jk * p[k])
# but can be ignored when residuals are zeros, and maybe should be
# ignored altogether because it can make the Hessian approximation
# non-positive definite
pnolog = scipy.exp(params)
jac, jtj = self.GetJandJtJ(pnolog)
for i in range(len(params)):
for j in range(len(params)):
jtj[i][j] = jtj[i][j]*pnolog[i]*pnolog[j]
res = self.resDict(pnolog)
for resname in self.residuals.keys():
for j in range(len(params)):
# extra term --- not including it
# jtj[j][j] += res[resname]*jac[resname][j]*pnolog[j]
jac.get(resname)[j] = jac.get(resname)[j]*pnolog[j]
return jac,jtj
def hessian_elem(self, func, f0, params, i, j, epsi, epsj,
relativeScale, stepSizeCutoff, verbose):
"""
Return the second partial derivative for func w.r.t. parameters i and j
f0: The value of the function at params
eps: Sets the stepsize to try
relativeScale: If True, step i is of size p[i] * eps, otherwise it is
eps
stepSizeCutoff: The minimum stepsize to take
"""
origPi, origPj = params[i], params[j]
if relativeScale:
# Steps sizes are given by eps*the value of the parameter,
# but the minimum step size is stepSizeCutoff
hi, hj = scipy.maximum((epsi*abs(origPi), epsj*abs(origPj)),
(stepSizeCutoff, stepSizeCutoff))
else:
hi, hj = epsi, epsj
if i == j:
params[i] = origPi + hi
fp = func(params)
params[i] = origPi - hi
fm = func(params)
element = (fp - 2*f0 + fm)/hi**2
else:
## f(xi + hi, xj + h)
params[i] = origPi + hi
params[j] = origPj + hj
fpp = func(params)
## f(xi + hi, xj - hj)
params[i] = origPi + hi
params[j] = origPj - hj
fpm = func(params)
## f(xi - hi, xj + hj)
params[i] = origPi - hi
params[j] = origPj + hj
fmp = func(params)
## f(xi - hi, xj - hj)
params[i] = origPi - hi
params[j] = origPj - hj
fmm = func(params)
element = (fpp - fpm - fmp + fmm)/(4 * hi * hj)
params[i], params[j] = origPi, origPj
self._notify(event = 'hessian element', i = i, j = j,
element = element)
if verbose:
print('hessian[%i, %i] = %g' % (i, j, element))
return element
def hessian(self, params, epsf, relativeScale = True,
stepSizeCutoff = None, jacobian = None,
verbose = False):
"""
Returns the hessian of the model.
epsf: Sets the stepsize to try
relativeScale: If True, step i is of size p[i] * eps, otherwise it is
eps
stepSizeCutoff: The minimum stepsize to take
jacobian: If the jacobian is passed, it will be used to estimate
the step size to take.
        verbose: If True, a message will be printed with each hessian element
calculated
"""
nOv = len(params)
if stepSizeCutoff is None:
stepSizeCutoff = scipy.sqrt(_double_epsilon_)
params = scipy.asarray(params)
if relativeScale:
eps = epsf * abs(params)
else:
eps = epsf * scipy.ones(len(params),scipy.float_)
# Make sure we don't take steps smaller than stepSizeCutoff
eps = scipy.maximum(eps, stepSizeCutoff)
if jacobian is not None:
# Turn off the relative scaling since that would overwrite all this
relativeScale = False
jacobian = scipy.asarray(jacobian)
if len(jacobian.shape) == 0:
resDict = self.resDict(params)
new_jacobian = scipy.zeros(len(params),scipy.float_)
for key, value in resDict.items():
new_jacobian += 2.0*value*scipy.array(jacobian[0][key])
jacobian = new_jacobian
elif len(jacobian.shape) == 2: # Need to sum up the total jacobian
residuals = scipy.asarray(self.res(params))
# Changed by rng7. I'm not sure what is meant by "sum up the
# total jacobian". The following line failed due to shape
# mismatch. From the context below, it seems that the dot
# product is appropriate.
#jacobian = 2.0*residuals*jacobian
jacobian = 2.0 * scipy.dot(residuals, jacobian)
# If parameters are independent, then
# epsilon should be (sqrt(2)*J[i])^-1
factor = 1.0/scipy.sqrt(2)
for i in range(nOv):
if jacobian[i] == 0.0:
eps[i] = 0.5*abs(params[i])
else:
# larger than stepSizeCutoff, but not more than
# half of the original parameter value
eps[i] = min(max(factor/abs(jacobian[i]), stepSizeCutoff),
0.5*abs(params[i]))
## compute cost at f(x)
f0 = self.cost(params)
hess = scipy.zeros((nOv, nOv), scipy.float_)
## compute all (numParams*(numParams + 1))/2 unique hessian elements
for i in range(nOv):
for j in range(i, nOv):
hess[i][j] = self.hessian_elem(self.cost, f0,
params, i, j,
eps[i], eps[j],
relativeScale, stepSizeCutoff,
verbose)
hess[j][i] = hess[i][j]
return hess
def hessian_log_params(self, params, eps,
relativeScale=False, stepSizeCutoff=1e-6,
verbose=False):
"""
Returns the hessian of the model in log parameters.
eps: Sets the stepsize to try
relativeScale: If True, step i is of size p[i] * eps, otherwise it is
eps
stepSizeCutoff: The minimum stepsize to take
        verbose: If True, a message will be printed with each hessian element
calculated
"""
nOv = len(params)
if scipy.isscalar(eps):
eps = scipy.ones(len(params), scipy.float_) * eps
## compute cost at f(x)
f0 = self.cost_log_params(scipy.log(params))
hess = scipy.zeros((nOv, nOv), scipy.float_)
## compute all (numParams*(numParams + 1))/2 unique hessian elements
for i in range(nOv):
for j in range(i, nOv):
hess[i][j] = self.hessian_elem(self.cost_log_params, f0,
scipy.log(params),
i, j, eps[i], eps[j],
relativeScale, stepSizeCutoff,
verbose)
hess[j][i] = hess[i][j]
return hess
def CalcHessianInLogParameters(self, params, eps, relativeScale = False,
stepSizeCutoff = 1e-6, verbose = False):
return self.hessian_log_params(params, eps, relativeScale,
stepSizeCutoff, verbose)
def CalcHessian(self, params, epsf, relativeScale = True,
stepSizeCutoff = None, jacobian = None, verbose = False):
"""
Finite difference the residual dictionary to get a dictionary
for the Hessian. It will be indexed the same as the residuals.
Note: epsf is either a scalar or an array.
If relativeScale is False then epsf is the stepsize used (it should
already be multiplied by typicalValues before Jacobian is called)
If relativeScale is True then epsf is multiplied by params.
The two previous statements hold for both scalar and vector valued
epsf.
"""
return self.hessian(params, epsf, relativeScale,
stepSizeCutoff, jacobian, verbose)
def CalcResidualResponseArray(self, j, h):
"""
Calculate the Residual Response array. This array represents the change
in a residual obtained by a finite change in a data value.
Inputs:
(self, j, h)
j -- jacobian matrix to use
h -- hessian matrix to use
Outputs:
response -- The response array
"""
j,h = scipy.asarray(j), scipy.asarray(h)
[m,n] = j.shape
response = scipy.zeros((m,m),scipy.float_)
        ident = scipy.eye(m, dtype=scipy.float_)
hinv = scipy.linalg.pinv2(h,1e-40)
tmp = scipy.dot(hinv,scipy.transpose(j))
tmp2 = scipy.dot(j,tmp)
response = ident - tmp2
return response
def CalcParameterResponseToResidualArray(self,j,h):
"""
Calculate the parameter response to residual array. This array
represents the change in parameter resulting from a change in data
(residual).
Inputs:
(self, j, h)
j -- jacobian matrix to use
h -- hessian matrix to use
Outputs:
response -- The response array
"""
j,h = scipy.asarray(j), scipy.asarray(h)
[m,n] = j.shape
response = scipy.zeros((n,m),scipy.float_)
hinv = scipy.linalg.pinv2(h,1e-40)
response = -scipy.dot(hinv,scipy.transpose(j))
return response
############################################################################
# Getting/Setting variables below
def SetExperimentCollection(self, exptColl):
self.exptColl = exptColl
for exptKey, expt in exptColl.items():
exptData = expt.GetData()
for calcKey, calcData in exptData.items():
for depVarKey, depVarData in calcData.items():
sortedData = depVarData.items()
sortedData.sort()
for indVar, (value, uncert) in sortedData:
resName = (exptKey, calcKey, depVarKey, indVar)
res = Residuals.ScaledErrorInFit(resName, depVarKey,
calcKey, indVar, value,
uncert, exptKey)
self.residuals.setByKey(resName, res)
# Add in the PeriodChecks
for period in expt.GetPeriodChecks():
calcKey, depVarKey, indVarValue = period['calcKey'], \
period['depVarKey'], period['startTime']
resName = (exptKey, calcKey, depVarKey, indVarValue,
'PeriodCheck')
res = Residuals.PeriodCheckResidual(resName, calcKey, depVarKey,
indVarValue,
period['period'],
period['sigma'])
self.residuals.setByKey(resName, res)
# Add in the AmplitudeChecks
for amplitude in expt.GetAmplitudeChecks():
calcKey, depVarKey = amplitude['calcKey'], \
amplitude['depVarKey']
indVarValue0, indVarValue1 = amplitude['startTime'],\
amplitude['testTime']
resName = (exptKey, calcKey, depVarKey, indVarValue0,
indVarValue1, 'AmplitudeCheck')
res = Residuals.AmplitudeCheckResidual(resName, calcKey,
depVarKey, indVarValue0,
indVarValue1,
amplitude['period'],
amplitude['sigma'],
exptKey)
self.residuals.setByKey(resName, res)
# Add in the integral data
for ds in expt.GetIntegralDataSets():
for var in ds['vars']:
resName = (exptKey, ds['calcKey'], var, 'integral data')
res = Residuals.IntegralDataResidual(resName, var,
exptKey,
ds['calcKey'],
ds['trajectory'],
ds['uncert_traj'],
ds['interval'])
self.residuals.setByKey(resName, res)
for ds in expt.scaled_extrema_data:
ds['exptKey'] = expt.name
ds['key'] = '%s_%simum_%s_%s' % (ds['var'], ds['type'],
str(ds['minTime']),
str(ds['maxTime']))
res = Residuals.ScaledExtremum(**ds)
self.AddResidual(res)
def get_expts(self):
return self.exptColl
def set_var_optimizable(self, var, is_optimizable):
for calc in self.get_calcs().values():
try:
calc.set_var_optimizable(var, is_optimizable)
except KeyError:
pass
self.params = self.calcColl.GetParameters()
GetExperimentCollection = get_expts
def SetCalculationCollection(self, calcColl):
self.calcColl = calcColl
self.params = calcColl.GetParameters()
def get_calcs(self):
return self.calcColl
GetCalculationCollection = get_calcs
def GetScaleFactors(self):
return self.internalVars['scaleFactors']
def GetResiduals(self):
return self.residuals
def GetCalculatedValues(self):
return self.calcVals
def GetInternalVariables(self):
return self.internalVars
def add_parameter_bounds(self, param_id, pmin, pmax):
"""
Add bounds on a specific parameter.
Cost evaluations will raise an exception if these bounds are violated.
"""
self.parameter_bounds[param_id] = pmin, pmax
def check_parameter_bounds(self, params):
self.params.update(params)
for id, (pmin, pmax) in self.parameter_bounds.items():
if not pmin <= self.params.get(id) <= pmax:
err = 'Parameter %s has value %f, which is outside of given bounds %f to %f.' % (id, self.params.get(id), pmin, pmax)
raise Utility.SloppyCellException(err)
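# Hedged usage sketch (added for illustration; not part of the original
# module).  A Model couples an ExperimentCollection with a
# CalculationCollection and exposes the cost machinery used by SloppyCell's
# optimizers; the variable and parameter names below are hypothetical.
#
#     m = Model([expt], [net])                 # experiments and calculations
#     p = m.get_params()                       # current parameter KeyedList
#     c = m.cost(p)                            # 0.5 * sum of squared residuals
#     g = m.gradient_log_params_sens(scipy.log(p))   # needs sensitivities
#     m.add_parameter_bounds('k_1', 1e-6, 1e3)       # optional hard bounds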
| GutenkunstLab/SloppyCell | SloppyCell/Model_mod.py | Python | bsd-3-clause | 42,681 |
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""basic_modules defines basic VisTrails Modules that are used in most
pipelines."""
from __future__ import division
import vistrails.core.cache.hasher
from vistrails.core.debug import format_exception
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.vistrails_module import Module, new_module, \
Converter, NotCacheable, ModuleError
from vistrails.core.modules.config import ConstantWidgetConfig, \
QueryWidgetConfig, ParamExpWidgetConfig, ModuleSettings, IPort, OPort, \
CIPort
import vistrails.core.system
from vistrails.core.utils import InstanceObject
from vistrails.core import debug
from abc import ABCMeta
from ast import literal_eval
from itertools import izip
import mimetypes
import os
import pickle
import re
import shutil
import sys
import zipfile
import urllib
try:
import hashlib
sha_hash = hashlib.sha1
except ImportError:
import sha
sha_hash = sha.new
###############################################################################
version = '2.1.1'
name = 'Basic Modules'
identifier = 'org.vistrails.vistrails.basic'
old_identifiers = ['edu.utah.sci.vistrails.basic']
constant_config_path = "vistrails.gui.modules.constant_configuration"
query_config_path = "vistrails.gui.modules.query_configuration"
paramexp_config_path = "vistrails.gui.modules.paramexplore"
def get_port_name(port):
if hasattr(port, 'name'):
return port.name
else:
return port[0]
class meta_add_value_ports(type):
def __new__(cls, name, bases, dct):
"""This metaclass adds the 'value' input and output ports.
"""
mod = type.__new__(cls, name, bases, dct)
if '_input_ports' in mod.__dict__:
input_ports = mod._input_ports
if not any(get_port_name(port_info) == 'value'
for port_info in input_ports):
mod._input_ports = [('value', mod)]
mod._input_ports.extend(input_ports)
else:
mod._input_ports = [('value', mod)]
if '_output_ports' in mod.__dict__:
output_ports = mod._output_ports
if not any(get_port_name(port_info) == 'value'
for port_info in output_ports):
mod._output_ports = [('value', mod)]
mod._output_ports.extend(output_ports)
else:
mod._output_ports = [('value', mod)]
return mod
class Constant(Module):
"""Base class for all Modules that represent a constant value of
some type.
    When implementing your own constant, you have to adhere to the
following interface:
Implement the following methods:
translate_to_python(x): Given a string, translate_to_python
must return a python value that will be the value seen by the
execution modules.
For example, translate_to_python called on a float parameter
with value '3.15' will return float('3.15').
translate_to_string(): Return a string representation of the
current constant, which will eventually be passed to
translate_to_python.
validate(v): return True if given python value is a plausible
value for the constant. It should be implemented such that
validate(translate_to_python(x)) == True for all valid x
A constant must also expose its default value, through the field
default_value.
There are fields you are not allowed to use in your constant classes.
These are: 'id', 'interpreter', 'logging' and 'change_parameter'
You can also define the constant's own GUI widget.
See core/modules/constant_configuration.py for details.
"""
_settings = ModuleSettings(abstract=True)
_output_ports = [OPort("value_as_string", "String")]
__metaclass__ = meta_add_value_ports
@staticmethod
def validate(x):
raise NotImplementedError
@staticmethod
def translate_to_python(x):
raise NotImplementedError
def compute(self):
"""Constant.compute() only checks validity (and presence) of
input value."""
v = self.get_input("value")
b = self.validate(v)
if not b:
raise ModuleError(self, "Internal Error: Constant failed validation")
self.set_output("value", v)
self.set_output("value_as_string", self.translate_to_string(v))
def setValue(self, v):
self.set_output("value", self.translate_to_python(v))
self.upToDate = True
@staticmethod
def translate_to_string(v):
return str(v)
@staticmethod
def get_widget_class():
# return StandardConstantWidget
return None
@staticmethod
def query_compute(value_a, value_b, query_method):
if query_method == '==' or query_method is None:
return (value_a == value_b)
elif query_method == '!=':
return (value_a != value_b)
return False
def new_constant(name, py_conversion=None, default_value=None, validation=None,
widget_type=None,
str_conversion=None, base_class=Constant,
compute=None, query_compute=None):
"""new_constant(name: str,
py_conversion: callable,
default_value: python_type,
validation: callable,
widget_type: (path, name) tuple or QWidget type,
str_conversion: callable,
base_class: class,
compute: callable,
query_compute: static callable) -> Module
new_constant dynamically creates a new Module derived from
Constant with given py_conversion and str_conversion functions, a
corresponding python type and a widget type. py_conversion is a
python callable that takes a string and returns a python value of
the type that the class should hold. str_conversion does the reverse.
This is the quickest way to create new Constant Modules."""
d = {}
if py_conversion is not None:
d["translate_to_python"] = py_conversion
elif base_class == Constant:
raise ValueError("Must specify translate_to_python for constant")
if validation is not None:
d["validate"] = validation
elif base_class == Constant:
raise ValueError("Must specify validation for constant")
if default_value is not None:
d["default_value"] = default_value
if str_conversion is not None:
d['translate_to_string'] = str_conversion
if compute is not None:
d['compute'] = compute
if query_compute is not None:
d['query_compute'] = query_compute
if widget_type is not None:
@staticmethod
def get_widget_class():
return widget_type
d['get_widget_class'] = get_widget_class
m = new_module(base_class, name, d)
m._input_ports = [('value', m)]
m._output_ports = [('value', m)]
return m
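# Hedged example (added for illustration; not part of the original module):
# new_constant builds a Constant subclass from plain callables.  The module
# name and callables below are assumptions, not an existing VisTrails type.
#
#     PositiveFloat = new_constant(
#         'PositiveFloat',
#         py_conversion=float,
#         default_value=1.0,
#         validation=lambda v: isinstance(v, (int, long, float)) and v > 0,
#         str_conversion=str)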
class Boolean(Constant):
_settings = ModuleSettings(
constant_widget='%s:BooleanWidget' % constant_config_path)
default_value = False
@staticmethod
def translate_to_python(x):
s = x.upper()
if s == 'TRUE':
return True
if s == 'FALSE':
return False
raise ValueError('Boolean from String in VisTrails should be either '
'"true" or "false", got "%s" instead' % x)
@staticmethod
def validate(x):
return isinstance(x, bool)
class Float(Constant):
_settings = ModuleSettings(constant_widgets=[
QueryWidgetConfig('%s:NumericQueryWidget' % query_config_path),
ParamExpWidgetConfig('%s:FloatExploreWidget' % paramexp_config_path)])
default_value = 0.0
@staticmethod
def translate_to_python(x):
return float(x)
@staticmethod
def validate(x):
return isinstance(x, (int, long, float))
@staticmethod
def query_compute(value_a, value_b, query_method):
value_a = float(value_a)
value_b = float(value_b)
if query_method == '==' or query_method is None:
return (value_a == value_b)
elif query_method == '<':
return (value_a < value_b)
elif query_method == '>':
return (value_a > value_b)
elif query_method == '<=':
return (value_a <= value_b)
elif query_method == '>=':
return (value_a >= value_b)
class Integer(Float):
_settings = ModuleSettings(constant_widgets=[
QueryWidgetConfig('%s:NumericQueryWidget' % query_config_path),
ParamExpWidgetConfig('%s:IntegerExploreWidget' % paramexp_config_path)])
default_value = 0
@staticmethod
def translate_to_python(x):
if x.startswith('0x'):
return int(x, 16)
else:
return int(x)
@staticmethod
def validate(x):
return isinstance(x, (int, long))
class String(Constant):
_settings = ModuleSettings(
configure_widget="vistrails.gui.modules.string_configure:TextConfigurationWidget",
constant_widgets=[
ConstantWidgetConfig('%s:MultiLineStringWidget' % constant_config_path,
widget_type='multiline'),
QueryWidgetConfig('%s:StringQueryWidget' % query_config_path)])
_output_ports = [OPort("value_as_string", "String", optional=True)]
default_value = ""
@staticmethod
def translate_to_python(x):
assert isinstance(x, (str, unicode))
return str(x)
@staticmethod
def validate(x):
return isinstance(x, str)
@staticmethod
def query_compute(value_a, value_b, query_method):
if query_method == '*[]*' or query_method is None:
return (value_b in value_a)
elif query_method == '==':
return (value_a == value_b)
elif query_method == '=~':
try:
m = re.match(value_b, value_a)
if m is not None:
                    return (m.end() == len(value_a))
except re.error:
pass
return False
##############################################################################
# Rich display for IPython
try:
from IPython import display
except ImportError:
display = None
class PathObject(object):
def __init__(self, name):
self.name = name
self._ipython_repr = None
def __repr__(self):
return "PathObject(%r)" % self.name
__str__ = __repr__
def __getattr__(self, name):
if name.startswith('_repr_') and name.endswith('_'):
if self._ipython_repr is None:
filetype, encoding = mimetypes.guess_type(self.name)
if filetype and filetype.startswith('image/'):
self._ipython_repr = display.Image(filename=self.name)
else:
self._ipython_repr = False
if self._ipython_repr is not False:
return getattr(self._ipython_repr, name)
raise AttributeError
class Path(Constant):
_settings = ModuleSettings(constant_widget=("%s:PathChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "Path"),
IPort("name", "String", optional=True)]
_output_ports = [OPort("value", "Path")]
@staticmethod
def translate_to_python(x):
return PathObject(x)
@staticmethod
def translate_to_string(x):
return str(x.name)
@staticmethod
def validate(v):
return isinstance(v, PathObject)
def get_name(self):
n = None
if self.has_input("value"):
n = self.get_input("value").name
if n is None:
self.check_input("name")
n = self.get_input("name")
return n
def set_results(self, n):
self.set_output("value", PathObject(n))
self.set_output("value_as_string", n)
def compute(self):
n = self.get_name()
self.set_results(n)
Path.default_value = PathObject('')
def path_parameter_hasher(p):
def get_mtime(path):
t = int(os.path.getmtime(path))
if os.path.isdir(path):
for subpath in os.listdir(path):
subpath = os.path.join(path, subpath)
if os.path.isdir(subpath):
t = max(t, get_mtime(subpath))
return t
h = vistrails.core.cache.hasher.Hasher.parameter_signature(p)
try:
# FIXME: This will break with aliases - I don't really care that much
t = get_mtime(p.strValue)
except OSError:
return h
hasher = sha_hash()
hasher.update(h)
hasher.update(str(t))
return hasher.digest()
class File(Path):
"""File is a VisTrails Module that represents a file stored on a
file system local to the machine where VisTrails is running."""
_settings = ModuleSettings(constant_signature=path_parameter_hasher,
constant_widget=("%s:FileChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "File"),
IPort("create_file", "Boolean", optional=True)]
_output_ports = [OPort("value", "File"),
OPort("local_filename", "String", optional=True)]
def compute(self):
n = self.get_name()
if (self.has_input("create_file") and self.get_input("create_file")):
vistrails.core.system.touch(n)
if not os.path.isfile(n):
raise ModuleError(self, 'File %r does not exist' % n)
self.set_results(n)
self.set_output("local_filename", n)
class Directory(Path):
_settings = ModuleSettings(constant_signature=path_parameter_hasher,
constant_widget=("%s:DirectoryChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "Directory"),
IPort("create_directory", "Boolean", optional=True)]
_output_ports = [OPort("value", "Directory"),
OPort("itemList", "List")]
def compute(self):
n = self.get_name()
if (self.has_input("create_directory") and
self.get_input("create_directory")):
try:
vistrails.core.system.mkdir(n)
except Exception, e:
raise ModuleError(self, 'mkdir: %s' % format_exception(e))
if not os.path.isdir(n):
raise ModuleError(self, 'Directory "%s" does not exist' % n)
self.set_results(n)
dir_list = os.listdir(n)
output_list = []
for item in dir_list:
full_path = os.path.join(n, item)
output_list.append(PathObject(full_path))
self.set_output('itemList', output_list)
##############################################################################
class OutputPath(Path):
_settings = ModuleSettings(constant_widget=("%s:OutputPathChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "OutputPath")]
_output_ports = [OPort("value", "OutputPath")]
def get_name(self):
n = None
if self.has_input("value"):
n = self.get_input("value").name
if n is None:
self.check_input("name")
n = self.get_input("name")
return n
def set_results(self, n):
self.set_output("value", PathObject(n))
self.set_output("value_as_string", n)
def compute(self):
n = self.get_name()
self.set_results(n)
class FileSink(NotCacheable, Module):
"""FileSink takes a file and writes it to a user-specified
location in the file system. The file is stored at location
specified by the outputPath. The overwrite flag allows users to
specify whether an existing path should be overwritten."""
_input_ports = [IPort("file", File),
IPort("outputPath", OutputPath),
IPort("overwrite", Boolean, optional=True,
default=True),
IPort("publishFile", Boolean, optional=True)]
def compute(self):
input_file = self.get_input("file")
output_path = self.get_input("outputPath")
full_path = output_path.name
if os.path.isfile(full_path):
if self.get_input('overwrite'):
try:
os.remove(full_path)
except OSError, e:
msg = ('Could not delete existing path "%s" '
'(overwrite on)' % full_path)
raise ModuleError(self, msg)
else:
                raise ModuleError(self,
                                  "Could not copy file to '%s': file already "
                                  "exists" % full_path)
try:
vistrails.core.system.link_or_copy(input_file.name, full_path)
except OSError, e:
msg = "Could not create file '%s': %s" % (full_path, e)
raise ModuleError(self, msg)
if (self.has_input("publishFile") and
self.get_input("publishFile") or
not self.has_input("publishFile")):
if self.moduleInfo.has_key('extra_info'):
if self.moduleInfo['extra_info'].has_key('pathDumpCells'):
folder = self.moduleInfo['extra_info']['pathDumpCells']
base_fname = os.path.basename(full_path)
(base_fname, file_extension) = os.path.splitext(base_fname)
base_fname = os.path.join(folder, base_fname)
# make a unique filename
filename = base_fname + file_extension
counter = 2
while os.path.exists(filename):
filename = base_fname + "_%d%s" % (counter,
file_extension)
counter += 1
try:
vistrails.core.system.link_or_copy(input_file.name, filename)
except OSError, e:
msg = "Could not publish file '%s' \n on '%s':" % (
full_path, filename)
# I am not sure whether we should raise an error
# I will just print a warning for now (Emanuele)
debug.warning("%s" % msg, e)
class DirectorySink(NotCacheable, Module):
"""DirectorySink takes a directory and writes it to a
user-specified location in the file system. The directory is
stored at location specified by the outputPath. The overwrite
flag allows users to specify whether an existing path should be
overwritten."""
_input_ports = [IPort("dir", Directory),
IPort("outputPath", OutputPath),
IPort("overwrite", Boolean, optional=True, default="True")]
def compute(self):
input_dir = self.get_input("dir")
output_path = self.get_input("outputPath")
full_path = output_path.name
if os.path.exists(full_path):
if self.get_input("overwrite"):
try:
if os.path.isfile(full_path):
os.remove(full_path)
else:
shutil.rmtree(full_path)
except OSError, e:
msg = ('Could not delete existing path "%s" '
'(overwrite on)' % full_path)
raise ModuleError(
self,
'%s\n%s' % (msg, format_exception(e)))
else:
msg = ('Could not write to existing path "%s" '
'(overwrite off)' % full_path)
raise ModuleError(self, msg)
try:
shutil.copytree(input_dir.name, full_path)
except OSError, e:
msg = 'Could not copy path from "%s" to "%s"' % \
(input_dir.name, full_path)
raise ModuleError(self, '%s\n%s' % (msg, format_exception(e)))
##############################################################################
class WriteFile(Converter):
"""Writes a String to a temporary File.
"""
_input_ports = [IPort('in_value', String),
IPort('suffix', String, optional=True, default=""),
IPort('encoding', String, optional=True)]
_output_ports = [OPort('out_value', File)]
def compute(self):
contents = self.get_input('in_value')
suffix = self.force_get_input('suffix', '')
result = self.interpreter.filePool.create_file(suffix=suffix)
if self.has_input('encoding'):
contents = contents.decode('utf-8') # VisTrails uses UTF-8
# internally (I hope)
contents = contents.encode(self.get_input('encoding'))
with open(result.name, 'wb') as fp:
fp.write(contents)
self.set_output('out_value', result)
class ReadFile(Converter):
"""Reads a File to a String.
"""
_input_ports = [IPort('in_value', File),
IPort('encoding', String, optional=True)]
_output_ports = [OPort('out_value', String)]
def compute(self):
filename = self.get_input('in_value').name
with open(filename, 'rb') as fp:
contents = fp.read()
if self.has_input('encoding'):
contents = contents.decode(self.get_input('encoding'))
contents = contents.encode('utf-8') # VisTrails uses UTF-8
# internally (for now)
self.set_output('out_value', contents)
##############################################################################
class Color(Constant):
# We set the value of a color object to be an InstanceObject that
# contains a tuple because a tuple would be interpreted as a
# type(tuple) which messes with the interpreter
_settings = ModuleSettings(constant_widgets=[
'%s:ColorWidget' % constant_config_path,
ConstantWidgetConfig('%s:ColorEnumWidget' % \
constant_config_path,
widget_type='enum'),
QueryWidgetConfig('%s:ColorQueryWidget' % \
query_config_path),
ParamExpWidgetConfig('%s:RGBExploreWidget' % \
paramexp_config_path,
widget_type='rgb'),
ParamExpWidgetConfig('%s:HSVExploreWidget' % \
paramexp_config_path,
widget_type='hsv')])
_input_ports = [IPort("value", "Color")]
_output_ports = [OPort("value", "Color")]
default_value = InstanceObject(tuple=(1,1,1))
@staticmethod
def translate_to_python(x):
return InstanceObject(
tuple=tuple([float(a) for a in x.split(',')]))
@staticmethod
def translate_to_string(v):
return ','.join('%f' % c for c in v.tuple)
@staticmethod
def validate(x):
return isinstance(x, InstanceObject) and hasattr(x, 'tuple')
@staticmethod
def to_string(r, g, b):
return "%s,%s,%s" % (r,g,b)
@staticmethod
def query_compute(value_a, value_b, query_method):
# SOURCE: http://www.easyrgb.com/index.php?X=MATH
def rgb_to_xyz(r, g, b):
# r,g,b \in [0,1]
if r > 0.04045:
r = ( ( r + 0.055 ) / 1.055 ) ** 2.4
else:
r = r / 12.92
if g > 0.04045:
g = ( ( g + 0.055 ) / 1.055 ) ** 2.4
else:
g = g / 12.92
if b > 0.04045:
b = ( ( b + 0.055 ) / 1.055 ) ** 2.4
else:
b = b / 12.92
r *= 100
g *= 100
b *= 100
# Observer. = 2 deg, Illuminant = D65
x = r * 0.4124 + g * 0.3576 + b * 0.1805
y = r * 0.2126 + g * 0.7152 + b * 0.0722
z = r * 0.0193 + g * 0.1192 + b * 0.9505
return (x,y,z)
def xyz_to_cielab(x,y,z):
# Observer= 2 deg, Illuminant= D65
ref_x, ref_y, ref_z = (95.047, 100.000, 108.883)
x /= ref_x
y /= ref_y
z /= ref_z
if x > 0.008856:
x = x ** ( 1/3.0 )
else:
x = ( 7.787 * x ) + ( 16 / 116.0 )
if y > 0.008856:
y = y ** ( 1/3.0 )
else:
y = ( 7.787 * y ) + ( 16 / 116.0 )
if z > 0.008856:
z = z ** ( 1/3.0 )
else:
z = ( 7.787 * z ) + ( 16 / 116.0 )
L = ( 116 * y ) - 16
a = 500 * ( x - y )
b = 200 * ( y - z )
return (L, a, b)
def rgb_to_cielab(r,g,b):
return xyz_to_cielab(*rgb_to_xyz(r,g,b))
value_a_rgb = (float(a) for a in value_a.split(','))
value_b_rgb = (float(b) for b in value_b.split(','))
value_a_lab = rgb_to_cielab(*value_a_rgb)
value_b_lab = rgb_to_cielab(*value_b_rgb)
# cie76 difference
diff = sum((v_1 - v_2) ** 2
for v_1, v_2 in izip(value_a_lab, value_b_lab)) ** (0.5)
# print "CIE 76 DIFFERENCE:", diff
if query_method is None:
query_method = '2.3'
return diff < float(query_method)
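# Illustrative usage sketch (the colour values and the '2.3' threshold are
# arbitrary; this helper is not called anywhere): Color stores its value as an
# InstanceObject wrapping an RGB tuple, and query_compute() compares two
# comma-separated RGB strings via the CIE76 distance in CIELAB space.
def _color_sketch():
    red = Color.translate_to_python('1.0,0.0,0.0')  # InstanceObject(tuple=(1.0, 0.0, 0.0))
    as_string = Color.translate_to_string(red)      # '1.000000,0.000000,0.000000'
    near_red = '0.99,0.0,0.0'
    # True here: the CIE76 distance (roughly 0.9) is below the default 2.3 threshold.
    return Color.query_compute(as_string, near_red, '2.3')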
##############################################################################
class StandardOutput(NotCacheable, Module):
"""StandardOutput is a VisTrails Module that simply prints the
value connected on its port to standard output. It is intended
mostly as a debugging device."""
_input_ports = [IPort("value", 'Variant')]
def compute(self):
v = self.get_input("value")
if isinstance(v, PathObject):
try:
fp = open(v.name, 'rb')
except IOError:
print v
else:
try:
CHUNKSIZE = 2048
chunk = fp.read(CHUNKSIZE)
if chunk:
sys.stdout.write(chunk)
while len(chunk) == CHUNKSIZE:
chunk = fp.read(CHUNKSIZE)
if chunk:
sys.stdout.write(chunk)
sys.stdout.write('\n')
finally:
fp.close()
else:
print v
##############################################################################
# Tuple will be reasonably magic right now. We'll integrate it better
# with vistrails later.
class Tuple(Module):
"""Tuple represents a tuple of values. Tuple might not be well
integrated with the rest of VisTrails, so don't use it unless
you know what you're doing."""
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.tuple_configuration:TupleConfigurationWidget")
def __init__(self):
Module.__init__(self)
self.input_ports_order = []
self.values = tuple()
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.input_ports_order = [p.name for p in module.input_port_specs]
def compute(self):
values = tuple([self.get_input(p)
for p in self.input_ports_order])
self.values = values
self.set_output("value", values)
class Untuple(Module):
"""Untuple takes a tuple and returns the individual values. It
reverses the actions of Tuple.
"""
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.tuple_configuration:UntupleConfigurationWidget")
def __init__(self):
Module.__init__(self)
self.output_ports_order = []
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.output_ports_order = [p.name for p in module.output_port_specs]
# output_ports are reversed for display purposes...
self.output_ports_order.reverse()
def compute(self):
if self.has_input("tuple"):
            tuple_module = self.get_input("tuple")
            values = tuple_module.values
else:
values = self.get_input("value")
for p, value in izip(self.output_ports_order, values):
self.set_output(p, value)
##############################################################################
class ConcatenateString(Module):
"""ConcatenateString takes many strings as input and produces the
concatenation as output. Useful for constructing filenames, for
example.
This class will probably be replaced with a better API in the
future."""
fieldCount = 4
_input_ports = [IPort("str%d" % i, "String")
for i in xrange(1, 1 + fieldCount)]
_output_ports = [OPort("value", "String")]
def compute(self):
result = "".join(self.force_get_input('str%d' % i, '')
for i in xrange(1, 1 + self.fieldCount))
self.set_output('value', result)
##############################################################################
class Not(Module):
"""Not inverts a Boolean.
"""
_input_ports = [IPort('input', 'Boolean')]
_output_ports = [OPort('value', 'Boolean')]
def compute(self):
value = self.get_input('input')
self.set_output('value', not value)
##############################################################################
# List
# If numpy is available, we consider numpy arrays to be lists as well
class ListType(object):
__metaclass__ = ABCMeta
ListType.register(list)
try:
import numpy
except ImportError:
numpy = None
else:
ListType.register(numpy.ndarray)
class List(Constant):
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.list_configuration:ListConfigurationWidget")
_input_ports = [IPort("value", "List"),
IPort("head", "Variant", depth=1),
IPort("tail", "List")]
_output_ports = [OPort("value", "List")]
default_value = []
def __init__(self):
Constant.__init__(self)
self.input_ports_order = []
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.input_ports_order = [p.name for p in module.input_port_specs]
@staticmethod
def validate(x):
return isinstance(x, ListType)
@staticmethod
def translate_to_python(v):
return literal_eval(v)
@staticmethod
def translate_to_string(v, dims=None):
if dims is None:
if numpy is not None and isinstance(v, numpy.ndarray):
dims = v.ndim
else:
dims = 1
if dims == 1:
return '[%s]' % ', '.join(repr(c)
for c in v)
else:
return '[%s]' % ', '.join(List.translate_to_string(c, dims-1)
for c in v)
def compute(self):
head, middle, items, tail = [], [], [], []
got_value = False
if self.has_input('value'):
# run the regular compute here
Constant.compute(self)
middle = self.outputPorts['value']
got_value = True
if self.has_input('head'):
head = self.get_input('head')
got_value = True
if self.input_ports_order:
items = [self.get_input(p)
for p in self.input_ports_order]
got_value = True
if self.has_input('tail'):
tail = self.get_input('tail')
got_value = True
if not got_value:
self.get_input('value')
self.set_output('value', head + middle + items + tail)
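# Illustrative sketch of the Constant plumbing used by List (sample values are
# arbitrary; this helper is not called anywhere): parameter strings are parsed
# with literal_eval, and translate_to_string() renders lists (and numpy arrays)
# back to strings.
def _list_constant_sketch():
    parsed = List.translate_to_python('[1, 2, 3]')         # [1, 2, 3]
    rendered = List.translate_to_string([[1, 2], [3, 4]])  # '[[1, 2], [3, 4]]'
    return parsed, rendered, List.validate(parsed)          # (..., ..., True)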
##############################################################################
# Dictionary
class Dictionary(Constant):
default_value = {}
_input_ports = [CIPort("addPair", "Module, Module"),
IPort("addPairs", "List")]
@staticmethod
def translate_to_python(v):
return literal_eval(v)
@staticmethod
def validate(x):
return isinstance(x, dict)
def compute(self):
d = {}
if self.has_input('value'):
Constant.compute(self)
d.update(self.outputPorts['value'])
if self.has_input('addPair'):
pairs_list = self.get_input_list('addPair')
d.update(pairs_list)
if self.has_input('addPairs'):
d.update(self.get_input('addPairs'))
self.set_output("value", d)
##############################################################################
# TODO: Null should be a subclass of Constant?
class Null(Module):
"""Null is the class of None values."""
_settings = ModuleSettings(hide_descriptor=True)
def compute(self):
self.set_output("value", None)
##############################################################################
class Unpickle(Module):
"""Unpickles a string.
"""
_settings = ModuleSettings(hide_descriptor=True)
_input_ports = [IPort('input', 'String')]
_output_ports = [OPort('result', 'Variant')]
def compute(self):
value = self.get_input('input')
self.set_output('result', pickle.loads(value))
##############################################################################
class CodeRunnerMixin(object):
def __init__(self):
self.output_ports_order = []
super(CodeRunnerMixin, self).__init__()
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.output_ports_order = [p.name for p in module.output_port_specs]
# output_ports are reversed for display purposes...
self.output_ports_order.reverse()
def run_code(self, code_str,
use_input=False,
use_output=False):
"""run_code runs a piece of code as a VisTrails module.
        use_input and use_output control whether to expose the input port
        and output port dictionaries as local variables inside the
        execution."""
import vistrails.core.packagemanager
def fail(msg):
raise ModuleError(self, msg)
def cache_this():
self.is_cacheable = lambda *args, **kwargs: True
locals_ = locals()
if use_input:
for k in self.inputPorts:
locals_[k] = self.get_input(k)
if use_output:
for output_portname in self.output_ports_order:
if output_portname not in self.inputPorts:
locals_[output_portname] = None
_m = vistrails.core.packagemanager.get_package_manager()
reg = get_module_registry()
locals_.update({'fail': fail,
'package_manager': _m,
'cache_this': cache_this,
'registry': reg,
'self': self})
if 'source' in locals_:
del locals_['source']
# Python 2.6 needs code to end with newline
exec code_str + '\n' in locals_, locals_
if use_output:
for k in self.output_ports_order:
if locals_.get(k) is not None:
self.set_output(k, locals_[k])
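# Minimal sketch of what run_code() does under the hood (all names below are
# invented; this helper is not called anywhere): the source string is executed
# against a namespace that already holds the input values, and whatever names
# it leaves behind become the outputs.
def _run_code_sketch():
    namespace = {'customin': 42}                # stands in for the input ports
    code = 'customout = "nb is %d" % customin'  # what a PythonSource would hold
    exec(code + '\n', namespace, namespace)     # Python 2.6 needs the trailing newline
    return namespace['customout']               # 'nb is 42'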
##############################################################################
class PythonSource(CodeRunnerMixin, NotCacheable, Module):
"""PythonSource is a Module that executes an arbitrary piece of
Python code.
It is especially useful for one-off pieces of 'glue' in a
pipeline.
If you want a PythonSource execution to fail, call
fail(error_message).
If you want a PythonSource execution to be cached, call
cache_this().
"""
_settings = ModuleSettings(
configure_widget=("vistrails.gui.modules.python_source_configure:"
"PythonSourceConfigurationWidget"))
_input_ports = [IPort('source', 'String', optional=True, default="")]
    _output_ports = [OPort('self', 'Module')]
def compute(self):
s = urllib.unquote(str(self.get_input('source')))
self.run_code(s, use_input=True, use_output=True)
##############################################################################
def zip_extract_file(archive, filename_in_archive, output_filename):
z = zipfile.ZipFile(archive)
try:
fileinfo = z.getinfo(filename_in_archive) # Might raise KeyError
output_dirname, output_filename = os.path.split(output_filename)
fileinfo.filename = output_filename
z.extract(fileinfo, output_dirname)
finally:
z.close()
def zip_extract_all_files(archive, output_path):
z = zipfile.ZipFile(archive)
try:
z.extractall(output_path)
finally:
z.close()
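# Self-contained sketch exercising the two zip helpers above on a throwaway
# archive (all paths are temporary and invented; nothing here runs at import
# time).
def _zip_helpers_sketch():
    import os, shutil, tempfile, zipfile
    workdir = tempfile.mkdtemp()
    try:
        archive = os.path.join(workdir, 'demo.zip')
        z = zipfile.ZipFile(archive, 'w')
        try:
            z.writestr('inner/readme.txt', 'hello')
        finally:
            z.close()
        # Extract a single member under a new name...
        target = os.path.join(workdir, 'copy.txt')
        zip_extract_file(archive, 'inner/readme.txt', target)
        # ...or unpack the whole archive into a directory.
        zip_extract_all_files(archive, os.path.join(workdir, 'all'))
        with open(target) as fp:
            return fp.read()                    # 'hello'
    finally:
        shutil.rmtree(workdir)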
class Unzip(Module):
"""Unzip extracts a file from a ZIP archive."""
_input_ports = [IPort('archive_file', 'File'),
IPort('filename_in_archive', 'String')]
_output_ports = [OPort('file', 'File')]
def compute(self):
self.check_input("archive_file")
self.check_input("filename_in_archive")
filename_in_archive = self.get_input("filename_in_archive")
archive_file = self.get_input("archive_file")
if not os.path.isfile(archive_file.name):
raise ModuleError(self, "archive file does not exist")
suffix = self.interpreter.filePool.guess_suffix(filename_in_archive)
output = self.interpreter.filePool.create_file(suffix=suffix)
zip_extract_file(archive_file.name,
filename_in_archive,
output.name)
self.set_output("file", output)
class UnzipDirectory(Module):
"""UnzipDirectory extracts every file from a ZIP archive."""
_input_ports = [IPort('archive_file', 'File')]
_output_ports = [OPort('directory', 'Directory')]
def compute(self):
self.check_input("archive_file")
archive_file = self.get_input("archive_file")
if not os.path.isfile(archive_file.name):
raise ModuleError(self, "archive file does not exist")
output = self.interpreter.filePool.create_directory()
zip_extract_all_files(archive_file.name,
output.name)
self.set_output("directory", output)
##############################################################################
class Round(Converter):
"""Turns a Float into an Integer.
"""
_settings = ModuleSettings(hide_descriptor=True)
_input_ports = [IPort('in_value', 'Float'),
IPort('floor', 'Boolean', optional=True, default="True")]
_output_ports = [OPort('out_value', 'Integer')]
def compute(self):
fl = self.get_input('in_value')
floor = self.get_input('floor')
if floor:
integ = int(fl) # just strip the decimals
else:
integ = int(fl + 0.5) # nearest
self.set_output('out_value', integ)
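# Quick sketch of the two rounding modes implemented above (values arbitrary;
# this helper is not called anywhere): 'floor' truncates towards zero, the
# alternative rounds to the nearest integer.
def _round_sketch():
    return int(2.7), int(2.7 + 0.5)  # (2, 3)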
class TupleToList(Converter):
"""Turns a Tuple into a List.
"""
_settings = ModuleSettings(hide_descriptor=True)
_input_ports = [IPort('in_value', 'Variant')]
_output_ports = [OPort('out_value', 'List')]
@classmethod
def can_convert(cls, sub_descs, super_descs):
if len(sub_descs) <= 1:
return False
reg = get_module_registry()
return super_descs == [reg.get_descriptor(List)]
def compute(self):
tu = self.get_input('in_value')
if not isinstance(tu, tuple):
raise ModuleError(self, "Input is not a tuple")
self.set_output('out_value', list(tu))
##############################################################################
class Variant(Module):
"""
    Variant is tracked internally for outputting a variant type on an
    output port. For an input port, the Module type should be used.
"""
_settings = ModuleSettings(abstract=True)
##############################################################################
class Generator(object):
"""
Used to keep track of list iteration, it will execute a module once for
each input in the list/generator.
"""
_settings = ModuleSettings(abstract=True)
generators = []
def __init__(self, size=None, module=None, generator=None, port=None,
accumulated=False):
self.module = module
self.generator = generator
self.port = port
self.size = size
self.accumulated = accumulated
if generator and module not in Generator.generators:
# add to global list of generators
# they will be topologically ordered
module.generator = generator
Generator.generators.append(module)
    def next(self):
        """ Return the next value of the generator. """
value = self.module.get_output(self.port)
if isinstance(value, Generator):
value = value.all()
return value
    def all(self):
        """ Exhaust next() and collect every value (used for Streams).
"""
items = []
item = self.next()
while item is not None:
items.append(item)
item = self.next()
return items
@staticmethod
    def stream():
        """ Execute all generators until their inputs are exhausted;
        this makes sure branching and multiple sinks are executed correctly.
"""
result = True
if not Generator.generators:
return
while result is not None:
for g in Generator.generators:
result = g.next()
Generator.generators = []
##############################################################################
class Assert(Module):
"""
Assert is a simple module that conditionally stops the execution.
"""
_input_ports = [IPort('condition', 'Boolean')]
def compute(self):
condition = self.get_input('condition')
if not condition:
raise ModuleError(self, "Assert: condition is False",
abort=True)
class AssertEqual(Module):
"""
AssertEqual works like Assert but compares two values.
It is provided for convenience.
"""
_input_ports = [IPort('value1', 'Variant'),
IPort('value2', 'Variant')]
def compute(self):
values = (self.get_input('value1'),
self.get_input('value2'))
if values[0] != values[1]:
reprs = tuple(repr(v) for v in values)
reprs = tuple('%s...' % v[:17] if len(v) > 20 else v
for v in reprs)
raise ModuleError(self, "AssertEqual: values are different: "
"%r, %r" % reprs,
abort=True)
##############################################################################
class StringFormat(Module):
"""
Builds a string from objects using Python's str.format().
"""
_settings = ModuleSettings(configure_widget=
'vistrails.gui.modules.stringformat_configuration:'
'StringFormatConfigurationWidget')
_input_ports = [IPort('format', String)]
_output_ports = [OPort('value', String)]
@staticmethod
def list_placeholders(fmt):
placeholders = set()
nb = 0
i = 0
n = len(fmt)
while i < n:
if fmt[i] == '{':
i += 1
                if fmt[i] == '{':  # escaped '{{' (IndexError if the string ends with '{')
i += 1
continue
                e = fmt.index('}', i)  # ValueError if the brace is never closed
f = e
for c in (':', '!', '[', '.'):
c = fmt.find(c, i)
if c != -1:
f = min(f, c)
if i == f:
nb += 1
else:
arg = fmt[i:f]
try:
arg = int(arg)
except ValueError:
placeholders.add(arg)
else:
nb = max(nb, arg + 1)
i = e
i += 1
return nb, placeholders
def compute(self):
fmt = self.get_input('format')
args, kwargs = StringFormat.list_placeholders(fmt)
f_args = [self.get_input('_%d' % n)
for n in xrange(args)]
f_kwargs = dict((n, self.get_input(n))
for n in kwargs)
self.set_output('value', fmt.format(*f_args, **f_kwargs))
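# Illustrative usage sketch (the format string is made up; this helper is not
# called anywhere): list_placeholders() reports how many positional '{}' slots
# a format string uses and which named fields it references; compute() then
# reads the positional values from ports named '_0', '_1', ... and the named
# ones from ports carrying the field names.
def _stringformat_sketch():
    nb, names = StringFormat.list_placeholders('{} of {total} done')
    return nb, names  # (1, set(['total']))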
##############################################################################
def init_constant(m):
reg = get_module_registry()
reg.add_module(m)
reg.add_input_port(m, "value", m)
reg.add_output_port(m, "value", m)
_modules = [Module, Converter, Constant, Boolean, Float, Integer, String, List,
Path, File, Directory, OutputPath,
FileSink, DirectorySink, WriteFile, ReadFile, StandardOutput,
Tuple, Untuple, ConcatenateString, Not, Dictionary, Null, Variant,
Unpickle, PythonSource, Unzip, UnzipDirectory, Color,
Round, TupleToList, Assert, AssertEqual, StringFormat]
def initialize(*args, **kwargs):
# initialize the sub_module modules, too
import vistrails.core.modules.sub_module
import vistrails.core.modules.output_modules
_modules.extend(vistrails.core.modules.sub_module._modules)
_modules.extend(vistrails.core.modules.output_modules._modules)
def handle_module_upgrade_request(controller, module_id, pipeline):
from vistrails.core.upgradeworkflow import UpgradeWorkflowHandler
reg = get_module_registry()
def outputName_remap(old_conn, new_module):
ops = []
old_src_module = pipeline.modules[old_conn.source.moduleId]
op_desc = reg.get_descriptor(OutputPath)
new_x = (old_src_module.location.x + new_module.location.x) / 2.0
new_y = (old_src_module.location.y + new_module.location.y) / 2.0
op_module = \
controller.create_module_from_descriptor(op_desc, new_x, new_y)
ops.append(('add', op_module))
create_new_connection = UpgradeWorkflowHandler.create_new_connection
new_conn_1 = create_new_connection(controller,
old_src_module,
old_conn.source,
op_module,
"name")
ops.append(('add', new_conn_1))
new_conn_2 = create_new_connection(controller,
op_module,
"value",
new_module,
"outputPath")
ops.append(('add', new_conn_2))
return ops
module_remap = {'FileSink':
[(None, '1.6', None,
{'dst_port_remap':
{'overrideFile': 'overwrite',
'outputName': outputName_remap},
'function_remap':
{'overrideFile': 'overwrite',
'outputName': 'outputPath'}})],
'GetItemsFromDirectory':
[(None, '1.6', 'Directory',
{'dst_port_remap':
{'dir': 'value'},
'src_port_remap':
{'itemlist': 'itemList'},
})],
'InputPort':
[(None, '1.6', None,
{'dst_port_remap': {'old_name': None}})],
'OutputPort':
[(None, '1.6', None,
{'dst_port_remap': {'old_name': None}})],
'PythonSource':
[(None, '1.6', None, {})],
'Tuple':
[(None, '2.1.1', None, {})],
'StandardOutput':
[(None, '2.1.1', None, {})],
'List':
[(None, '2.1.1', None, {})],
'AssertEqual':
[(None, '2.1.1', None, {})],
'Converter':
[(None, '2.1.1', None, {})],
}
return UpgradeWorkflowHandler.remap_module(controller, module_id, pipeline,
module_remap)
###############################################################################
class NewConstant(Constant):
"""
A new Constant module to be used inside the FoldWithModule module.
"""
def setValue(self, v):
self.set_output("value", v)
self.upToDate = True
def create_constant(value):
"""
Creates a NewConstant module, to be used for the ModuleConnector.
"""
constant = NewConstant()
constant.setValue(value)
return constant
def get_module(value, signature=None):
"""
Creates a module for value, in order to do the type checking.
"""
if isinstance(value, Constant):
return type(value)
elif isinstance(value, bool):
return Boolean
elif isinstance(value, str):
return String
elif isinstance(value, int):
return Integer
elif isinstance(value, float):
return Float
if isinstance(value, list):
return List
elif isinstance(value, tuple):
# Variant supports signatures of any length
if signature is None or \
(len(signature) == 1 and signature[0][0] == Variant):
return (Variant,)*len(value)
v_modules = ()
for element in xrange(len(value)):
v_modules += (get_module(value[element], signature[element]),)
if None in v_modules: # Identification failed
return None
return v_modules
else: # pragma: no cover
debug.warning("Could not identify the type of the list element.")
debug.warning("Type checking is not going to be done inside "
"iterated module.")
return None
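# Illustrative sketch of get_module()'s mapping (sample values are arbitrary;
# this helper is not called anywhere): plain Python values map to the matching
# Constant subclass, and tuples map element-wise, defaulting to Variant.
def _get_module_sketch():
    assert get_module(True) is Boolean
    assert get_module('abc') is String
    assert get_module(3) is Integer
    assert get_module(2.5) is Float
    assert get_module([1, 2]) is List
    assert get_module((1, 'a')) == (Variant, Variant)
    return True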
###############################################################################
import sys
import unittest
class TestConcatenateString(unittest.TestCase):
@staticmethod
def concatenate(**kwargs):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(ConcatenateString, 'value') as results:
errors = execute([
('ConcatenateString', 'org.vistrails.vistrails.basic', [
(name, [('String', value)])
for name, value in kwargs.iteritems()
]),
])
if errors:
return None
return results
def test_concatenate(self):
"""Concatenates strings"""
self.assertEqual(self.concatenate(
str1="hello ", str2="world"),
["hello world"])
self.assertEqual(self.concatenate(
str3="hello world"),
["hello world"])
self.assertEqual(self.concatenate(
str2="hello ", str4="world"),
["hello world"])
self.assertEqual(self.concatenate(
str1="hello", str3=" ", str4="world"),
["hello world"])
def test_empty(self):
"""Runs ConcatenateString with no input"""
self.assertEqual(self.concatenate(), [""])
class TestNot(unittest.TestCase):
def run_pipeline(self, functions):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(Not, 'value') as results:
errors = execute([
('Not', 'org.vistrails.vistrails.basic',
functions),
])
return errors, results
def test_true(self):
errors, results = self.run_pipeline([
('input', [('Boolean', 'True')])])
self.assertFalse(errors)
self.assertEqual(len(results), 1)
self.assertIs(results[0], False)
def test_false(self):
errors, results = self.run_pipeline([
('input', [('Boolean', 'False')])])
self.assertFalse(errors)
self.assertEqual(len(results), 1)
self.assertIs(results[0], True)
def test_notset(self):
errors, results = self.run_pipeline([])
self.assertTrue(errors)
class TestList(unittest.TestCase):
@staticmethod
def build_list(value=None, head=None, tail=None):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(List, 'value') as results:
functions = []
def add(n, v, t):
if v is not None:
for e in v:
functions.append(
(n, [(t, e)])
)
add('value', value, 'List')
add('head', head, 'String')
add('tail', tail, 'List')
errors = execute([
('List', 'org.vistrails.vistrails.basic', functions),
])
if errors:
return None
# List is a Constant, so the interpreter will set the result 'value'
# from the 'value' input port automatically
# Ignore these first results
return results[-1]
def test_simple(self):
"""Tests the default ports of the List module"""
self.assertEqual(self.build_list(
value=['["a", "b", "c"]']),
["a", "b", "c"])
self.assertEqual(self.build_list(
head=["d"],
value=['["a", "b", "c"]']),
["d", "a", "b", "c"])
self.assertEqual(self.build_list(
head=["d"],
value=['["a", "b", "c"]'],
tail=['["e", "f"]']),
["d", "a", "b", "c", "e", "f"])
self.assertEqual(self.build_list(
value=['[]'],
tail=['[]']),
[])
def test_multiple(self):
"""Tests setting multiple values on a port"""
# Multiple values on 'head'
self.assertEqual(self.build_list(
head=["a", "b"]),
["a", "b"])
self.assertEqual(self.build_list(
head=["a", "b"],
value=['["c", "d"]']),
["a", "b", "c", "d"])
# Multiple values on 'value'
res = self.build_list(value=['["a", "b"]', '["c", "d"]'])
# Connections of List type are merged
self.assertEqual(res, ["a", "b", "c", "d"])
def test_items(self):
"""Tests the multiple 'itemN' ports"""
from vistrails.tests.utils import execute, intercept_result
def list_with_items(nb_items, **kwargs):
with intercept_result(List, 'value') as results:
errors = execute([
('List', 'org.vistrails.vistrails.basic', [
(k, [('String', v)])
for k, v in kwargs.iteritems()
]),
],
add_port_specs=[
(0, 'input', 'item%d' % i,
'(org.vistrails.vistrails.basic:Module)')
for i in xrange(nb_items)
])
if errors:
return None
return results[-1]
self.assertEqual(
list_with_items(2, head="one", item0="two", item1="three"),
["one", "two", "three"])
# All 'itemN' ports have to be set
self.assertIsNone(
list_with_items(3, head="one", item0="two", item2="three"))
class TestPythonSource(unittest.TestCase):
def test_simple(self):
"""A simple PythonSource returning a string"""
import urllib2
from vistrails.tests.utils import execute, intercept_result
source = 'customout = "nb is %d" % customin'
source = urllib2.quote(source)
with intercept_result(PythonSource, 'customout') as results:
self.assertFalse(execute([
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', source)]),
('customin', [('Integer', '42')])
]),
('String', 'org.vistrails.vistrails.basic', []),
],
[
(0, 'customout', 1, 'value'),
],
add_port_specs=[
(0, 'input', 'customin',
'org.vistrails.vistrails.basic:Integer'),
(0, 'output', 'customout',
'org.vistrails.vistrails.basic:String'),
]))
self.assertEqual(results[-1], "nb is 42")
class TestNumericConversions(unittest.TestCase):
def test_full(self):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(Round, 'out_value') as results:
self.assertFalse(execute([
('Integer', 'org.vistrails.vistrails.basic', [
('value', [('Integer', '5')])
]),
('Float', 'org.vistrails.vistrails.basic', []),
('PythonCalc', 'org.vistrails.vistrails.pythoncalc', [
('value2', [('Float', '2.7')]),
('op', [('String', '+')]),
]),
('Round', 'org.vistrails.vistrails.basic', [
('floor', [('Boolean', 'True')]),
]),
],
[
(0, 'value', 1, 'value'),
(1, 'value', 2, 'value1'),
(2, 'value', 3, 'in_value'),
]))
self.assertEqual(results, [7])
class TestUnzip(unittest.TestCase):
def test_unzip_file(self):
from vistrails.tests.utils import execute, intercept_result
from vistrails.core.system import vistrails_root_directory
zipfile = os.path.join(vistrails_root_directory(),
'tests', 'resources',
'test_archive.zip')
with intercept_result(Unzip, 'file') as outfiles:
self.assertFalse(execute([
('Unzip', 'org.vistrails.vistrails.basic', [
('archive_file', [('File', zipfile)]),
('filename_in_archive', [('String', 'file1.txt')]),
]),
]))
self.assertEqual(len(outfiles), 1)
with open(outfiles[0].name, 'rb') as outfile:
self.assertEqual(outfile.read(), "some random\ncontent")
def test_unzip_all(self):
from vistrails.tests.utils import execute, intercept_result
from vistrails.core.system import vistrails_root_directory
zipfile = os.path.join(vistrails_root_directory(),
'tests', 'resources',
'test_archive.zip')
with intercept_result(UnzipDirectory, 'directory') as outdir:
self.assertFalse(execute([
('UnzipDirectory', 'org.vistrails.vistrails.basic', [
('archive_file', [('File', zipfile)]),
]),
]))
self.assertEqual(len(outdir), 1)
self.assertEqual(
[(d, f) for p, d, f in os.walk(outdir[0].name)],
[(['subdir'], ['file1.txt']),
([], ['file2.txt'])])
from vistrails.core.configuration import get_vistrails_configuration
class TestTypechecking(unittest.TestCase):
@classmethod
def setUpClass(cls):
conf = get_vistrails_configuration()
cls.error_all = conf.showConnectionErrors
cls.error_variant = conf.showVariantErrors
@classmethod
def tearDownClass(cls):
conf = get_vistrails_configuration()
conf.showConnectionErrors = cls.error_all
conf.showVariantErrors = cls.error_variant
@staticmethod
def set_settings(error_all, error_variant):
conf = get_vistrails_configuration()
conf.showConnectionErrors = error_all
conf.showVariantErrors = error_variant
def run_test_pipeline(self, result, expected_results, *args, **kwargs):
from vistrails.tests.utils import execute, intercept_result
for error_all, error_variant, expected in expected_results:
self.set_settings(error_all, error_variant)
with intercept_result(*result) as results:
error = execute(*args, **kwargs)
if not expected:
self.assertTrue(error)
else:
self.assertFalse(error)
self.assertEqual(results, expected)
def test_basic(self):
import urllib2
# Base case: no typing error
# This should succeed in every case
self.run_test_pipeline(
(PythonSource, 'r'),
[(False, False, ["test"]),
(True, True, ["test"])],
[
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('o = "test"'))]),
]),
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('r = i'))])
]),
],
[
(0, 'o', 1, 'i'),
],
add_port_specs=[
(0, 'output', 'o',
'org.vistrails.vistrails.basic:String'),
(1, 'input', 'i',
'org.vistrails.vistrails.basic:String'),
(1, 'output', 'r',
'org.vistrails.vistrails.basic:String')
])
def test_fake(self):
import urllib2
# A module is lying, declaring a String but returning an int
# This should fail with showConnectionErrors=True (not the
# default)
self.run_test_pipeline(
(PythonSource, 'r'),
[(False, False, [42]),
(False, True, [42]),
(True, True, False)],
[
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('o = 42'))]),
]),
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('r = i'))])
]),
],
[
(0, 'o', 1, 'i'),
],
add_port_specs=[
(0, 'output', 'o',
'org.vistrails.vistrails.basic:String'),
(1, 'input', 'i',
'org.vistrails.vistrails.basic:String'),
(1, 'output', 'r',
'org.vistrails.vistrails.basic:String')
])
def test_inputport(self):
import urllib2
# This test uses an InputPort module, whose output port should not be
# considered a Variant port (although it is)
self.run_test_pipeline(
(PythonSource, 'r'),
[(False, False, [42]),
(False, True, [42]),
(True, True, [42])],
[
('InputPort', 'org.vistrails.vistrails.basic', [
('ExternalPipe', [('Integer', '42')]),
]),
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('r = i'))])
]),
],
[
(0, 'InternalPipe', 1, 'i'),
],
add_port_specs=[
(1, 'input', 'i',
'org.vistrails.vistrails.basic:String'),
(1, 'output', 'r',
'org.vistrails.vistrails.basic:String'),
])
class TestStringFormat(unittest.TestCase):
def test_list_placeholders(self):
fmt = 'a {} b}} {c!s} {{d e}} {}f'
self.assertEqual(StringFormat.list_placeholders(fmt),
(2, set(['c'])))
def run_format(self, fmt, expected, **kwargs):
from vistrails.tests.utils import execute, intercept_result
functions = [('format', [('String', fmt)])]
functions.extend((n, [(t, v)])
for n, (t, v) in kwargs.iteritems())
with intercept_result(StringFormat, 'value') as results:
self.assertFalse(execute([
('StringFormat', 'org.vistrails.vistrails.basic',
functions),
],
add_port_specs=[
(0, 'input', n, t)
for n, (t, v) in kwargs.iteritems()
]))
self.assertEqual(results, [expected])
def test_format(self):
self.run_format('{{ {a} }} b {c!s}', '{ 42 } b 12',
a=('Integer', '42'),
c=('Integer', '12'))
# Python 2.6 doesn't support {}
@unittest.skipIf(sys.version_info < (2, 7), "No {} support on 2.6")
def test_format_27(self):
self.run_format('{} {}', 'a b',
_0=('String', 'a'), _1=('String', 'b'))
self.run_format('{{ {a} {} {b!s}', '{ 42 b 12',
a=('Integer', '42'), _0=('String', 'b'),
b=('Integer', '12'))
self.run_format('{} {} {!r}{ponc} {:.2f}', "hello dear 'world'! 1.33",
_0=('String', 'hello'), _1=('String', 'dear'),
_2=('String', 'world'), _3=('Float', '1.333333333'),
ponc=('String', '!'))
class TestConstantMetaclass(unittest.TestCase):
def test_meta(self):
"""Tests the __metaclass__ for Constant.
"""
mod1_in = [('value', 'basic:String'), IPort('other', 'basic:Float')]
mod1_out = [('someport', 'basic:Integer')]
class Mod1(Constant):
_input_ports = mod1_in
_output_ports = mod1_out
self.assertEqual(Mod1._input_ports, mod1_in)
self.assertEqual(Mod1._output_ports, [('value', Mod1)] + mod1_out)
mod2_in = [('another', 'basic:String')]
class Mod2(Mod1):
_input_ports = mod2_in
self.assertEqual(Mod2._input_ports, [('value', Mod2)] + mod2_in)
self.assertEqual(Mod2._output_ports, [('value', Mod2)])
class Mod3(Mod1):
_output_ports = []
self.assertEqual(Mod3._input_ports, [('value', Mod3)])
self.assertEqual(Mod3._output_ports, [('value', Mod3)])
| hjanime/VisTrails | vistrails/core/modules/basic_modules.py | Python | bsd-3-clause | 69,112 |
"""Base class for all the objects in SymPy"""
from __future__ import print_function, division
from .assumptions import BasicMeta, ManagedProperties
from .cache import cacheit
from .sympify import _sympify, sympify, SympifyError
from .compatibility import (iterable, Iterator, ordered,
string_types, with_metaclass, zip_longest, range)
from .singleton import S
from inspect import getmro
class Basic(with_metaclass(ManagedProperties)):
"""
Base class for all objects in SymPy.
Conventions:
1) Always use ``.args``, when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
"""
__slots__ = ['_mhash', # hash value
'_args', # arguments
'_assumptions'
]
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
is_Point = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
return self.func(*self.args)
def __reduce_ex__(self, proto):
""" Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recurrence
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
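    # Illustrative sketch (hypothetical class, not part of SymPy): a subclass
    # carrying state beyond .args, e.g. a name, should fold that state into
    # _hashable_content so that __eq__ and __hash__ take it into account:
    #
    #     class Named(Basic):
    #         def __new__(cls, name, *args):
    #             obj = Basic.__new__(cls, *args)
    #             obj.name = name
    #             return obj
    #         def _hashable_content(self):
    #             return (self.name,) + self._args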
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
this case), the initial assumptions are also forming their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'hermitian': True,
'imaginary': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real': True,
'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
        from ``other``, then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy.core import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue 5169 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([inner_key(arg) for arg in args])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
from sympy import Pow
if self is other:
return True
from .function import AppliedUndef, UndefinedFunction as UndefFunc
if isinstance(self, UndefFunc) and isinstance(other, UndefFunc):
if self.class_key() == other.class_key():
return True
else:
return False
if type(self) is not type(other):
# issue 6100 a**1.0 == a like a**2.0 == a**2
if isinstance(self, Pow) and self.exp == 1:
return self.base == other
if isinstance(other, Pow) and other.exp == 1:
return self == other.base
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other
if isinstance(self, AppliedUndef) and isinstance(other,
AppliedUndef):
if self.class_key() != other.class_key():
return False
elif type(self) is not type(other):
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
"""a != b -> Compare two symbolic trees and see whether they are different
this is the same as:
a.compare(b) != 0
but faster
"""
return not self.__eq__(other)
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
dummy_symbols = [s for s in self.free_symbols if s.is_Dummy]
if not dummy_symbols:
return self == other
elif len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
raise ValueError(
"only one dummy symbol allowed on the left-hand side")
if symbol is None:
symbols = other.free_symbols
if not symbols:
return self == other
elif len(symbols) == 1:
symbol = symbols.pop()
else:
raise ValueError("specify a symbol in which expressions should be compared")
tmp = dummy.__class__()
return self.subs(dummy, tmp) == other.subs(symbol, tmp)
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue 5487.
def __repr__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
set([1, 2, I, pi, x, y])
If one or more types are given, the results will contain only
those types of atoms.
Examples
========
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
set([x, y])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
set([1, 2])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
set([1, 2, pi])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
set([1, 2, I, pi])
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
set([x, y])
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
set([1])
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
set([1, 2])
Finally, arguments to atoms() can select more than atomic atoms: any
sympy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
set([f(x), sin(y + I*pi)])
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
set([f(x)])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
set([I*pi, 2*sin(y + I*pi)])
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
else:
types = (Atom,)
result = set()
for expr in preorder_traversal(self):
if isinstance(expr, types):
result.add(expr)
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method."""
return set().union(*[a.free_symbols for a in self.args])
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
        ``self.variables`` to underscore-suffixed numbers
        corresponding to their position in ``self.variables``. Enough
underscores are added to ensure that there will be no clash with
existing free symbols.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: 0_}
"""
from sympy import Symbol
if not hasattr(self, 'variables'):
return {}
u = "_"
while any(s.name.endswith(u) for s in self.free_symbols):
u += "_"
name = '%%i%s' % u
V = self.variables
return dict(list(zip(V, [Symbol(name % i, **v.assumptions0)
for i, v in enumerate(V)])))
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
        operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x, y, z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
from sympy import Symbol
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
(or already is a real number) with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
A False result does not mean that `self` cannot be rewritten
into a form that would be comparable. For example, the
difference computed below is zero but without simplification
it does not evaluate to a zero with precision:
>>> e = 2**pi*(1 + 2**pi)
>>> dif = e - e.expand()
>>> dif.is_comparable
False
>>> dif.n(2)._prec
1
"""
is_real = self.is_real
if is_real is False:
return False
is_number = self.is_number
if is_number is False:
return False
n, i = [p.evalf(2) if not p.is_Number else p
for p in self.as_real_imag()]
if not i.is_Number or not n.is_Number:
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False because numbers with
# imaginary parts can't be compared
# so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def as_poly(self, *gens, **args):
"""Converts ``self`` to a polynomial or returns ``None``.
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> print((x**2 + x*y).as_poly())
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + x*y).as_poly(x, y))
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + sin(y)).as_poly(x, y))
None
"""
from sympy.polys import Poly, PolynomialError
try:
poly = Poly(self, *gens, **args)
if not poly.is_Poly:
return None
else:
return poly
except PolynomialError:
return None
def as_content_primitive(self, radical=False, clear=True):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See docstring of Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp, limit, oo
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A, B, C, D, E]))
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(subs={x: 3.0}, n=21)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21)
0.333333333333333314830
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
evalf: calculates the given formula to a desired level of precision
"""
from sympy.core.containers import Dict
from sympy.utilities import default_sort_key
from sympy import Dummy, Symbol
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, dict)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i in range(len(sequence)):
s = list(sequence[i])
for j, si in enumerate(s):
try:
si = sympify(si, strict=True)
except SympifyError:
if type(si) is str:
si = Symbol(si)
else:
# if it can't be sympified, skip it
sequence[i] = None
break
s[j] = si
else:
sequence[i] = None if _aresame(*s) else tuple(s)
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
if not all(k.is_Atom for k in sequence):
d = {}
for o, n in sequence.items():
try:
ops = o.count_ops(), len(o.args)
except TypeError:
ops = (0, 0)
d.setdefault(ops, []).append((o, n))
newseq = []
for k in sorted(d.keys(), reverse=True):
newseq.extend(
sorted([v[0] for v in d[k]], key=default_sort_key))
sequence = [(k, sequence[k]) for k in newseq]
del newseq, d
else:
sequence = sorted([(k, v) for (k, v) in sequence.items()],
key=default_sort_key)
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
kwargs['hack2'] = True
m = Dummy()
for old, new in sequence:
d = Dummy(commutative=new.is_commutative)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also: _subs
"""
return None
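    # Illustrative sketch (hypothetical class, not part of SymPy) of the
    # contract described in the _subs developer notes above: an _eval_subs
    # override that protects a bound name and otherwise defers to the
    # fallback traversal of the args.
    #
    #     class Bound(Basic):
    #         def _eval_subs(self, old, new):
    #             if old == self.args[0]:   # never replace the bound variable
    #                 return self           # returning self stops substitution
    #             return None               # None -> search the args as usual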
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x: pi, y: 2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
value, _ = self._xreplace(rule)
return value
def _xreplace(self, rule):
"""
Helper for xreplace. Tracks whether a replacement actually occurred.
"""
if self in rule:
return rule[self], True
elif rule:
args = []
changed = False
for a in self.args:
try:
a_xr = a._xreplace(rule)
args.append(a_xr[0])
changed |= a_xr[1]
except AttributeError:
args.append(a)
args = tuple(args)
if changed:
return self.func(*args), True
return self, False
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
"""Helper for .has()"""
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(f.func == pattern or f == pattern
for f in self.atoms(Function, UndefinedFunction))
pattern = sympify(pattern)
if isinstance(pattern, BasicMeta):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
try:
match = pattern._has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
except AttributeError:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return self.__eq__
def replace(self, query, value, map=False, simultaneous=True, exact=False):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself doesn't match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False. In addition, if an
expression containing more than one Wild symbol is being used to match
subexpressions and the ``exact`` flag is True, then the match will only
succeed if non-zero values are received for each Wild that appears in
the match pattern.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a = Wild('a')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
When the default value of False is used with patterns that have
more than one Wild symbol, non-intuitive results may be obtained:
>>> b = Wild('b')
>>> (2*x).replace(a*x + b, b - a)
2/x
For this reason, the ``exact`` option can be used to make the
replacement only when the match gives non-zero values for all
Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a, exact=True)
y - 2
>>> (2*x).replace(a*x + b, b - a, exact=True)
2*x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.symbol import Dummy
from sympy.simplify.simplify import bottom_up
try:
query = sympify(query)
except SympifyError:
pass
try:
value = sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
# XXX remove the exact flag and make multi-symbol
# patterns use exact=True semantics; to do this the query must
# be tested to find out how many Wild symbols are present.
# See https://groups.google.com/forum/
# ?fromgroups=#!topic/sympy/zPzo5FtRiqI
# for a method of inspecting a function to know how many
# parameters it has.
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**dict([(
str(key)[:-1], val) for key, val in result.items()]))
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**dict([(
str(key)[:-1], val) for key, val in result.items()]))
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
mapping = {} # changes that took place
mask = [] # the dummies that were used as change placeholders
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
new = _value(expr, result)
if new is not None and new != expr:
mapping[expr] = new
if simultaneous:
# don't let this expression be changed during rebuilding
com = getattr(new, 'is_commutative', True)
if com is None:
com = True
d = Dummy(commutative=com)
mask.append((d, new))
expr = d
else:
expr = new
return expr
rv = bottom_up(self, rec_replace, atoms=True)
# restore original expressions for Dummy symbols
if simultaneous:
mask = list(reversed(mask))
for o, n in mask:
r = {o: n}
rv = rv.xreplace(r)
if not map:
return rv
else:
if simultaneous:
# restore subexpressions in mapping
for o, n in mask:
r = {o: n}
mapping = dict([(k.xreplace(r), v.xreplace(r))
for k, v in mapping.items()])
return rv, mapping
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
def matches(self, expr, repl_dict={}, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
pattern = sympify(pattern)
return pattern.matches(self, old=old)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep=False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
def _eval_rewrite(self, pattern, rule, **hints):
if self.is_Atom:
if hasattr(self, rule):
return getattr(self, rule)()
return self
if hints.get('deep', True):
args = [a._eval_rewrite(pattern, rule, **hints)
if isinstance(a, Basic) else a
for a in self.args]
else:
args = self.args
if pattern is None or isinstance(self.func, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args)
if rewritten is not None:
return rewritten
return self.func(*args)
def rewrite(self, *args, **hints):
""" Rewrite functions in terms of other functions.
Rewrites expression containing applications of functions
of one kind in terms of functions of different kind. For
example you can rewrite trigonometric functions as complex
exponentials or combinatorial functions as gamma function.
As a pattern this function accepts a list of functions to
rewrite (instances of DefinedFunction class). As a rule
you can use a string or a destination function instance (in
this case rewrite() will use the str() function).
There is also the possibility to pass hints on how to rewrite
the given expressions. For now there is only one such hint
defined called 'deep'. When 'deep' is set to False it will
forbid functions to rewrite their contents.
Examples
========
>>> from sympy import sin, exp
>>> from sympy.abc import x
Unspecified pattern:
>>> sin(x).rewrite(exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a single function:
>>> sin(x).rewrite(sin, exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a list of functions:
>>> sin(x).rewrite([sin, ], exp)
-I*(exp(I*x) - exp(-I*x))/2
"""
if not args:
return self
else:
pattern = args[:-1]
if isinstance(args[-1], string_types):
rule = '_eval_rewrite_as_' + args[-1]
else:
rule = '_eval_rewrite_as_' + args[-1].__name__
if not pattern:
return self._eval_rewrite(None, rule, **hints)
else:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = [p.__class__ for p in pattern if self.has(p)]
if pattern:
return self._eval_rewrite(tuple(pattern), rule, **hints)
else:
return self
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = []
def matches(self, expr, repl_dict={}, old=False):
if self == expr:
return repl_dict
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, ratio, measure):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
To SymPy, 2.0 == 2:
>>> from sympy import S
>>> 2.0 == S(2)
True
Since a simple 'same or not' result is sometimes useful, this routine was
written to provide that query:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
from .function import AppliedUndef, UndefinedFunction as UndefFunc
for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
(isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
else:
return True
def _atomic(e):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Don't
return any 'atoms' that are inside such quantities unless
they also appear outside, too.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
set([x, y])
>>> _atomic(x + f(y))
set([x, f(y)])
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
set([y, cos(x), Derivative(f(x), x)])
"""
from sympy import Derivative, Function, Symbol
pot = preorder_traversal(e)
seen = set()
try:
free = e.free_symbols
except AttributeError:
return set([e])
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
pot.skip()
atoms.add(p)
return atoms
class preorder_traversal(Iterator):
"""
Do a pre-order traversal of a tree.
This iterator recursively yields nodes that it has visited in a pre-order
fashion. That is, it yields the current node then descends depth-first
through the tree to yield all of a node's children's pre-order
traversal.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
[z*(x + y), z, x + y, y, x]
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
if not keys and hasattr(node, '_argset'):
# LatticeOp keeps args as a set. We should use this if we
# don't care about the order, to prevent unnecessary sorting.
args = node._argset
else:
args = node.args
if keys:
if keys != True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
for subtree in self._preorder_traversal(arg, keys):
yield subtree
elif iterable(node):
for item in node:
for subtree in self._preorder_traversal(item, keys):
yield subtree
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x+y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
| mafiya69/sympy | sympy/core/basic.py | Python | bsd-3-clause | 59,455 |
# coding: utf-8
"""
Picarto.TV API Documentation
The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details
OpenAPI spec version: 1.2.5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VideoSearchResult(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'channel': 'BasicChannelInfo',
'video': 'ChannelVideo'
}
attribute_map = {
'channel': 'channel',
'video': 'video'
}
def __init__(self, channel=None, video=None):
"""
VideoSearchResult - a model defined in Swagger
"""
self._channel = None
self._video = None
if channel is not None:
self.channel = channel
if video is not None:
self.video = video
@property
def channel(self):
"""
Gets the channel of this VideoSearchResult.
:return: The channel of this VideoSearchResult.
:rtype: BasicChannelInfo
"""
return self._channel
@channel.setter
def channel(self, channel):
"""
Sets the channel of this VideoSearchResult.
:param channel: The channel of this VideoSearchResult.
:type: BasicChannelInfo
"""
self._channel = channel
@property
def video(self):
"""
Gets the video of this VideoSearchResult.
:return: The video of this VideoSearchResult.
:rtype: ChannelVideo
"""
return self._video
@video.setter
def video(self, video):
"""
Sets the video of this VideoSearchResult.
:param video: The video of this VideoSearchResult.
:type: ChannelVideo
"""
self._video = video
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VideoSearchResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
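if __name__ == "__main__":
    # Minimal illustration (not part of the generated client): build an empty
    # result and inspect its dict and equality behaviour.
    result = VideoSearchResult()
    print(result.to_dict())  # {'channel': None, 'video': None}
    print(result == VideoSearchResult())  # True: equality compares __dict__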
| Sythelux/Picarto.bundle | Contents/Libraries/Shared/PicartoClientAPI/models/video_search_result.py | Python | bsd-3-clause | 3,920 |
import six
from decimal import Decimal as D
from oscar.core.loading import get_model
from django.test import TestCase
from oscar.test import factories, decorators
from oscar.apps.partner import abstract_models
Partner = get_model('partner', 'Partner')
PartnerAddress = get_model('partner', 'PartnerAddress')
Country = get_model('address', 'Country')
class DummyWrapper(object):
def availability(self, stockrecord):
return 'Dummy response'
def dispatch_date(self, stockrecord):
return "Another dummy response"
class TestStockRecord(TestCase):
def setUp(self):
self.product = factories.create_product()
self.stockrecord = factories.create_stockrecord(
self.product, price_excl_tax=D('10.00'), num_in_stock=10)
@decorators.ignore_deprecation_warnings
def test_get_price_incl_tax_defaults_to_no_tax(self):
self.assertEqual(D('10.00'), self.stockrecord.price_incl_tax)
def test_get_price_excl_tax_returns_correct_value(self):
self.assertEqual(D('10.00'), self.stockrecord.price_excl_tax)
def test_net_stock_level_with_no_allocation(self):
self.assertEqual(10, self.stockrecord.net_stock_level)
def test_net_stock_level_with_allocation(self):
self.stockrecord.allocate(5)
self.assertEqual(10-5, self.stockrecord.net_stock_level)
def test_allocated_does_not_alter_num_in_stock(self):
self.stockrecord.allocate(5)
self.assertEqual(10, self.stockrecord.num_in_stock)
self.assertEqual(5, self.stockrecord.num_allocated)
def test_allocation_handles_null_value(self):
self.stockrecord.num_allocated = None
self.stockrecord.allocate(5)
def test_consuming_allocation(self):
self.stockrecord.allocate(5)
self.stockrecord.consume_allocation(3)
self.assertEqual(2, self.stockrecord.num_allocated)
self.assertEqual(7, self.stockrecord.num_in_stock)
def test_cancelling_allocation(self):
self.stockrecord.allocate(5)
self.stockrecord.cancel_allocation(4)
self.assertEqual(1, self.stockrecord.num_allocated)
self.assertEqual(10, self.stockrecord.num_in_stock)
def test_cancelling_allocation_ignores_too_big_allocations(self):
self.stockrecord.allocate(5)
self.stockrecord.cancel_allocation(6)
self.assertEqual(0, self.stockrecord.num_allocated)
self.assertEqual(10, self.stockrecord.num_in_stock)
@decorators.ignore_deprecation_warnings
def test_max_purchase_quantity(self):
self.assertEqual(10, self.stockrecord.max_purchase_quantity())
@decorators.ignore_deprecation_warnings
class CustomWrapperTests(TestCase):
"""
Partner wrappers are deprecated. This testcase will be removed/rewritten
in Oscar 0.7.
"""
def setUp(self):
abstract_models.partner_wrappers = {1: DummyWrapper()}
def tearDown(self):
abstract_models.partner_wrappers = None
def test_wrapper_availability_gets_called(self):
product = factories.create_product(
price=D('10.00'), partner_name="Acme", num_in_stock=10)
stockrecord = product.stockrecords.all()[0]
self.assertEqual(u"Dummy response",
six.text_type(stockrecord.availability))
def test_wrapper_dispatch_date_gets_called(self):
product = factories.create_product(
price=D('10.00'), partner_name="Acme", num_in_stock=10)
stockrecord = product.stockrecords.all()[0]
self.assertEqual("Another dummy response",
stockrecord.dispatch_date)
class TestPartnerAddress(TestCase):
def setUp(self):
self.partner = Partner._default_manager.create(
name="Dummy partner")
self.country = Country._default_manager.create(
iso_3166_1_a2='GB', name="UNITED KINGDOM")
self.address = PartnerAddress._default_manager.create(
title="Dr",
first_name="Barry",
last_name="Barrington",
country=self.country,
postcode="LS1 2HA",
partner=self.partner)
def test_can_get_primary_address(self):
self.assertEqual(self.partner.primary_address, self.address)
def test_fails_on_two_addresses(self):
self.address = PartnerAddress._default_manager.create(
title="Mrs",
first_name="Jane",
last_name="Barrington",
postcode="LS1 2HA",
country=self.country,
partner=self.partner)
self.assertRaises(
NotImplementedError, getattr, self.partner, 'primary_address')
| DrOctogon/unwash_ecom | tests/unit/partner/model_tests.py | Python | bsd-3-clause | 4,657 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from libmozdata import utils as lmdutils
from auto_nag import utils
from auto_nag.bzcleaner import BzCleaner
from auto_nag.escalation import Escalation, NoActivityDays
from auto_nag.nag_me import Nag
from auto_nag.round_robin import RoundRobin
class P1NoAssignee(BzCleaner, Nag):
def __init__(self):
super(P1NoAssignee, self).__init__()
self.escalation = Escalation(
self.people,
data=utils.get_config(self.name(), "escalation"),
skiplist=utils.get_config("workflow", "supervisor_skiplist", []),
)
self.round_robin = RoundRobin.get_instance()
self.components_skiplist = utils.get_config("workflow", "components_skiplist")
def description(self):
return "P1 Bugs, no assignee and no activity for few days"
def nag_template(self):
return self.template()
def get_extra_for_template(self):
return {"ndays": self.ndays}
def get_extra_for_nag_template(self):
return self.get_extra_for_template()
def get_extra_for_needinfo_template(self):
return self.get_extra_for_template()
def ignore_meta(self):
return True
def has_last_comment_time(self):
return True
def has_product_component(self):
return True
def columns(self):
return ["component", "id", "summary", "last_comment"]
def handle_bug(self, bug, data):
# check if the product::component is in the list
if utils.check_product_component(self.components_skiplist, bug):
return None
return bug
def get_mail_to_auto_ni(self, bug):
# For now, disable the needinfo
return None
# Avoid to ni everyday...
if self.has_bot_set_ni(bug):
return None
mail, nick = self.round_robin.get(bug, self.date)
if mail and nick:
return {"mail": mail, "nickname": nick}
return None
def set_people_to_nag(self, bug, buginfo):
priority = "high"
if not self.filter_bug(priority):
return None
owners = self.round_robin.get(bug, self.date, only_one=False, has_nick=False)
real_owner = bug["triage_owner"]
self.add_triage_owner(owners, real_owner=real_owner)
if not self.add(owners, buginfo, priority=priority):
self.add_no_manager(buginfo["id"])
return bug
def get_bz_params(self, date):
self.ndays = NoActivityDays(self.name()).get(
(utils.get_next_release_date() - self.nag_date).days
)
self.date = lmdutils.get_date_ymd(date)
fields = ["triage_owner", "flags"]
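        # Bugzilla's advanced search takes numbered (field, operator, value)
        # triplets: f1/o1/v1 restricts to priority P1, f2/o2/v2 restricts to
        # bugs with no activity for at least `ndays` days.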
params = {
"bug_type": "defect",
"include_fields": fields,
"resolution": "---",
"f1": "priority",
"o1": "equals",
"v1": "P1",
"f2": "days_elapsed",
"o2": "greaterthaneq",
"v2": self.ndays,
}
utils.get_empty_assignees(params)
return params
if __name__ == "__main__":
P1NoAssignee().run()
| mozilla/relman-auto-nag | auto_nag/scripts/workflow/p1_no_assignee.py | Python | bsd-3-clause | 3,281 |
# Experiment with boundary for collections
L = [0, 1, 2, 3]
print('-------- Part A --------')
# Index beyond the end generates an IndexError exception
try:
L[4] # part (a) of question
except IndexError as err:
print('IndexError Exception', err)
print('-------- Part B --------')
# Slice out of bounds
sliced = L[-10:10]
print(sliced)
print('slicing out of bounds results in a new list equal in value to original')
print('(if the slice includes the indices of the original)')
print('-------- Part C --------')
# part(c), reverse slicing
sliced = L[3:1]
print(sliced)
# this is not the same effect as out-of-bounds slicing;
# it results in an empty list above because the start is greater than the end with a positive stride
# to actually reverse the values in a new list, specify a negative stride
sliced = L[3:1:-1]
print(sliced) | skellykiernan/pylearn | II/q2.py | Python | bsd-3-clause | 809 |
from django_nose.tools import assert_equal
from pontoon.base.tests import TestCase
from pontoon.base.utils import NewlineEscapePlaceable, mark_placeables
class PlaceablesTests(TestCase):
def test_newline_escape_placeable(self):
"""Test detecting newline escape sequences"""
placeable = NewlineEscapePlaceable
assert_equal(placeable.parse(u'A string\\n')[1], placeable([u'\\n']))
assert_equal(placeable.parse(u'\\nA string')[0], placeable([u'\\n']))
assert_equal(placeable.parse(u'A\\nstring')[1], placeable([u'\\n']))
assert_equal(placeable.parse(u'A string'), None)
assert_equal(placeable.parse(u'A\nstring'), None)
def test_mark_newline_escape_placeables(self):
"""Test detecting newline escape sequences"""
assert_equal(
mark_placeables(u'A string\\n'),
u'A string<mark class="placeable" title="Escaped newline">\\n</mark>'
)
assert_equal(
mark_placeables(u'\\nA string'),
u'<mark class="placeable" title="Escaped newline">\\n</mark>A string'
)
assert_equal(
mark_placeables(u'A\\nstring'),
u'A<mark class="placeable" title="Escaped newline">\\n</mark>string'
)
assert_equal(
mark_placeables(u'A string'),
u'A string'
)
assert_equal(
mark_placeables(u'A\nstring'),
u'A\nstring'
)
def test_python_new_format_placeables(self):
"""Test detection of the new format string in python strings."""
assert_equal(
mark_placeables(u'Hello {name}'),
u'Hello <mark class="placeable" title="Python format string">{name}</mark>'
)
assert_equal(
mark_placeables(u'Hello {name!s}'),
u'Hello <mark class="placeable" title="Python format string">{name!s}</mark>'
)
assert_equal(
mark_placeables(u'Hello {someone.name}'),
u'Hello <mark class="placeable" title="Python format string">{someone.name}</mark>'
)
assert_equal(
mark_placeables(u'Hello {name[0]}'),
u'Hello <mark class="placeable" title="Python format string">{name[0]}</mark>'
)
assert_equal(
mark_placeables(u'Hello {someone.name[0]}'),
u'Hello <mark class="placeable" title="Python format string">{someone.name[0]}</mark>'
)
def test_python_format_named_placeables(self):
"""Test detection of format string with named placeables."""
assert_equal(
mark_placeables(u'Hello %(name)s'),
u'Hello <mark class="placeable" title="Python format string">%(name)s</mark>'
)
assert_equal(
mark_placeables(u'Rolling %(number)d dices'),
u'Rolling <mark class="placeable" title="Python format string">%(number)d</mark> dices'
)
assert_equal(
mark_placeables(u'Hello %(name)S'),
u'Hello <mark class="placeable" title="Python format string">%(name)S</mark>'
)
assert_equal(
mark_placeables(u'Rolling %(number)D dices'),
u'Rolling <mark class="placeable" title="Python format string">%(number)D</mark> dices'
) | participedia/pontoon | pontoon/base/tests/test_placeables.py | Python | bsd-3-clause | 3,308 |
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
# $Id$
#
# Copyright (C) 2003-2008 Greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
"""
"""
from __future__ import print_function
import numpy
from rdkit.ML.DecTree import SigTree
from rdkit.ML import InfoTheory
try:
from rdkit.ML.FeatureSelect import CMIM
except ImportError:
CMIM=None
from rdkit.DataStructs.VectCollection import VectCollection
import copy
import random
def _GenerateRandomEnsemble(nToInclude,nBits):
""" Generates a random subset of a group of indices
**Arguments**
- nToInclude: the size of the desired set
- nBits: the maximum index to be included in the set
**Returns**
a list of indices
"""
# Before Python 2.3 added the random.sample() function, this was
# way more complicated:
res = random.sample(range(nBits),nToInclude)
return res
def BuildSigTree(examples,nPossibleRes,ensemble=None,random=0,
metric=InfoTheory.InfoType.BIASENTROPY,
biasList=[1],
depth=0,maxDepth=-1,
useCMIM=0,allowCollections=False,
verbose=0,**kwargs):
"""
**Arguments**
- examples: the examples to be classified. Each example
should be a sequence at least three entries long, with
entry 0 being a label, entry 1 a BitVector and entry -1
an activity value
- nPossibleRes: the number of result codes possible
- ensemble: (optional) if this argument is provided, it
should be a sequence which is used to limit the bits
which are actually considered as potential descriptors.
The default is None (use all bits).
- random: (optional) If this argument is nonzero, it
specifies the number of bits to be randomly selected
for consideration at this node (i.e. this toggles the
growth of Random Trees).
The default is 0 (no random descriptor selection)
- metric: (optional) This is an _InfoTheory.InfoType_ and
sets the metric used to rank the bits.
The default is _InfoTheory.InfoType.BIASENTROPY_
- biasList: (optional) If provided, this provides a bias
list for the bit ranker.
See the _InfoTheory.InfoBitRanker_ docs for an explanation
of bias.
The default value is [1], which biases towards actives.
- maxDepth: (optional) the maximum depth to which the tree
will be grown
The default is -1 (no depth limit).
- useCMIM: (optional) if this is >0, the CMIM algorithm
(conditional mutual information maximization) will be
used to select the descriptors used to build the trees.
The value of the variable should be set to the number
of descriptors to be used. This option and the
ensemble option are mutually exclusive (CMIM will not be
used if the ensemble is set), but it happily coexists
with the random argument (to only consider random subsets
of the top N CMIM bits)
The default is 0 (do not use CMIM)
- depth: (optional) the current depth in the tree
This is used in the recursion and should not be set
by the client.
**Returns**
a SigTree.SigTreeNode with the root of the decision tree
"""
if verbose: print(' '*depth,'Build')
tree=SigTree.SigTreeNode(None,'node',level=depth)
tree.SetData(-666)
#tree.SetExamples(examples)
# counts of each result code:
#resCodes = map(lambda x:int(x[-1]),examples)
resCodes = [int(x[-1]) for x in examples]
#print('resCodes:',resCodes)
counts = [0]*nPossibleRes
for res in resCodes:
counts[res] += 1
#print(' '*depth,'counts:',counts)
nzCounts = numpy.nonzero(counts)[0]
if verbose: print(' '*depth,'\tcounts:',counts)
if len(nzCounts) == 1:
# bottomed out because there is only one result code left
# with any counts (i.e. there's only one type of example
# left... this is GOOD!).
res = nzCounts[0]
tree.SetLabel(res)
tree.SetName(str(res))
tree.SetTerminal(1)
elif maxDepth>=0 and depth>maxDepth:
# Bottomed out: max depth hit
# We don't really know what to do here, so
# use the heuristic of picking the most prevalent
# result
v = numpy.argmax(counts)
tree.SetLabel(v)
tree.SetName('%d?'%v)
tree.SetTerminal(1)
else:
# find the variable which gives us the best improvement
# We do this with an InfoBitRanker:
fp = examples[0][1]
nBits = fp.GetNumBits()
ranker = InfoTheory.InfoBitRanker(nBits,nPossibleRes,metric)
if biasList: ranker.SetBiasList(biasList)
if CMIM is not None and useCMIM > 0 and not ensemble:
ensemble = CMIM.SelectFeatures(examples,useCMIM,bvCol=1)
if random:
if ensemble:
if len(ensemble)>random:
picks = _GenerateRandomEnsemble(random,len(ensemble))
          availBits = [ensemble[p] for p in picks]  # oldnumeric's take() is no longer imported
else:
availBits = range(len(ensemble))
else:
availBits = _GenerateRandomEnsemble(random,nBits)
else:
availBits=None
if availBits:
ranker.SetMaskBits(availBits)
#print(' 2:'*depth,availBits)
useCollections=isinstance(examples[0][1],VectCollection)
for example in examples:
#print(' '*depth,example[1].ToBitString(),example[-1])
if not useCollections:
ranker.AccumulateVotes(example[1],example[-1])
else:
example[1].Reset()
ranker.AccumulateVotes(example[1].orVect,example[-1])
try:
bitInfo = ranker.GetTopN(1)[0]
best = int(bitInfo[0])
gain = bitInfo[1]
except Exception:
import traceback
traceback.print_exc()
print('get top n failed')
gain = -1.0
if gain <= 0.0:
v = numpy.argmax(counts)
tree.SetLabel(v)
tree.SetName('?%d?'%v)
tree.SetTerminal(1)
return tree
best = int(bitInfo[0])
#print(' '*depth,'\tbest:',bitInfo)
if verbose: print(' '*depth,'\tbest:',bitInfo)
# set some info at this node
tree.SetName('Bit-%d'%(best))
tree.SetLabel(best)
#tree.SetExamples(examples)
tree.SetTerminal(0)
# loop over possible values of the new variable and
# build a subtree for each one
onExamples = []
offExamples = []
for example in examples:
if example[1][best]:
if allowCollections and useCollections:
sig = copy.copy(example[1])
sig.DetachVectsNotMatchingBit(best)
ex = [example[0],sig]
if len(example)>2:
ex.extend(example[2:])
example = ex
onExamples.append(example)
else:
offExamples.append(example)
#print(' '*depth,len(offExamples),len(onExamples))
for ex in (offExamples,onExamples):
if len(ex) == 0:
v = numpy.argmax(counts)
tree.AddChild('%d??'%v,label=v,data=0.0,isTerminal=1)
else:
child = BuildSigTree(ex,nPossibleRes,random=random,
ensemble=ensemble,
metric=metric,biasList=biasList,
depth=depth+1,maxDepth=maxDepth,
verbose=verbose)
if child is None:
v = numpy.argmax(counts)
tree.AddChild('%d???'%v,label=v,data=0.0,isTerminal=1)
else:
tree.AddChildNode(child)
return tree
def SigTreeBuilder(examples,attrs,nPossibleVals,initialVar=None,ensemble=None,
randomDescriptors=0,
**kwargs):
nRes = nPossibleVals[-1]
return BuildSigTree(examples,nRes,random=randomDescriptors,**kwargs)
| adalke/rdkit | rdkit/ML/DecTree/BuildSigTree.py | Python | bsd-3-clause | 7,724 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Logit", sigma = 0.0, exog_count = 100, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_Logit/trend_MovingAverage/cycle_12/ar_12/test_artificial_128_Logit_MovingAverage_12_12_100.py | Python | bsd-3-clause | 267 |
from django.core.urlresolvers import reverse, resolve
from django.utils.html import escape
from .base import Widget
from ..libs import List as TogaList, SimpleListElement as TogaSimpleListElement
class SimpleListElement(Widget):
def __init__(self, content, detail=None, **style):
super(SimpleListElement, self).__init__(**style)
self.content = content
self.detail = detail
self.startup()
def startup(self):
pass
def materialize(self):
return TogaSimpleListElement(
widget_id=self.widget_id,
content=escape(self.content),
delete_url=reverse(self.detail, kwargs={'pk': self.content.id})
)
def _set_window(self, window):
super()._set_window(window)
if self.on_press:
self.window.callbacks[(self.widget_id, 'on_press')] = self.on_press
class List(Widget):
IMPL_CLASS = TogaList
def __init__(self, source=None, detail=None, item_class=None, on_item_press=None, **style):
super(List, self).__init__(**style)
self.source = source
self.detail = detail
self.item_class = item_class
self.on_item_press = on_item_press
self.children = []
self.startup()
def startup(self):
pass
def materialize(self):
children = []
if self.source:
api_view = resolve(reverse(self.source)).func
for child in api_view.view_class().get_queryset():
children.append(self.item_class(child, self.detail).materialize())
else:
for child in self.children:
                children.append(child.materialize())
return TogaList(
widget_id=self.widget_id,
children=children,
create_url=reverse(self.source),
on_item_press=self.handler(self.on_item_press, 'on_item_press') if self.on_item_press else None
)
def add(self, content):
if self.source:
raise Exception("Can't manually add to an API-sourced list")
self.children.append(self.item_class(content, self.detail))
def _set_app(self, app):
for child in self.children:
child.app = app
def _set_window(self, window):
for child in self.children:
child.window = window
if self.on_item_press:
self.window.callbacks[(self.widget_id, 'on_item_press')] = self.on_item_press
# def _hint_size(self, width, height, min_width=None, min_height=None):
# if width is not None:
# self.width = width
# else:
# del(self.width)
# if min_width is not None:
# self.min_width = min_width
# else:
# del(self.min_width)
# if height is not None:
# self.height = height
# else:
# del(self.height)
# if min_height is not None:
# self.min_height = min_height
# else:
# del(self.min_height)
# def _update_child_layout(self, **style):
# """Force a layout update on children of this container.
# The update request can be accompanied by additional style information
# (probably min_width, min_height, width or height) to control the
# layout.
# """
# for child in self.children:
# if child.is_container:
# child._update_layout()
# def _set_frame(self, frame):
# print("SET FRAME", self, frame.origin.x, frame.origin.y, frame.size.width, frame.size.height)
# self._impl.setFrame_(frame)
# self._impl.setNeedsDisplay_(True)
# for child in self.children:
# layout = child.layout
# child._set_frame(NSRect(NSPoint(layout.left, layout.top), NSSize(layout.width, layout.height)))
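    # Illustrative usage sketch (assumed URL names, not part of this module):
    #
    #     class ContactList(List):
    #         def startup(self):
    #             self.source = 'contact-list'      # name of a DRF list endpoint
    #             self.detail = 'contact-detail'    # name of the per-object endpoint
    #             self.item_class = SimpleListElement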
| freakboy3742/toga_web_demo | toga_django/widgets/list.py | Python | bsd-3-clause | 3,836 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounting', '0027_more_prbac_bootstrap'),
('accounting', '0030_remove_softwareplan_visibility_trial_internal'),
]
operations = [
]
| qedsoftware/commcare-hq | corehq/apps/accounting/migrations/0031_merge.py | Python | bsd-3-clause | 324 |
import msgpackrpc
import time
class SumServer(object):
def sum(self, x, y):
return x + y
def sleepy_sum(self, x, y):
time.sleep(1)
return x + y
server = msgpackrpc.Server(SumServer())
server.listen(msgpackrpc.Address("localhost", 18800))
server.start()
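# A matching client (sketch; run it from a separate process while the server
# above is listening):
#
#     import msgpackrpc
#     client = msgpackrpc.Client(msgpackrpc.Address("localhost", 18800))
#     print(client.call('sum', 1, 2))                 # -> 3, synchronous call
#     future = client.call_async('sleepy_sum', 1, 2)  # returns a Future immediately
#     print(future.get())                             # -> 3, after about one second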
| jpfairbanks/streaming | server.py | Python | bsd-3-clause | 286 |
import logging
import pytest
from traitlets.config import Config
from dockerspawner import DockerSpawner
def test_deprecated_config(caplog):
cfg = Config()
cfg.DockerSpawner.image_whitelist = {"1.0": "jupyterhub/singleuser:1.0"}
log = logging.getLogger("testlog")
spawner = DockerSpawner(config=cfg, log=log)
assert caplog.record_tuples == [
(
log.name,
logging.WARNING,
'DockerSpawner.image_whitelist is deprecated in DockerSpawner 12.0, use '
'DockerSpawner.allowed_images instead',
)
]
assert spawner.allowed_images == {"1.0": "jupyterhub/singleuser:1.0"}
async def test_deprecated_methods():
cfg = Config()
cfg.DockerSpawner.image_whitelist = {"1.0": "jupyterhub/singleuser:1.0"}
spawner = DockerSpawner(config=cfg)
assert await spawner.check_allowed("1.0")
with pytest.deprecated_call():
assert await spawner.check_image_whitelist("1.0")
| jupyter/dockerspawner | tests/test_deprecations.py | Python | bsd-3-clause | 972 |
from django.conf.urls import url
from .views import GetAuthToken, GetAuthTokenFacebook
urlpatterns = [
url(r'^$', GetAuthToken.as_view()),
url(r'^facebook/$', GetAuthTokenFacebook.as_view()),
]
| jsmesami/naovoce | src/user/api/token_auth/urls.py | Python | bsd-3-clause | 205 |
import os
def get(var, default, type_=None):
"""Return a function to recover latest env variable contents."""
def _env_getter():
"""Recover the latest setting from the environment."""
val = os.environ.get(var, default)
if type_:
val = type_(val)
return val
return _env_getter
MONGO_DBNAME = get('MONGO_DBNAME', 'nozama-cloudsearch')
MONGO_HOST = get('MONGO_HOST', 'localhost')
MONGO_PORT = get('MONGO_PORT', 27017, int)
ELASTICSEARCH_HOST = get('ELASTICSEARCH_HOST', 'localhost')
ELASTICSEARCH_PORT = get('ELASTICSEARCH_PORT', 9200, int)
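if __name__ == "__main__":
    # Illustration only (not used by the service): each setting above is a
    # zero-argument callable that re-reads the environment on every call,
    # so runtime overrides are picked up.
    os.environ["ELASTICSEARCH_PORT"] = "9300"
    print("mongo: %s:%s/%s" % (MONGO_HOST(), MONGO_PORT(), MONGO_DBNAME()))
    print("elasticsearch: %s:%s" % (ELASTICSEARCH_HOST(), ELASTICSEARCH_PORT()))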
| oisinmulvihill/nozama-cloudsearch | nozama/cloudsearch/service/environ_settings.py | Python | bsd-3-clause | 599 |
"""Helper module for parsing AWS ini config files."""
import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
AWS_CLI_CREDENTIALS_PATH = "~/.aws/credentials"
AWS_CLI_CONFIG_PATH = "~/.aws/config"
DEFAULT_PROFILE_NAME = os.getenv("AWS_DEFAULT_PROFILE", "default")
class NoConfigFoundException(Exception):
"""Config file not present."""
pass
def _get_config_parser(path):
"""Open and parse given config.
:type path: basestring
:rtype: ConfigParser.ConfigParser
"""
config_parser = configparser.ConfigParser()
try:
with open(os.path.expanduser(path), "rb") as f:
config_parser.readfp(f)
except IOError:
raise NoConfigFoundException("Can't find the config file: %s" % path)
else:
return config_parser
def _get_credentials_from_environment():
key = os.environ.get("AWS_ACCESS_KEY_ID")
secret = os.environ.get("AWS_SECRET_ACCESS_KEY")
return key, secret
def get_credentials(profile=None):
"""Returns AWS credentials.
Reads ~/.aws/credentials if the profile name is given or tries
to get them from environment otherwise. Returns a (key, secret)
tuple.
:type profile: basestring
:rtype: tuple
"""
if profile is None:
key, secret = _get_credentials_from_environment()
if key is not None and secret is not None:
return key, secret
raise NoConfigFoundException("AWS credentials not found.")
config = _get_config_parser(path=AWS_CLI_CREDENTIALS_PATH)
key = config.get(profile, "aws_access_key_id")
secret = config.get(profile, "aws_secret_access_key")
return key, secret
def get_credentials_dict(profile):
"""Returns credentials as a dict (for use as kwargs).
:type profile: basestring
:rtype: dict
"""
key, secret = get_credentials(profile)
return {"aws_access_key_id": key,
"aws_secret_access_key": secret}
def get_profile_names():
"""Get available profile names.
:rtype: list
:returns: list of profile names (strings)
"""
try:
return _get_config_parser(path=AWS_CLI_CREDENTIALS_PATH).sections()
except NoConfigFoundException:
return []
def has_default_profile():
"""Is default profile present?
:rtype: bool
"""
return DEFAULT_PROFILE_NAME in get_profile_names()
def get_default_region(profile):
"""Get the default region for given profile from AWS CLI tool's config.
:type profile: basestring
:rtype: basestring
:returns: name of defalt region if defined in config, None otherwise
"""
try:
config = _get_config_parser(path=AWS_CLI_CONFIG_PATH)
except NoConfigFoundException:
return None
try:
return config.get("profile %s" % profile, "region")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
return config.get("default", "region")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
return None
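if __name__ == "__main__":
    # Illustrative usage (not part of the library): list the configured AWS CLI
    # profiles with their default regions, then try to load some credentials.
    for name in get_profile_names():
        print("%s (region: %s)" % (name, get_default_region(name)))
    try:
        key, _ = get_credentials(DEFAULT_PROFILE_NAME if has_default_profile() else None)
        print("access key id: %s" % key)
    except NoConfigFoundException as exc:
        print(str(exc))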
| bearops/ebzl | ebzl/lib/config.py | Python | bsd-3-clause | 3,082 |
from django.test import TestCase
from trix.trix_core import trix_markdown
class TestTrixMarkdown(TestCase):
def test_simple(self):
self.assertEqual(
trix_markdown.assignment_markdown('# Hello world\n'),
'<h1>Hello world</h1>')
def test_nl2br(self):
self.assertEqual(
trix_markdown.assignment_markdown('Hello\nworld'),
'<p>Hello<br>\nworld</p>')
| devilry/trix2 | trix/trix_core/tests/test_trix_markdown.py | Python | bsd-3-clause | 421 |
import os
import mimetypes
from django.conf import settings as django_settings
from django.db import models
from django.template.defaultfilters import slugify
from django.core.files.images import get_image_dimensions
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from adminfiles import settings
if 'tagging' in django_settings.INSTALLED_APPS:
from tagging.fields import TagField
else:
TagField = None
class FileUpload(models.Model):
upload_date = models.DateTimeField(_('upload date'), auto_now_add=True)
upload = models.FileField(_('file'), upload_to=settings.ADMINFILES_UPLOAD_TO)
title = models.CharField(_('title'), max_length=100)
slug = models.SlugField(_('slug'), max_length=100, unique=True)
description = models.CharField(_('description'), blank=True, max_length=200)
content_type = models.CharField(editable=False, max_length=100)
sub_type = models.CharField(editable=False, max_length=100)
if TagField:
tags = TagField(_('tags'))
class Meta:
ordering = ['upload_date', 'title']
verbose_name = _('file upload')
verbose_name_plural = _('file uploads')
def __unicode__(self):
return self.title
def mime_type(self):
return '%s/%s' % (self.content_type, self.sub_type)
mime_type.short_description = _('mime type')
def type_slug(self):
return slugify(self.sub_type)
def is_image(self):
return self.content_type == 'image'
def _get_dimensions(self):
try:
return self._dimensions_cache
except AttributeError:
if self.is_image():
self._dimensions_cache = get_image_dimensions(self.upload.path)
else:
self._dimensions_cache = (None, None)
return self._dimensions_cache
def width(self):
return self._get_dimensions()[0]
def height(self):
return self._get_dimensions()[1]
def save(self, *args, **kwargs):
try:
uri = self.upload.path
except NotImplementedError:
uri = self.upload.url
(mime_type, encoding) = mimetypes.guess_type(uri)
try:
[self.content_type, self.sub_type] = mime_type.split('/')
        except (AttributeError, ValueError):
self.content_type = 'text'
self.sub_type = 'plain'
        super(FileUpload, self).save(*args, **kwargs)
def insert_links(self):
links = []
for key in [self.mime_type(), self.content_type, '']:
if key in settings.ADMINFILES_INSERT_LINKS:
links = settings.ADMINFILES_INSERT_LINKS[key]
break
for link in links:
ref = self.slug
opts = ':'.join(['%s=%s' % (k,v) for k,v in link[1].items()])
if opts:
ref += ':' + opts
yield {'desc': link[0],
'ref': ref}
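    # Expected shape of settings.ADMINFILES_INSERT_LINKS as consumed above
    # (illustrative values only): keys are full mime types ('image/png'),
    # main types ('image'), or '' as a catch-all; each value is a list of
    # (description, options-dict) pairs, e.g.
    #
    #     ADMINFILES_INSERT_LINKS = {
    #         'image': [('Full size', {}), ('Left-aligned', {'class': 'left'})],
    #         '': [('Link', {})],
    #     }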
def mime_image(self):
if not settings.ADMINFILES_STDICON_SET:
return None
return ('http://www.stdicon.com/%s/%s?size=64'
% (settings.ADMINFILES_STDICON_SET, self.mime_type()))
class FileUploadReference(models.Model):
"""
Tracks which ``FileUpload``s are referenced by which content models.
"""
upload = models.ForeignKey(FileUpload)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
class Meta:
unique_together = ('upload', 'content_type', 'object_id')
| carljm/django-adminfiles | adminfiles/models.py | Python | bsd-3-clause | 3,651 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Cabu documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 15 00:48:40 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Cabu'
copyright = '2016, Théotime Leveque'
author = 'Théotime Leveque'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': 'logo.jpeg',
'github_user': 'thylong',
'github_repo': 'cabu',
'logo_name': True,
'github_banner': True,
'travis_button': False,
'show_powered_by': False
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Cabudoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Cabu.tex', 'Cabu Documentation',
'Théotime Leveque', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cabu', 'Cabu Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Cabu', 'Cabu Documentation',
author, 'Cabu', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| thylong/cabu | docs/conf.py | Python | bsd-3-clause | 9,963 |
import factory
import factory.django
from faker import Faker
from machina.core.db.models import get_model
from machina.test.factories.auth import UserFactory
from machina.test.factories.conversation import TopicFactory
faker = Faker()
TopicPoll = get_model('forum_polls', 'TopicPoll')
TopicPollOption = get_model('forum_polls', 'TopicPollOption')
TopicPollVote = get_model('forum_polls', 'TopicPollVote')
class TopicPollFactory(factory.django.DjangoModelFactory):
topic = factory.SubFactory(TopicFactory)
question = faker.text(max_nb_chars=200)
class Meta:
model = TopicPoll
class TopicPollOptionFactory(factory.django.DjangoModelFactory):
poll = factory.SubFactory(TopicPollFactory)
text = faker.text(max_nb_chars=100)
class Meta:
model = TopicPollOption
class TopicPollVoteFactory(factory.django.DjangoModelFactory):
poll_option = factory.SubFactory(TopicPollOptionFactory)
voter = factory.SubFactory(UserFactory)
class Meta:
model = TopicPollVote
| ellmetha/django-machina | machina/test/factories/polls.py | Python | bsd-3-clause | 1,023 |
def _types_gen(T):
yield T
if hasattr(T, 't'):
for l in T.t:
yield l
if hasattr(l, 't'):
for ll in _types_gen(l):
yield ll
class Type(type):
""" A rudimentary extension to `type` that provides polymorphic
types for run-time type checking of JSON data types. IE:
assert type(u'') == String
assert type('') == String
assert type('') == Any
assert Any.kind('') == String
assert Any.decode('str') == String
assert Any.kind({}) == Object
"""
def __init__(self, *args, **kwargs):
type.__init__(self, *args, **kwargs)
def __eq__(self, other):
for T in _types_gen(self):
if isinstance(other, Type):
if T in other.t:
return True
if type.__eq__(T, other):
return True
return False
def __str__(self):
return getattr(self, '_name', 'unknown')
def N(self, n):
self._name = n
return self
def I(self, *args):
self.t = list(args)
return self
def kind(self, t):
if type(t) is Type:
return t
ty = lambda t: type(t)
if type(t) is type:
ty = lambda t: t
return reduce(
lambda L, R: R if (hasattr(R, 't') and ty(t) == R) else L,
filter(lambda T: T is not Any,
_types_gen(self)))
def decode(self, n):
return reduce(
lambda L, R: R if (str(R) == n) else L,
_types_gen(self))
# JSON primitives and data types
Object = Type('Object', (object,), {}).I(dict).N('obj')
Number = Type('Number', (object,), {}).I(int, long).N('num')
Boolean = Type('Boolean', (object,), {}).I(bool).N('bit')
String = Type('String', (object,), {}).I(str, unicode).N('str')
Array = Type('Array', (object,), {}).I(list, set, tuple).N('arr')
Nil = Type('Nil', (object,), {}).I(type(None)).N('nil')
Any = Type('Any', (object,), {}).I(
Object, Number, Boolean, String, Array, Nil).N('any')
| regmi/codenode-unr | codenode/external/jsonrpc/types.py | Python | bsd-3-clause | 1,860 |
#!/usr/bin/env python
'''
Main entry to worch from a waf wscript file.
Use the following in the options(), configure() and build() waf wscript methods:
ctx.load('orch.tools', tooldir='.')
'''
def options(opt):
opt.add_option('--orch-config', action = 'store', default = 'orch.cfg',
help='Give an orchestration configuration file.')
opt.add_option('--orch-start', action = 'store', default = 'start',
help='Set the section to start the orchestration')
def configure(cfg):
import orch.configure
orch.configure.configure(cfg)
def build(bld):
import orch.build
orch.build.build(bld)
# the stuff below is for augmenting waf
import time
from orch.wafutil import exec_command
from orch.util import string2list
default_step_cwd = dict(
download = '{download_dir}',
unpack = '{source_dir}',
patch = '{source_dir}',
prepare = '{build_dir}',
build = '{build_dir}',
install = '{build_dir}',
)
# Main interface to worch configuration items
class WorchConfig(object):
def __init__(self, **pkgcfg):
self._config = pkgcfg
def __getattr__(self, name):
return self._config[name]
def get(self, name, default = None):
return self._config.get(name,default)
def format(self, string, **kwds):
'''
Return a string formatted with kwds and configuration items
'''
d = dict(self._config, **kwds)
return string.format(**d)
def depends_step(self, step):
'''
Return a list of steps that this step depends on
'''
d = self._config.get('depends')
if not d: return list()
ds = [x[1] for x in [s.split(':') for s in string2list(d)] if x[0] == step]
return ds
def dependencies(self):
'''
Return all dependencies set via "depends" configuration items
return list of tuples: (mystep, package, package_step)
eg: ('prepare', 'gcc', 'install')
'''
ret = list()
try:
deps = getattr(self, 'depends', None)
except KeyError:
return list()
for dep in string2list(deps):
mystep, other = dep.split(':')
pkg,pkg_step = other.split('_',1)
ret.append((mystep, pkg, pkg_step))
return ret
def exports(self):
'''
Return all environment settings via export_* configuration items
return list of tuples: (variable, value, operator) for exports
eg: ('PATH', '/blah/blah', 'prepend')
'''
ret = list()
for key,val in self._config.items():
if not key.startswith('export_'):
continue
var = key[len('export_'):]
oper = 'set'
for maybe in ['prepend', 'append', 'set']:
if val.startswith(maybe+':'):
oper = maybe
val = val[len(maybe)+1:]
ret.append((var, val, oper))
return ret
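# Illustrative sketch (hypothetical configuration values): how the "depends" and
# "export_*" items documented above are parsed by WorchConfig. This helper is not
# called anywhere; it only demonstrates the API and assumes string2list() splits
# the "depends" string into its individual tokens.
def _example_worch_config():
    cfg = WorchConfig(package='gcc', version='4.8',
                      depends='prepare:gcc_install',
                      export_PATH='prepend:/opt/bin')
    assert cfg.format('{package}/{version}') == 'gcc/4.8'
    assert cfg.exports() == [('PATH', '/opt/bin', 'prepend')]
    return cfg.dependencies()  # expected: [('prepare', 'gcc', 'install')]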
# Augment the task generator with worch-specific methods
from waflib.TaskGen import taskgen_method
@taskgen_method
def worch_hello(self):
'Just testing'
print ("%s" % self.worch.format('Hi from worch, my name is "{package}/{version}" and I am using "{dumpenv_cmd}" with extra {extra}', extra='spice'))
print ('My bld.env: %s' % (self.bld.env.keys(),))
print ('My all_envs: %s' % (sorted(self.bld.all_envs.keys()),))
print ('My env: %s' % (self.env.keys(),))
print ('My groups: %s' % (self.env['orch_group_dict'].keys(),))
print ('My packages: %s' % (self.env['orch_package_list'],))
# print ('My package dict: %s' % '\n'.join(['%s=%s' %kv for kv in sorted(self.bld.env['orch_package_dict'][self.worch.package].items())]))
@taskgen_method
def step(self, name, rule, **kwds):
'''
Make a worch installation step.
This invokes the build context on the rule with the following augmentations:
- the given step name is prefixed with the package name
- if the rule is a string (scriptlet) then the worch exec_command is used
- successful execution of the rule leads to a worch control file being produced.
'''
step_name = '%s_%s' % (self.worch.package, name)
# append control file as an additional output
target = string2list(kwds.get('target', ''))
if not isinstance(target, list):
target = [target]
cn = self.control_node(name)
if not cn in target:
target.append(cn)
kwds['target'] = target
kwds.setdefault('env', self.env)
cwd = kwds.get('cwd')
if not cwd:
cwd = default_step_cwd.get(name)
if cwd:
cwd = self.worch.format(cwd)
cwd = self.make_node(cwd)
msg.debug('orch: using cwd for step "%s": %s' % (step_name, cwd.abspath()))
kwds['cwd'] = cwd.abspath()
depends = self.worch.depends_step(name)
after = string2list(kwds.get('after',[])) + depends
if after:
kwds['after'] = after
msg.debug('orch: run %s AFTER: %s' % (step_name, after))
# functionalize scriptlet
rulefun = rule
if isinstance(rule, type('')):
rulefun = lambda t: exec_command(t, rule)
# curry the real rule function in order to write control file if successful
def runit(t):
rc = rulefun(t)
if not rc:
msg.debug('orch: successfully ran %s' % step_name)
cn.write(time.asctime(time.localtime()) + '\n')
return rc
# msg.debug('orch: step "%s" with %s in %s\nsource=%s\ntarget=%s' % \
# (step_name, rulefun, cwd, kwds.get('source'), kwds.get('target')))
# have to switch group each time as steps are called already asynchronously
self.bld.set_group(self.worch.group)
return self.bld(name=step_name, rule = runit, **kwds)
@taskgen_method
def control_node(self, step, package = None):
'''
Return a node for the control file given step of this package or optionally another package.
'''
if not package:
package = self.worch.package
filename = '%s_%s' % (package, step)
path = self.worch.format('{control_dir}/{filename}', filename=filename)
return self.path.find_or_declare(path)
@taskgen_method
def make_node(self, path, parent_node=None):
if not parent_node:
if path.startswith('/'):
parent_node = self.bld.root
else:
parent_node = self.bld.bldnode
return parent_node.make_node(path)
import waflib.Logs as msg
from waflib.Build import BuildContext
def worch_package(ctx, worch_config, *args, **kw):
# transfer waf-specific keywords explicitly
kw['name'] = worch_config['package']
kw['features'] = ' '.join(string2list(worch_config['features']))
kw['use'] = worch_config.get('use')
# make the TaskGen object for the package
worch=WorchConfig(**worch_config)
tgen = ctx(*args, worch=worch, **kw)
tgen.env = ctx.all_envs[worch.package]
tgen.env.env = tgen.env.munged_env
msg.debug('orch: package "%s" with features: %s' % \
(kw['name'], ', '.join(kw['features'].split())))
return tgen
BuildContext.worch_package = worch_package
del worch_package
| hwaf/hwaf | py-hwaftools/orch/tools.py | Python | bsd-3-clause | 7,185 |
from djpcms import sites
from djpcms.http import get_http
from djpcms.template import RequestContext, loader
from djpcms.views.baseview import djpcmsview
class badview(djpcmsview):
def __init__(self, template, httphandler):
self.template = template
self.httphandler = httphandler
super(badview,self).__init__()
def response(self, request):
t = loader.get_template(self.template)
c = {'request_path': request.path,
'grid': self.grid960()}
return self.httphandler(t.render(RequestContext(request, c)))
def http404view(request, *args, **kwargs):
http = get_http(sites.settings.HTTP_LIBRARY)
return badview('404.html',
http.HttpResponseNotFound).response(request)
def http500view(request, *args, **kwargs):
http = get_http(sites.settings.HTTP_LIBRARY)
return badview('500.html',
http.HttpResponseServerError).response(request) | strogo/djpcms | djpcms/views/specials.py | Python | bsd-3-clause | 963 |
""" Models for controlling the text and visual formatting of tick
labels on Bokeh plot axes.
"""
from __future__ import absolute_import
from .tickers import Ticker
from ..model import Model
from ..core.properties import abstract
from ..core.properties import Bool, Int, String, Enum, Auto, List, Dict, Either, Instance
from ..core.enums import DatetimeUnits, RoundingFunction, NumeralLanguage
@abstract
class TickFormatter(Model):
""" A base class for all tick formatter types. ``TickFormatter`` is
not generally useful to instantiate on its own.
"""
pass
class BasicTickFormatter(TickFormatter):
""" Display tick values from continuous ranges as "basic numbers",
using scientific notation when appropriate by default.
"""
precision = Either(Auto, Int, help="""
How many digits of precision to display in tick labels.
""")
use_scientific = Bool(True, help="""
Whether to ever display scientific notation. If ``True``, then
when to use scientific notation is controlled by ``power_limit_low``
and ``power_limit_high``.
""")
power_limit_high = Int(5, help="""
Limit the use of scientific notation to when::
log(x) >= power_limit_high
""")
power_limit_low = Int(-3, help="""
Limit the use of scientific notation to when::
log(x) <= power_limit_low
""")
class NumeralTickFormatter(TickFormatter):
""" Tick formatter based on a human-readable format string. """
format = String("0,0", help="""
The number format, as defined in the following tables:
**NUMBERS**:
============ ============== ===============
Number Format String
============ ============== ===============
10000 '0,0.0000' 10,000.0000
10000.23 '0,0' 10,000
10000.23 '+0,0' +10,000
-10000 '0,0.0' -10,000.0
10000.1234 '0.000' 10000.123
10000.1234 '0[.]00000' 10000.12340
-10000 '(0,0.0000)' (10,000.0000)
-0.23 '.00' -.23
-0.23 '(.00)' (.23)
0.23 '0.00000' 0.23000
0.23 '0.0[0000]' 0.23
1230974 '0.0a' 1.2m
1460 '0 a' 1 k
-104000 '0a' -104k
1 '0o' 1st
52 '0o' 52nd
23 '0o' 23rd
100 '0o' 100th
============ ============== ===============
**CURRENCY**:
=========== =============== =============
Number Format String
=========== =============== =============
1000.234 '$0,0.00' $1,000.23
1000.2 '0,0[.]00 $' 1,000.20 $
1001 '$ 0,0[.]00' $ 1,001
-1000.234 '($0,0)' ($1,000)
-1000.234 '$0.00' -$1000.23
1230974 '($ 0.00 a)' $ 1.23 m
=========== =============== =============
**BYTES**:
=============== =========== ============
Number Format String
=============== =========== ============
100 '0b' 100B
2048 '0 b' 2 KB
7884486213 '0.0b' 7.3GB
3467479682787 '0.000 b' 3.154 TB
=============== =========== ============
**PERCENTAGES**:
============= ============= ===========
Number Format String
============= ============= ===========
1 '0%' 100%
0.974878234 '0.000%' 97.488%
-0.43 '0 %' -43 %
0.43 '(0.000 %)' 43.000 %
============= ============= ===========
**TIME**:
============ ============== ============
Number Format String
============ ============== ============
25 '00:00:00' 0:00:25
238 '00:00:00' 0:03:58
63846 '00:00:00' 17:44:06
============ ============== ============
For the complete specification, see http://numbrojs.com/format.html
""")
language = Enum(NumeralLanguage, default="en", help="""
The language to use for formatting language-specific features (e.g. thousands separator).
""")
rounding = Enum(RoundingFunction, help="""
Rounding functions (round, floor, ceil) and their synonyms (nearest, rounddown, roundup).
""")
class PrintfTickFormatter(TickFormatter):
""" Tick formatter based on a printf-style format string. """
format = String("%s", help="""
The number format, as defined as follows: the placeholder in the format
string is marked by % and is followed by one or more of these elements,
in this order:
* An optional ``+`` sign
Causes the result to be preceded with a plus or minus sign on numeric
values. By default, only the ``-`` sign is used on negative numbers.
* An optional padding specifier
Specifies what (if any) character to use for padding. Possible values
are 0 or any other character preceded by a ``'`` (single quote). The
default is to pad with spaces.
* An optional ``-`` sign
Causes sprintf to left-align the result of this placeholder. The default
is to right-align the result.
* An optional number
Specifies how many characters the result should have. If the value to be
returned is shorter than this number, the result will be padded.
* An optional precision modifier
Consists of a ``.`` (dot) followed by a number, specifies how many digits
should be displayed for floating point numbers. When used on a string, it
causes the result to be truncated.
* A type specifier
Can be any of:
- ``%`` --- yields a literal ``%`` character
- ``b`` --- yields an integer as a binary number
- ``c`` --- yields an integer as the character with that ASCII value
- ``d`` or ``i`` --- yields an integer as a signed decimal number
- ``e`` --- yields a float using scientific notation
- ``u`` --- yields an integer as an unsigned decimal number
- ``f`` --- yields a float as is
- ``o`` --- yields an integer as an octal number
- ``s`` --- yields a string as is
- ``x`` --- yields an integer as a hexadecimal number (lower-case)
- ``X`` --- yields an integer as a hexadecimal number (upper-case)
""")
class LogTickFormatter(TickFormatter):
""" Display tick values from continuous ranges as powers
of some base.
Most often useful in conjunction with a ``LogTicker``.
"""
ticker = Instance(Ticker, help="""
The corresponding ``LogTicker``, used to determine the correct
base to use. If unset, the formatter will use base 10 as a default.
""")
class CategoricalTickFormatter(TickFormatter):
""" Display tick values from categorical ranges as string
values.
"""
pass
DEFAULT_DATETIME_FORMATS = lambda : {
'microseconds': ['%fus'],
'milliseconds': ['%3Nms', '%S.%3Ns'],
'seconds': ['%Ss'],
'minsec': [':%M:%S'],
'minutes': [':%M', '%Mm'],
'hourmin': ['%H:%M'],
'hours': ['%Hh', '%H:%M'],
'days': ['%m/%d', '%a%d'],
'months': ['%m/%Y', '%b%y'],
'years': ['%Y'],
}
class DatetimeTickFormatter(TickFormatter):
""" Display tick values from a continuous range as formatted
datetimes.
"""
formats = Dict(Enum(DatetimeUnits), List(String), default=DEFAULT_DATETIME_FORMATS, help="""
User defined formats for displaying datetime values.
The enum values correspond roughly to different "time scales". The
corresponding value is a list of `strftime`_ formats to use for
formatting datetime tick values that fall in in that "time scale".
By default, only the first format string passed for each time scale
will be used. By default, all leading zeros are stripped away from
the formatted labels. These behaviors cannot be changed as of now.
An example of specifying the same date format over a range of time scales::
DatetimeTickFormatter(
formats=dict(
hours=["%B %Y"],
days=["%B %Y"],
months=["%B %Y"],
years=["%B %Y"],
)
)
This list of supported `strftime`_ formats is reproduced below.
.. warning::
The client library BokehJS uses the `timezone`_ library to
format datetimes. The inclusion of the list below is based on the
        claim that `timezone`_ makes to support "the full complement
of GNU date format specifiers." However, this claim has not
been tested exhaustively against this list. If you find formats
that do not function as expected, please submit a `github issue`_,
so that the documentation can be updated appropriately.
%a
The abbreviated name of the day of the week according to the
current locale.
%A
The full name of the day of the week according to the current
locale.
%b
The abbreviated month name according to the current locale.
%B
The full month name according to the current locale.
%c
The preferred date and time representation for the current
locale.
%C
The century number (year/100) as a 2-digit integer.
%d
The day of the month as a decimal number (range 01 to 31).
%D
Equivalent to %m/%d/%y. (Americans should note that in many
other countries %d/%m/%y is rather common. This means that in
international context this format is ambiguous and should not
be used.)
%e
Like %d, the day of the month as a decimal number, but a
leading zero is replaced by a space.
%f
Microsecond as a decimal number, zero-padded on the left (range
000000-999999). This is an extension to the set of directives
available to `timezone`_.
%F
Equivalent to %Y-%m-%d (the ISO 8601 date format).
%G
The ISO 8601 week-based year with century as a decimal number.
The 4-digit year corresponding to the ISO week number (see %V).
This has the same format and value as %Y, except that if the
ISO week number belongs to the previous or next year, that year
is used instead.
%g
Like %G, but without century, that is, with a 2-digit year (00-99).
%h
Equivalent to %b.
%H
The hour as a decimal number using a 24-hour clock (range 00
to 23).
%I
The hour as a decimal number using a 12-hour clock (range 01
to 12).
%j
The day of the year as a decimal number (range 001 to 366).
%k
The hour (24-hour clock) as a decimal number (range 0 to 23).
Single digits are preceded by a blank. (See also %H.)
%l
The hour (12-hour clock) as a decimal number (range 1 to 12).
Single digits are preceded by a blank. (See also %I.) (TZ)
%m
The month as a decimal number (range 01 to 12).
%M
The minute as a decimal number (range 00 to 59).
%n
A newline character. Bokeh text does not currently support
newline characters.
%N
Nanosecond as a decimal number, zero-padded on the left (range
000000000-999999999). Supports a padding width specifier, i.e.
%3N displays 3 leftmost digits. However, this is only accurate
to the millisecond level of precision due to limitations of
`timezone`_.
%p
Either "AM" or "PM" according to the given time value, or the
corresponding strings for the current locale. Noon is treated
as "PM" and midnight as "AM".
%P
Like %p but in lowercase: "am" or "pm" or a corresponding
string for the current locale.
%r
The time in a.m. or p.m. notation. In the POSIX locale this
is equivalent to %I:%M:%S %p.
%R
The time in 24-hour notation (%H:%M). For a version including
the seconds, see %T below.
%s
The number of seconds since the Epoch, 1970-01-01 00:00:00
+0000 (UTC).
%S
The second as a decimal number (range 00 to 60). (The range
is up to 60 to allow for occasional leap seconds.)
%t
A tab character. Bokeh text does not currently support tab
characters.
%T
The time in 24-hour notation (%H:%M:%S).
%u
The day of the week as a decimal, range 1 to 7, Monday being 1.
See also %w.
%U
The week number of the current year as a decimal number, range
00 to 53, starting with the first Sunday as the first day of
week 01. See also %V and %W.
%V
The ISO 8601 week number (see NOTES) of the current year as a
decimal number, range 01 to 53, where week 1 is the first week
that has at least 4 days in the new year. See also %U and %W.
%w
The day of the week as a decimal, range 0 to 6, Sunday being 0.
See also %u.
%W
The week number of the current year as a decimal number, range
00 to 53, starting with the first Monday as the first day of
week 01.
%x
The preferred date representation for the current locale
without the time.
%X
The preferred time representation for the current locale
without the date.
%y
The year as a decimal number without a century (range 00 to 99).
%Y
The year as a decimal number including the century.
%z
The +hhmm or -hhmm numeric timezone (that is, the hour and
minute offset from UTC).
%Z
The timezone name or abbreviation.
%%
A literal '%' character.
.. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html
.. _timezone: http://bigeasy.github.io/timezone/
.. _github issue: https://github.com/bokeh/bokeh/issues
""")
| quasiben/bokeh | bokeh/models/formatters.py | Python | bsd-3-clause | 13,993 |
"""
Base/mixin classes for the spatial backend database operations and the
`SpatialRefSys` model for the backend.
"""
import re
from django.contrib.gis import gdal
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class BaseSpatialOperations(object):
"""
    Base class for the spatial operations object that is instantiated by
    each spatial database backend with the features it has.
"""
distance_functions = {}
geometry_functions = {}
geometry_operators = {}
geography_operators = {}
geography_functions = {}
gis_terms = set()
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
area = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
bounding_circle = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
collect = False
extent = False
extent3d = False
make_line = False
unionagg = False
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a get_geom_placeholder() method')
def get_expression_column(self, evaluator):
"""
Helper method to return the quoted column string from the evaluator
for its expression.
"""
for expr, col_tup in evaluator.cols:
if expr is evaluator.expression:
return '%s.%s' % tuple(map(self.quote_name, col_tup))
raise Exception("Could not find the column for the expression.")
# Spatial SQL Construction
def spatial_aggregate_sql(self, agg):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_lookup_sql(self, lvalue, lookup_type, value, field):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_lookup_sql() method')
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geometry_columns() method')
def spatial_ref_sys(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_ref_sys() method')
@python_2_unicode_compatible
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
    SpatialRefSys objects to reduce redundant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
                error = None
                try:
                    self._srs = gdal.SpatialReference(self.wkt)
                    return self.srs
                except Exception as err:
                    error = err
                try:
                    self._srs = gdal.SpatialReference(self.proj4text)
                    return self.srs
                except Exception as err:
                    error = err
                raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, error))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening).
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
if m:
return (float(m.group('major')), float(m.group('flattening')))
else:
return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDAL:
return self.srs.geographic
else:
return self.wkt.startswith('GEOGCS')
@property
def linear_name(self):
"Returns the linear units name."
if gdal.HAS_GDAL:
return self.srs.linear_name
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def linear_units(self):
"Returns the linear units."
if gdal.HAS_GDAL:
return self.srs.linear_units
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def angular_name(self):
"Returns the name of the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_name
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def angular_units(self):
"Returns the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_units
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def units(self):
"Returns a tuple of the units and the name."
if self.projected or self.local:
return (self.linear_units, self.linear_name)
elif self.geographic:
return (self.angular_units, self.angular_name)
else:
return (None, None)
@classmethod
def get_units(cls, wkt):
"""
Class method used by GeometryField on initialization to
        retrieve the units on the given WKT, without having to use
any of the database fields.
"""
if gdal.HAS_GDAL:
return gdal.SpatialReference(wkt).units
else:
m = cls.units_regex.match(wkt)
return m.group('unit'), m.group('unit_name')
@classmethod
def get_spheroid(cls, wkt, string=True):
"""
Class method used by GeometryField on initialization to
retrieve the `SPHEROID[..]` parameters from the given WKT.
"""
if gdal.HAS_GDAL:
srs = gdal.SpatialReference(wkt)
sphere_params = srs.ellipsoid
sphere_name = srs['spheroid']
else:
m = cls.spheroid_regex.match(wkt)
if m:
sphere_params = (float(m.group('major')), float(m.group('flattening')))
sphere_name = m.group('name')
else:
return None
if not string:
return sphere_name, sphere_params
else:
# `string` parameter used to place in format acceptable by PostGIS
if len(sphere_params) == 3:
radius, flattening = sphere_params[0], sphere_params[2]
else:
radius, flattening = sphere_params
return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
def __str__(self):
"""
Returns the string representation. If GDAL is installed,
it will be 'pretty' OGC WKT.
"""
try:
return six.text_type(self.srs)
except Exception:
return six.text_type(self.wkt)
| ericholscher/django | django/contrib/gis/db/backends/base.py | Python | bsd-3-clause | 11,584 |
__author__ = 'oglebrandon'
import logging as logger
import types
from ib.ext.EWrapper import EWrapper
def showmessage(message, mapping):
try:
del(mapping['self'])
except (KeyError, ):
pass
items = mapping.items()
items.sort()
print '### %s' % (message, )
for k, v in items:
print ' %s:%s' % (k, v)
class Observable(object):
"""
Sender -> dispatches messages to interested callables
"""
def __init__(self):
self.listeners = {}
self.logger = logger.getLogger()
def register(self,listener,events=None):
"""
register a listener function
Parameters
-----------
listener : external listener function
events : tuple or list of relevant events (default=None)
"""
if events is not None and type(events) not in \
(types.TupleType,types.ListType):
events = (events,)
self.listeners[listener] = events
def dispatch(self,event=None, msg=None):
"""notify listeners """
for listener,events in self.listeners.items():
if events is None or event is None or event in events:
try:
listener(self,event,msg)
except (Exception,):
self.unregister(listener)
errmsg = "Exception in message dispatch: Handler '{0}' " \
"unregistered for event " \
"'{1}' ".format(listener.func_name,event)
self.logger.exception(errmsg)
def unregister(self,listener):
""" unregister listener function """
del self.listeners[listener]
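# Illustrative sketch of the register/dispatch contract defined above: listeners
# receive (sender, event, msg) and are only invoked for the events they registered
# for. This helper is not used by the wrapper itself.
def _example_observable():
    received = []
    def on_execution(sender, event, msg):
        received.append((event, msg))
    obs = Observable()
    obs.register(on_execution, events='execution')
    obs.dispatch(event='tick', msg=None)            # ignored: not a registered event
    obs.dispatch(event='execution', msg=[1, 2, 3])  # delivered to on_execution
    return received                                 # [('execution', [1, 2, 3])]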
class ReferenceWrapper(EWrapper,Observable):
# contract = None
# tickerId
# field
# price
def __init__ (self,subs={}):
super(ReferenceWrapper, self).__init__()
self.orderID = None
self.subscriptions = subs
def setSubscriptions (self,subs):
self.subscriptions = subs
def tickGeneric(self, tickerId, field, price):
pass
def tickPrice(self, tickerId, field, price, canAutoExecute):
showmessage('tickPrice', vars())
def tickSize(self, tickerId, field, size):
showmessage('tickSize', vars())
def tickString(self, tickerId, tickType, value):
#showmessage('tickString', vars())
pass
def tickOptionComputation(self, tickerId, field,
impliedVolatility, delta,
x, c, q, w, e, r):
#showmessage('tickOptionComputation', vars())
pass
def openOrderEnd(self):
pass
def orderStatus(self, orderId, status, filled, remaining,
avgFillPrice, permId, parentId, lastFillPrice,
clientId, whyHeId):
if filled:
self.dispatch(event='execution',msg=[1,2,3])
showmessage('orderStatus', vars())
def openOrder(self, orderId, contract, order, state):
showmessage('openOrder', vars())
def connectionClosed(self):
showmessage('connectionClosed', {})
def updateAccountValue(self, key, value, currency, accountName):
showmessage('updateAccountValue', vars())
def updatePortfolio(self, contract, position, marketPrice,
marketValue, averageCost, unrealizedPNL,
realizedPNL, accountName):
showmessage('updatePortfolio', vars())
def updateAccountTime(self, timeStamp):
showmessage('updateAccountTime', vars())
def nextValidId(self, orderId):
self.orderID = orderId
showmessage('nextValidId', vars())
def contractDetails(self, reqId, contractDetails):
showmessage('contractDetails', vars())
print contractDetails.__dict__
def bondContractDetails(self, reqId, contractDetails):
showmessage('bondContractDetails', vars())
def execDetails(self, orderId, contract, execution):
showmessage('execDetails', vars())
def error(self, id=None, errorCode=None, errorMsg=None):
showmessage('error', vars())
def updateMktDepth(self, tickerId, position, operation, side, price, size):
showmessage('updateMktDepth', vars())
def updateMktDepthL2(self, tickerId, position,
marketMaker, operation,
side, price, size):
showmessage('updateMktDepthL2', vars())
def updateNewsBulletin(self, msgId, msgType, message, origExchange):
showmessage('updateNewsBulletin', vars())
def managedAccounts(self, accountsList):
showmessage('managedAccounts', vars())
def receiveFA(self, faDataType, xml):
showmessage('receiveFA', vars())
def historicalData(self, reqId, date,
open, high, low, close,
volume, count, WAP, hasGaps):
showmessage('historicalData', vars())
def scannerParameters(self, xml):
showmessage('scannerParameters', vars())
def scannerData(self, reqId, rank, contractDetails,
distance, benchmark, projection, legsStr):
showmessage('scannerData', vars())
def accountDownloadEnd(self, accountName):
showmessage('accountDownloadEnd', vars())
def contractDetailsEnd(self, reqId):
showmessage('contractDetailsEnd', vars())
def currentTime(self):
showmessage('currentTime', vars())
def deltaNeutralValidation(self):
showmessage('deltaNeutralValidation', vars())
def error_0(self):
showmessage('error_0', vars())
def error_1(self):
showmessage('error_1', vars())
def execDetailsEnd(self):
showmessage('execDetailsEnd', vars())
def fundamentalData(self):
showmessage('fundamentalData', vars())
def realtimeBar(self):
showmessage('realtimeBar', vars())
def scannerDataEnd(self):
showmessage('scannerDataEnd', vars())
def tickEFP(self):
showmessage('tickEFP', vars())
def tickSnapshotEnd(self):
showmessage('tickSnapshotEnd', vars())
def marketDataType(self):
showmessage('marketDataType', vars())
def commissionReport(self, commissionReport):
showmessage('commissionReport', vars()) | CarterBain/Medici | ib/client/msg_wrapper.py | Python | bsd-3-clause | 6,312 |
#!/usr/bin/env python
#(c) 2014 Emory University. All Rights Reserved
# Code written by: Michael Sauria ([email protected])
import sys
import os
import subprocess
file_list, tmp_dir, out_dir, fastq_dump = sys.argv[1:5]
files = []
for line in open(file_list, 'r'):
line = line.strip()
if not line or line.startswith('#'):
continue
fields = line.split()
srx = fields[1]
for srr in fields[2].split(','):
files.append([srr, srx])
for file in files:
srr, srx = file
if (not os.path.exists("%s/%s_1.fastq" % (out_dir, srr)) or
not os.path.exists("%s/%s_2.fastq" % (out_dir, srr))):
if not os.path.exists("%s/%s.sra" % (tmp_dir, srr)):
subprocess.call('wget ftp://ftp-trace.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByExp/sra/%s/%s/%s/%s/%s.sra -O %s' % (srx[:3], srx[:6], srx, srr, srr, "%s/%s.sra" % (tmp_dir, srr)), shell=True)
for file in files:
srr, srx = file
if (not os.path.exists("%s/%s_1.fastq" % (out_dir, srr)) or
not os.path.exists("%s/%s_2.fastq" % (out_dir, srr))):
subprocess.call('cd %s; %s %s.sra --split-3' % (tmp_dir, fastq_dump, srr), shell=True)
subprocess.call('mv %s/%s_1.fastq %s/' % (tmp_dir, srr, out_dir), shell=True)
subprocess.call('mv %s/%s_2.fastq %s/' % (tmp_dir, srr, out_dir), shell=True)
subprocess.call('rm %s/%s.sra' % (tmp_dir, srr), shell=True)
| bxlab/HiFive_Paper | Scripts/Support/fetch_sra_files.py | Python | bsd-3-clause | 1,432 |
# encoding: utf-8
'''
Various vertical coordinates
Presently, only ocean s-coordinates are supported. Future plans will be to
include all of the vertical coordinate systems defined by the CF conventions.
'''
__docformat__ = "restructuredtext en"
import numpy as np
import warnings
class s_coordinate(object):
"""
Song and Haidvogel (1994) vertical coordinate transformation (Vtransform=1) and
stretching functions (Vstretching=1).
return an object that can be indexed to return depths
s = s_coordinate(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = min(self.hmin, self.Tcline)
self.Vtrans = 1
if (self.Tcline > self.hmin):
warnings.warn('Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \n Tcline = %d and hmin = %d. \n You need to make sure that Tcline <= hmin when using transformation 1.' %(self.Tcline,self.hmin))
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
lev = np.arange(1,self.N+1,1)
ds = 1.0 / self.N
self.s_rho = -self.c1 + (lev - self.p5) * ds
def _get_s_w(self):
lev = np.arange(0,self.Np,1)
ds = 1.0 / (self.Np-1)
self.s_w = -self.c1 + lev * ds
def _get_Cs_r(self):
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_rho) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_rho + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_r = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_w) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_w + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_w = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_w = self.s_w
class s_coordinate_2(s_coordinate):
"""
A. Shchepetkin (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=2).
return an object that can be indexed to return depths
s = s_coordinate_2(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = self.Tcline
self.Vtrans = 2
self.Aweight = 1.0
self.Bweight = 1.0
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_2, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_2, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_rho + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_rho + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_rho + self.c1)**self.Bweight))
self.Cs_r = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_r = Csur
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_w + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_w + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_w + self.c1)**self.Bweight))
self.Cs_w = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_w = Csur
else:
self.Cs_w = self.s_w
class s_coordinate_4(s_coordinate):
"""
A. Shchepetkin (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=4).
return an object that can be indexed to return depths
s = s_coordinate_4(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = self.Tcline
self.Vtrans = 4
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_4, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_4, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_rho**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
(self.c1 - np.exp(-self.theta_b))
self.Cs_r = Cbot
else:
self.Cs_r = Csur
def _get_Cs_w(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_w**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
( self.c1 - np.exp(-self.theta_b) )
self.Cs_w = Cbot
else:
self.Cs_w = Csur
class s_coordinate_5(s_coordinate):
"""
A. Shchepetkin (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=5).
return an object that can be indexed to return depths
s = s_coordinate_5(h, theta_b, theta_s, Tcline, N)
Brian Powell's surface stretching.
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = self.Tcline
self.Vtrans = 5
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
lev = np.arange(1,self.N+1,1)
s = -(lev * lev - 2 * lev * self.N + lev + self.N * self.N - self.N) / \
(self.N * self.N - self.N) - \
0.01 * (lev * lev - lev * self.N) / (self.c1 - self.N)
# (self.c1 * self.N * self.N - self.N) - \
self.s_rho = s
def _get_s_w(self):
lev = np.arange(0,self.Np,1)
s = -(lev * lev - 2 * lev * self.N + lev + self.N * self.N - self.N) / \
(self.N * self.N - self.N) - \
0.01 * (lev * lev - lev * self.N) / (self.c1 - self.N)
# (self.c1 * self.N * self.N - self.N) - \
self.s_w = s
def _get_Cs_r(self):
if self.theta_s > 0:
csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
else:
csur = -(self.s_rho * self.s_rho)
if self.theta_b > 0:
self.Cs_r = (np.exp(self.theta_b * (csur + self.c1)) - self.c1) / \
(np.exp(self.theta_b) - self.c1) - self.c1
else:
self.Cs_r = csur
def _get_Cs_w(self):
if self.theta_s > 0:
csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
else:
csur = -(self.s_w * self.s_w)
if self.theta_b > 0:
self.Cs_w = (np.exp(self.theta_b * (csur + self.c1)) - self.c1) / \
(np.exp(self.theta_b) - self.c1) - self.c1
else:
self.Cs_w = csur
class z_r(object):
"""
return an object that can be indexed to return depths of rho point
z_r = z_r(h, hc, N, s_rho, Cs_r, zeta, Vtrans)
"""
def __init__(self, h, hc, N, s_rho, Cs_r, zeta, Vtrans):
self.h = h
self.hc = hc
self.N = N
self.s_rho = s_rho
self.Cs_r = Cs_r
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_r = np.empty((ti, self.N) + self.h.shape, 'd')
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.N):
z0 = self.hc * self.s_rho[k] + (self.h - self.hc) * self.Cs_r[k]
z_r[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4 or self.Vtrans == 5:
for n in range(ti):
for k in range(self.N):
z0 = (self.hc * self.s_rho[k] + self.h * self.Cs_r[k]) / \
(self.hc + self.h)
z_r[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_r[res_index])
class z_w(object):
"""
return an object that can be indexed to return depths of w point
z_w = z_w(h, hc, Np, s_w, Cs_w, zeta, Vtrans)
"""
def __init__(self, h, hc, Np, s_w, Cs_w, zeta, Vtrans):
self.h = h
self.hc = hc
self.Np = Np
self.s_w = s_w
self.Cs_w = Cs_w
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_w = np.empty((ti, self.Np) + self.h.shape, 'd')
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.Np):
z0 = self.hc * self.s_w[k] + (self.h - self.hc) * self.Cs_w[k]
z_w[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4:
for n in range(ti):
for k in range(self.Np):
z0 = (self.hc * self.s_w[k] + self.h * self.Cs_w[k]) / \
(self.hc + self.h)
z_w[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_w[res_index])
class z_coordinate(object):
"""
return an object that can be indexed to return depths
z = z_coordinate(h, depth, N)
"""
def __init__(self, h, depth, N):
self.h = np.asarray(h)
self.N = int(N)
ndim = len(h.shape)
# print h.shape, ndim
if ndim == 2:
Mm, Lm = h.shape
self.z = np.zeros((N, Mm, Lm))
elif ndim == 1:
Sm = h.shape[0]
self.z = np.zeros((N, Sm))
for k in range(N):
self.z[k,:] = depth[k]
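# Illustrative sketch (made-up bathymetry and stretching parameters): build an
# s-coordinate object for a flat 100 m deep basin and index the derived depth
# objects. The parameter values are assumptions chosen only to demonstrate the
# call signature documented in the class docstrings above.
if __name__ == '__main__':
    h = 100.0 * np.ones((10, 12))
    vgrd = s_coordinate(h, theta_b=0.4, theta_s=5.0, Tcline=50.0, N=30)
    depths_rho = vgrd.z_r[:]   # depths (negative, metres) at rho points
    depths_w = vgrd.z_w[:]     # depths at w points
    assert depths_rho.shape == (30, 10, 12)
    assert depths_w.shape == (31, 10, 12)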
| kshedstrom/pyroms | pyroms/pyroms/vgrid.py | Python | bsd-3-clause | 14,274 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['LinearTrend'] , ['BestCycle'] , ['MLP'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_LinearTrend_BestCycle_MLP.py | Python | bsd-3-clause | 151 |
from eisoil.core.exception import CoreException
class ScheduleException(CoreException):
def __init__(self, desc):
self._desc = desc
def __str__(self):
return "Schedule: %s" % (self._desc,)
class ScheduleOverbookingError(ScheduleException):
def __init__(self, schedule_subject, resource_id, start_time, end_time):
"""All parameters should be strings or be able to str(...) itself."""
super(ScheduleOverbookingError, self).__init__("There are already reservations for %s during [%s - %s] in the %s schedule." % (str(resource_id), str(start_time), str(end_time), str(schedule_subject)))
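# Illustrative sketch (hypothetical identifiers): how a schedule implementation
# could signal a double booking with the exception defined above.
def _example_overbooking(schedule_subject='maintenance', resource_id='node-42',
                         start_time='2014-01-01T10:00', end_time='2014-01-01T11:00'):
    raise ScheduleOverbookingError(schedule_subject, resource_id, start_time, end_time)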
class ScheduleNoSuchReservationError(ScheduleException):
def __init__(self, reservation_id):
super(ScheduleNoSuchReservationError, self).__init__("Could not find reservation with id %d." % (reservation_id)) | EICT/C-BAS | src/vendor/schedule/scheduleexceptions.py | Python | bsd-3-clause | 857 |
from __future__ import unicode_literals
from collections import Counter
from itertools import groupby
from operator import itemgetter
import numpy
from django.db.models import F
from tracpro.charts.formatters import format_number
from .utils import get_numeric_values
from . import rules
def get_map_data(responses, question):
answers = get_answers(responses, question)
if question.question_type == question.TYPE_NUMERIC:
map_data = numeric_map_data(answers, question)
elif question.question_type == question.TYPE_MULTIPLE_CHOICE:
map_data = multiple_choice_map_data(answers, question)
else:
map_data = None
if map_data:
return {
'map-data': map_data,
'all-categories': rules.get_all_categories(question, answers),
}
else:
return None
def get_answers(responses, question):
"""Return answers to the question from the responses, annotated with `boundary`.
Excludes answers that are not associated with a boundary.
"""
answers = question.answers.filter(response__in=responses)
answers = answers.annotate(boundary=F('response__contact__region__boundary'))
answers = answers.exclude(boundary=None)
return answers
def numeric_map_data(answers, question):
"""For each boundary, display the category of the average answer value."""
map_data = {}
answer_data = [
{
'boundary': answer.boundary,
'value_to_use': answer.value_to_use
}
for answer in answers.order_by('boundary')
]
for boundary_id, _answers in groupby(answer_data, itemgetter('boundary')):
values = get_numeric_values(a['value_to_use'] for a in _answers)
if len(values) > 0:
average = round(numpy.mean(values), 2)
map_data[boundary_id] = {
'average': format_number(average, digits=2),
'category': question.categorize(average),
}
return map_data
def multiple_choice_map_data(answers, question):
"""For each boundary, display the most common answer category."""
map_data = {}
answer_data = answers.exclude(category=None).exclude(category="")
answer_data = answer_data.order_by('boundary').values('boundary', 'category')
for boundary_id, _answers in groupby(answer_data, itemgetter('boundary')):
top_category = Counter(a['category'] for a in _answers).most_common(1)[0][0]
map_data[boundary_id] = {
'category': top_category,
}
return map_data
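# Illustrative sketch of the "most common category" selection used in
# multiple_choice_map_data above, with made-up category strings and no Django
# objects involved.
def _example_top_category():
    categories = ['yes', 'no', 'yes', 'yes', 'no']
    return Counter(categories).most_common(1)[0][0]  # 'yes'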
| rapidpro/tracpro | tracpro/polls/maps.py | Python | bsd-3-clause | 2,550 |
from setuptools import setup, find_packages
setup(
name="gevent-websocket",
version="0.3.6",
description="Websocket handler for the gevent pywsgi server, a Python network library",
long_description=open("README.rst").read(),
author="Jeffrey Gelens",
author_email="[email protected]",
license="BSD",
url="https://bitbucket.org/Jeffrey/gevent-websocket",
download_url="https://bitbucket.org/Jeffrey/gevent-websocket",
install_requires=("gevent", "greenlet"),
packages=find_packages(exclude=["examples","tests"]),
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
],
)
| imankulov/gevent-websocket | setup.py | Python | bsd-3-clause | 946 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
__doc__ = """
# General Concepts
## Introduction
Bambou provides a set of objects that allow the manipulation of ReST entities very easily. It deals with all possible CRUD operations.
It is based on the library `Bambou`, which defines all these low level operations in a single place.
`Bambou` is composed of the following important classes:
* `bambou.NURESTSession`
Class representing an authenticated session.
* `bambou.NURESTObject`
Parent class of all ReST entities. All ReST exposed objects inherit from this class.
* `bambou.NURESTFetcher`
Class used to get children of a `bambou.NURESTObject`.
* `bambou.NURESTPushCenter`
Class that deals with intercepting and rerouting ReST Push Notifications.
> There are more objects in `Bambou`, but you don't need to know all of them for now.
## NURESTSession
The `bambou.NURESTSession` represents some user credentials coupled with an API URL. All ReST calls are done using
the current active session. `bambou.NURESTSession` is an abstract class that must be reimplemented by anything using `Bambou`.
In an SDK built on bambou (referred to as `MySDK` here), you use a class named `mysdk.v3_2.MySession`, which will be used in the following examples.
#!python
session = MySession(username="user", password="secret", enterprise="organization", api_url="https://server")
session.start()
# your script
When you start the session, a ReST call will be sent to the API endpoint in order to get the API key.
If the credentials are valid, the attribute `MySDK.v3_2.MySession.root` will be populated with information such as your name,
your phone number, your avatar, your enterprise name and ID etc. This `user` is the root object of everything as all subsequent
calls need to be done in the context of your account (for instance, your `/enterprises` are different from another account's `/enterprises`)
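For instance, once the session is started you can read information from this root object (the attribute name below is illustrative):
#!python
me = session.root
print(me.enterprise_name) # illustrative attribute, populated from the API during start()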
It is also possible to create sub sessions with the python statement `with`:
#!python
cspsession = MySession(username="user", password="secret", enterprise="organization", api_url="https://server")
adminsession = MySession(username="admin", password="secret", enterprise="enterprise", api_url="https://server")
cspsession.start()
# this part of the code will use the CSP root user
with adminsession.start():
# this code block will be executed as admin of `enterprise`
# back to csp root session
> You **must** use `start()` when using the `with` statement, even if the session has already been started in the main context.
## NURESTObject
`bambou.NURESTObject` is the parent class of all `MySDK` entities.
### ReST Names
All `bambou.NURESTObject` subclasses implement a given method that returns the actual ReST name of the object. For instance, the ReST name of a Unicorn object is `unicorn`.
These names are used to forge the correct URI when doing CRUD operations on them.
> ReST names can be used as unique resource identifier for a given object.
> ReST names are auto generated. You never need to manually define them.
### ReST API URI Generation
`bambou.NURESTObject` is able to forge all the URI needed to interact with the server through the ReST API.
For instance, if an object with a ReST name set to `object` needs to get the list of children with ReST name set to `subobject`, `Bambou` will use the following endpoint URL:
`GET {api_base_url}/objects/{id}/subobjects`
If an object with a ReST name set to `entity` needs to fetch itself, the generated URL will be
`GET {api_base_url}/entities/{id}`
> `Bambou` automagically deals with plurals.
> The ReST base URL is pulled from the current active `bambou.NURESTSession`.
> URI are auto generated. You never need to deal with them manually.
### Exposing ReST Attributes
Exposed attributes will be converted and sent to the server when you do CRUD operations. That way, if an object has an attribute `name`, it can be marked as a ReST attribute.
When saving the object, the value of `name` will be put into the generated JSON structure that will be sent to the server, or automatically populated from a JSON structure that is coming from the server.
Not only can the attribute be exposed, but also its type and other information such as whether it is read only, its allowed values, its format, its default value and so on.
> exposing ReST Attributes is auto generated. You never need to manually expose new attributes.
### CRUD Operations
`bambou.NURESTObject` allows you to perform all sorts of CRUD operations (a short sketch follows the notes below).
* `bambou.NURESTObject.fetch`
* `bambou.NURESTObject.save`
* `bambou.NURESTObject.delete`
* `bambou.NURESTObject.create_child`
* `bambou.NURESTObject.assign`
* `bambou.NURESTObject.instantiate_child`
> All these methods require the current `bambou.NURESTObject` to have a valid `bambou.NURESTObject.ID`.
> You may notice that there is no creation method. Creation is always happening from a parent object and is done using `create_child`.
> You may notice that an optional parameter `callback` is present. This is because `MySDK` can work completely asynchronously.
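For illustration, here is a minimal sketch of these operations (the `Enterprise` class and its `name` attribute are illustrative and not part of `Bambou` itself):
#!python
enterprise = Enterprise(name="my company")
session.root.create_child(enterprise) # POST a new child under the root object
enterprise.name = "my renamed company"
enterprise.save() # PUT the updated attributes
enterprise.delete() # DELETE the object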
### Converting to and from a Python Dictionary
`bambou.NURESTObject` allows quick and easy conversion from and to python dictionaries
* `bambou.NURESTObject.from_dict`
* `bambou.NURESTObject.to_dict`
> You never need to perform the actual JSON conversion when sending info to the server. `bambou.NURESTConnection` will do that automatically, but you can use these methods to print an object, or to copy information from one object into another, as in the sketch below.
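For instance (the `Enterprise` class is illustrative and not part of `Bambou` itself):
#!python
enterprise.fetch() # GET the latest state from the server
data = enterprise.to_dict() # plain python dictionary
clone = Enterprise()
clone.from_dict(data) # clone now carries the same attribute values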
## NURESTFetcher
`bambou.NURESTFetcher` is a class allowing a `bambou.NURESTObject` to fetch its children. All `bambou.NURESTObject` instances have one or more fetchers, unless the object is a final object in the model hierarchy. `bambou.NURESTFetcher` provides a lot of flexibility regarding the way you want to get a given children list. It can deal with simple object fetching, pagination, filtering, request headers, grouping, etc.
### Fetching Children List
`bambou.NURESTFetcher` has three important methods, illustrated in the short sketch below:
* `bambou.NURESTFetcher.fetch`
* `bambou.NURESTFetcher.get`
* `bambou.NURESTFetcher.get_first`
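A minimal sketch (the `users` fetcher name is illustrative; any declared fetcher works the same way):
#!python
users = enterprise.users.get(filter="creationDate > '01-01-2015'") # fetch the matching children
first_user = enterprise.users.get_first() # or just grab the first child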
### Discussion about Fetchers
Fetchers are a powerful concept that makes the process of getting child objects completely generic and code friendly. `bambou.NURESTObject` provides methods that allow you to deal programmatically with its fetchers in a completely generic way.
* `bambou.NURESTObject.fetcher_for_rest_name`
* `bambou.NURESTObject.fetchers`
* `bambou.NURESTObject.children_rest_names`
This allows completely abstract, programmatic operations on any object.
For instance, the following function will add a new `MySDK.v3_2.Metadata` to the entire hierarchy of children of a given object, restricted to children created after a certain date:
#!python
def apply_metadata_to_all_children(root_object, metadata, filter=None):
# Loop on all declared children fetchers
for fetcher in root_object.fetchers:
# Fetch the list of the children
children = fetcher.get(filter=filter)
# Loop on all fetched children
for child in children:
# Add the metadata to the current children
child.create_child(metadata)
# Start over recursively on the children of the current child
apply_metadata_to_all_children(child, metadata)
enterprise = Enterprise(id="xxxx-xxxx-xxx-xxxx")
metadata = Metadata(name="my metadata", blob="hello world!")
apply_metadata_to_all_children(enterprise, metadata, filter="creationDate > '01-01-2015'")
## NURESTPushCenter
The API supports client side push through a long polling connection. ReST clients can connect to that channel and will get a notification as soon as they or someone else in the system changes something. These events are filtered by permissions, which means that if someone changes a property of an object you cannot see, you won't get notified. `MySDK` provides the `bambou.NURESTPushCenter`, which encapsulates all the logic to deal with the event channel. It runs in its own thread and will call registered callbacks when it receives a push.
A `bambou.NURESTPushCenter` is automatically created with each `bambou.NURESTSession` and it is available from the attribute `bambou.NURESTSession.push_center`.
#!python
session = MySession(username="user", password="secret", enterprise="organization", api_url="https://server")
session.start()
session.push_center.start()
> You need to explicitly start the push center.
### Using the NURESTPushCenter
Only the following methods are important:
* `bambou.NURESTPushCenter.start`
* `bambou.NURESTPushCenter.add_delegate`
* `bambou.NURESTPushCenter.remove_delegate`
### Example
Here is a really simple code sample that will print the push data on every push:
#!python
from MySDK import *
from pprint import pprint
from time import sleep
session = MySession(username="csproot", password="secret", enterprise="csp", api_url="https://server")
session.start()
def on_receive_push(data):
pprint(data)
session.push_center.add_delegate(on_receive_push)
session.push_center.start()
# default stupid run loop. don't do that in real life :)
while True:
sleep(1000)
## Conclusion
Now you know the basics of `Bambou` and, by extension, of `MySDK`. Remember that all objects in `MySDK` are subclasses of `bambou.NURESTObject`, so they **all** work exactly the same.
There is a lot more to know about `Bambou`, like the asynchronous mode, auto model parsing, easy controller creation thanks to introspection and so on. We'll cover this in a different advanced section.
"""
try:
import requests
requests.packages.urllib3.disable_warnings()
except:
pass
import logging
bambou_logger = logging.getLogger('bambou')
pushcenter_logger = logging.getLogger('pushcenter')
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
bambou_logger.addHandler(NullHandler())
__all__ = ['NURESTRootObject', 'NURESTConnection', 'NURESTModelController', 'NURESTFetcher', 'NURESTLoginController', 'NURESTObject', 'NURESTPushCenter', 'NURESTRequest', 'NURESTResponse', 'NURESTSession', 'BambouConfig']
from bambou.nurest_session import NURESTSession
from bambou.nurest_root_object import NURESTRootObject
from bambou.nurest_connection import NURESTConnection
from bambou.nurest_fetcher import NURESTFetcher
from bambou.nurest_login_controller import NURESTLoginController
from bambou.nurest_object import NURESTObject
from bambou.nurest_push_center import NURESTPushCenter
from bambou.nurest_request import NURESTRequest
from bambou.nurest_response import NURESTResponse
from bambou.nurest_modelcontroller import NURESTModelController
from bambou.config import BambouConfig
| nuagenetworks/bambou | bambou/__init__.py | Python | bsd-3-clause | 12,559 |
import numpy as np
from pysal.lib.common import requires
@requires('matplotlib')
def shift_colormap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max when you want the
middle of the colormap's dynamic range to be at zero.
Parameters
----------
cmap : The matplotlib colormap to be altered
start : Offset from lowest point in the colormap's range.
Defaults to 0.0 (no lower offset). Should be between
0.0 and `midpoint`.
midpoint : The new center of the colormap. Defaults to
0.5 (no shift). Should be between 0.0 and 1.0. In
general, this should be 1 - vmax/(vmax + abs(vmin))
For example if your data range from -15.0 to +5.0 and
you want the center of the colormap at 0.0, `midpoint`
should be set to 1 - 5/(5 + 15), or 0.75
stop : Offset from highest point in the colormap's range.
Defaults to 1.0 (no upper offset). Should be between
`midpoint` and 1.0.
Returns
-------
new_cmap : A new colormap that has been shifted.
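Example
-------
A minimal sketch (the data range is only illustrative): center a
seismic colormap at zero for data spanning -15.0 to +5.0:
>>> import matplotlib.pyplot as plt
>>> shifted = shift_colormap(plt.cm.seismic, midpoint=1 - 5.0 / (5.0 + 15.0))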
'''
import matplotlib as mpl
import matplotlib.pyplot as plt
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
new_cmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=new_cmap)
return new_cmap
@requires('matplotlib')
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
Function to truncate a colormap by selecting a subset of the original colormap's values
Parameters
----------
cmap : Matplotlib colormap to be altered
minval : Minimum value of the original colormap to include in the truncated colormap
maxval : Maximum value of the original colormap to include in the truncated colormap
n : Number of intervals between the min and max values for the gradient of the truncated colormap
Returns
-------
new_cmap : A new colormap that has been truncated.
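Example
-------
A minimal sketch: keep only the upper (positive) half of a colormap:
>>> import matplotlib.pyplot as plt
>>> upper_half = truncate_colormap(plt.cm.seismic, minval=0.5, maxval=1.0)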
'''
import matplotlib as mpl
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
@requires('matplotlib')
@requires('geopandas')
def compare_surfaces(data, var1, var2, gwr_t, gwr_bw, mgwr_t, mgwr_bw, name,
kwargs1, kwargs2, savefig=None):
'''
Function that creates comparative visualization of GWR and MGWR surfaces.
Parameters
----------
data : pandas or geopandas Dataframe
gwr/mgwr results
var1 : string
name of gwr parameter estimate column in frame
var2 : string
name of mgwr parameter estimate column in frame
gwr_t : string
name of gwr t-values column in frame associated with var1
gwr_bw : float
bandwidth for gwr model for var1
mgwr_t : string
name of mgwr t-values column in frame associated with var2
mgwr_bw: float
bandwidth for mgwr model for var2
name : string
common variable name to use for title
kwargs1:
additional plotting arguments for gwr surface
kwargs2:
additional plotting arguments for mgwr surface
savefig: string, optional
path to save the figure. Default is None (do not save the figure).
'''
import matplotlib.pyplot as plt
import geopandas as gp
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(45,20))
ax0 = axes[0]
ax0.set_title('GWR ' + name + ' Surface (BW: ' + str(gwr_bw) +')', fontsize=40)
ax1 = axes[1]
ax1.set_title('MGWR ' + name + ' Surface (BW: ' + str(mgwr_bw) +')', fontsize=40)
#Set color map
cmap = plt.cm.seismic
#Find min and max values of the two combined datasets
gwr_min = data[var1].min()
gwr_max = data[var1].max()
mgwr_min = data[var2].min()
mgwr_max = data[var2].max()
vmin = np.min([gwr_min, mgwr_min])
vmax = np.max([gwr_max, mgwr_max])
#If all values are negative use the negative half of the colormap
if (vmin < 0) & (vmax < 0):
cmap = truncate_colormap(cmap, 0.0, 0.5)
#If all values are positive use the positive half of the colormap
elif (vmin > 0) & (vmax > 0):
cmap = truncate_colormap(cmap, 0.5, 1.0)
#Otherwise, there are positive and negative values, so shift the colormap so that zero is the midpoint
else:
cmap = shift_colormap(cmap, start=0.0, midpoint=1 - vmax/(vmax + abs(vmin)), stop=1.)
#Create scalar mappable for colorbar and stretch colormap across range of data values
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
#Plot GWR parameters
data.plot(var1, cmap=sm.cmap, ax=ax0, vmin=vmin, vmax=vmax, **kwargs1)
if (gwr_t == 0).any():
data[gwr_t == 0].plot(color='lightgrey', ax=ax0, **kwargs2)
#Plot MGWR parameters
data.plot(var2, cmap=sm.cmap, ax=ax1, vmin=vmin, vmax=vmax, **kwargs1)
if (mgwr_t == 0).any():
data[mgwr_t == 0].plot(color='lightgrey', ax=ax1, **kwargs2)
#Set figure options and plot
fig.tight_layout()
fig.subplots_adjust(right=0.9)
cax = fig.add_axes([0.92, 0.14, 0.03, 0.75])
sm._A = []
cbar = fig.colorbar(sm, cax=cax)
cbar.ax.tick_params(labelsize=50)
ax0.get_xaxis().set_visible(False)
ax0.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
if savefig is not None:
plt.savefig(savefig)
plt.show()
| lixun910/pysal | pysal/model/mgwr/utils.py | Python | bsd-3-clause | 6,190 |
"""Template loader for app-namespace"""
import errno
import io
import os
from collections import OrderedDict
import django
from django.apps import apps
try:
from django.template import Origin
except ImportError: # pragma: no cover
class Origin(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
from django.template import TemplateDoesNotExist
from django.template.loaders.base import Loader as BaseLoader
from django.utils._os import safe_join
from django.utils._os import upath
from django.utils.functional import cached_property
class NamespaceOrigin(Origin):
def __init__(self, app_name, *args, **kwargs):
self.app_name = app_name
super(NamespaceOrigin, self).__init__(*args, **kwargs)
class Loader(BaseLoader):
"""
App namespace loader that allows you to both extend and override
a template provided by an app at the same time.
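For example (application and template names are only illustrative), a
project template can extend the base.html shipped by the 'blog'
application while also overriding it:
{% extends "blog:base.html" %}
The part before the colon is the application name or label; the part
after it is the path of the template inside that application's
'templates' directory.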
"""
is_usable = True
def __init__(self, *args, **kwargs):
super(Loader, self).__init__(*args, **kwargs)
self._already_used = []
def reset(self, mandatory_on_django_18):
"""
Empty the cache of paths already used.
"""
if django.VERSION[1] == 8:
if not mandatory_on_django_18:
return
self._already_used = []
def get_app_template_path(self, app, template_name):
"""
Return the full path of a template name located in an app.
"""
return safe_join(self.app_templates_dirs[app], template_name)
@cached_property
def app_templates_dirs(self):
"""
Build a cached dict with settings.INSTALLED_APPS as keys
and the 'templates' directory of each application as values.
"""
app_templates_dirs = OrderedDict()
for app_config in apps.get_app_configs():
templates_dir = os.path.join(
getattr(app_config, 'path', '/'), 'templates')
if os.path.isdir(templates_dir):
templates_dir = upath(templates_dir)
app_templates_dirs[app_config.name] = templates_dir
app_templates_dirs[app_config.label] = templates_dir
return app_templates_dirs
def get_contents(self, origin):
"""
Try to load the origin.
"""
try:
path = self.get_app_template_path(
origin.app_name, origin.template_name)
with io.open(path, encoding=self.engine.file_charset) as fp:
return fp.read()
except KeyError:
raise TemplateDoesNotExist(origin)
except IOError as error:
if error.errno == errno.ENOENT:
raise TemplateDoesNotExist(origin)
raise
def get_template_sources(self, template_name):
"""
Build a list of Origin objects to load 'template_name', split on ':'.
The first item is the name of the application and the last item
is the true value of 'template_name' provided by the specified
application.
"""
if ':' not in template_name:
self.reset(True)
return
app, template_path = template_name.split(':')
if app:
yield NamespaceOrigin(
app_name=app,
name='app_namespace:%s:%s' % (app, template_name),
template_name=template_path,
loader=self)
return
self.reset(False)
for app in self.app_templates_dirs:
file_path = self.get_app_template_path(app, template_path)
if file_path in self._already_used:
continue
self._already_used.append(file_path)
yield NamespaceOrigin(
app_name=app,
name='app_namespace:%s:%s' % (app, template_name),
template_name=template_path,
loader=self)
def load_template_source(self, *ka):
"""
Backward compatible method for Django < 2.0.
"""
template_name = ka[0]
for origin in self.get_template_sources(template_name):
try:
return self.get_contents(origin), origin.name
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(template_name)
| Fantomas42/django-app-namespace-template-loader | app_namespace/loader.py | Python | bsd-3-clause | 4,361 |
from sympy.core import (Basic, Expr, S, C, Symbol, Wild, Add, sympify, diff,
oo, Tuple, Dummy, Equality, Interval)
from sympy.core.symbol import Dummy
from sympy.core.compatibility import ordered_iter
from sympy.integrals.trigonometry import trigintegrate
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.integrals.rationaltools import ratint
from sympy.integrals.risch import heurisch
from sympy.utilities import xthreaded, flatten, any, all
from sympy.polys import Poly, PolynomialError
from sympy.solvers import solve
from sympy.functions import Piecewise, sign
from sympy.geometry import Curve
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.series import limit
def _free_symbols(function, limits):
"""
Return the symbols that will exist when the function is evaluated as
an Integral or a Sum. This is useful if one is trying to determine
whether the result is dependent on a certain symbol or not.
This is written as a private function so it can be used from Sum as well
as from Integral.
"""
if function.is_zero:
return set()
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
if len(xab) == 3 and xab[1] == xab[2]:
# if two limits are the same the integral is 0
# and there are no symbols
return set()
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
def _process_limits(*symbols):
"""Convert the symbols-related limits into propert limits,
storing them as Tuple(symbol, lower, upper). The sign of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the sign is changed.
"""
limits = []
sign = 1
for V in symbols:
if isinstance(V, Symbol):
limits.append(Tuple(V))
continue
elif ordered_iter(V, Tuple):
V = sympify(flatten(V))
if V[0].is_Symbol:
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval):
V[1:] = [V[1].start, V[1].end]
if len(V) == 3:
if V[1] is None and V[2] is not None:
nlim = [V[2]]
elif V[1] is not None and V[2] is None:
sign *= -1
nlim = [V[1]]
elif V[1] is None and V[2] is None:
nlim = []
else:
nlim = V[1:]
limits.append(Tuple(newsymbol, *nlim ))
continue
elif len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, sign
class Integral(Expr):
"""Represents unevaluated integral."""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = piecewise_fold(sympify(function))
if function is S.NaN:
return S.NaN
if symbols:
limits, sign = _process_limits(*symbols)
else:
# no symbols provided -- let's compute full anti-derivative
limits, sign = [Tuple(s) for s in function.free_symbols], 1
if len(limits) != 1:
raise ValueError("specify integration variables to integrate %s" % function)
while isinstance(function, Integral):
# denest the integrand
limits = list(function.limits) + limits
function = function.function
obj = Expr.__new__(cls, **assumptions)
arglist = [sign*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = all(s.is_commutative for s in obj.free_symbols)
return obj
def __getnewargs__(self):
return (self.function,) + tuple([tuple(xab) for xab in self.limits])
@property
def function(self):
return self._args[0]
@property
def limits(self):
return self._args[1:]
@property
def variables(self):
"""Return a list of the integration variables.
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).variables
[i]
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the
integral is evaluated. This is useful if one is trying to
determine whether an integral is dependent on a certain
symbol or not.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, y, 1)).free_symbols
set([y])
"""
return _free_symbols(self.function, self.limits)
@property
def is_zero(self):
"""Since Integral doesn't autosimplify it it useful to see if
it would simplify to zero or not in a trivial manner, i.e. when
the function is 0 or two limits of a definite integral are the same.
This is a very naive and quick test, not intended to check for special
patterns like Integral(sin(m*x)*cos(n*x), (x, 0, 2*pi)) == 0.
"""
if (self.function.is_zero or
any(len(xab) == 3 and xab[1] == xab[2] for xab in self.limits)):
return True
if not self.free_symbols and self.function.is_number:
# the integrand is a number and the limits are numerical
return False
@property
def is_number(self):
"""
Return True if the Integral will result in a number, else False.
sympy considers anything that will result in a number to have
is_number == True.
>>> from sympy import log
>>> log(2).is_number
True
Integrals are a special case since they contain symbols that can
be replaced with numbers. Whether the integral can be done or not is
another issue. But answering whether the final result is a number is
not difficult.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x).is_number
False
>>> Integral(x, y).is_number
False
>>> Integral(x, (y, 1, x)).is_number
False
>>> Integral(x, (y, 1, 2)).is_number
False
>>> Integral(x, (y, 1, 1)).is_number
True
>>> Integral(x, (x, 1, 2)).is_number
True
>>> Integral(x*y, (x, 1, 2), (y, 1, 3)).is_number
True
>>> Integral(1, x, (x, 1, 2)).is_number
True
"""
integrand, limits = self.function, self.limits
isyms = integrand.atoms(Symbol)
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue # it may be removed later
elif len(xab) == 3 and xab[1] == xab[2]: # XXX naive equality test
return True # integral collapsed
if xab[0] in isyms:
# take it out of the symbols since it will be replaced
# with whatever the limits of the integral are
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
# if there are no surviving symbols then the result is a number
return len(isyms) == 0
def as_dummy(self):
"""
Replace instances of the integration variables with their dummy
counterparts to make clear what are dummy variables and what
are real-world symbols in an Integral. The "integral at" limit
that has a length of 1 will be explicated with its length-2
equivalent.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x).as_dummy()
Integral(_x, (_x, x))
>>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
Integral(_x, (_x, x, _y), (_y, x, y))
If there were no dummies in the original expression, then the
output of this function will show which symbols cannot be
changed by subs(), those with an underscore prefix.
"""
reps = {}
f = self.function
limits = list(self.limits)
for i in xrange(-1, -len(limits) - 1, -1):
xab = list(limits[i])
if len(xab) == 1:
xab = xab*2
x = xab[0]
xab[0] = x.as_dummy()
for j in range(1, len(xab)):
xab[j] = xab[j].subs(reps)
reps[x] = xab[0]
limits[i] = xab
f = f.subs(reps)
return Integral(f, *limits)
def transform(self, x, mapping, inverse=False):
"""
Replace the integration variable x in the integrand with the
expression given by `mapping`, e.g. 2*x or 1/x. The integrand and
endpoints are rescaled to preserve the value of the original
integral.
In effect, this performs a variable substitution (although the
symbol remains unchanged; follow up with subs to obtain a
new symbol.)
With inverse=True, the inverse transformation is performed.
The mapping must be uniquely invertible (e.g. a linear or linear
fractional transformation).
"""
if x not in self.variables:
return self
limits = self.limits
function = self.function
y = Dummy('y')
inverse_mapping = solve(mapping.subs(x, y) - x, y)
if len(inverse_mapping) != 1 or x not in inverse_mapping[0].free_symbols:
raise ValueError("The mapping must be uniquely invertible")
inverse_mapping = inverse_mapping[0]
if inverse:
mapping, inverse_mapping = inverse_mapping, mapping
function = function.subs(x, mapping) * mapping.diff(x)
def calc_limit(a, b):
"""replace x with a, using subs if possible, otherwise limit
where sign of b is considered"""
wok = inverse_mapping.subs(x, a)
if wok is S.NaN or wok.is_bounded is False and a.is_bounded:
return limit(sign(b)*inverse_mapping, x, a)
return wok
newlimits = []
for xab in limits:
sym = xab[0]
if sym == x and len(xab) == 3:
a, b = xab[1:]
a, b = calc_limit(a, b), calc_limit(b, a)
if a == b:
raise ValueError("The mapping must transform the "
"endpoints into separate points")
if a > b:
a, b = b, a
function = -function
newlimits.append((sym, a, b))
else:
newlimits.append(xab)
return Integral(function, *newlimits)
def doit(self, **hints):
if not hints.get('integrals', True):
return self
deep = hints.get('deep', True)
# check for the trivial case of equal upper and lower limits
if self.is_zero:
return S.Zero
# now compute and check the function
function = self.function
if deep:
function = function.doit(**hints)
if function.is_zero:
return S.Zero
# There is no trivial answer, so continue
undone_limits = []
ulj = set() # free symbols of any undone limits' upper and lower limits
for xab in self.limits:
# compute uli, the free symbols in the
# Upper and Lower limits of limit I
if len(xab) == 1:
uli = set(xab[:1])
elif len(xab) == 2:
uli = xab[1].free_symbols
elif len(xab) == 3:
uli = xab[1].free_symbols.union(xab[2].free_symbols)
# this integral can be done as long as there is no blocking
# limit that has been undone. An undone limit is blocking if
# it contains an integration variable that is in this limit's
# upper or lower free symbols or vice versa
if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
undone_limits.append(xab)
ulj.update(uli)
continue
antideriv = self._eval_integral(function, xab[0])
if antideriv is None:
undone_limits.append(xab)
else:
if len(xab) == 1:
function = antideriv
else:
if len(xab) == 3:
x, a, b = xab
if len(xab) == 2:
x, b = xab
a = None
if deep:
if isinstance(a, Basic):
a = a.doit(**hints)
if isinstance(b, Basic):
b = b.doit(**hints)
if antideriv.is_Poly:
gens = list(antideriv.gens)
gens.remove(x)
antideriv = antideriv.as_expr()
function = antideriv._eval_interval(x, a, b)
function = Poly(function, *gens)
else:
function = antideriv._eval_interval(x, a, b)
if undone_limits:
return self.func(*([function] + undone_limits))
return function
def _eval_expand_basic(self, deep=True, **hints):
from sympy import flatten
if not deep:
return self
else:
return Integral(self.function.expand(deep=deep, **hints),\
flatten(*self.limits))
def _eval_derivative(self, sym):
"""Evaluate the derivative of the current Integral object by
differentiating under the integral sign [1], using the Fundamental
Theorem of Calculus [2] when possible.
Whenever an Integral is encountered that is equivalent to zero or
has an integrand that is independent of the variable of integration
those integrals are performed. All others are returned as Integral
instances which can be resolved with doit() (provided they are integrable).
References:
[1] http://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
[2] http://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> i = Integral(x + y, y, (y, 1, x))
>>> i.diff(x)
Integral(x + y, (y, x)) + Integral(1, (y, y), (y, 1, x))
>>> i.doit().diff(x) == i.diff(x).doit()
True
>>> i.diff(y)
0
The previous must be true since there is no y in the evaluated integral:
>>> i.free_symbols
set([x])
>>> i.doit()
2*x**3/3 - x/2 - 1/6
"""
# differentiate under the integral sign; we do not
# check for regularity conditions (TODO), see issue 1116
# get limits and the function
f, limits = self.function, list(self.limits)
# the order matters if variables of integration appear in the limits
# so work our way in from the outside to the inside.
limit = limits.pop(-1)
if len(limit) == 3:
x, a, b = limit
elif len(limit) == 2:
x, b = limit
a = None
else:
a = b = None
x = limit[0]
if limits: # f is the argument to an integral
f = Integral(f, *tuple(limits))
# assemble the pieces
rv = 0
if b is not None:
rv += f.subs(x, b)*diff(b, sym)
if a is not None:
rv -= f.subs(x, a)*diff(a, sym)
if len(limit) == 1 and sym == x:
# the dummy variable *is* also the real-world variable
arg = f
rv += arg
else:
# the dummy variable might match sym but it's
# only a dummy and the actual variable is determined
# by the limits, so mask off the variable of integration
# while differentiating
u = Dummy('u')
arg = f.subs(x, u).diff(sym).subs(u, x)
rv += Integral(arg, Tuple(x, a, b))
return rv
def _eval_integral(self, f, x):
"""Calculate the anti-derivative to the function f(x).
This is a powerful function that should in theory be able to integrate
everything that can be integrated. If you find something, that it
doesn't, it is easy to implement it.
(1) Simple heuristics (based on pattern matching and integral table):
- most frequently used functions (e.g. polynomials)
- functions non-integrable by any of the following algorithms (e.g.
exp(-x**2))
(2) Integration of rational functions:
(a) using apart() - apart() is full partial fraction decomposition
procedure based on Bronstein-Salvy algorithm. It gives formal
decomposition with no polynomial factorization at all (so it's fast
and gives the most general results). However it needs much better
implementation of the RootsOf class (in fact, any implementation).
(b) using Trager's algorithm - possibly faster than (a) but needs
implementation :)
(3) Whichever implementation of pmInt (Mateusz, Kirill's or a
combination of both).
- this way we can efficiently handle a huge class of elementary and
special functions
(4) Recursive Risch algorithm as described in Bronstein's integration
tutorial.
- this way we can handle those integrable functions for which (3)
fails
(5) Powerful heuristics based mostly on user defined rules.
- handle complicated, rarely used cases
"""
# if it is a poly(x) then let the polynomial integrate itself (fast)
#
# It is important to make this check first, otherwise the other code
# will return a sympy expression instead of a Polynomial.
#
# see Polynomial for details.
if isinstance(f, Poly):
return f.integrate(x)
# Piecewise antiderivatives need to call special integrate.
if f.func is Piecewise:
return f._eval_integral(x)
# let's cut it short if `f` does not depend on `x`
if not f.has(x):
return f*x
# try to convert to poly(x) and then integrate if successful (fast)
poly = f.as_poly(x)
if poly is not None:
return poly.integrate().as_expr()
# since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
# we are going to handle Add terms separately,
# if `f` is not Add -- we only have one term
parts = []
args = Add.make_args(f)
for g in args:
coeff, g = g.as_independent(x)
# g(x) = const
if g is S.One:
parts.append(coeff*x)
continue
# c
# g(x) = (a*x+b)
if g.is_Pow and not g.exp.has(x):
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
M = g.base.match(a*x + b)
if M is not None:
if g.exp == -1:
h = C.log(g.base)
else:
h = g.base**(g.exp + 1) / (g.exp + 1)
parts.append(coeff * h / M[a])
continue
# poly(x)
# g(x) = -------
# poly(x)
if g.is_rational_function(x):
parts.append(coeff * ratint(g, x))
continue
# g(x) = Mul(trig)
h = trigintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# g(x) has at least a DiracDelta term
h = deltaintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# fall back to the more general algorithm
try:
h = heurisch(g, x, hints=[])
except PolynomialError:
# XXX: this exception means there is a bug in the
# implementation of heuristic Risch integration
# algorithm.
h = None
# if we failed maybe it was because we had
# a product that could have been expanded,
# so let's try an expansion of the whole
# thing before giving up; we don't try this
# at the outset because there are things
# that cannot be solved unless they are
# NOT expanded e.g., x**x*(1+log(x)). There
# should probably be a checker somewhere in this
# routine to look for such cases and try to do
# collection on the expressions if they are already
# in an expanded form
if not h and len(args) == 1:
f = f.expand(mul=True, deep=False)
if f.is_Add:
return self._eval_integral(f, x)
if h is not None:
parts.append(coeff * h)
else:
return None
return Add(*parts)
def _eval_lseries(self, x):
for term in self.function.lseries(x):
yield integrate(term, *self.limits)
def _eval_nseries(self, x, n, logx):
terms, order = self.function.nseries(x, n=n, logx=logx).as_coeff_add(C.Order)
return integrate(terms, *self.limits) + Add(*order)*x
def _eval_subs(self, old, new):
"""
Substitute old with new in the integrand and the limits, but don't
change anything that is (or corresponds to) a variable of integration.
The normal substitution semantics -- traversing all arguments looking
for matching patterns -- should not be applied to the Integrals since
changing the integration variables should also entail a change in the
integration limits (which should be done with the transform method). So
this method just makes changes in the integrand and the limits.
Not all instances of a given variable are conceptually the same: the
first argument of the limit tuple and any corresponding variable in
the integrand are dummy variables while every other symbol is a symbol
that will be unchanged when the integral is evaluated. For example, in
Integral(x + a, (a, a, b))
the dummy variables are shown below with angle-brackets around them and
will not be changed by this function:
Integral(x + <a>, (<a>, a, b))
If you want to change the lower limit to 1 there is no reason to
prohibit this since it is not conceptually related to the integration
variable, <a>. Nor is there reason to disallow changing the b to 1.
If a second limit were added, however, as in:
Integral(x + a, (a, a, b), (b, 1, 2))
the dummy variables become:
Integral(x + <a>, (<a>, a, <b>), (<b>, a, b))
Note that the `b` of the first limit is now a dummy variable since `b` is a
dummy variable in the second limit.
Summary: no variable of the integrand or limit can be the target of
substitution if it appears as a variable of integration in a limit
positioned to the right of it.
>>> from sympy import Integral
>>> from sympy.abc import a, b, c, x, y
>>> i = Integral(a + x, (a, a, 3), (b, x, c))
>>> list(i.free_symbols) # only these can be changed
[x, a, c]
>>> i.subs(a, c) # note that the variable of integration is unchanged
Integral(a + x, (a, c, 3), (b, x, c))
>>> i.subs(a + x, b) == i # there is no x + a, only x + <a>
True
>>> i.subs(x, y - c)
Integral(a - c + y, (a, a, 3), (b, -c + y, c))
"""
if self == old:
return new
integrand, limits = self.function, self.limits
old_atoms = old.free_symbols
limits = list(limits)
# make limits explicit if they are to be targeted by old:
# Integral(x, x) -> Integral(x, (x, x)) if old = x
if old.is_Symbol:
for i, l in enumerate(limits):
if len(l) == 1 and l[0] == old:
limits[i] = Tuple(l[0], l[0])
dummies = set()
for i in xrange(-1, -len(limits) - 1, -1):
xab = limits[i]
if not dummies.intersection(old_atoms):
limits[i] = Tuple(xab[0],
*[l.subs(old, new) for l in xab[1:]])
dummies.add(xab[0])
if not dummies.intersection(old_atoms):
integrand = integrand.subs(old, new)
return Integral(integrand, *limits)
def as_sum(self, n, method="midpoint"):
"""
Approximates the integral by a sum.
method ... one of: left, right, midpoint
This is basically just the rectangle method [1], the only difference is
where the function value is taken in each interval.
[1] http://en.wikipedia.org/wiki/Rectangle_method
**method = midpoint**:
Uses the n-order midpoint rule to evaluate the integral.
Midpoint rule uses rectangles approximation for the given area (e.g.
definite integral) of the function with heights equal to the point on
the curve exactly in the middle of each interval (thus midpoint
method). See [1] for more information.
Examples:
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> from sympy.integrals import Integral
>>> e = Integral(sqrt(x**3+1), (x, 2, 10))
>>> e
Integral((x**3 + 1)**(1/2), (x, 2, 10))
>>> e.as_sum(4, method="midpoint")
4*7**(1/2) + 6*14**(1/2) + 4*86**(1/2) + 2*730**(1/2)
>>> e.as_sum(4, method="midpoint").n()
124.164447891310
>>> e.n()
124.616199194723
**method=left**:
Uses the n-order rectangle rule to evaluate the integral, at each
interval the function value is taken at the left hand side of the
interval.
Examples:
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> e = Integral(sqrt(x**3+1), (x, 2, 10))
>>> e
Integral((x**3 + 1)**(1/2), (x, 2, 10))
>>> e.as_sum(4, method="left")
6 + 2*65**(1/2) + 2*217**(1/2) + 6*57**(1/2)
>>> e.as_sum(4, method="left").n()
96.8853618335341
>>> e.n()
124.616199194723
"""
limits = self.limits
if len(limits) > 1:
raise NotImplementedError("Multidimensional midpoint rule not implemented yet")
else:
limit = limits[0]
if n <= 0:
raise ValueError("n must be > 0")
if n == oo:
raise NotImplementedError("Infinite summation not yet implemented")
sym, lower_limit, upper_limit = limit
dx = (upper_limit - lower_limit)/n
result = 0.
for i in range(n):
if method == "midpoint":
xi = lower_limit + i*dx + dx/2
elif method == "left":
xi = lower_limit + i*dx
elif method == "right":
xi = lower_limit + i*dx + dx
else:
raise NotImplementedError("Unknown method %s" % method)
result += self.function.subs(sym, xi)
return result*dx
@xthreaded
def integrate(*args, **kwargs):
"""integrate(f, var, ...)
Compute definite or indefinite integral of one or more variables
using Risch-Norman algorithm and table lookup. This procedure is
able to handle elementary algebraic and transcendental functions
and also a huge class of special functions, including Airy,
Bessel, Whittaker and Lambert.
var can be:
- a symbol -- indefinite integration
- a tuple (symbol, a, b) -- definite integration
Several variables can be specified, in which case the result is multiple
integration.
Also, if no var is specified at all, then the full anti-derivative of f is
returned. This is equivalent to integrating f over all its variables.
**Examples**
>>> from sympy import integrate, log
>>> from sympy.abc import a, x, y
>>> integrate(x*y, x)
x**2*y/2
>>> integrate(log(x), x)
x*log(x) - x
>>> integrate(log(x), (x, 1, a))
a*log(a) - a + 1
>>> integrate(x)
x**2/2
>>> integrate(x*y)
Traceback (most recent call last):
...
ValueError: specify integration variables to integrate x*y
Note that ``integrate(x)`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
See also the doctest of Integral._eval_integral(), which explains
thoroughly the strategy that SymPy uses for integration.
"""
integral = Integral(*args, **kwargs)
if isinstance(integral, Integral):
return integral.doit(deep = False)
else:
return integral
@xthreaded
def line_integrate(field, curve, vars):
"""line_integrate(field, Curve, variables)
Compute the line integral.
Examples
--------
>>> from sympy import Curve, line_integrate, E, ln
>>> from sympy.abc import x, y, t
>>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
>>> line_integrate(x + y, C, [x, y])
3*2**(1/2)
"""
F = sympify(field)
if not F:
raise ValueError("Expecting function specifying field as first argument.")
if not isinstance(curve, Curve):
raise ValueError("Expecting Curve entity as second argument.")
if not ordered_iter(vars):
raise ValueError("Expecting ordered iterable for variables.")
if len(curve.functions) != len(vars):
raise ValueError("Field variable size does not match curve dimension.")
if curve.parameter in vars:
raise ValueError("Curve parameter clashes with field parameters.")
# Calculate derivatives for line parameter functions
# F(r) -> F(r(t)) and finally F(r(t)*r'(t))
Ft = F
dldt = 0
for i, var in enumerate(vars):
_f = curve.functions[i]
_dn = diff(_f, curve.parameter)
# ...arc length
dldt = dldt + (_dn * _dn)
Ft = Ft.subs(var, _f)
Ft = Ft * dldt**(S(1)/2)
integral = Integral(Ft, curve.limits).doit(deep = False)
return integral
| minrk/sympy | sympy/integrals/integrals.py | Python | bsd-3-clause | 31,617 |
#! /usr/bin/env python
from zplot import *
t = table('horizontalintervals.data')
canvas = postscript('horizontalintervals.eps')
d = drawable(canvas, coord=[50,30], xrange=[0,900],
yrange=[0,t.getmax('nodes')])
axis(d, xtitle='Throughput (MB)', xauto=[0,900,300],
ytitle='Nodes', yauto=[0,t.getmax('nodes'),1])
# xlofield and xhifield specify the interval range
p = plotter()
p.horizontalintervals(d, t, yfield='nodes', xlofield='min', xhifield='max')
canvas.render()
| z-plot/z-plot | examples/basics/horizontalintervals.py | Python | bsd-3-clause | 492 |