achow101/bitcoin | contrib/devtools/test-security-check.py | mit
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for security-check.py
'''
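# Usage note (derived from the code below, not part of the original docstring):
# call_security_check() invokes security-check.py via the relative path
# './contrib/devtools/security-check.py', so this script is expected to be run
# from the repository root, with the compilers used by the individual tests
# (gcc, x86_64-w64-mingw32-gcc, clang) available on the PATH. A single test can
# be run the usual unittest way, e.g.:
#
#   python3 contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF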
import os
import subprocess
import unittest
def write_testcode(filename):
with open(filename, 'w', encoding="utf8") as f:
f.write('''
#include <stdio.h>
int main()
{
printf("the quick brown fox jumps over the lazy god\\n");
return 0;
}
''')
def clean_files(source, executable):
os.remove(source)
os.remove(executable)
def call_security_check(cc, source, executable, options):
subprocess.run([cc,source,'-o',executable] + options, check=True)
p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
return (p.returncode, p.stdout.rstrip())
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = 'gcc'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE NX RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
(1, executable+': failed separate_code'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
(0, ''))
clean_files(source, executable)
def test_PE(self):
source = 'test1.c'
executable = 'test1.exe'
cc = 'x86_64-w64-mingw32-gcc'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed HIGH_ENTROPY_VA RELOC_SECTION'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed RELOC_SECTION'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE']),
(0, ''))
clean_files(source, executable)
def test_MACHO(self):
source = 'test1.c'
executable = 'test1'
cc = 'clang'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']),
(1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS Canary CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']),
(1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']),
(1, executable+': failed PIE NOUNDEFS LAZY_BINDINGS CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']),
(1, executable+': failed PIE LAZY_BINDINGS CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']),
(1, executable+': failed PIE CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
(1, executable+': failed PIE'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
(0, ''))
clean_files(source, executable)
if __name__ == '__main__':
unittest.main()

archix/SimpleAPIBoilerplate | utils/models/meta.py | gpl-3.0
from flask import json
from sqlalchemy import not_
from datetime import datetime  # needed for the datetime.utcnow() calls in set_columns()
from extensions import db
class Model(db.Model):
"""Base SQLAlchemy Model for automatic serialization and
deserialization of columns and nested relationships.
Usage::
>>> class User(Model):
>>> id = db.Column(db.Integer(), primary_key=True)
>>> email = db.Column(db.String(), index=True)
>>> name = db.Column(db.String())
>>> password = db.Column(db.String())
>>> posts = db.relationship('Post', backref='user', lazy='dynamic')
>>> ...
>>> default_fields = ['email', 'name']
>>> hidden_fields = ['password']
>>> readonly_fields = ['email', 'password']
>>>
>>> class Post(Model):
>>> id = db.Column(db.Integer(), primary_key=True)
>>> user_id = db.Column(db.String(), db.ForeignKey('user.id'), nullable=False)
>>> title = db.Column(db.String())
>>> ...
>>> default_fields = ['title']
>>> readonly_fields = ['user_id']
>>>
>>> model = User(email='john@localhost')
>>> db.session.add(model)
>>> db.session.commit()
>>>
>>> # update name and create a new post
>>> validated_input = {'name': 'John', 'posts': [{'title':'My First Post'}]}
>>> model.set_columns(**validated_input)
>>> db.session.commit()
>>>
>>> print(model.to_dict(show=['password', 'posts']))
>>> {u'email': u'john@localhost',
>>> u'posts': [{u'id': 1, u'title': u'My First Post'}],
>>> u'name': u'John', u'id': 1}
"""
__abstract__ = True
# Stores changes made to this model's attributes. Can be retrieved
# with model.changes
_changes = {}
def __init__(self, **kwargs):
kwargs['_force'] = True
self._set_columns(**kwargs)
def _set_columns(self, **kwargs):
force = kwargs.get('_force')
readonly = []
if hasattr(self, 'readonly_fields'):
readonly = self.readonly_fields
if hasattr(self, 'hidden_fields'):
readonly += self.hidden_fields
readonly += [
'id',
'created',
'updated',
'modified',
'created_at',
'updated_at',
'modified_at',
]
changes = {}
columns = self.__table__.columns.keys()
relationships = self.__mapper__.relationships.keys()
for key in columns:
allowed = True if force or key not in readonly else False
exists = True if key in kwargs else False
if allowed and exists:
val = getattr(self, key)
if val != kwargs[key]:
changes[key] = {'old': val, 'new': kwargs[key]}
setattr(self, key, kwargs[key])
for rel in relationships:
allowed = True if force or rel not in readonly else False
exists = True if rel in kwargs else False
if allowed and exists:
is_list = self.__mapper__.relationships[rel].uselist
if is_list:
valid_ids = []
query = getattr(self, rel)
cls = self.__mapper__.relationships[rel].argument()
for item in kwargs[rel]:
if 'id' in item and query.filter_by(id=item['id']).limit(1).count() == 1:
obj = cls.query.filter_by(id=item['id']).first()
col_changes = obj.set_columns(**item)
if col_changes:
col_changes['id'] = str(item['id'])
if rel in changes:
changes[rel].append(col_changes)
else:
changes.update({rel: [col_changes]})
valid_ids.append(str(item['id']))
else:
col = cls()
col_changes = col.set_columns(**item)
query.append(col)
db.session.flush()
if col_changes:
col_changes['id'] = str(col.id)
if rel in changes:
changes[rel].append(col_changes)
else:
changes.update({rel: [col_changes]})
valid_ids.append(str(col.id))
# delete related rows that were not in kwargs[rel]
for item in query.filter(not_(cls.id.in_(valid_ids))).all():
col_changes = {
'id': str(item.id),
'deleted': True,
}
if rel in changes:
changes[rel].append(col_changes)
else:
changes.update({rel: [col_changes]})
db.session.delete(item)
else:
val = getattr(self, rel)
if self.__mapper__.relationships[rel].query_class is not None:
if val is not None:
col_changes = val.set_columns(**kwargs[rel])
if col_changes:
changes.update({rel: col_changes})
else:
if val != kwargs[rel]:
setattr(self, rel, kwargs[rel])
changes[rel] = {'old': val, 'new': kwargs[rel]}
return changes
def set_columns(self, **kwargs):
self._changes = self._set_columns(**kwargs)
if 'modified' in self.__table__.columns:
self.modified = datetime.utcnow()
if 'updated' in self.__table__.columns:
self.updated = datetime.utcnow()
if 'modified_at' in self.__table__.columns:
self.modified_at = datetime.utcnow()
if 'updated_at' in self.__table__.columns:
self.updated_at = datetime.utcnow()
return self._changes
@property
def changes(self):
return self._changes
def reset_changes(self):
self._changes = {}
def to_dict(self, show=None, hide=None, path=None, show_all=None):
""" Return a dictionary representation of this model.
"""
if not show:
show = []
if not hide:
hide = []
hidden = []
if hasattr(self, 'hidden_fields'):
hidden = self.hidden_fields
default = []
if hasattr(self, 'default_fields'):
default = self.default_fields
ret_data = {}
if not path:
path = self.__tablename__.lower()
def prepend_path(item):
item = item.lower()
if item.split('.', 1)[0] == path:
return item
if len(item) == 0:
return item
if item[0] != '.':
item = '.%s' % item
item = '%s%s' % (path, item)
return item
show[:] = [prepend_path(x) for x in show]
hide[:] = [prepend_path(x) for x in hide]
columns = self.__table__.columns.keys()
relationships = self.__mapper__.relationships.keys()
properties = dir(self)
for key in columns:
check = '%s.%s' % (path, key)
if check in hide or key in hidden:
continue
if show_all or key == 'id' or check in show or key in default:
ret_data[key] = getattr(self, key)
for key in relationships:
check = '%s.%s' % (path, key)
if check in hide or key in hidden:
continue
if show_all or check in show or key in default:
hide.append(check)
is_list = self.__mapper__.relationships[key].uselist
if is_list:
ret_data[key] = []
for item in getattr(self, key):
ret_data[key].append(item.to_dict(
show=show,
hide=hide,
path=('%s.%s' % (path, key.lower())),
show_all=show_all,
))
else:
if self.__mapper__.relationships[key].query_class is not None:
if getattr(self, key, None):
ret_data[key] = getattr(self, key).to_dict(
show=show,
hide=hide,
path=('%s.%s' % (path, key.lower())),
show_all=show_all,
)
else:
ret_data[key] = getattr(self, key, None)
for key in list(set(properties) - set(columns) - set(relationships)):
if key.startswith('_'):
continue
check = '%s.%s' % (path, key)
if check in hide or key in hidden:
continue
if show_all or check in show or key in default:
val = getattr(self, key)
try:
ret_data[key] = json.loads(json.dumps(val))
except:
pass
return ret_data

jforge/brick-mqtt-proxy | brick-mqtt-proxy.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Brick MQTT Proxy
Copyright (C) 2015-2017 Matthias Bolte <[email protected]>
Copyright (C) 2017 Ishraq Ibne Ashraf <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
BRICKD_HOST = 'localhost'
BRICKD_PORT = 4223
BROKER_HOST = 'localhost'
BROKER_PORT = 1883 # 8883 for TLS
GLOBAL_TOPIC_PREFIX = 'tinkerforge/'
UPDATE_INTERVAL = 3.0 # seconds
ENUMERATE_INTERVAL = 15.0 # seconds
import argparse
import json
import sys
import time
import threading
import logging
import paho.mqtt.client as mqtt # pip install paho-mqtt
from tinkerforge.ip_connection import IPConnection, Error
# Bricks
from tinkerforge.brick_dc import BrickDC
from tinkerforge.brick_imu import BrickIMU
from tinkerforge.brick_imu_v2 import BrickIMUV2
from tinkerforge.brick_master import BrickMaster
from tinkerforge.brick_servo import BrickServo
from tinkerforge.brick_silent_stepper import BrickSilentStepper
from tinkerforge.brick_stepper import BrickStepper
# Bricklets
from tinkerforge.bricklet_accelerometer import BrickletAccelerometer
from tinkerforge.bricklet_ambient_light import BrickletAmbientLight
from tinkerforge.bricklet_ambient_light_v2 import BrickletAmbientLightV2
from tinkerforge.bricklet_analog_in import BrickletAnalogIn
from tinkerforge.bricklet_analog_in_v2 import BrickletAnalogInV2
from tinkerforge.bricklet_analog_out import BrickletAnalogOut
from tinkerforge.bricklet_analog_out_v2 import BrickletAnalogOutV2
from tinkerforge.bricklet_analog_in_v3 import BrickletAnalogInV3
from tinkerforge.bricklet_barometer import BrickletBarometer
from tinkerforge.bricklet_can import BrickletCAN
from tinkerforge.bricklet_co2 import BrickletCO2
from tinkerforge.bricklet_color import BrickletColor
from tinkerforge.bricklet_current12 import BrickletCurrent12
from tinkerforge.bricklet_current25 import BrickletCurrent25
from tinkerforge.bricklet_distance_ir import BrickletDistanceIR
from tinkerforge.bricklet_distance_us import BrickletDistanceUS
from tinkerforge.bricklet_dmx import BrickletDMX
from tinkerforge.bricklet_dual_button import BrickletDualButton
from tinkerforge.bricklet_dual_relay import BrickletDualRelay
from tinkerforge.bricklet_dust_detector import BrickletDustDetector
from tinkerforge.bricklet_gps import BrickletGPS
from tinkerforge.bricklet_gps_v2 import BrickletGPSV2
from tinkerforge.bricklet_hall_effect import BrickletHallEffect
from tinkerforge.bricklet_humidity import BrickletHumidity
from tinkerforge.bricklet_humidity_v2 import BrickletHumidityV2
from tinkerforge.bricklet_industrial_analog_out import BrickletIndustrialAnalogOut
from tinkerforge.bricklet_industrial_digital_in_4 import BrickletIndustrialDigitalIn4
from tinkerforge.bricklet_industrial_digital_out_4 import BrickletIndustrialDigitalOut4
from tinkerforge.bricklet_industrial_dual_0_20ma import BrickletIndustrialDual020mA
from tinkerforge.bricklet_industrial_dual_analog_in import BrickletIndustrialDualAnalogIn
from tinkerforge.bricklet_industrial_quad_relay import BrickletIndustrialQuadRelay
from tinkerforge.bricklet_io16 import BrickletIO16
from tinkerforge.bricklet_io4 import BrickletIO4
from tinkerforge.bricklet_joystick import BrickletJoystick
from tinkerforge.bricklet_laser_range_finder import BrickletLaserRangeFinder
from tinkerforge.bricklet_lcd_16x2 import BrickletLCD16x2
from tinkerforge.bricklet_lcd_20x4 import BrickletLCD20x4
from tinkerforge.bricklet_led_strip import BrickletLEDStrip
from tinkerforge.bricklet_led_strip_v2 import BrickletLEDStripV2
from tinkerforge.bricklet_line import BrickletLine
from tinkerforge.bricklet_linear_poti import BrickletLinearPoti
from tinkerforge.bricklet_load_cell import BrickletLoadCell
from tinkerforge.bricklet_moisture import BrickletMoisture
from tinkerforge.bricklet_motion_detector import BrickletMotionDetector
from tinkerforge.bricklet_motion_detector_v2 import BrickletMotionDetectorV2
from tinkerforge.bricklet_motorized_linear_poti import BrickletMotorizedLinearPoti
from tinkerforge.bricklet_multi_touch import BrickletMultiTouch
from tinkerforge.bricklet_nfc_rfid import BrickletNFCRFID
from tinkerforge.bricklet_oled_128x64 import BrickletOLED128x64
from tinkerforge.bricklet_oled_64x48 import BrickletOLED64x48
from tinkerforge.bricklet_piezo_buzzer import BrickletPiezoBuzzer
from tinkerforge.bricklet_piezo_speaker import BrickletPiezoSpeaker
from tinkerforge.bricklet_outdoor_weather import BrickletOutdoorWeather
from tinkerforge.bricklet_ptc import BrickletPTC
from tinkerforge.bricklet_real_time_clock import BrickletRealTimeClock
from tinkerforge.bricklet_remote_switch import BrickletRemoteSwitch
from tinkerforge.bricklet_remote_switch_v2 import BrickletRemoteSwitchV2
from tinkerforge.bricklet_rgb_led import BrickletRGBLED
from tinkerforge.bricklet_rgb_led_button import BrickletRGBLEDButton
from tinkerforge.bricklet_rgb_led_matrix import BrickletRGBLEDMatrix
from tinkerforge.bricklet_rotary_encoder import BrickletRotaryEncoder
from tinkerforge.bricklet_rotary_encoder_v2 import BrickletRotaryEncoderV2
from tinkerforge.bricklet_rotary_poti import BrickletRotaryPoti
from tinkerforge.bricklet_rs232 import BrickletRS232
from tinkerforge.bricklet_rs485 import BrickletRS485
from tinkerforge.bricklet_segment_display_4x7 import BrickletSegmentDisplay4x7
from tinkerforge.bricklet_solid_state_relay import BrickletSolidStateRelay
from tinkerforge.bricklet_solid_state_relay_v2 import BrickletSolidStateRelayV2
from tinkerforge.bricklet_sound_intensity import BrickletSoundIntensity
from tinkerforge.bricklet_temperature import BrickletTemperature
from tinkerforge.bricklet_temperature_ir import BrickletTemperatureIR
from tinkerforge.bricklet_temperature_ir_v2 import BrickletTemperatureIRV2
from tinkerforge.bricklet_thermal_imaging import BrickletThermalImaging
from tinkerforge.bricklet_thermocouple import BrickletThermocouple
from tinkerforge.bricklet_tilt import BrickletTilt
from tinkerforge.bricklet_uv_light import BrickletUVLight
from tinkerforge.bricklet_voltage import BrickletVoltage
from tinkerforge.bricklet_voltage_current import BrickletVoltageCurrent
class Getter(object):
def __init__(self, proxy, getter_name, parameters, topic_suffix, result_name):
self.proxy = proxy
self.getter = getattr(proxy.device, getter_name)
self.parameters = parameters
self.topic_suffix = topic_suffix
self.result_name = result_name
self.last_result = None
def update(self):
try:
if self.parameters == None:
result = self.getter()
elif isinstance(self.parameters, tuple):
result = self.getter(*self.parameters)
else: # dict
result = {}
for key, value in self.parameters.items():
try:
result[key] = self.getter(*value)
except Error as e:
if e.value in [Error.INVALID_PARAMETER, Error.NOT_SUPPORTED]:
result[key] = None
else:
raise
except Exception as e:
result = self.last_result
if result != None and result != self.last_result:
payload = {}
if isinstance(result, dict):
for key, value in result.items():
payload[key] = {}
if isinstance(value, tuple) and hasattr(value, '_fields'): # assume it is a namedtuple
for field in value._fields:
payload[key][field] = getattr(value, field)
elif value == None:
payload[key] = value
else:
payload[key][self.result_name] = value
elif isinstance(result, tuple) and hasattr(result, '_fields'): # assume it is a namedtuple
for field in result._fields:
payload[field] = getattr(result, field)
else:
payload[self.result_name] = result
self.proxy.publish_values(self.topic_suffix, **payload)
self.last_result = result
class Setter(object):
def __init__(self, proxy, setter_name, topic_suffix, parameter_names, getter_info = None):
self.setter = None
self.getter = None
self.proxy = proxy
self.getter_info = getter_info
self.topic_suffix = topic_suffix
self.parameter_names = parameter_names
if setter_name != None:
self.setter = getattr(self.proxy.device, setter_name)
if getter_info != None:
self.getter = getattr(self.proxy.device, self.getter_info['getter_name'])
def handle_message(self, payload):
args = []
for parameter_name in self.parameter_names:
try:
args.append(payload[parameter_name])
except:
return
if self.getter_info == None:
try:
self.setter(*tuple(args))
except:
pass
else:
payload = {}
try:
result = self.getter(*tuple(args))
if isinstance(result, tuple) and hasattr(result, '_fields'): # assume it is a namedtuple
for field in result._fields:
payload[field] = getattr(result, field)
else:
payload[self.getter_info['getter_return_value']] = result
self.proxy.publish_values(self.getter_info['getter_publish_topic'], **payload)
except:
pass
class DeviceProxy(object):
GETTER_SPECS = []
SETTER_SPECS = []
EXTRA_SUBSCRIPTIONS = []
def __init__(self, uid, connected_uid, position, hardware_version, firmware_version,
ipcon, client, update_interval, global_topic_prefix):
self.timestamp = time.time()
self.uid = uid
self.connected_uid = connected_uid
self.position = position
self.hardware_version = hardware_version
self.firmware_version = firmware_version
self.ipcon = ipcon
self.client = client
self.device = self.DEVICE_CLASS(uid, ipcon)
self.topic_prefix = '{0}/{1}/'.format(self.TOPIC_PREFIX, uid)
self.getters = []
self.setters = {}
self.global_topic_prefix = global_topic_prefix
self.update_interval = 0 # seconds
self.update_timer = None
self.update_timer_lock = threading.Lock()
for getter_spec in self.GETTER_SPECS:
self.getters.append(Getter(self, *getter_spec))
for setter_spec in self.SETTER_SPECS:
self.setters[setter_spec[1]] = Setter(self, *setter_spec)
self.subscribe(self.topic_prefix + setter_spec[1])
for topic_suffix in self.EXTRA_SUBSCRIPTIONS:
self.subscribe(self.topic_prefix + topic_suffix)
self.subscribe(self.topic_prefix + '_update_interval/set')
try:
self.setup_callbacks()
except:
logging.exception('Exception during setup_callbacks call')
self.update_getters()
self.set_update_interval(update_interval)
def handle_message(self, topic_suffix, payload):
if topic_suffix == '_update_interval/set':
try:
self.set_update_interval(float(payload['_update_interval']))
except:
pass
elif topic_suffix in self.setters:
self.setters[topic_suffix].handle_message(payload)
else:
try:
self.handle_extra_message(topic_suffix, payload)
except:
logging.exception('Exception during handle_extra_message call')
self.update_getters()
def handle_extra_message(self, topic_suffix, payload): # to be implemented by subclasses
pass
def publish_as_json(self, topic, payload, *args, **kwargs):
self.client.publish(self.global_topic_prefix + topic,
json.dumps(payload, separators=(',', ':')),
*args, **kwargs)
def publish_values(self, topic_suffix, **kwargs):
payload = {'_timestamp': time.time()}
for key, value in kwargs.items():
payload[key] = value
self.publish_as_json(self.topic_prefix + topic_suffix, payload, retain=True)
def set_update_interval(self, update_interval): # in seconds
update_timer = None
with self.update_timer_lock:
update_timer = self.update_timer
self.update_timer = None
if update_timer != None:
update_timer.cancel()
if self.update_interval != update_interval:
self.publish_values('_update_interval', _update_interval=float(update_interval))
with self.update_timer_lock:
self.update_interval = update_interval
if self.update_timer == None and self.update_interval > 0:
self.update_timer = threading.Timer(self.update_interval, self.update)
self.update_timer.start()
def update_getters(self):
for getter in self.getters:
getter.update()
try:
self.update_extra_getters()
except:
logging.exception('Exception during update_extra_getters call')
def update_extra_getters(self): # to be implemented by subclasses
pass
def update(self):
with self.update_timer_lock:
if self.update_timer == None:
return
self.update_timer = None
self.update_getters()
with self.update_timer_lock:
if self.update_timer == None and self.update_interval > 0:
self.update_timer = threading.Timer(self.update_interval, self.update)
self.update_timer.start()
def setup_callbacks(self): # to be implemented by subclasses
pass
def get_enumerate_entry(self):
return {'_timestamp': self.timestamp,
'uid': self.uid,
'connected_uid': self.connected_uid,
'position': self.position,
'hardware_version': self.hardware_version,
'firmware_version': self.firmware_version,
'device_identifier': self.DEVICE_CLASS.DEVICE_IDENTIFIER}
def subscribe(self, topic_suffix):
topic = self.global_topic_prefix + topic_suffix
logging.debug('Subscribing to ' + topic)
self.client.subscribe(topic)
def unsubscribe(self, topic_suffix):
topic = self.global_topic_prefix + topic_suffix
logging.debug('Unsubscribing from ' + topic)
self.client.unsubscribe(topic)
def destroy(self):
self.set_update_interval(0)
for setter_spec in self.SETTER_SPECS:
self.unsubscribe(self.topic_prefix + setter_spec[1])
for topic_suffix in self.EXTRA_SUBSCRIPTIONS:
self.unsubscribe(self.topic_prefix + topic_suffix)
self.unsubscribe(self.topic_prefix + '_update_interval/set')
#
# DeviceProxy is the base class for all Brick and Bricklet MQTT handling. The
# DeviceProxy class expects subclasses to define several members:
#
# - DEVICE_CLASS (required): This is the Brick or Bricklet API bindings class.
# The DeviceProxy automatically creates an instance of this class that can be
# accessed via self.device in subclasses.
#
# - TOPIC_PREFIX (required): The MQTT topic prefix used for this DeviceProxy
# subclass. All messages published by this DeviceProxy to any topic suffix
# will automatically be prefixed with the topic prefix and the UID of the
# represented device:
#
# tinkerforge/<topic-prefix>/<uid>/<topic-suffix>
#
# Also all subscriptions for any topic suffix will automatically be prefixed
# with the same topic prefix.
#
# - GETTER_SPECS (optional): A list of Brick or Bricklet getter specifications.
# The DeviceProxy instance automatically calls the specified getter with the
# configured update interval on self.device. If the returned value changed
# since the last call then the new value is published as a retained message
# with a JSON payload that is formatted according to the getter specification.
# Each getter specification is a 4-tuple:
#
# (<getter-name>, <parameters>, <topic-suffix>, <value-name>)
#
# If the getter returns a single value, then the value name is used as key
# in the JSON payload. If the getter does not return a single value then it
# returns a namedtuple instead. The DeviceProxy instance automatically uses
# the field names of the namedtuple as keys in the JSON payload. In this case
# the value name in the getter specification is ignored and should be set to
# None.
#
# - update_extra_getters (optional): A bound function taking no arguments. This
# can be used to implement things that don't fit into a getter specification.
# The DeviceProxy instance will automatically call this function with the
# configured update interval. Inside this function the publish_values function
# of the DeviceProxy class can be used to publish a dict formatted as JSON to
# a specified topic suffix.
#
# - SETTER_SPECS (optional): A list of Brick or Bricklet setter specifications.
# The DeviceProxy instance automatically subscribes to the specified topics
# and handles messages with JSON payloads that contain key-value pairs
# according to the specified format. Each setter specification is a 3-tuple:
#
# (<setter-name>, <topic-suffix>, [<parameter-name>, ...])
#
# If the setter has no parameters then the third item in the tuple can be an
# empty list. Otherwise it has to be a list of strings specifying parameter
# names for the setter. The DeviceProxy instance looks for keys in the JSON
# payload that match the specified parameter names. If a value is found for
# each parameter then the specified setter is called on self.device with the
# arguments from the JSON payload.
#
# Getters which require input arguments are provided with the arguments through
# the setter mechanism. In that case the setter specification is as follows:
#
# (None, <topic-suffix>, [<parameter-name>, ...], {<getter-info>})
#
# In this case the dictionary getter-info has the following fields:
#
# getter_name = Name of the getter to call. This getter will be called with
# the arguments as received on the <topic-suffix> topic as
# described above.
#
# getter_publish_topic = Name of the topic to which the getter return values
# are to be published.
#
# getter_return_value = Fields of the getter return value. If the getter returns
# more than one value then this field should be "None".
# Otherwise the return value field name must be specified
# as a string.
#
# - EXTRA_SUBSCRIPTIONS (optional): A list of additional topic suffixes. This
# can be used to implement things that don't fit into a setter specification.
# The DeviceProxy instance automatically subscribes to the specified topics
# and handles messages with JSON payloads. The payload is decoded as JSON and
# passed to the bound handle_extra_message function.
#
# - handle_extra_message (optional): A bound function taking two arguments: the
# topic suffix as str and the decoded JSON payload as dict.
#
# - setup_callbacks (optional): A bound function taking no arguments. This can
# be used to deal with callbacks such as the button pressed/released callbacks
# of the LCD 20x4 Bricklet. Only callbacks without configuration should be
# used, because the configuration is global and could interfere with other
# user programs.
#
# To add a new DeviceProxy subclass implement it according to the description
# above. The Proxy class will automatically pick up all DeviceProxy subclasses
# and use them.
#
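# Concrete illustration (the UID "XYZ" below is made up, everything else follows
# from the BrickDCProxy specification that comes next): for a DC Brick with UID
# "XYZ" the proxy publishes retained JSON messages such as
#
#   topic:   tinkerforge/brick/dc/XYZ/velocity
#   payload: {"_timestamp": 1500000000.0, "velocity": 20000}
#
# and reacts to messages published by the user, e.g.
#
#   topic:   tinkerforge/brick/dc/XYZ/velocity/set
#   payload: {"velocity": 20000}
#
# which results in a set_velocity(20000) call on the device. The polling interval
# of a single device can be changed by publishing {"_update_interval": 1.0} to
# tinkerforge/brick/dc/XYZ/_update_interval/set.
#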
class BrickDCProxy(DeviceProxy):
DEVICE_CLASS = BrickDC
TOPIC_PREFIX = 'brick/dc'
GETTER_SPECS = [('get_velocity', None, 'velocity', 'velocity'),
('get_current_velocity', None, 'current_velocity', 'velocity'),
('get_acceleration', None, 'acceleration', 'acceleration'),
('is_enabled', None, 'enabled', 'enabled'),
('get_pwm_frequency', None, 'pwm_frequency', 'frequency'),
('get_stack_input_voltage', None, 'stack_input_voltage', 'voltage'),
('get_external_input_voltage', None, 'external_input_voltage', 'voltage'),
('get_current_consumption', None, 'current_consumption', 'current'),
('get_drive_mode', None, 'drive_mode', 'mode'),
('is_status_led_enabled', None, 'status_led_enabled', 'enabled'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('set_velocity', 'velocity/set', ['velocity']),
('set_acceleration', 'acceleration/set', ['acceleration']),
('full_brake', 'full_brake/set', []),
('enable', 'enable/set', []),
('disable', 'disable/set', []),
('set_pwm_frequency', 'pwm_frequency/set', ['frequency']),
('set_drive_mode', 'drive_mode/set', ['mode']),
('enable_status_led', 'enable_status_led/set', []),
('disable_status_led', 'disable_status_led/set', []),
('reset', 'reset/set', [])]
class BrickIMUProxy(DeviceProxy):
DEVICE_CLASS = BrickIMU
TOPIC_PREFIX = 'brick/imu'
GETTER_SPECS = [('get_orientation', None, 'orientation', None),
('get_quaternion', None, 'quaternion', None),
('are_leds_on', None, 'leds_on', 'leds_on'),
('get_convergence_speed', None, 'convergence_speed', 'speed'),
('get_acceleration', None, 'acceleration', None),
('get_magnetic_field', None, 'magnetic_field', None),
('get_angular_velocity', None, 'angular_velocity', None),
('get_all_data', None, 'all_data', None),
('get_imu_temperature', None, 'imu_temperature', 'temperature'),
('get_acceleration_range', None, 'acceleration_range', 'range'),
('get_magnetometer_range', None, 'magnetometer_range', 'range'),
('get_calibration',
{
'accelerometer_gain': (BrickIMU.CALIBRATION_TYPE_ACCELEROMETER_GAIN,),
'accelerometer_bias': (BrickIMU.CALIBRATION_TYPE_ACCELEROMETER_BIAS,),
'magnetometer_gain': (BrickIMU.CALIBRATION_TYPE_MAGNETOMETER_GAIN,),
'magnetometer_bias': (BrickIMU.CALIBRATION_TYPE_MAGNETOMETER_BIAS,),
'gyroscope_gain': (BrickIMU.CALIBRATION_TYPE_GYROSCOPE_GAIN,),
'gyroscope_bias': (BrickIMU.CALIBRATION_TYPE_GYROSCOPE_BIAS,)
},
'calibration', 'data'
),
('is_orientation_calculation_on', None, 'orientation_calculation_on', 'orientation_calculation_on'),
('is_status_led_enabled', None, 'status_led_enabled', 'enabled'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('leds_on', 'leds_on/set', []),
('leds_off', 'leds_off/set', []),
('set_convergence_speed', 'convergence_speed/set', ['speed']),
('set_acceleration_range', 'acceleration_range/set', ['range']),
('set_magnetometer_range', 'magnetometer_range/set', ['range']),
('set_calibration', 'calibration/set', ['typ', 'data']),
('orientation_calculation_on', 'orientation_calculation_on/set', []),
('orientation_calculation_off', 'orientation_calculation_off/set', []),
('enable_status_led', 'enable_status_led/set', []),
('disable_status_led', 'disable_status_led/set', []),
('reset', 'reset/set', [])]
class BrickIMUV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickIMUV2
TOPIC_PREFIX = 'brick/imu_v2'
GETTER_SPECS = [('get_orientation', None, 'orientation', None),
('get_linear_acceleration', None, 'linear_acceleration', None),
('get_gravity_vector', None, 'gravity_vector', None),
('get_quaternion', None, 'quaternion', None),
('get_all_data', None, 'all_data', None),
('are_leds_on', None, 'leds_on', 'leds'),
('get_acceleration', None, 'acceleration', None),
('get_magnetic_field', None, 'magnetic_field', None),
('get_angular_velocity', None, 'angular_velocity', None),
('get_temperature', None, 'temperature', 'temperature'),
('get_sensor_configuration', None, 'sensor_configuration', None),
('get_sensor_fusion_mode', None, 'sensor_fusion_mode', 'mode'),
('is_status_led_enabled', None, 'status_led_enabled', 'enabled'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('leds_on', 'leds_on/set', []),
('leds_off', 'leds_off/set', []),
('set_sensor_configuration', 'sensor_configuration/set', ['magnetometer_rate', 'gyroscope_range', 'gyroscope_bandwidth', 'accelerometer_range', 'accelerometer_bandwidth']),
('set_sensor_fusion_mode', 'sensor_fusion_mode/set', ['mode']),
('enable_status_led', 'enable_status_led/set', []),
('disable_status_led', 'disable_status_led/set', []),
('reset', 'reset/set', [])]
class BrickMasterProxy(DeviceProxy):
DEVICE_CLASS = BrickMaster
TOPIC_PREFIX = 'brick/master'
GETTER_SPECS = [('get_stack_voltage', None, 'stack_voltage', 'voltage'),
('get_stack_current', None, 'stack_current', 'current'),
('get_usb_voltage', None, 'usb_voltage', 'voltage'),
('get_connection_type', None, 'connection_type', 'connection_type'),
('is_status_led_enabled', None, 'status_led_enabled', 'enabled'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('enable_status_led', 'enable_status_led/set', []),
('disable_status_led', 'disable_status_led/set', []),
('reset', 'reset/set', [])]
class BrickServoProxy(DeviceProxy):
DEVICE_CLASS = BrickServo
TOPIC_PREFIX = 'brick/servo'
GETTER_SPECS = [('is_enabled', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'enabled', 'enabled'),
('get_position', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'position', 'position'),
('get_current_position', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'current_position', 'position'),
('get_velocity', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'velocity', 'velocity'),
('get_current_velocity', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'current_velocity', 'velocity'),
('get_acceleration', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'acceleration', 'acceleration'),
('get_output_voltage', None, 'output_voltage', 'voltage'),
('get_pulse_width', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'pulse_width', None),
('get_degree', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'degree', None),
('get_period', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'period', 'period'),
('get_servo_current', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,)}, 'current', 'current'),
('get_overall_current', None, 'overall_current', 'current'),
('get_stack_input_voltage', None, 'stack_input_voltage', 'voltage'),
('get_external_input_voltage', None, 'external_input_voltage', 'voltage'),
('is_status_led_enabled', None, 'status_led_enabled', 'enabled'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('enable', 'enable/set', ['servo_num']),
('disable', 'disable/set', ['servo_num']),
('set_position', 'position/set', ['servo_num', 'position']),
('set_velocity', 'velocity/set', ['servo_num', 'velocity']),
('set_acceleration', 'acceleration/set', ['servo_num', 'acceleration']),
('set_output_voltage', 'output_voltage/set', ['voltage']),
('set_pulse_width', 'pulse_width/set', ['servo_num', 'min', 'max']),
('set_degree', 'degree/set', ['servo_num', 'min', 'max']),
('set_period', 'period/set', ['servo_num', 'period']),
('enable_status_led', 'enable_status_led/set', []),
('disable_status_led', 'disable_status_led/set', []),
('reset', 'reset/set', [])]
class BrickSilentStepperProxy(DeviceProxy):
DEVICE_CLASS = BrickSilentStepper
TOPIC_PREFIX = 'brick/silent_stepper'
GETTER_SPECS = [('get_max_velocity', None, 'max_velocity', 'velocity'),
('get_current_velocity', None, 'current_velocity', 'velocity'),
('get_speed_ramping', None, 'speed_ramping', None),
('get_steps', None, 'steps', 'steps'),
('get_remaining_steps', None, 'remaining_steps', 'steps'),
('get_motor_current', None, 'motor_current', 'current'),
('is_enabled', None, 'enabled', 'enabled'),
('get_basic_configuration', None, 'basic_configuration', None),
('get_current_position', None, 'current_position', 'position'),
('get_target_position', None, 'target_position', 'position'),
('get_step_configuration', None, 'step_configuration', None),
('get_stack_input_voltage', None, 'stack_input_voltage', 'voltage'),
('get_external_input_voltage', None, 'external_input_voltage', 'voltage'),
('get_spreadcycle_configuration', None, 'spreadcycle_configuration', None),
('get_stealth_configuration', None, 'stealth_configuration', None),
('get_coolstep_configuration', None, 'coolstep_configuration', None),
('get_misc_configuration', None, 'misc_configuration', None),
('get_driver_status', None, 'driver_status', None),
('get_time_base', None, 'time_base', None),
('get_all_data', None, 'all_data', None),
('is_status_led_enabled', None, 'status_led_enabled', 'enabled'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('set_max_velocity', 'max_velocity/set', ['velocity']),
('set_speed_ramping', 'speed_ramping/set', ['acceleration', 'deacceleration']),
('full_brake', 'full_brake/set', []),
('set_steps', 'steps/set', ['steps']),
('drive_forward', 'drive_forward/set', []),
('drive_backward', 'drive_backward/set', []),
('stop', 'stop/set', []),
('set_motor_current', 'motor_current/set', ['current']),
('enable', 'enable/set', []),
('disable', 'disable/set', []),
('set_basic_configuration', 'basic_configuration/set', ['standstill_current', 'motor_run_current', 'standstill_delay_time', 'power_down_time', 'stealth_threshold', 'coolstep_threshold', 'classic_threshold', 'high_velocity_chopper_mode']),
('set_current_position', 'current_position/set', ['position']),
('set_target_position', 'target_position/set', ['position']),
('set_step_configuration', 'step_configuration/set', ['step_resolution', 'interpolation']),
('set_spreadcycle_configuration', 'spreadcycle_configuration/set', ['slow_decay_duration', 'enable_random_slow_decay', 'fast_decay_duration', 'hysteresis_start_value', 'hysteresis_end_value', 'sine_wave_offset', 'chopper_mode', 'comparator_blank_time', 'fast_decay_without_comparator']),
('set_stealth_configuration', 'stealth_configuration/set', ['enable_stealth', 'amplitude', 'gradient', 'enable_autoscale', 'force_symmetric', 'freewheel_mode']),
('set_coolstep_configuration', 'coolstep_configuration/set', ['minimum_stallguard_value', 'maximum_stallguard_value', 'current_up_step_width', 'current_down_step_width', 'minimum_current', 'stallguard_threshold_value', 'stallguard_mode']),
('set_misc_configuration', 'misc_configuration/set', ['disable_short_to_ground_protection', 'synchronize_phase_frequency']),
('set_time_base', 'time_base/set', ['time_base']),
('enable_status_led', 'enable_status_led/set', []),
('disable_status_led', 'disable_status_led/set', []),
('reset', 'reset/set', [])]
class BrickStepperProxy(DeviceProxy):
DEVICE_CLASS = BrickStepper
TOPIC_PREFIX = 'brick/stepper'
GETTER_SPECS = [('get_max_velocity', None, 'max_velocity', 'velocity'),
('get_current_velocity', None, 'current_velocity', 'velocity'),
('get_speed_ramping', None, 'speed_ramping', None),
('get_steps', None, 'steps', 'steps'),
('get_remaining_steps', None, 'remaining_steps', 'steps'),
('get_motor_current', None, 'motor_current', 'current'),
('is_enabled', None, 'enabled', 'enabled'),
('get_current_position', None, 'current_position', 'position'),
('get_target_position', None, 'target_position', 'position'),
('get_step_mode', None, 'step_mode', 'mode'),
('get_stack_input_voltage', None, 'stack_input_voltage', 'voltage'),
('get_external_input_voltage', None, 'external_input_voltage', 'voltage'),
('get_current_consumption', None, 'current_consumption', 'current'),
('get_decay', None, 'decay', 'decay'),
('is_sync_rect', None, 'sync_rect', 'sync_rect'),
('get_time_base', None, 'time_base', 'time_base'),
('get_all_data', None, 'all_data', None),
('is_status_led_enabled', None, 'status_led_enabled', 'enabled'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('set_max_velocity', 'max_velocity/set', ['velocity']),
('set_speed_ramping', 'speed_ramping/set', ['acceleration', 'deacceleration']),
('full_brake', 'full_brake/set', []),
('set_steps', 'steps/set', ['steps']),
('drive_forward', 'drive_forward/set', []),
('drive_backward', 'drive_backward/set', []),
('stop', 'stop/set', []),
('set_motor_current', 'motor_current/set', ['current']),
('enable', 'enable/set', []),
('disable', 'disable/set', []),
('set_current_position', 'current_position/set', ['position']),
('set_target_position', 'target_position/set', ['position']),
('set_step_mode', 'step_mode/set', ['mode']),
('set_decay', 'decay/set', ['decay']),
('set_sync_rect', 'sync_rect/set', ['sync_rect']),
('set_time_base', 'time_base/set', ['time_base']),
('enable_status_led', 'enable_status_led/set', []),
('disable_status_led', 'disable_status_led/set', []),
('reset', 'reset/set', [])]
class BrickletAccelerometerProxy(DeviceProxy):
DEVICE_CLASS = BrickletAccelerometer
TOPIC_PREFIX = 'bricklet/accelerometer'
GETTER_SPECS = [('get_acceleration', None, 'acceleration', None),
('get_temperature', None, 'temperature', 'temperature'),
('get_configuration', None, 'configuration', None),
('is_led_on', None, 'led_on', 'on')]
SETTER_SPECS = [('set_configuration', 'configuration/set', ['data_rate', 'full_scale', 'filter_bandwidth']),
('led_on', 'led_on/set', []),
('led_off', 'led_off/set', [])]
# FIXME: expose analog_value getter?
class BrickletAmbientLightProxy(DeviceProxy):
DEVICE_CLASS = BrickletAmbientLight
TOPIC_PREFIX = 'bricklet/ambient_light'
GETTER_SPECS = [('get_illuminance', None, 'illuminance', 'illuminance')]
class BrickletAmbientLightV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletAmbientLightV2
TOPIC_PREFIX = 'bricklet/ambient_light_v2'
GETTER_SPECS = [('get_illuminance', None, 'illuminance', 'illuminance'),
('get_configuration', None, 'configuration', None)]
SETTER_SPECS = [('set_configuration', 'configuration/set', ['illuminance_range', 'integration_time'])]
# FIXME: expose analog_value getter?
class BrickletAnalogInProxy(DeviceProxy):
DEVICE_CLASS = BrickletAnalogIn
TOPIC_PREFIX = 'bricklet/analog_in'
GETTER_SPECS = [('get_voltage', None, 'voltage', 'voltage'),
('get_range', None, 'range', 'range'),
('get_averaging', None, 'averaging', 'average')]
SETTER_SPECS = [('set_range', 'range/set', ['range']),
('set_averaging', 'averaging/set', ['average'])]
# FIXME: expose analog_value getter?
class BrickletAnalogInV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletAnalogInV2
TOPIC_PREFIX = 'bricklet/analog_in_v2'
GETTER_SPECS = [('get_voltage', None, 'voltage', 'voltage'),
('get_moving_average', None, 'moving_average', 'average')]
SETTER_SPECS = [('set_moving_average', 'moving_average/set', ['average'])]
class BrickletAnalogOutProxy(DeviceProxy):
DEVICE_CLASS = BrickletAnalogOut
TOPIC_PREFIX = 'bricklet/analog_out'
GETTER_SPECS = [('get_voltage', None, 'voltage', 'voltage'),
('get_mode', None, 'mode', 'mode')]
SETTER_SPECS = [('set_voltage', 'voltage/set', ['voltage']),
('set_mode', 'mode/set', ['mode'])]
class BrickletAnalogOutV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletAnalogOutV2
TOPIC_PREFIX = 'bricklet/analog_out_v2'
GETTER_SPECS = [('get_output_voltage', None, 'output_voltage', 'voltage'),
('get_input_voltage', None, 'input_voltage', 'voltage')]
SETTER_SPECS = [('set_output_voltage', 'output_voltage/set', ['voltage'])]
class BrickletAnalogInV3Proxy(DeviceProxy):
DEVICE_CLASS = BrickletAnalogInV3
TOPIC_PREFIX = 'bricklet/analog_in_v3'
GETTER_SPECS = [('get_voltage', None, 'voltage', 'voltage')]
class BrickletBarometerProxy(DeviceProxy):
DEVICE_CLASS = BrickletBarometer
TOPIC_PREFIX = 'bricklet/barometer'
GETTER_SPECS = [('get_air_pressure', None, 'air_pressure', 'air_pressure'),
('get_altitude', None, 'altitude', 'altitude'),
('get_chip_temperature', None, 'chip_temperature', 'temperature'),
('get_reference_air_pressure', None, 'reference_air_pressure', 'air_pressure'),
('get_averaging', None, 'averaging', None)]
SETTER_SPECS = [('set_reference_air_pressure', 'reference_air_pressure/set', ['air_pressure']),
('set_averaging', 'averaging/set', ['moving_average_pressure', 'average_pressure', 'average_temperature'])]
class BrickletCANProxy(DeviceProxy):
DEVICE_CLASS = BrickletCAN
TOPIC_PREFIX = 'bricklet/can'
GETTER_SPECS = [('read_frame', None, 'read_frame', None),
('get_configuration', None, 'configuration', None),
('get_read_filter', None, 'read_filter', None),
('get_error_log', None, 'error_log', None)]
SETTER_SPECS = [(None, 'write_frame/set', ['frame_type', 'identifier', 'data', 'length'], {'getter_name': 'write_frame', 'getter_publish_topic': 'write_frame', 'getter_return_value': 'success'}),
('set_configuration', 'configuration/set', ['baud_rate', 'transceiver_mode', 'write_timeout']),
('set_read_filter', 'read_filter/set', ['mode', 'mask', 'filter1', 'filter2'])]
# Arguments required by a getter must be published to the "<GETTER-NAME>/set"
# topic, which executes the getter with the provided arguments. The result of
# the getter is then published on the "<GETTER-NAME>" topic.
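# For example (the UID "XYZ" is hypothetical, the rest follows from the
# BrickletCANProxy specification above): publishing
#
#   topic:   tinkerforge/bricklet/can/XYZ/write_frame/set
#   payload: {"frame_type": 0, "identifier": 42, "data": [1, 2, 3, 4, 0, 0, 0, 0], "length": 4}
#
# calls write_frame() with those arguments and publishes its boolean result as
#
#   topic:   tinkerforge/bricklet/can/XYZ/write_frame
#   payload: {"_timestamp": 1500000000.0, "success": true}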
class BrickletCO2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletCO2
TOPIC_PREFIX = 'bricklet/co2'
GETTER_SPECS = [('get_co2_concentration', None, 'co2_concentration', 'co2_concentration')]
class BrickletColorProxy(DeviceProxy):
DEVICE_CLASS = BrickletColor
TOPIC_PREFIX = 'bricklet/color'
GETTER_SPECS = [('get_color', None, 'color', None),
('get_illuminance', None, 'illuminance', 'illuminance'),
('get_color_temperature', None, 'color_temperature', 'color_temperature'),
('get_config', None, 'config', None),
('is_light_on', None, 'light_on', 'light')]
SETTER_SPECS = [('set_config', 'config/set', ['gain', 'integration_time']),
('light_on', 'light_on/set', []),
('light_off', 'light_off/set', [])]
# FIXME: expose analog_value getter?
# FIXME: handle over_current callback?
class BrickletCurrent12Proxy(DeviceProxy):
DEVICE_CLASS = BrickletCurrent12
TOPIC_PREFIX = 'bricklet/current12'
GETTER_SPECS = [('get_current', None, 'current', 'current'),
('is_over_current', None, 'over_current', 'over')]
SETTER_SPECS = [('calibrate', 'calibrate/set', [])]
# FIXME: expose analog_value getter?
# FIXME: handle over_current callback?
class BrickletCurrent25Proxy(DeviceProxy):
DEVICE_CLASS = BrickletCurrent25
TOPIC_PREFIX = 'bricklet/current25'
GETTER_SPECS = [('get_current', None, 'current', 'current'),
('is_over_current', None, 'over_current', 'over')]
SETTER_SPECS = [('calibrate', 'calibrate/set', [])]
# FIXME: expose analog_value getter?
# FIXME: expose sampling_point getter/setter?
class BrickletDistanceIRProxy(DeviceProxy):
DEVICE_CLASS = BrickletDistanceIR
TOPIC_PREFIX = 'bricklet/distance_ir'
GETTER_SPECS = [('get_distance', None, 'distance', 'distance')]
class BrickletDistanceUSProxy(DeviceProxy):
DEVICE_CLASS = BrickletDistanceUS
TOPIC_PREFIX = 'bricklet/distance_us'
GETTER_SPECS = [('get_distance_value', None, 'distance_value', 'distance'),
('get_moving_average', None, 'moving_average', 'average')]
SETTER_SPECS = [('set_moving_average', 'moving_average/set', ['average'])]
class BrickletDMXProxy(DeviceProxy):
DEVICE_CLASS = BrickletDMX
TOPIC_PREFIX = 'bricklet/dmx'
GETTER_SPECS = [('get_dmx_mode', None, 'dmx_mode', 'dmx_mode'),
('read_frame', None, 'read_frame', None),
('get_frame_duration', None, 'frame_duration', 'frame_duration'),
('get_frame_error_count', None, 'frame_error_count', None),
('get_communication_led_config', None, 'communication_led_config', 'config'),
('get_error_led_config', None, 'error_led_config', 'config'),
('get_status_led_config', None, 'status_led_config', 'config'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('set_dmx_mode', 'dmx_mode/set', ['dmx_mode']),
('write_frame', 'write_frame/set', ['frame']),
('set_frame_duration', 'frame_duration/set', ['frame_duration']),
('set_communication_led_config', 'communication_led_config/set', ['config']),
('set_error_led_config', 'error_led_config/set', ['config']),
('set_status_led_config', 'status_led_config/set', ['config']),
('reset', 'reset/set', [])]
class BrickletDualButtonProxy(DeviceProxy):
DEVICE_CLASS = BrickletDualButton
TOPIC_PREFIX = 'bricklet/dual_button'
SETTER_SPECS = [('set_led_state', 'led_state/set', ['led_l', 'led_r']),
('set_selected_led_state', 'selected_led_state/set', ['led', 'state'])]
def cb_state_changed(self, button_l, button_r, led_l, led_r):
self.publish_values('button_state', button_l=button_l, button_r=button_r)
self.publish_values('led_state', led_l=led_l, led_r=led_r)
def setup_callbacks(self):
try:
button_l, button_r = self.device.get_button_state()
self.publish_values('button_state', button_l=button_l, button_r=button_r)
except:
pass
try:
led_l, led_r = self.device.get_led_state()
self.publish_values('led_state', led_l=led_l, led_r=led_r)
except:
pass
self.device.register_callback(BrickletDualButton.CALLBACK_STATE_CHANGED,
self.cb_state_changed)
# FIXME: get_monoflop needs special handling
# FIXME: handle monoflop_done callback?
class BrickletDualRelayProxy(DeviceProxy):
DEVICE_CLASS = BrickletDualRelay
TOPIC_PREFIX = 'bricklet/dual_relay'
GETTER_SPECS = [('get_state', None, 'state', None)]
SETTER_SPECS = [('set_state', 'state/set', ['relay1', 'relay2']),
('set_monoflop', 'monoflop/set', ['relay', 'state', 'time']),
('set_selected_state', 'selected_state/set', ['relay', 'state'])]
class BrickletDustDetectorProxy(DeviceProxy):
DEVICE_CLASS = BrickletDustDetector
TOPIC_PREFIX = 'bricklet/dust_detector'
GETTER_SPECS = [('get_dust_density', None, 'dust_density', 'dust_density'),
('get_moving_average', None, 'moving_average', 'average')]
SETTER_SPECS = [('set_moving_average', 'moving_average/set', ['average'])]
# FIXME: get_coordinates, get_altitude and get_motion need special status handling to avoid publishing invalid data
class BrickletGPSProxy(DeviceProxy):
DEVICE_CLASS = BrickletGPS
TOPIC_PREFIX = 'bricklet/gps'
GETTER_SPECS = [('get_status', None, 'status', None),
('get_coordinates', None, 'coordinates', None),
('get_altitude', None, 'altitude', None),
('get_motion', None, 'motion', None),
('get_date_time', None, 'date_time', 'date_time')]
SETTER_SPECS = [('restart', 'restart/set', ['restart_type'])]
class BrickletGPSV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletGPSV2
TOPIC_PREFIX = 'bricklet/gps_v2'
GETTER_SPECS = [('get_coordinates', None, 'coordinates', None),
('get_status', None, 'status', None),
('get_altitude', None, 'altitude', None),
('get_motion', None, 'motion', None),
('get_date_time', None, 'date_time', None),
('get_satellite_system_status', {'gps': (BrickletGPSV2.SATELLITE_SYSTEM_GPS,), 'glonass': (BrickletGPSV2.SATELLITE_SYSTEM_GLONASS,), 'galileo': (BrickletGPSV2.SATELLITE_SYSTEM_GALILEO,)}, 'satellite_system_status', None),
('get_fix_led_config', None, 'fix_led_config', 'config'),
('get_sbas_config', None, 'sbas_config', 'sbas_config'),
('get_status_led_config', None, 'status_led_config', 'config'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('restart', 'restart/set', ['restart_type']),
('set_fix_led_config', 'fix_led_config/set', ['config']),
('set_sbas_config', 'sbas_config/set', ['sbas_config']),
('set_status_led_config', 'status_led_config/set', ['config']),
('reset', 'reset/set', [])]
def update_extra_getters(self):
result = [[None] * 32 for _ in range(3)]  # three independent lists, one per satellite system
for i, system in enumerate([BrickletGPSV2.SATELLITE_SYSTEM_GPS, BrickletGPSV2.SATELLITE_SYSTEM_GLONASS, BrickletGPSV2.SATELLITE_SYSTEM_GALILEO]):
for k in range(32):
try:
status = self.device.get_satellite_status(system, k + 1)
except:
continue
result[i][k] = {}
for field in status._fields:
result[i][k][field] = getattr(status, field)
payload = {
'gps': result[0],
'glonass': result[1],
'galileo': result[2]
}
self.publish_values('satellite_status', **payload)
# FIXME: get_edge_count needs special handling
class BrickletHallEffectProxy(DeviceProxy):
DEVICE_CLASS = BrickletHallEffect
TOPIC_PREFIX = 'bricklet/hall_effect'
GETTER_SPECS = [('get_value', None, 'value', 'value'),
('get_edge_count_config', None, 'edge_count_config', None)]
SETTER_SPECS = [('set_edge_count_config', 'edge_count_config/set', ['edge_type', 'debounce'])]
# FIXME: expose analog_value getter?
class BrickletHumidityProxy(DeviceProxy):
DEVICE_CLASS = BrickletHumidity
TOPIC_PREFIX = 'bricklet/humidity'
GETTER_SPECS = [('get_humidity', None, 'humidity', 'humidity')]
class BrickletHumidityV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletHumidityV2
TOPIC_PREFIX = 'bricklet/humidity_v2'
GETTER_SPECS = [('get_humidity', None, 'humidity', 'humidity'),
('get_temperature', None, 'temperature', 'temperature'),
('get_heater_configuration', None, 'heater_configuration', 'heater_config'),
('get_moving_average_configuration', None, 'moving_average_configuration', None),
('get_status_led_config', None, 'status_led_config', 'config'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('set_heater_configuration', 'heater_configuration/set', ['heater_config']),
('set_moving_average_configuration', 'moving_average_configuration/set', ['moving_average_length_humidity', 'moving_average_length_temperature']),
('set_status_led_config', 'status_led_config/set', ['config']),
('reset', 'reset/set', [])]
class BrickletIndustrialAnalogOutProxy(DeviceProxy):
DEVICE_CLASS = BrickletIndustrialAnalogOut
TOPIC_PREFIX = 'bricklet/industrial_analog_out'
GETTER_SPECS = [('get_voltage', None, 'voltage', 'voltage'),
('get_current', None, 'current', 'current'),
('get_configuration', None, 'configuration', None),
('is_enabled', None, 'enabled', 'enabled')]
SETTER_SPECS = [('set_voltage', 'voltage/set', ['voltage']),
('set_current', 'current/set', ['current']),
('set_configuration', 'configuration/set', ['voltage_range', 'current_range']),
('enable', 'enable/set', []),
('disable', 'disable/set', [])]
# FIXME: get_edge_count and get_edge_count_config need special handling
# FIXME: handle interrupt callback, including get_interrupt and set_interrupt?
class BrickletIndustrialDigitalIn4Proxy(DeviceProxy):
DEVICE_CLASS = BrickletIndustrialDigitalIn4
TOPIC_PREFIX = 'bricklet/industrial_digital_in_4'
GETTER_SPECS = [('get_value', None, 'value', 'value_mask'),
('get_group', None, 'group', 'group'),
('get_available_for_group', None, 'available_for_group', 'available')]
SETTER_SPECS = [('set_edge_count_config', 'edge_count_config/set', ['edge_type', 'debounce']),
('set_group', 'group/set', ['group'])]
# FIXME: get_monoflop needs special handling
# FIXME: handle monoflop_done callback?
class BrickletIndustrialDigitalOut4Proxy(DeviceProxy):
DEVICE_CLASS = BrickletIndustrialDigitalOut4
TOPIC_PREFIX = 'bricklet/industrial_digital_out_4'
GETTER_SPECS = [('get_value', None, 'value', 'value_mask'),
('get_group', None, 'group', 'group'),
('get_available_for_group', None, 'available_for_group', 'available')]
SETTER_SPECS = [('set_value', 'value/set', ['value_mask']),
('set_selected_values', 'selected_values/set', ['selection_mask', 'value_mask']),
('set_monoflop', 'monoflop/set', ['selection_mask', 'value_mask', 'time']),
('set_group', 'group/set', ['group'])]
# FIXME: get_current needs special handling
class BrickletIndustrialDual020mAProxy(DeviceProxy):
DEVICE_CLASS = BrickletIndustrialDual020mA
TOPIC_PREFIX = 'bricklet/industrial_dual_0_20ma'
GETTER_SPECS = [('get_sample_rate', None, 'sample_rate', 'rate')]
    SETTER_SPECS = [('set_sample_rate', 'sample_rate/set', ['rate'])]
# FIXME: get_voltage needs special handling
class BrickletIndustrialDualAnalogInProxy(DeviceProxy):
DEVICE_CLASS = BrickletIndustrialDualAnalogIn
TOPIC_PREFIX = 'bricklet/industrial_dual_analog_in'
GETTER_SPECS = [('get_sample_rate', None, 'sample_rate', 'rate'),
('get_calibration', None, 'calibration', None),
('get_adc_values', None, 'adc_values', 'value')]
SETTER_SPECS = [('set_sample_rate', 'sample_rate/set', ['rate']),
('set_calibration', 'calibration/set', ['offset', 'gain'])]
# FIXME: get_monoflop needs special handling
# FIXME: handle monoflop_done callback?
class BrickletIndustrialQuadRelayProxy(DeviceProxy):
DEVICE_CLASS = BrickletIndustrialQuadRelay
TOPIC_PREFIX = 'bricklet/industrial_quad_relay'
GETTER_SPECS = [('get_value', None, 'value', 'value_mask'),
('get_group', None, 'group', 'group'),
('get_available_for_group', None, 'available_for_group', 'available')]
SETTER_SPECS = [('set_value', 'value/set', ['value_mask']),
('set_selected_values', 'selected_values/set', ['selection_mask', 'value_mask']),
('set_monoflop', 'monoflop/set', ['selection_mask', 'value_mask', 'time']),
('set_group', 'group/set', ['group'])]
# FIXME: get_edge_count, get_port_monoflop and get_edge_count_config need special handling
# FIXME: handle monoflop_done callback?
# FIXME: handle interrupt callback, including get_port_interrupt and set_port_interrupt?
class BrickletIO16Proxy(DeviceProxy):
DEVICE_CLASS = BrickletIO16
TOPIC_PREFIX = 'bricklet/io16'
GETTER_SPECS = [('get_port', {'a': ('a',), 'b': ('b',)}, 'port', 'value_mask'),
('get_port_configuration', {'a': ('a',), 'b': ('b',)}, 'port_configuration', None)]
SETTER_SPECS = [('set_port', 'port/set', ['port', 'value_mask']),
('set_port_configuration', 'port_configuration/set', ['port', 'selection_mask', 'direction', 'value']),
('set_port_monoflop', 'port_monoflop/set', ['port', 'selection_mask', 'value_mask', 'time']),
('set_selected_values', 'selected_values/set', ['port', 'selection_mask', 'value_mask']),
('set_edge_count_config', 'edge_count_config/set', ['port', 'edge_type', 'debounce'])]
# FIXME: get_edge_count, get_monoflop and get_edge_count_config need special handling
# FIXME: handle monoflop_done callback?
# FIXME: handle interrupt callback, including get_interrupt and set_interrupt?
class BrickletIO4Proxy(DeviceProxy):
DEVICE_CLASS = BrickletIO4
TOPIC_PREFIX = 'bricklet/io4'
GETTER_SPECS = [('get_value', None, 'value', 'value_mask'),
('get_configuration', None, 'configuration', None)]
SETTER_SPECS = [('set_value', 'value/set', ['value_mask']),
('set_configuration', 'configuration/set', ['selection_mask', 'direction', 'value']),
('set_monoflop', 'monoflop/set', ['selection_mask', 'value_mask', 'time']),
('set_selected_values', 'selected_values/set', ['selection_mask', 'value_mask']),
('set_edge_count_config', 'edge_count_config/set', ['edge_type', 'debounce'])]
# FIXME: expose analog_value getter?
class BrickletJoystickProxy(DeviceProxy):
DEVICE_CLASS = BrickletJoystick
TOPIC_PREFIX = 'bricklet/joystick'
GETTER_SPECS = [('get_position', None, 'position', None)]
SETTER_SPECS = [('calibrate', 'calibrate/set', [])]
def cb_pressed(self):
self.publish_values('pressed', pressed=True)
def cb_released(self):
self.publish_values('pressed', pressed=False)
def setup_callbacks(self):
try:
self.publish_values('pressed', pressed=self.device.is_pressed())
except:
pass
self.device.register_callback(BrickletJoystick.CALLBACK_PRESSED,
self.cb_pressed)
self.device.register_callback(BrickletJoystick.CALLBACK_RELEASED,
self.cb_released)
class BrickletLaserRangeFinderProxy(DeviceProxy):
DEVICE_CLASS = BrickletLaserRangeFinder
TOPIC_PREFIX = 'bricklet/laser_range_finder'
GETTER_SPECS = [('get_distance', None, 'distance', 'distance'),
('get_velocity', None, 'velocity', 'velocity'),
('get_mode', None, 'mode', 'mode'),
('is_laser_enabled', None, 'laser_enabled', 'laser_enabled'),
('get_moving_average', None, 'moving_average', None)]
SETTER_SPECS = [('set_mode', 'mode/set', ['mode']),
('enable_laser', 'enable_laser/set', []),
('disable_laser', 'disable_laser/set', []),
('set_moving_average', 'moving_average/set', ['distance_average_length', 'velocity_average_length'])]
class BrickletLCD16x2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletLCD16x2
TOPIC_PREFIX = 'bricklet/lcd_16x2'
GETTER_SPECS = [('is_backlight_on', None, 'backlight_on', 'backlight'),
('get_config', None, 'config', None),
('get_custom_character', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,), '7': (7,)}, 'custom_character', 'character')]
SETTER_SPECS = [('write_line', 'write_line/set', ['line', 'position', 'text']),
('clear_display', 'clear_display/set', []),
('backlight_on', 'backlight_on/set', []),
('backlight_off', 'backlight_off/set', []),
('set_config', 'config/set', ['cursor', 'blinking']),
('set_custom_character', 'custom_character/set', ['index', 'character'])]
def cb_button_pressed(self, button):
self.last_button_pressed[str(button)] = True
self.publish_values('button_pressed', **self.last_button_pressed)
def cb_button_released(self, button):
self.last_button_pressed[str(button)] = False
self.publish_values('button_pressed', **self.last_button_pressed)
def setup_callbacks(self):
self.last_button_pressed = {'0': False, '1': False, '2': False}
for button in range(3):
try:
self.last_button_pressed[str(button)] = self.device.is_button_pressed(button)
except:
pass
self.publish_values('button_pressed', **self.last_button_pressed)
self.device.register_callback(BrickletLCD16x2.CALLBACK_BUTTON_PRESSED,
self.cb_button_pressed)
self.device.register_callback(BrickletLCD16x2.CALLBACK_BUTTON_RELEASED,
self.cb_button_released)
class BrickletLCD20x4Proxy(DeviceProxy):
DEVICE_CLASS = BrickletLCD20x4
TOPIC_PREFIX = 'bricklet/lcd_20x4'
GETTER_SPECS = [('is_backlight_on', None, 'backlight_on', 'backlight'),
('get_config', None, 'config', None),
('get_custom_character', {'0': (0,), '1': (1,), '2': (2,), '3': (3,), '4': (4,), '5': (5,), '6': (6,), '7': (7,)}, 'custom_character', 'character'),
('get_default_text', {'0': (0,), '1': (1,), '2': (2,), '3': (3,)}, 'default_text', 'text'),
('get_default_text_counter', None, 'default_text_counter', 'counter')]
SETTER_SPECS = [('write_line', 'write_line/set', ['line', 'position', 'text']),
('clear_display', 'clear_display/set', []),
('backlight_on', 'backlight_on/set', []),
('backlight_off', 'backlight_off/set', []),
('set_config', 'config/set', ['cursor', 'blinking']),
('set_custom_character', 'custom_character/set', ['index', 'character']),
('set_default_text', 'default_text/set', ['line', 'text']),
('set_default_text_counter', 'default_text_counter/set', ['counter'])]
def cb_button_pressed(self, button):
self.last_button_pressed[str(button)] = True
self.publish_values('button_pressed', **self.last_button_pressed)
def cb_button_released(self, button):
self.last_button_pressed[str(button)] = False
self.publish_values('button_pressed', **self.last_button_pressed)
def setup_callbacks(self):
self.last_button_pressed = {'0': False, '1': False, '2': False, '3': False}
for button in range(4):
try:
self.last_button_pressed[str(button)] = self.device.is_button_pressed(button)
except:
pass
self.publish_values('button_pressed', **self.last_button_pressed)
self.device.register_callback(BrickletLCD20x4.CALLBACK_BUTTON_PRESSED,
self.cb_button_pressed)
self.device.register_callback(BrickletLCD20x4.CALLBACK_BUTTON_RELEASED,
self.cb_button_released)
class BrickletLEDStripProxy(DeviceProxy):
DEVICE_CLASS = BrickletLEDStrip
TOPIC_PREFIX = 'bricklet/led_strip'
GETTER_SPECS = [('get_rgb_values', None, 'rgb_values', None),
('get_frame_duration', None, 'frame_duration', 'duration'),
('get_supply_voltage', None, 'supply_voltage', 'voltage'),
('get_clock_frequency', None, 'clock_frequency', 'frequency'),
('get_chip_type', None, 'chip_type', 'chip')]
SETTER_SPECS = [('set_rgb_values', 'rgb_values/set', ['index', 'length', 'r', 'g', 'b']),
('set_frame_duration', 'frame_duration/set', ['duration']),
('set_clock_frequency', 'clock_frequency/set', ['frequency']),
('set_chip_type', 'chip_type/set', ['chip'])]
class BrickletLEDStripV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletLEDStripV2
TOPIC_PREFIX = 'bricklet/led_strip_v2'
GETTER_SPECS = [('get_led_values', None, 'led_values', None),
('get_frame_duration', None, 'frame_duration', 'duration'),
('get_supply_voltage', None, 'supply_voltage', 'voltage'),
('get_clock_frequency', None, 'clock_frequency', 'frequency'),
('get_chip_type', None, 'chip_type', 'chip'),
('get_channel_mapping', None, 'channel_mapping', 'mapping'),
('get_status_led_config', None, 'status_led_config', 'config'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [(None, 'get_led_values/set', ['index', 'length'], {'getter_name': 'get_led_values', 'getter_publish_topic': 'led_values', 'getter_return_value': 'value'}),
('set_led_values', 'led_values/set', ['index', 'value']),
('set_frame_duration', 'frame_duration/set', ['duration']),
('set_clock_frequency', 'clock_frequency/set', ['frequency']),
('set_chip_type', 'chip_type/set', ['chip']),
('set_channel_mapping', 'channel_mapping/set', ['mapping']),
('set_status_led_config', 'status_led_config/set', ['config'])]
# Arguments required for a getter must be published to "<GETTER-NAME>/set"
# topic which will execute the getter with the provided arguments.
# The output of the getter then will be published on the "<GETTER-NAME>"
# topic.
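    # Example round trip (sketch; "XYZ" is a hypothetical UID and the optional global topic
    # prefix is omitted):
    #   publish bricklet/led_strip_v2/XYZ/get_led_values/set  {"index": 0, "length": 10}
    #   -> the proxy calls get_led_values(0, 10) and publishes the result on
    #      bricklet/led_strip_v2/XYZ/led_values as {"value": [...]}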
class BrickletLineProxy(DeviceProxy):
DEVICE_CLASS = BrickletLine
TOPIC_PREFIX = 'bricklet/line'
GETTER_SPECS = [('get_reflectivity', None, 'reflectivity', 'reflectivity')]
# FIXME: expose analog_value getter?
class BrickletLinearPotiProxy(DeviceProxy):
DEVICE_CLASS = BrickletLinearPoti
TOPIC_PREFIX = 'bricklet/linear_poti'
GETTER_SPECS = [('get_position', None, 'position', 'position')]
class BrickletLoadCellProxy(DeviceProxy):
DEVICE_CLASS = BrickletLoadCell
TOPIC_PREFIX = 'bricklet/load_cell'
GETTER_SPECS = [('get_weight', None, 'weight', 'weight'),
('is_led_on', None, 'led_on', 'on'),
('get_moving_average', None, 'moving_average', 'average'),
('get_configuration', None, 'configuration', None)]
SETTER_SPECS = [('led_on', 'led_on/set', []),
('led_off', 'led_off/set', []),
('set_moving_average', 'moving_average/set', ['average']),
('set_configuration', 'configuration/set', ['rate', 'gain']),
('tare', 'tare/set', [])]
class BrickletMoistureProxy(DeviceProxy):
DEVICE_CLASS = BrickletMoisture
TOPIC_PREFIX = 'bricklet/moisture'
GETTER_SPECS = [('get_moisture_value', None, 'moisture_value', 'moisture'),
('get_moving_average', None, 'moving_average', 'average')]
SETTER_SPECS = [('set_moving_average', 'moving_average/set', ['average'])]
# FIXME: handle motion_detected and detection_cycle_ended callbacks?
class BrickletMotionDetectorProxy(DeviceProxy):
DEVICE_CLASS = BrickletMotionDetector
TOPIC_PREFIX = 'bricklet/motion_detector'
GETTER_SPECS = [('get_motion_detected', None, 'motion_detected', 'motion')]
class BrickletMotionDetectorV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletMotionDetectorV2
TOPIC_PREFIX = 'bricklet/motion_detector_v2'
GETTER_SPECS = [('get_motion_detected', None, 'motion_detected', 'motion')]
class BrickletMotorizedLinearPotiProxy(DeviceProxy):
DEVICE_CLASS = BrickletMotorizedLinearPoti
TOPIC_PREFIX = 'bricklet/motorized_linear_poti'
GETTER_SPECS = [('get_position', None, 'position', 'position'),
('get_motor_position', None, 'motor_position', None),
                    ('get_status_led_config', None, 'status_led_config', 'config'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('set_motor_position', 'motor_position/set', ['position', 'drive_mode', 'hold_position']),
('calibrate', 'calibrate/set', []),
('set_status_led_config', 'status_led_config/set', ['config']),
('reset', 'reset/set', [])]
class BrickletMultiTouchProxy(DeviceProxy):
DEVICE_CLASS = BrickletMultiTouch
TOPIC_PREFIX = 'bricklet/multi_touch'
GETTER_SPECS = [('get_electrode_config', None, 'electrode_config', 'enabled_electrodes'),
('get_electrode_sensitivity', None, 'electrode_sensitivity', 'sensitivity')]
SETTER_SPECS = [('recalibrate', 'recalibrate/set', []),
('set_electrode_config', 'electrode_config/set', ['enabled_electrodes']),
('set_electrode_sensitivity', 'electrode_sensitivity/set', ['sensitivity'])]
def cb_touch_state(self, state):
self.publish_values('touch_state', state=state)
def setup_callbacks(self):
try:
self.publish_values('touch_state', state=self.device.get_touch_state())
except:
pass
self.device.register_callback(BrickletMultiTouch.CALLBACK_TOUCH_STATE,
self.cb_touch_state)
class BrickletNFCRFIDProxy(DeviceProxy):
DEVICE_CLASS = BrickletNFCRFID
TOPIC_PREFIX = 'bricklet/nfc_rfid'
GETTER_SPECS = [('get_tag_id', None, 'tag_id', None),
('get_state', None, 'state', None),
('get_page', None, 'page', 'page')]
SETTER_SPECS = [('request_tag_id', 'request_tag_id/set', ['tag_type']),
('authenticate_mifare_classic_page', 'authenticate_mifare_classic_page/set', ['page', 'key_number', 'key']),
('write_page', 'write_page/set', ['page', 'data']),
('request_page', 'request_page/set', ['page'])]
class BrickletOLED128x64Proxy(DeviceProxy):
DEVICE_CLASS = BrickletOLED128x64
TOPIC_PREFIX = 'bricklet/oled_128x64'
GETTER_SPECS = [('get_display_configuration', None, 'display_configuration', None)]
SETTER_SPECS = [('write', 'write/set', ['data']),
('new_window', 'new_window/set', ['column_from', 'column_to', 'row_from', 'row_to']),
('clear_display', 'clear_display/set', []),
('write_line', 'write_line/set', ['line', 'position', 'text']),
('set_display_configuration', 'display_configuration/set', ['contrast', 'invert'])]
class BrickletOLED64x48Proxy(DeviceProxy):
DEVICE_CLASS = BrickletOLED64x48
TOPIC_PREFIX = 'bricklet/oled_64x48'
GETTER_SPECS = [('get_display_configuration', None, 'display_configuration', None)]
SETTER_SPECS = [('write', 'write/set', ['data']),
('new_window', 'new_window/set', ['column_from', 'column_to', 'row_from', 'row_to']),
('clear_display', 'clear_display/set', []),
('write_line', 'write_line/set', ['line', 'position', 'text']),
('set_display_configuration', 'display_configuration/set', ['contrast', 'invert'])]
class BrickletPiezoBuzzerProxy(DeviceProxy):
DEVICE_CLASS = BrickletPiezoBuzzer
TOPIC_PREFIX = 'bricklet/piezo_buzzer'
SETTER_SPECS = [('beep', 'beep/set', ['duration']),
('morse_code', 'morse_code/set', ['morse'])]
# FIXME: handle beep_finished and morse_code_finished callback?
# FIXME: expose calibrate setter?
class BrickletPiezoSpeakerProxy(DeviceProxy):
DEVICE_CLASS = BrickletPiezoSpeaker
TOPIC_PREFIX = 'bricklet/piezo_speaker'
SETTER_SPECS = [('beep', 'beep/set', ['duration', 'frequency']),
('morse_code', 'morse_code/set', ['morse', 'frequency'])]
class BrickletOutdoorWeatherProxy(DeviceProxy):
DEVICE_CLASS = BrickletOutdoorWeather
TOPIC_PREFIX = 'bricklet/outdoor_weather'
def update_extra_getters(self):
# stations
try:
identifiers = self.device.get_station_identifiers()
except:
identifiers = []
payload = {}
for identifier in identifiers:
data = self.device.get_station_data(identifier)
payload[str(identifier)] = {}
for field in data._fields:
payload[str(identifier)][field] = getattr(data, field)
self.publish_values('station_data', **payload)
# sensors
try:
identifiers = self.device.get_sensor_identifiers()
except:
identifiers = []
payload = {}
for identifier in identifiers:
data = self.device.get_sensor_data(identifier)
payload[str(identifier)] = {}
for field in data._fields:
payload[str(identifier)][field] = getattr(data, field)
self.publish_values('sensor_data', **payload)
class BrickletPTCProxy(DeviceProxy):
DEVICE_CLASS = BrickletPTC
TOPIC_PREFIX = 'bricklet/ptc'
GETTER_SPECS = [('get_temperature', None, 'temperature', 'temperature'),
('get_resistance', None, 'resistance', 'resistance'),
('is_sensor_connected', None, 'sensor_connected', 'connected'),
('get_wire_mode', None, 'wire_mode', 'mode'),
('get_noise_rejection_filter', None, 'noise_rejection_filter', 'filter')]
SETTER_SPECS = [('set_wire_mode', 'wire_mode/set', ['mode']),
('set_noise_rejection_filter', 'noise_rejection_filter/set', ['filter'])]
class BrickletRealTimeClockProxy(DeviceProxy):
DEVICE_CLASS = BrickletRealTimeClock
TOPIC_PREFIX = 'bricklet/real_time_clock'
GETTER_SPECS = [('get_date_time', None, 'date_time', None),
('get_timestamp', None, 'timestamp', 'timestamp'),
('get_offset', None, 'offset', 'offset')]
SETTER_SPECS = [('set_date_time', 'date_time/set', ['year', 'month', 'day', 'hour', 'minute', 'second', 'centisecond', 'weekday']),
('set_offset', 'offset/set', ['offset'])]
# FIXME: handle switching_done callback?
class BrickletRemoteSwitchProxy(DeviceProxy):
DEVICE_CLASS = BrickletRemoteSwitch
TOPIC_PREFIX = 'bricklet/remote_switch'
GETTER_SPECS = [('get_switching_state', None, 'switching_state', 'state'),
('get_repeats', None, 'repeats', 'repeats')]
SETTER_SPECS = [('switch_socket_a', 'switch_socket_a/set', ['house_code', 'receiver_code', 'switch_to']),
('switch_socket_b', 'switch_socket_b/set', ['address', 'unit', 'switch_to']),
('dim_socket_b', 'dim_socket_b/set', ['address', 'unit', 'dim_value']),
('switch_socket_c', 'switch_socket_c/set', ['system_code', 'device_code', 'switch_to']),
('set_repeats', 'repeats/set', ['repeats'])]
class BrickletRemoteSwitchV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletRemoteSwitchV2
TOPIC_PREFIX = 'bricklet/remote_switch_v2'
GETTER_SPECS = [('get_switching_state', None, 'switching_state', 'state'),
('get_repeats', None, 'repeats', 'repeats')]
SETTER_SPECS = [('switch_socket_a', 'switch_socket_a/set', ['house_code', 'receiver_code', 'switch_to']),
('switch_socket_b', 'switch_socket_b/set', ['address', 'unit', 'switch_to']),
('dim_socket_b', 'dim_socket_b/set', ['address', 'unit', 'dim_value']),
('switch_socket_c', 'switch_socket_c/set', ['system_code', 'device_code', 'switch_to']),
('set_repeats', 'repeats/set', ['repeats'])]
class BrickletRGBLEDProxy(DeviceProxy):
DEVICE_CLASS = BrickletRGBLED
TOPIC_PREFIX = 'bricklet/rgb_led'
GETTER_SPECS = [('get_rgb_value', None, 'rgb_value', None)]
SETTER_SPECS = [('set_rgb_value', 'rgb_value/set', ['r', 'g', 'b'])]
class BrickletRGBLEDButtonProxy(DeviceProxy):
DEVICE_CLASS = BrickletRGBLEDButton
TOPIC_PREFIX = 'bricklet/rgb_led_button'
GETTER_SPECS = [('get_color', None, 'color', None),
('get_button_state', None, 'button_state', 'state'),
('get_color_calibration', None, 'color_calibration', None),
('get_status_led_config', None, 'status_led_config', 'config'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('set_color', 'color/set', ['red', 'green', 'blue']),
('set_color_calibration', 'color_calibration/set', ['red', 'green', 'blue']),
                    ('set_status_led_config', 'status_led_config/set', ['config']),
('reset', 'reset/set', [])]
def cb_button_state_changed(self, button_state):
self.publish_values('button_state', state = button_state)
def setup_callbacks(self):
try:
button_state = self.device.get_button_state()
self.publish_values('button_state', state = button_state)
except:
pass
self.device.register_callback(BrickletRGBLEDButton.CALLBACK_BUTTON_STATE_CHANGED,
self.cb_button_state_changed)
class BrickletRGBLEDMatrixProxy(DeviceProxy):
DEVICE_CLASS = BrickletRGBLEDMatrix
TOPIC_PREFIX = 'bricklet/rgb_led_matrix'
GETTER_SPECS = [('get_red', None, 'red', 'red'),
('get_green', None, 'green', 'green'),
('get_blue', None, 'blue', 'blue'),
('get_frame_duration', None, 'frame_duration', 'frame_duration'),
('get_supply_voltage', None, 'supply_voltage', 'voltage'),
('get_status_led_config', None, 'status_led_config', 'config'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [('set_red', 'red/set', ['red']),
('set_green', 'green/set', ['green']),
('set_blue', 'blue/set', ['blue']),
('set_frame_duration', 'frame_duration/set', ['frame_duration']),
('draw_frame', 'draw_frame/set', []),
('set_status_led_config', 'status_led_config/set', ['config']),
('reset', 'reset/set', [])]
class BrickletRotaryEncoderProxy(DeviceProxy):
DEVICE_CLASS = BrickletRotaryEncoder
TOPIC_PREFIX = 'bricklet/rotary_encoder'
GETTER_SPECS = [('get_count', (False,), 'count', 'count')]
SETTER_SPECS = [(None, 'get_count/set', ['reset'], {'getter_name': 'get_count', 'getter_publish_topic': 'count', 'getter_return_value': 'count'})]
# Arguments required for a getter must be published to "<GETTER-NAME>/set"
# topic which will execute the getter with the provided arguments.
# The output of the getter then will be published on the "<GETTER-NAME>"
# topic.
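    # For example (hypothetical UID "XYZ"): publishing {"reset": false} to
    # bricklet/rotary_encoder/XYZ/get_count/set calls get_count(False) and publishes the
    # returned count on bricklet/rotary_encoder/XYZ/count.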
def cb_pressed(self):
self.publish_values('pressed', pressed=True)
def cb_released(self):
self.publish_values('pressed', pressed=False)
def setup_callbacks(self):
try:
self.publish_values('pressed', pressed=self.device.is_pressed())
except:
pass
self.device.register_callback(BrickletRotaryEncoder.CALLBACK_PRESSED,
self.cb_pressed)
self.device.register_callback(BrickletRotaryEncoder.CALLBACK_RELEASED,
self.cb_released)
class BrickletRotaryEncoderV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletRotaryEncoderV2
TOPIC_PREFIX = 'bricklet/rotary_encoder_v2'
GETTER_SPECS = [('get_count', (False,), 'count', 'count')]
SETTER_SPECS = [(None, 'get_count/set', ['reset'], {'getter_name': 'get_count', 'getter_publish_topic': 'count', 'getter_return_value': 'count'})]
# Arguments required for a getter must be published to "<GETTER-NAME>/set"
# topic which will execute the getter with the provided arguments.
# The output of the getter then will be published on the "<GETTER-NAME>"
# topic.
def cb_pressed(self):
self.publish_values('pressed', pressed=True)
def cb_released(self):
self.publish_values('pressed', pressed=False)
def setup_callbacks(self):
try:
self.publish_values('pressed', pressed=self.device.is_pressed())
except:
pass
self.device.register_callback(BrickletRotaryEncoderV2.CALLBACK_PRESSED,
self.cb_pressed)
self.device.register_callback(BrickletRotaryEncoderV2.CALLBACK_RELEASED,
self.cb_released)
# FIXME: expose analog_value getter?
class BrickletRotaryPotiProxy(DeviceProxy):
DEVICE_CLASS = BrickletRotaryPoti
TOPIC_PREFIX = 'bricklet/rotary_poti'
GETTER_SPECS = [('get_position', None, 'position', 'position')]
class BrickletRS232Proxy(DeviceProxy):
DEVICE_CLASS = BrickletRS232
TOPIC_PREFIX = 'bricklet/rs232'
GETTER_SPECS = [('read', None, 'read', None),
('get_configuration', None, 'configuration', None)]
SETTER_SPECS = [(None, 'write/set', ['message'], {'getter_name': 'write', 'getter_publish_topic': 'write', 'getter_return_value': 'written'}),
('set_configuration', 'configuration/set', ['baudrate', 'parity', 'stopbits', 'wordlength', 'hardware_flowcontrol', 'software_flowcontrol']),
('set_break_condition', 'break_condition/set', ['break_time'])]
# Arguments required for a getter must be published to "<GETTER-NAME>/set"
# topic which will execute the getter with the provided arguments.
# The output of the getter then will be published on the "<GETTER-NAME>"
# topic.
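    # For example (hypothetical UID "XYZ"): publishing {"message": "hello"} to
    # bricklet/rs232/XYZ/write/set calls write("hello") and publishes the number of
    # characters written on bricklet/rs232/XYZ/write as {"written": ...}.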
class BrickletRS485Proxy(DeviceProxy):
DEVICE_CLASS = BrickletRS485
TOPIC_PREFIX = 'bricklet/rs485'
GETTER_SPECS = [('get_rs485_configuration', None, 'rs485_configuration', None),
('get_modbus_configuration', None, 'modbus_configuration', None),
('get_mode', None, 'mode', 'mode'),
('get_communication_led_config', None, 'communication_led_config', 'config'),
('get_error_led_config', None, 'error_led_config', 'config'),
('get_buffer_config', None, 'buffer_config', None),
('get_buffer_status', None, 'buffer_status', None),
('get_error_count', None, 'error_count', None),
('get_modbus_common_error_count', None, 'modbus_common_error_count', None),
('get_status_led_config', None, 'status_led_config', 'config'),
('get_chip_temperature', None, 'chip_temperature', 'temperature')]
SETTER_SPECS = [(None, 'write/set', ['message'], {'getter_name': 'write', 'getter_publish_topic': 'write', 'getter_return_value': 'written'}),
(None, 'read/set', ['length'], {'getter_name': 'read', 'getter_publish_topic': 'read', 'getter_return_value': 'message'}),
('set_rs485_configuration', 'rs485_configuration/set', ['baudrate', 'parity', 'stopbits', 'wordlength', 'duplex']),
('set_modbus_configuration', 'modbus_configuration/set', ['slave_address', 'master_request_timeout']),
('set_mode', 'mode/set', ['mode']),
('set_communication_led_config', 'communication_led_config/set', ['config']),
('set_error_led_config', 'error_led_config/set', ['config']),
('set_buffer_config', 'buffer_config/set', ['send_buffer_size', 'receive_buffer_size']),
('set_status_led_config', 'status_led_config/set', ['config']),
('reset', 'reset/set', [])]
# Arguments required for a getter must be published to "<GETTER-NAME>/set"
# topic which will execute the getter with the provided arguments.
# The output of the getter then will be published on the "<GETTER-NAME>"
# topic.
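    # For example (hypothetical UID "XYZ"): publishing {"length": 60} to
    # bricklet/rs485/XYZ/read/set calls read(60) and publishes the received data on
    # bricklet/rs485/XYZ/read as {"message": ...}.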
class BrickletSegmentDisplay4x7Proxy(DeviceProxy):
DEVICE_CLASS = BrickletSegmentDisplay4x7
TOPIC_PREFIX = 'bricklet/segment_display_4x7'
GETTER_SPECS = [('get_segments', None, 'segments', None),
('get_counter_value', None, 'counter_value', 'value')]
SETTER_SPECS = [('set_segments', 'segments/set', ['segments', 'brightness', 'colon']),
('start_counter', 'start_counter/set', ['value_from', 'value_to', 'increment', 'length'])]
# FIXME: handle monoflop_done callback?
class BrickletSolidStateRelayProxy(DeviceProxy):
DEVICE_CLASS = BrickletSolidStateRelay
TOPIC_PREFIX = 'bricklet/solid_state_relay'
GETTER_SPECS = [('get_state', None, 'state', 'state'),
('get_monoflop', None, 'monoflop', None)]
SETTER_SPECS = [('set_state', 'state/set', ['state']),
('set_monoflop', 'monoflop/set', ['state', 'time'])]
class BrickletSolidStateRelayV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletSolidStateRelayV2
TOPIC_PREFIX = 'bricklet/solid_state_relay_v2'
GETTER_SPECS = [('get_state', None, 'state', 'state'),
('get_monoflop', None, 'monoflop', None)]
SETTER_SPECS = [('set_state', 'state/set', ['state']),
('set_monoflop', 'monoflop/set', ['state', 'time'])]
class BrickletSoundIntensityProxy(DeviceProxy):
DEVICE_CLASS = BrickletSoundIntensity
TOPIC_PREFIX = 'bricklet/sound_intensity'
GETTER_SPECS = [('get_intensity', None, 'intensity', 'intensity')]
class BrickletTemperatureProxy(DeviceProxy):
DEVICE_CLASS = BrickletTemperature
TOPIC_PREFIX = 'bricklet/temperature'
GETTER_SPECS = [('get_temperature', None, 'temperature', 'temperature'),
('get_i2c_mode', None, 'i2c_mode', 'mode')]
SETTER_SPECS = [('set_i2c_mode', 'i2c_mode/set', ['mode'])]
class BrickletTemperatureIRProxy(DeviceProxy):
DEVICE_CLASS = BrickletTemperatureIR
TOPIC_PREFIX = 'bricklet/temperature_ir'
GETTER_SPECS = [('get_ambient_temperature', None, 'ambient_temperature', 'temperature'),
('get_object_temperature', None, 'object_temperature', 'temperature'),
('get_emissivity', None, 'emissivity', 'emissivity')]
SETTER_SPECS = [('set_emissivity', 'emissivity/set', ['emissivity'])]
class BrickletTemperatureIRV2Proxy(DeviceProxy):
DEVICE_CLASS = BrickletTemperatureIRV2
    TOPIC_PREFIX = 'bricklet/temperature_ir_v2'  # distinct prefix; 'bricklet/temperature_ir' would collide with the non-V2 proxy
GETTER_SPECS = [('get_ambient_temperature', None, 'ambient_temperature', 'temperature'),
('get_object_temperature', None, 'object_temperature', 'temperature'),
('get_emissivity', None, 'emissivity', 'emissivity')]
SETTER_SPECS = [('set_emissivity', 'emissivity/set', ['emissivity'])]
class BrickletThermalImagingProxy(DeviceProxy):
DEVICE_CLASS = BrickletThermalImaging
TOPIC_PREFIX = 'bricklet/thermal_imaging'
GETTER_SPECS = [('get_high_contrast_image', None, 'high_contrast_image', 'image'),
('get_temperature_image', None, 'temperature_image', 'image'),
('get_statistics', None, 'statistics', None),
('get_resolution', None, 'resolution', 'resolution'),
('get_spotmeter_config', None, 'spotmeter_config', 'region_of_interest'),
('get_high_contrast_config', None, 'high_contrast_config', None),
('get_status_led_config', None, 'status_led_config', 'config'),
('get_chip_temperature', None, 'chip_temperature', 'temperature'),
('get_image_transfer_config', None, 'image_transfer_config', 'config')]
SETTER_SPECS = [('set_resolution', 'resolution/set', ['resolution']),
('set_spotmeter_config', 'spotmeter_config/set', ['region_of_interest']),
('set_high_contrast_config', 'high_contrast_config/set', ['region_of_interest', 'dampening_factor', 'clip_limit', 'empty_counts']),
('set_status_led_config', 'status_led_config/set', ['config']),
('reset', 'reset/set', []),
('set_image_transfer_config', 'image_transfer_config/set', ['config'])]
class BrickletThermocoupleProxy(DeviceProxy):
DEVICE_CLASS = BrickletThermocouple
TOPIC_PREFIX = 'bricklet/thermocouple'
GETTER_SPECS = [('get_temperature', None, 'temperature', 'temperature'),
('get_configuration', None, 'configuration', None),
('get_error_state', None, 'error_state', None)]
SETTER_SPECS = [('set_configuration', 'configuration/set', ['averaging', 'thermocouple_type', 'filter'])]
# FIXME: handle tilt_state callback, including enable_tilt_state_callback, disable_tilt_state_callback and is_tilt_state_callback_enabled?
class BrickletTiltProxy(DeviceProxy):
DEVICE_CLASS = BrickletTilt
TOPIC_PREFIX = 'bricklet/tilt'
GETTER_SPECS = [('get_tilt_state', None, 'tilt_state', 'state')]
class BrickletUVLightProxy(DeviceProxy):
DEVICE_CLASS = BrickletUVLight
TOPIC_PREFIX = 'bricklet/uv_light'
GETTER_SPECS = [('get_uv_light', None, 'uv_light', 'uv_light')]
# FIXME: expose analog_value getter?
class BrickletVoltageProxy(DeviceProxy):
DEVICE_CLASS = BrickletVoltage
TOPIC_PREFIX = 'bricklet/voltage'
GETTER_SPECS = [('get_voltage', None, 'voltage', 'voltage')]
class BrickletVoltageCurrentProxy(DeviceProxy):
DEVICE_CLASS = BrickletVoltageCurrent
TOPIC_PREFIX = 'bricklet/voltage_current'
GETTER_SPECS = [('get_voltage', None, 'voltage', 'voltage'),
('get_current', None, 'current', 'current'),
('get_power', None, 'power', 'power'),
('get_configuration', None, 'configuration', None),
('get_calibration', None, 'calibration', None)]
SETTER_SPECS = [('set_configuration', 'configuration/set', ['averaging', 'voltage_conversion_time', 'current_conversion_time']),
('set_calibration', 'calibration/set', ['gain_multiplier', 'gain_divisor'])]
class Proxy(object):
def __init__(self, brickd_host, brickd_port, broker_host, broker_port,
broker_username, broker_password, broker_certificate, broker_tls_insecure,
update_interval, global_topic_prefix):
self.brickd_host = brickd_host
self.brickd_port = brickd_port
self.broker_host = broker_host
self.broker_port = broker_port
self.broker_username = broker_username
self.broker_password = broker_password
self.broker_certificate = broker_certificate
self.broker_tls_insecure = broker_tls_insecure
self.update_interval = update_interval
self.global_topic_prefix = global_topic_prefix
self.ipcon = IPConnection()
self.ipcon.register_callback(IPConnection.CALLBACK_CONNECTED, self.ipcon_cb_connected)
self.ipcon.register_callback(IPConnection.CALLBACK_ENUMERATE, self.ipcon_cb_enumerate)
self.client = mqtt.Client()
self.client.on_connect = self.mqtt_on_connect
self.client.on_disconnect = self.mqtt_on_disconnect
self.client.on_message = self.mqtt_on_message
self.device_proxies = {}
self.device_proxy_classes = {}
for subclass in DeviceProxy.__subclasses__():
self.device_proxy_classes[subclass.DEVICE_CLASS.DEVICE_IDENTIFIER] = subclass
def connect(self):
if self.broker_username is not None:
self.client.username_pw_set(self.broker_username, self.broker_password)
if self.broker_certificate is not None:
self.client.tls_set(self.broker_certificate)
if self.broker_tls_insecure:
self.client.tls_insecure_set(True)
self.client.connect(self.broker_host, self.broker_port)
self.client.loop_start()
while True:
try:
time.sleep(ENUMERATE_INTERVAL)
self.ipcon.enumerate()
except KeyboardInterrupt:
self.client.disconnect()
break
except:
pass
self.client.loop_stop()
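        # The loop above re-enumerates every ENUMERATE_INTERVAL seconds so devices attached
        # after startup are still discovered; KeyboardInterrupt disconnects cleanly from the
        # broker before the network loop is stopped.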
def publish_as_json(self, topic, payload, *args, **kwargs):
self.client.publish(self.global_topic_prefix + topic,
json.dumps(payload, separators=(',',':')),
*args, **kwargs)
def publish_enumerate(self, changed_uid, connected):
device_proxy = self.device_proxies[changed_uid]
topic_prefix = device_proxy.TOPIC_PREFIX
if connected:
topic = 'enumerate/connected/' + topic_prefix
else:
topic = 'enumerate/disconnected/' + topic_prefix
self.publish_as_json(topic, device_proxy.get_enumerate_entry())
enumerate_entries = []
for uid, device_proxy in self.device_proxies.items():
if not connected and uid == changed_uid or device_proxy.TOPIC_PREFIX != topic_prefix:
continue
enumerate_entries.append(device_proxy.get_enumerate_entry())
self.publish_as_json('enumerate/available/' + topic_prefix, enumerate_entries, retain=True)
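        # The 'enumerate/available/<prefix>' message is retained so clients that subscribe
        # later still receive the current device list for each topic prefix.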
def ipcon_cb_connected(self, connect_reason):
self.ipcon.enumerate()
def ipcon_cb_enumerate(self, uid, connected_uid, position, hardware_version,
firmware_version, device_identifier, enumeration_type):
if enumeration_type == IPConnection.ENUMERATION_TYPE_DISCONNECTED:
if uid in self.device_proxies:
self.publish_enumerate(uid, False)
self.device_proxies[uid].destroy()
del self.device_proxies[uid]
elif device_identifier in self.device_proxy_classes and uid not in self.device_proxies:
self.device_proxies[uid] = self.device_proxy_classes[device_identifier](uid, connected_uid, position, hardware_version,
firmware_version, self.ipcon, self.client,
self.update_interval, self.global_topic_prefix)
self.publish_enumerate(uid, True)
def mqtt_on_connect(self, client, user_data, flags, result_code):
if result_code == 0:
self.ipcon.connect(self.brickd_host, self.brickd_port)
def mqtt_on_disconnect(self, client, user_data, result_code):
self.ipcon.disconnect()
for uid in self.device_proxies:
self.device_proxies[uid].destroy()
self.device_proxies = {}
def mqtt_on_message(self, client, user_data, message):
logging.debug('Received message for topic ' + message.topic)
topic = message.topic[len(self.global_topic_prefix):]
if topic.startswith('brick/') or topic.startswith('bricklet/'):
topic_prefix1, topic_prefix2, uid, topic_suffix = topic.split('/', 3)
topic_prefix = topic_prefix1 + '/' + topic_prefix2
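            # e.g. 'bricklet/humidity/XYZ/moving_average/set' (hypothetical UID "XYZ") yields
            # topic_prefix='bricklet/humidity', uid='XYZ', topic_suffix='moving_average/set'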
if uid in self.device_proxies and topic_prefix == self.device_proxies[uid].TOPIC_PREFIX:
payload = message.payload.strip()
if len(payload) > 0:
try:
payload = json.loads(message.payload.decode('UTF-8'))
except Exception as e:
logging.warn('Received message with invalid JSON payload for topic ' + message.topic + ': ' + str(e))
return
else:
payload = {}
self.device_proxies[uid].handle_message(topic_suffix, payload)
return
logging.debug('Unknown topic ' + message.topic)
def parse_positive_int(value):
value = int(value)
if value < 0:
raise ValueError()
return value
parse_positive_int.__name__ = 'positive-int'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Brick MQTT Proxy')
parser.add_argument('--brickd-host', dest='brickd_host', type=str, default=BRICKD_HOST,
help='hostname or IP address of Brick Daemon, WIFI or Ethernet Extension (default: {0})'.format(BRICKD_HOST))
parser.add_argument('--brickd-port', dest='brickd_port', type=int, default=BRICKD_PORT,
help='port number of Brick Daemon, WIFI or Ethernet Extension (default: {0})'.format(BRICKD_PORT))
parser.add_argument('--broker-host', dest='broker_host', type=str, default=BROKER_HOST,
help='hostname or IP address of MQTT broker (default: {0})'.format(BROKER_HOST))
parser.add_argument('--broker-port', dest='broker_port', type=int, default=BROKER_PORT,
help='port number of MQTT broker (default: {0})'.format(BROKER_PORT))
parser.add_argument('--broker-username', dest='broker_username', type=str, default=None,
help='username for the MQTT broker connection')
parser.add_argument('--broker-password', dest='broker_password', type=str, default=None,
help='password for the MQTT broker connection')
parser.add_argument('--broker-certificate', dest='broker_certificate', type=str, default=None,
help='Certificate Authority certificate file used for SSL/TLS connections')
parser.add_argument('--broker-tls-insecure', dest='broker_tls_insecure', action='store_true',
help='disable verification of the server hostname in the server certificate for the MQTT broker connection')
parser.add_argument('--update-interval', dest='update_interval', type=parse_positive_int, default=UPDATE_INTERVAL,
help='update interval in seconds (default: {0})'.format(UPDATE_INTERVAL))
parser.add_argument('--global-topic-prefix', dest='global_topic_prefix', type=str, default=GLOBAL_TOPIC_PREFIX,
help='global MQTT topic prefix for this proxy instance (default: {0})'.format(GLOBAL_TOPIC_PREFIX))
parser.add_argument('--debug', dest='debug', action='store_true', help='enable debug output')
args = parser.parse_args(sys.argv[1:])
if args.broker_username is None and args.broker_password is not None:
parser.error('--broker-password cannot be used without --broker-username')
if args.debug:
logging.basicConfig(level=logging.DEBUG)
global_topic_prefix = args.global_topic_prefix
if len(global_topic_prefix) > 0 and not global_topic_prefix.endswith('/'):
global_topic_prefix += '/'
proxy = Proxy(args.brickd_host, args.brickd_port, args.broker_host,
args.broker_port, args.broker_username, args.broker_password,
args.broker_certificate, args.broker_tls_insecure,
args.update_interval, global_topic_prefix)
proxy.connect()
| gpl-2.0 | -7,394,515,960,011,033,000 | 51.783024 | 307 | 0.604185 | false |
open-cloud/xos | lib/xos-genx/xosgenx/generator.py | 2 | 14235 | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import os
import jinja2
import plyxproto.parser as plyxproto
import yaml
from colorama import Fore
import sys
from . import jinja2_extensions
from .proto2xproto import Proto2XProto
from .xos2jinja import XOS2Jinja
from .validator import XProtoValidator
loader = jinja2.PackageLoader(__name__, "templates")
env = jinja2.Environment(loader=loader)
class XOSProcessorArgs:
""" Helper class for use cases that want to call XOSProcessor directly, rather than executing xosgenx from the
command line.
"""
default_rev = False
default_output = None
default_attic = None
default_kvpairs = None
default_write_to_file = None
default_dest_file = None
default_dest_extension = None
default_target = None
default_checkers = None
default_verbosity = (
0
) # Higher numbers = more verbosity, lower numbers = less verbosity
default_include_models = (
[]
) # If neither include_models nor include_apps is specified, then all models will
default_include_apps = [] # be included.
default_strict_validation = False
default_lint = False
def __init__(self, **kwargs):
# set defaults
self.rev = XOSProcessorArgs.default_rev
self.output = XOSProcessorArgs.default_output
self.attic = XOSProcessorArgs.default_attic
self.kvpairs = XOSProcessorArgs.default_kvpairs
self.verbosity = XOSProcessorArgs.default_verbosity
self.write_to_file = XOSProcessorArgs.default_write_to_file
self.default_dest_file = XOSProcessorArgs.default_dest_file
self.default_dest_extension = XOSProcessorArgs.default_dest_extension
self.default_target = XOSProcessorArgs.default_target
        self.default_checkers = XOSProcessorArgs.default_checkers
self.include_models = XOSProcessorArgs.default_include_models
self.include_apps = XOSProcessorArgs.default_include_apps
self.strict_validation = XOSProcessorArgs.default_strict_validation
self.lint = XOSProcessorArgs.default_lint
# override defaults with kwargs
for (k, v) in kwargs.items():
setattr(self, k, v)
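    # Minimal programmatic use (sketch; file, target and output names are hypothetical):
    #   args = XOSProcessorArgs(files=["core.xproto"], target="python.xtarget",
    #                           output="/tmp/out", write_to_file="single",
    #                           dest_file="models.py")
    #   XOSProcessor.process(args)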
class XOSProcessor:
@staticmethod
def _read_input_from_files(files):
""" Read the files and return the combined text read.
Also returns a list of (line_number, filename) tuples that tell which
starting line corresponds to each file.
"""
line_map = []
input = ""
for fname in files:
with open(fname) as infile:
line_map.append((len(input.split("\n")), fname))
input += infile.read()
return (input, line_map)
@staticmethod
def _attach_parser(ast, args):
if hasattr(args, "rev") and args.rev:
v = Proto2XProto()
ast.accept(v)
v = XOS2Jinja(args)
ast.accept(v)
return v
@staticmethod
def _get_template(target):
if not os.path.isabs(target):
return os.path.abspath(
os.path.dirname(os.path.realpath(__file__)) + "/targets/" + target
)
return target
@staticmethod
def _file_exists(attic):
# NOTE this method can be used in the jinja template
def file_exists2(name):
if attic is not None:
path = attic + "/" + name
else:
path = name
return os.path.exists(path)
return file_exists2
@staticmethod
def _include_file(attic):
# NOTE this method can be used in the jinja template
def include_file2(name):
if attic is not None:
path = attic + "/" + name
else:
path = name
return open(path).read()
return include_file2
@staticmethod
def _load_jinja2_extensions(os_template_env, attic):
os_template_env.globals["include_file"] = XOSProcessor._include_file(
attic
) # Generates a function
os_template_env.globals["file_exists"] = XOSProcessor._file_exists(
attic
) # Generates a function
os_template_env.filters["yaml"] = yaml.dump
for f in dir(jinja2_extensions):
if f.startswith("xproto"):
os_template_env.globals[f] = getattr(jinja2_extensions, f)
return os_template_env
@staticmethod
def _add_context(args):
if not hasattr(args, "kv") or not args.kv:
return
try:
context = {}
for s in args.kv.split(","):
k, val = s.split(":")
context[k] = val
return context
except Exception as e:
print(e)
@staticmethod
def _write_single_file(rendered, dir, dest_file, quiet):
file_name = "%s/%s" % (dir, dest_file)
file = open(file_name, "w")
file.write(rendered)
file.close()
if not quiet:
print("Saved: %s" % file_name)
@staticmethod
def _write_split_target(rendered, dir, quiet):
lines = rendered.splitlines()
current_buffer = []
for line in lines:
if line.startswith("+++"):
if dir:
path = dir + "/" + line[4:].lower()
fil = open(path, "w")
buf = "\n".join(current_buffer)
obuf = buf
fil.write(obuf)
fil.close()
if not quiet:
print("Save file to: %s" % path)
current_buffer = []
else:
current_buffer.append(line)
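    # _write_split_target expects the rendered template to mark file boundaries with lines
    # of the form "+++ <relative/path>"; the lines buffered since the previous marker are
    # written to that path (sketch):
    #   ...generated code for models...
    #   +++ models.py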
@staticmethod
def _find_message_by_model_name(messages, model):
return next((x for x in messages if x["name"] == model), None)
@staticmethod
def _find_last_nonempty_line(text, pointer):
ne_pointer = pointer
found = False
while ne_pointer != 0 and not found:
ne_pointer = text[: (ne_pointer - 1)].rfind("\n")
if ne_pointer < 0:
ne_pointer = 0
if text[ne_pointer - 1] != "\n":
found = True
return ne_pointer
@staticmethod
def process(args, operator=None):
# Setting defaults
if not hasattr(args, "attic"):
args.attic = None
if not hasattr(args, "write_to_file"):
args.write_to_file = None
if not hasattr(args, "dest_file"):
args.dest_file = None
if not hasattr(args, "dest_extension"):
args.dest_extension = None
if not hasattr(args, "output"):
args.output = None
if not hasattr(args, "quiet"):
args.quiet = True
# Validating
if args.write_to_file == "single" and args.dest_file is None:
raise Exception(
"[XosGenX] write_to_file option is specified as 'single' but no dest_file is provided"
)
if args.write_to_file == "model" and (args.dest_extension is None):
raise Exception(
"[XosGenX] write_to_file option is specified as 'model' but no dest_extension is provided"
)
if args.output is not None and not os.path.isabs(args.output):
raise Exception("[XosGenX] The output dir (%s) must be an absolute path!" % args.output)
if args.output is not None and not os.path.isdir(args.output):
raise Exception("[XosGenX] The output dir (%s) must be a directory!" % args.output)
if hasattr(args, "files"):
(inputs, line_map) = XOSProcessor._read_input_from_files(args.files)
elif hasattr(args, "inputs"):
inputs = args.inputs
line_map = []
else:
raise Exception("[XosGenX] No inputs provided!")
context = XOSProcessor._add_context(args)
parser = plyxproto.ProtobufAnalyzer()
try:
ast = parser.parse_string(inputs, debug=0)
except plyxproto.ParsingError as e:
if e.message:
error = e.message
else:
error = "xproto parsing error"
if e.error_range is None:
# No line number information
print(error + "\n")
else:
line, start, end = e.error_range
ptr = XOSProcessor._find_last_nonempty_line(inputs, start)
if start == 0:
beginning = ""
else:
beginning = inputs[ptr: start - 1]
                line_end_char = inputs[start + end:].find("\n")
                line_end = inputs[start + end:start + end + line_end_char]  # remainder of the offending line
print(error + "\n" + Fore.YELLOW + "Line %d:" % line + Fore.WHITE)
print(
beginning
+ Fore.YELLOW
+ inputs[start - 1: start + end]
+ Fore.WHITE
+ line_end
)
exit(1)
v = XOSProcessor._attach_parser(ast, args)
if args.include_models or args.include_apps:
for message in v.messages:
message["is_included"] = False
if message["name"] in args.include_models:
message["is_included"] = True
else:
app_label = (
message.get("options", {})
.get("app_label")
.strip('"')
)
if app_label in args.include_apps:
message["is_included"] = True
else:
for message in v.messages:
message["is_included"] = True
validator = XProtoValidator(v.models, line_map)
validator.validate()
if validator.errors:
if args.strict_validation or (args.verbosity >= 0):
validator.print_errors()
fatal_errors = [x for x in validator.errors if x["severity"] == "ERROR"]
if fatal_errors and args.strict_validation:
sys.exit(-1)
if args.lint:
return ""
if not operator:
operator = args.target
template_path = XOSProcessor._get_template(operator)
else:
template_path = operator
[template_folder, template_name] = os.path.split(template_path)
os_template_loader = jinja2.FileSystemLoader(searchpath=[template_folder])
os_template_env = jinja2.Environment(loader=os_template_loader)
os_template_env = XOSProcessor._load_jinja2_extensions(
os_template_env, args.attic
)
template = os_template_env.get_template(template_name)
if args.output is not None and args.write_to_file == "model":
# Handle the case where each model is written to a separate python file.
rendered = {}
for i, model in enumerate(v.models):
model_dict = v.models[model]
messages = [XOSProcessor._find_message_by_model_name(v.messages, model)]
rendered[model] = template.render(
{
"proto": {
"message_table": {model: model_dict},
"messages": messages,
"policies": v.policies,
"message_names": [m["name"] for m in v.messages],
},
"context": context,
"options": v.options,
}
)
if not rendered[model]:
print("Not saving model %s as it is empty" % model, file=sys.stderr)
else:
legacy = jinja2_extensions.base.xproto_list_evaluates_true(
[model_dict.get("options", {}).get("custom_python", None),
model_dict.get("options", {}).get("legacy", None),
v.options.get("custom_python", None),
v.options.get("legacy", None)])
if legacy:
file_name = "%s/%s_decl.%s" % (args.output, model.lower(), args.dest_extension)
else:
file_name = "%s/%s.%s" % (args.output, model.lower(), args.dest_extension)
file = open(file_name, "w")
file.write(rendered[model])
file.close()
if not args.quiet:
print("Saved: %s" % file_name, file=sys.stderr)
else:
# Handle the case where all models are written to the same python file.
rendered = template.render(
{
"proto": {
"message_table": v.models,
"messages": v.messages,
"policies": v.policies,
"message_names": [m["name"] for m in v.messages],
},
"context": context,
"options": v.options,
}
)
if args.output is not None and args.write_to_file == "target":
XOSProcessor._write_split_target(rendered, args.output, args.quiet)
elif args.output is not None and args.write_to_file == "single":
XOSProcessor._write_single_file(
rendered, args.output, args.dest_file, args.quiet
)
return rendered
| apache-2.0 | -6,051,380,843,218,182,000 | 34.5875 | 114 | 0.539867 | false |
vzer/ToughRADIUS | toughradius/console/admin/param_forms.py | 1 | 4870 | #coding:utf-8
from toughradius.console.libs import pyforms
from toughradius.console.libs.pyforms import dataform
from toughradius.console.libs.pyforms import rules
from toughradius.console.libs.pyforms.rules import button_style,input_style
boolean = {0: u"No", 1: u"Yes"}
booleans = {'0': u"No", '1': u"Yes"}
bool_bypass = {'0': u"Password-free authentication", '1': u"Enforce password authentication"}
sys_form = pyforms.Form(
    pyforms.Textbox("system_name", description=u"Management system name", help=u"Name of the management system; customize it to suit your deployment", **input_style),
    pyforms.Textbox("customer_system_name", description=u"Self-service system name", **input_style),
    pyforms.Textbox("customer_system_url", description=u"Self-service system website URL", **input_style),
    pyforms.Dropdown("online_support", args=booleans.items(), description=u"Enable online support", help=u"When enabled, you can send feedback to the ToughRADIUS development team at any time", **input_style),
    pyforms.Dropdown("is_debug", args=booleans.items(), description=u"Enable DEBUG", help=u"When enabled, more detailed system log records are produced", **input_style),
    pyforms.Button("submit", type="submit", html=u"<b>Update</b>", **button_style),
    title=u"Parameter configuration management",
    action="/param/update?active=syscfg"
)
serv_form = pyforms.Form(
    pyforms.Dropdown("customer_must_active", args=booleans.items(), description=u"Require email activation before self-service signup and recharge", **input_style),
    pyforms.Textbox("weixin_qrcode", description=u"WeChat official account QR code image (width 230px)", **input_style),
    pyforms.Textbox("service_phone", description=u"Customer service phone number", **input_style),
    pyforms.Textbox("service_qq", description=u"Customer service QQ number", **input_style),
    pyforms.Textbox("rcard_order_url", description=u"Recharge card order website URL", **input_style),
    pyforms.Button("submit", type="submit", html=u"<b>Update</b>", **button_style),
    title=u"Parameter configuration management",
    action="/param/update?active=servcfg"
)
notify_form = pyforms.Form(
    pyforms.Textbox("expire_notify_days", rules.is_number, description=u"Days in advance to send expiry notifications", **input_style),
    pyforms.Textarea("expire_notify_tpl", description=u"Expiry notification email template", rows=5, **input_style),
    pyforms.Textbox("expire_notify_url", description=u"Expiry notification trigger URL", **input_style),
    pyforms.Textbox("expire_session_timeout", description=u"Maximum session timeout issued to expired users (seconds)", **input_style),
    pyforms.Textbox("expire_addrpool", description=u"Address pool issued to expired users", **input_style),
    pyforms.Button("submit", type="submit", html=u"<b>Update</b>", **button_style),
    title=u"Parameter configuration management",
    action="/param/update?active=notifycfg"
)
mail_form = pyforms.Form(
    pyforms.Textbox("smtp_server", description=u"SMTP server", **input_style),
    pyforms.Textbox("smtp_user", description=u"SMTP username", **input_style),
    pyforms.Textbox("smtp_pwd", description=u"SMTP password", help=u"If no password is required, enter none", **input_style),
    # pyforms.Textbox("smtp_sender", description=u"SMTP sender name", **input_style),
    pyforms.Button("submit", type="submit", html=u"<b>Update</b>", **button_style),
    title=u"Parameter configuration management",
    action="/param/update?active=mailcfg"
)
rad_form = pyforms.Form(
    pyforms.Dropdown("radiusd_bypass", args=bool_bypass.items(), description=u"RADIUS authentication mode", **input_style),
    pyforms.Dropdown("allow_show_pwd", args=booleans.items(), description=u"Allow querying user passwords", **input_style),
    pyforms.Textbox("radiusd_address", description=u"RADIUS service address", help=u"The real IP address or domain name of the RADIUS server", **input_style),
    pyforms.Textbox("radiusd_admin_port", rules.is_number, description=u"RADIUS service admin port", help=u"Defaults to 1815; this port provides management API functions", **input_style),
    pyforms.Textbox("acct_interim_intelval", rules.is_number, description=u"RADIUS accounting interim interval (seconds)", help=u"Global accounting interval sent to the BAS device; ignored if the BAS does not support it", **input_style),
    pyforms.Textbox("max_session_timeout", rules.is_number, description=u"RADIUS maximum session timeout (seconds)", help=u"Online users are disconnected automatically once the maximum session timeout is reached", **input_style),
    pyforms.Textbox("reject_delay", rules.is_number, description=u"Reject delay (seconds, 0-9)", help=u"Interval by which reject replies are delayed, to mitigate DDoS attacks", **input_style),
    pyforms.Dropdown("auth_auto_unlock", args=booleans.items(), description=u"Auto-unlock concurrent sessions", help=u"If an account is stuck online, it is kicked offline automatically at authentication time", **input_style),
    # pyforms.Textbox("portal_secret", description=u"Portal login secret", **input_style),
    pyforms.Button("submit", type="submit", html=u"<b>Update</b>", **button_style),
    title=u"Parameter configuration management",
    action="/param/update?active=radcfg"
)
| agpl-3.0 | -5,280,956,592,056,908,000 | 51.545455 | 148 | 0.712061 | false |
deepmind/dm-haiku | haiku/_src/embed_test.py | 1 | 3751 | # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.embed."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import embed
from haiku._src import test_utils
import jax.numpy as jnp
import numpy as np
_EMBEDDING_MATRIX = np.asarray([
[0.0, 0.0, 0.0, 0.0],
[0.5, 0.5, 0.5, 0.5],
[0.1, 0.2, 0.3, 0.4]
])
_1D_IDS = [0, 2] # pylint: disable=invalid-name
_2D_IDS = [[0, 2], [2, 2]] # pylint: disable=invalid-name
_3D_IDS = [[[0, 2], [2, 2]], [[1, 1], [0, 2]]] # pylint: disable=invalid-name
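# An embedding lookup preserves the id array's shape and appends the embedding dimension:
# e.g. _2D_IDS of shape (2, 2) looked up in the (3, 4) matrix above gives shape (2, 2, 4).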
class EmbedTest(parameterized.TestCase):
@parameterized.parameters(
itertools.product(["ARRAY_INDEX", "ONE_HOT"],
[_1D_IDS, _2D_IDS, _3D_IDS]))
@test_utils.transform_and_run
def test_lookup(self, lookup_style, inp_ids):
emb = embed.Embed(embedding_matrix=_EMBEDDING_MATRIX,
lookup_style=lookup_style)
np.testing.assert_allclose(
emb(inp_ids),
jnp.asarray(_EMBEDDING_MATRIX)[jnp.asarray(inp_ids)])
self.assertEqual(
list(emb(inp_ids).shape),
list(jnp.asarray(_EMBEDDING_MATRIX)[jnp.asarray(inp_ids)].shape))
@parameterized.parameters("ARRAY_INDEX", "ONE_HOT")
@test_utils.transform_and_run
def test_default_creation(self, lookup_style):
emb = embed.Embed(vocab_size=6, embed_dim=12, lookup_style=lookup_style)
self.assertEqual(emb(_1D_IDS).shape, (2, 12))
@test_utils.transform_and_run
def test_no_creation_args(self):
with self.assertRaisesRegex(ValueError, "must be supplied either with an"):
embed.Embed()
@test_utils.transform_and_run
def test_inconsistent_creation_args(self):
with self.assertRaisesRegex(ValueError, "supplied but the `vocab_size`"):
embed.Embed(embedding_matrix=_EMBEDDING_MATRIX, vocab_size=4)
with self.assertRaisesRegex(ValueError, "supplied but the `embed_dim`"):
embed.Embed(embedding_matrix=_EMBEDDING_MATRIX, embed_dim=5)
@test_utils.transform_and_run
def test_embed_dtype_check(self):
emb = embed.Embed(
embedding_matrix=_EMBEDDING_MATRIX, lookup_style="ARRAY_INDEX")
with self.assertRaisesRegex(
ValueError,
"hk.Embed's __call__ method must take an array of integer dtype but "
"was called with an array of float32"):
emb([1.0, 2.0])
@test_utils.transform_and_run
def test_embed_invalid_lookup(self):
lookup_style = "FOO"
emb = embed.Embed(embedding_matrix=_EMBEDDING_MATRIX, lookup_style="FOO")
with self.assertRaisesRegex(AttributeError, lookup_style):
emb(_1D_IDS)
@test_utils.transform_and_run
def test_embed_property_check(self):
lookup_style = "ONE_HOT"
emb = embed.Embed(
embedding_matrix=_EMBEDDING_MATRIX, lookup_style=lookup_style)
self.assertEqual(emb.vocab_size, 3)
self.assertEqual(emb.embed_dim, 4)
np.testing.assert_allclose(
emb.embeddings,
jnp.asarray([[0., 0., 0., 0.], [0.5, 0.5, 0.5, 0.5],
[0.1, 0.2, 0.3, 0.4]]))
if __name__ == "__main__":
absltest.main()
| apache-2.0 | -4,315,887,944,373,415,400 | 34.056075 | 80 | 0.656625 | false |
hasgeek/lastuser | lastuser_core/models/helpers.py | 1 | 2867 | # -*- coding: utf-8 -*-
from .user import (
USER_STATUS,
User,
UserEmail,
UserEmailClaim,
UserExternalId,
UserOldId,
db,
)
__all__ = ['getuser', 'getextid', 'merge_users']
def getuser(name):
if '@' in name:
# TODO: This should have used UserExternalId.__at_username_services__,
# but this bit has traditionally been for Twitter only. Fix pending.
if name.startswith('@'):
extid = UserExternalId.get(service='twitter', username=name[1:])
if extid and extid.user.is_active:
return extid.user
else:
return None
else:
useremail = UserEmail.get(email=name)
if useremail and useremail.user is not None and useremail.user.is_active:
return useremail.user
# No verified email id. Look for an unverified id; return first found
result = UserEmailClaim.all(email=name).first()
if result and result.user.is_active:
return result.user
return None
else:
return User.get(username=name)
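# getuser() therefore accepts three kinds of identifiers (sketch; values are hypothetical):
#   getuser('@jack')              -> Twitter external-id lookup
#   getuser('[email protected]')  -> verified email, then unverified email claims
#   getuser('jack')               -> plain username lookup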
def getextid(service, userid):
return UserExternalId.get(service=service, userid=userid)
def merge_users(user1, user2):
"""
Merge two user accounts and return the new user account.
"""
# Always keep the older account and merge from the newer account
if user1.created_at < user2.created_at:
keep_user, merge_user = user1, user2
else:
keep_user, merge_user = user2, user1
# 1. Release the username
if not keep_user.username:
if merge_user.username:
# Flush before re-assigning to avoid dupe name constraint
username = merge_user.username
merge_user.username = None
db.session.flush()
keep_user.username = username
merge_user.username = None
# 2. Inspect all tables for foreign key references to merge_user and switch to keep_user.
for model in db.Model.__subclasses__():
if model != User:
# a. This is a model and it's not the User model. Does it have a migrate_user classmethod?
if hasattr(model, 'migrate_user'):
model.migrate_user(olduser=merge_user, newuser=keep_user)
# b. No migrate_user? Does it have a user_id column?
elif hasattr(model, 'user_id') and hasattr(model, 'query'):
for row in model.query.filter_by(user_id=merge_user.id).all():
row.user_id = keep_user.id
# 3. Add merge_user's uuid to olduserids. Commit session.
db.session.add(UserOldId(id=merge_user.uuid, user=keep_user))
# 4. Mark merge_user as merged. Commit session.
merge_user.status = USER_STATUS.MERGED
# 5. Commit all of this
db.session.commit()
# 6. Return keep_user.
return keep_user
| bsd-2-clause | -5,985,482,440,712,031,000 | 34.395062 | 102 | 0.611789 | false |
devnev/sesspy | setup.py | 1 | 1354 | from setuptools import setup, find_packages
setup(
name = "sesspy",
version = "0.3.1",
url = "http://nevill.ch/sesspy/",
packages = find_packages(exclude=['tests']),
install_requires = [],
extras_require = {
"sqlalchemy": ["sqlalchemy"],
},
package_data = {
'': [
'/COPYING', '/COPYING.LESSER',
'/README.rst',
],
},
include_package_data=True,
test_suite = "tests",
tests_require = ["mock", "sqlalchemy"],
author = "Mark Nevill",
author_email = "[email protected]",
description = "Session/Transaction Management and Dependency Injection",
license = "LGPLv3",
keywords = "session transaction dependency injection",
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Testing",
],
)
| gpl-3.0 | 6,859,801,116,403,127,000 | 32.02439 | 89 | 0.584934 | false |
AndyDeany/pygame-template | pygametemplate/console.py | 1 | 1213 | from pygametemplate.hotkey import Hotkey
class Console(object):
def __init__(self, game, *, toggle_fps_hotkey=None):
"""Create a Console instance.
`toggle_fps_hotkey` defaults to Ctrl+F.
"""
self.game = game
toggle_fps_hotkey = toggle_fps_hotkey or Hotkey(self.game, "f", ctrl=True)
self.font = self.game.pygame.font.SysFont("consolas", 15)
self.text_colour = (255, 255, 255) # white
self.show_fps = False
self.fps_coordinates = (game.width - self.font.size("FPS: 000")[0], 0)
self.hotkeys = { # (hotkey condition, function)
"toggle fps": (toggle_fps_hotkey.pressed, self.toggle_fps)
}
def logic(self):
for condition, function in self.hotkeys.values():
if condition():
function()
def draw(self):
if self.show_fps:
self.display_fps()
def toggle_fps(self):
self.show_fps = not self.show_fps
def display_fps(self):
fps_text = "FPS: {}".format(int(self.game.clock.get_fps()))
fps_image = self.font.render(fps_text, True, self.text_colour)
self.game.screen.blit(fps_image, self.fps_coordinates)
| mit | -2,817,762,012,081,443,000 | 30.102564 | 82 | 0.588623 | false |
CJnZ/Unblock-Youku | dns-reverse-proxy/url-list-manager.py | 10 | 5917 | # vim:fileencoding=utf-8:sw=4:et:syntax=python
# Update url_list online
https = require("https")
http = require("http")
fs = require("fs")
murl = require("url")
shared_urls = require("../shared/urls.js")
lutils = require("./lutils")
log = lutils.logger
def require_str(content, fname):
"""require() call using string content"""
Module = JS("module.constructor")
m = new Module()
m._compile(content, fname)
return m.exports
class RemoteRequire:
def __init__(self, headers):
"""require() call for remote javascript file. """
self.cache = {}
self.headers = {
"Accept": "*/*",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:29.0) Gecko/20100101 Firefox/29.0",
"Accept-encoding": "gzip,deflate",
"Accept-Language": "en-US,en;q=0.5",
"DNT": "-1",
}
if headers:
for k in headers:
self.headers[k] = headers[k]
def require(self, uri, callback, force=False):
#log.info("cache: ", self.cache)
urlobj = murl.parse(uri)
options = {
hostname: urlobj.hostname,
Host: urlobj.host,
port: urlobj.port,
path: urlobj.path,
method: "GET",
}
#log.debug(options)
options.headers = self.headers
cinfo = self.cache[uri]
if not force and cinfo and cinfo["etag"]:
options.headers["If-None-Match"] = cinfo["etag"]
#req = http.request(options)
if urlobj.protocol == "https:":
req = https.request(options)
else:
req = http.request(options)
info = {
data: "",
changed: True,
fname: urlobj.pathname,
module: None,
etag: None,
}
def _on_data(chunk):
info.data += chunk
def _on_end():
if info.data.length is 0: return
self.load_data_info(uri, info, callback)
def _on_response(res):
#log.debug("RemoteRequire:", res.statusCode,
# res.req._headers, res.headers)
nonlocal info
if res.statusCode == 200:
output_stream = lutils.create_decompress_stream(res)
output_stream.on("data", _on_data)
output_stream.on("end", _on_end)
if res.headers["etag"]:
info["etag"] = res.headers["etag"]
elif res.statusCode == 304: # not changed
log.debug("RemoteRequire: Status 304")
info = self.cache[uri]
info.changed = False
self.load_data_info(uri, info, callback)
def _on_error(e):
log.error("RemoteRequire: ", e, uri)
req.on("response", _on_response)
req.on("error", _on_error)
req.end()
def load_data_info(self, uri, info, callback):
"""Load info as module and pass to callback.
@info: map with keys:
data: content of uri response as string
changed: if the uri changed, status code 304
fname: filename of uri
module: cached module
etag: etag of the uri response
"""
if info.changed:
mod = require_str(info.data, info.fname)
info.module = mod
del info.data
self.cache[uri] = info
if isinstance(callback, Function):
callback(info.module, info.changed)
class URLListReloader:
def __init__(self, extra_list_fname):
"""Reload url_list from remote host(github), periodically"""
self.url = "https://raw.githubusercontent.com/zhuzhuor/Unblock-Youku/master/shared/urls.js"
self.timeout = 12*60*60*1000 # 12 hours
#self.timeout = 30*1000 # debug
self.update_timer = None
self.extra_list_fname = extra_list_fname
self.rrequire = RemoteRequire()
def start(self, timeout):
"""start reload timer
@timeout: optional in millisec"""
timeout_l = timeout or self.timeout
def _on_interval():
self.do_reload()
self.update_timer = setInterval(_on_interval, timeout_l)
self.update_timer.unref()
def do_reload(self):
"""Reload urls.js from remote host"""
def _on_required(mod, changed):
log.debug("URLListReloader change status:", changed)
if changed:
log.info("URLListReloader: url_list changed",
Date().toString() )
shared_urls.url_list = mod.url_list
shared_urls.url_regex_list = mod.url_regex_list
shared_urls.url_whitelist = mod.url_whitelist
shared_urls.url_regex_whitelist = mod.url_regex_whitelist
exfname = self.extra_list_fname
if exfname and fs.existsSync(exfname):
lutils.load_extra_url_list(self.extra_list_fname)
self.rrequire.require(self.url, _on_required)
def stop(self):
clearInterval(self.update_timer)
def createURLListReloader(extra_list_fname):
rr = URLListReloader(extra_list_fname)
return rr
def main():
rl = createURLListReloader()
rl.start(10*1000)
rl.update_timer.ref()
def main2():
rr = RemoteRequire()
u = "https://raw.githubusercontent.com/zhuzhuor/Unblock-Youku/master/shared/urls.js"
#u = "http://slashdot.org/"
n = 0
def _on_load(mod, changed):
nonlocal n
n += 1
log.info("url_list:", mod.url_list, changed, n, Date().toString())
if n < 2:
rr.require(u, _on_load)
rr.require(u, _on_load)
if require.main is JS("module"):
main2()
exports.createURLListReloader = createURLListReloader
| agpl-3.0 | 7,555,295,378,061,583,000 | 31.872222 | 107 | 0.545209 | false |
TheWardoctor/Wardoctors-repo | plugin.video.saltsrd.lite/default.py | 1 | 142269 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import random
import sys
import os
import re
import datetime
import time
import shutil
import xbmcplugin
import xbmcgui
import xbmc
import xbmcvfs
import json
import kodi
import log_utils
import utils
from url_dispatcher import URL_Dispatcher
from salts_lib.db_utils import DB_Connection, DatabaseRecoveryError
from salts_lib.srt_scraper import SRT_Scraper
from salts_lib.trakt_api import Trakt_API, TransientTraktError, TraktNotFoundError, TraktError, TraktAuthError
from salts_lib import salts_utils
from salts_lib import utils2
from salts_lib import gui_utils
from salts_lib import strings
from salts_lib import worker_pool
from salts_lib import image_scraper
from salts_lib.constants import * # @UnusedWildImport
from salts_lib.utils2 import i18n
from scrapers import * # import all scrapers into this namespace @UnusedWildImport
from scrapers import ScraperVideo
try:
import urlresolver
except:
kodi.notify(msg=i18n('smu_failed'), duration=5000)
logger = log_utils.Logger.get_logger()
TOKEN = kodi.get_setting('trakt_oauth_token')
use_https = kodi.get_setting('use_https') == 'true'
trakt_timeout = int(kodi.get_setting('trakt_timeout'))
list_size = int(kodi.get_setting('list_size'))
OFFLINE = kodi.get_setting('trakt_offline') == 'true'
trakt_api = Trakt_API(TOKEN, use_https, list_size, trakt_timeout, OFFLINE)
url_dispatcher = URL_Dispatcher()
@url_dispatcher.register(MODES.MAIN)
def main_menu():
db_connection.init_database(None)
if kodi.get_setting('auto-disable') != DISABLE_SETTINGS.OFF:
salts_utils.do_disable_check()
kodi.create_item({'mode': MODES.BROWSE, 'section': SECTIONS.MOVIES}, i18n('movies'), thumb=utils2.art('movies.png'), fanart=utils2.art('fanart.jpg'))
kodi.create_item({'mode': MODES.BROWSE, 'section': SECTIONS.TV}, i18n('tv_shows'), thumb=utils2.art('television.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('settings'): kodi.create_item({'mode': MODES.SETTINGS}, i18n('settings'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'))
if TOKEN:
profile = trakt_api.get_user_profile()
kodi.set_setting('trakt_user', '%s (%s)' % (profile['username'], profile['name']))
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
@url_dispatcher.register(MODES.SETTINGS)
def settings_menu():
kodi.create_item({'mode': MODES.SCRAPERS}, i18n('scraper_sort_order'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'))
kodi.create_item({'mode': MODES.RES_SETTINGS}, i18n('url_resolver_settings'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
kodi.create_item({'mode': MODES.ADDON_SETTINGS}, i18n('addon_settings'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
# kodi.create_item({'mode': MODES.AUTO_CONF}, i18n('auto_config'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
kodi.create_item({'mode': MODES.RESET_BASE_URL}, i18n('reset_base_url'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
kodi.create_item({'mode': MODES.AUTH_TRAKT}, i18n('auth_salts'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
kodi.create_item({'mode': MODES.REPAIR_URLRESOLVER}, i18n('repair_urlresolver'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'))
kodi.create_item({'mode': MODES.SHOW_VIEWS}, i18n('set_default_views'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'))
kodi.create_item({'mode': MODES.BROWSE_URLS}, i18n('remove_cached_urls'), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'))
    kodi.create_item({'mode': MODES.SETTINGS}, 'This is a \'Lite MOD\' of the original SALTS (I\'m an orphan, are you my new DEV?)', thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'))
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
@url_dispatcher.register(MODES.BROWSE, ['section'])
def browse_menu(section):
section_params = utils2.get_section_params(section)
section_label = section_params['label_plural']
section_label2 = section_params['label_single']
if utils2.menu_on('trending'): kodi.create_item({'mode': MODES.TRENDING, 'section': section}, i18n('trending') % (section_label), thumb=utils2.art('trending.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('popular'): kodi.create_item({'mode': MODES.POPULAR, 'section': section}, i18n('popular') % (section_label), thumb=utils2.art('popular.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('anticipated'): kodi.create_item({'mode': MODES.ANTICIPATED, 'section': section}, i18n('anticipated') % (section_label), thumb=utils2.art('anticipated.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('recent'): kodi.create_item({'mode': MODES.RECENT, 'section': section}, i18n('recently_updated') % (section_label), thumb=utils2.art('recent.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('mosts'): kodi.create_item({'mode': MODES.MOSTS, 'section': section}, i18n('mosts') % (section_label2), thumb=utils2.art('mosts.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('genres'): kodi.create_item({'mode': MODES.GENRES, 'section': section}, i18n('genres'), thumb=utils2.art('genres.png'), fanart=utils2.art('fanart.jpg'))
add_section_lists(section)
if TOKEN:
if utils2.menu_on('on_deck'): kodi.create_item({'mode': MODES.SHOW_BOOKMARKS, 'section': section}, i18n('trakt_on_deck'), thumb=utils2.art('on_deck.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('recommended'): kodi.create_item({'mode': MODES.RECOMMEND, 'section': section}, i18n('recommended') % (section_label), thumb=utils2.art('recommended.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('collection'): add_refresh_item({'mode': MODES.SHOW_COLLECTION, 'section': section}, i18n('my_collection') % (section_label), utils2.art('collection.png'), utils2.art('fanart.jpg'))
if utils2.menu_on('history'): kodi.create_item({'mode': MODES.SHOW_HISTORY, 'section': section}, i18n('watched_history'), thumb=utils2.art('watched_history.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('favorites'): kodi.create_item({'mode': MODES.SHOW_FAVORITES, 'section': section}, i18n('my_favorites'), thumb=utils2.art('my_favorites.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('subscriptions'): add_refresh_item({'mode': MODES.MANAGE_SUBS, 'section': section}, i18n('my_subscriptions'), utils2.art('my_subscriptions.png'), utils2.art('fanart.jpg'))
if utils2.menu_on('watchlist'): add_refresh_item({'mode': MODES.SHOW_WATCHLIST, 'section': section}, i18n('my_watchlist'), utils2.art('my_watchlist.png'), utils2.art('fanart.jpg'))
if utils2.menu_on('my_lists'): kodi.create_item({'mode': MODES.MY_LISTS, 'section': section}, i18n('my_lists'), thumb=utils2.art('my_lists.png'), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('liked_lists'): add_refresh_item({'mode': MODES.LIKED_LISTS, 'section': section}, i18n('liked_lists'), utils2.art('liked_lists.png'), utils2.art('fanart.jpg'))
if utils2.menu_on('other_lists'): kodi.create_item({'mode': MODES.OTHER_LISTS, 'section': section}, i18n('other_lists'), thumb=utils2.art('other_lists.png'), fanart=utils2.art('fanart.jpg'))
if section == SECTIONS.TV:
if TOKEN:
if utils2.menu_on('progress'): add_refresh_item({'mode': MODES.SHOW_PROGRESS}, i18n('my_next_episodes'), utils2.art('my_progress.png'), utils2.art('fanart.jpg'))
if utils2.menu_on('rewatch'): add_refresh_item({'mode': MODES.SHOW_REWATCH}, i18n('my_rewatches'), utils2.art('my_rewatch.png'), utils2.art('fanart.jpg'))
if utils2.menu_on('my_cal'): add_refresh_item({'mode': MODES.MY_CAL}, i18n('my_calendar'), utils2.art('my_calendar.png'), utils2.art('fanart.jpg'))
if utils2.menu_on('general_cal'): add_refresh_item({'mode': MODES.CAL}, i18n('general_calendar'), utils2.art('calendar.png'), utils2.art('fanart.jpg'))
if utils2.menu_on('premiere_cal'): add_refresh_item({'mode': MODES.PREMIERES}, i18n('premiere_calendar'), utils2.art('premiere_calendar.png'), utils2.art('fanart.jpg'))
if utils2.menu_on('search'): kodi.create_item({'mode': MODES.SEARCH, 'section': section}, i18n('search'), thumb=utils2.art(section_params['search_img']), fanart=utils2.art('fanart.jpg'))
if utils2.menu_on('search'): add_search_item({'mode': MODES.RECENT_SEARCH, 'section': section}, i18n('recent_searches'), utils2.art(section_params['search_img']), MODES.CLEAR_RECENT)
if utils2.menu_on('search'): add_search_item({'mode': MODES.SAVED_SEARCHES, 'section': section}, i18n('saved_searches'), utils2.art(section_params['search_img']), MODES.CLEAR_SAVED)
if OFFLINE:
kodi.notify(msg='[COLOR blue]***[/COLOR][COLOR red] %s [/COLOR][COLOR blue]***[/COLOR]' % (i18n('trakt_api_offline')))
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
@url_dispatcher.register(MODES.GENRES, ['section'])
def browse_genres(section):
for genre in trakt_api.get_genres(section):
if genre['slug'] == 'none': continue
kodi.create_item({'mode': MODES.SHOW_GENRE, 'genre': genre['slug'], 'section': section}, genre['name'], utils2.art('%s.png' % (genre['slug'])), fanart=utils2.art('fanart.jpg'))
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
@url_dispatcher.register(MODES.SHOW_GENRE, ['genre', 'section'], ['page'])
def show_genre(genre, section, page=1):
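    # Which trakt feed backs a genre listing (trending, popular, most watched, etc.)
    # is driven by the per-section '<section>_genre_list' setting.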
filters = {'genres': genre}
genre_list = int(kodi.get_setting('%s_genre_list' % (section)))
if genre_list == GENRE_LIST.TRENDING:
list_data = trakt_api.get_trending(section, page, filters=filters)
elif genre_list == GENRE_LIST.POPULAR:
list_data = trakt_api.get_popular(section, page, filters=filters)
elif genre_list == GENRE_LIST.ANTICIPATED:
list_data = trakt_api.get_anticipated(section, page, filters=filters)
elif genre_list == GENRE_LIST.MOST_WATCHED_WEEK:
list_data = trakt_api.get_most_watched(section, 'weekly', page, filters=filters)
elif genre_list == GENRE_LIST.MOST_WATCHED_MONTH:
list_data = trakt_api.get_most_watched(section, 'monthly', page, filters=filters)
elif genre_list == GENRE_LIST.MOST_WATCHED_ALL:
list_data = trakt_api.get_most_watched(section, 'all', page, filters=filters)
elif genre_list == GENRE_LIST.MOST_PLAYED_WEEK:
list_data = trakt_api.get_most_played(section, 'weekly', page, filters=filters)
elif genre_list == GENRE_LIST.MOST_PLAYED_MONTH:
list_data = trakt_api.get_most_played(section, 'monthly', page, filters=filters)
elif genre_list == GENRE_LIST.MOST_PLAYED_ALL:
list_data = trakt_api.get_most_played(section, 'all', page, filters=filters)
elif genre_list == GENRE_LIST.MOST_COLLECTED_WEEK:
list_data = trakt_api.get_most_collected(section, 'weekly', page, filters=filters)
elif genre_list == GENRE_LIST.MOST_COLLECTED_MONTH:
list_data = trakt_api.get_most_collected(section, 'monthly', page, filters=filters)
elif genre_list == GENRE_LIST.MOST_COLLECTED_ALL:
list_data = trakt_api.get_most_collected(section, 'all', page, filters=filters)
else:
logger.log('Unrecognized genre list: %s' % (genre_list), log_utils.LOGWARNING)
list_data = []
make_dir_from_list(section, list_data, query={'mode': MODES.SHOW_GENRE, 'genre': genre, 'section': section}, page=page)
@url_dispatcher.register(MODES.SHOW_BOOKMARKS, ['section'])
def view_bookmarks(section):
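    # List trakt playback bookmarks for this section; when trakt bookmark resume is
    # enabled, each label is prefixed with the resume percentage and the pause date.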
section_params = utils2.get_section_params(section)
for bookmark in trakt_api.get_bookmarks(section, full=True):
queries = {'mode': MODES.DELETE_BOOKMARK, 'bookmark_id': bookmark['id']}
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
menu_items = [(i18n('delete_bookmark'), runstring,)]
if bookmark['type'] == 'movie':
liz, liz_url = make_item(section_params, bookmark['movie'], menu_items=menu_items)
else:
liz, liz_url = make_episode_item(bookmark['show'], bookmark['episode'], show_subs=False, menu_items=menu_items)
label = liz.getLabel()
label = '%s - %s' % (bookmark['show']['title'], label)
liz.setLabel(label)
label = liz.getLabel()
pause_label = ''
if kodi.get_setting('trakt_bookmark') == 'true':
pause_label = '[COLOR blue]%.2f%%[/COLOR] %s ' % (bookmark['progress'], i18n('on'))
paused_at = time.strftime('%Y-%m-%d', time.localtime(utils.iso_2_utc(bookmark['paused_at'])))
pause_label += '[COLOR deeppink]%s[/COLOR]' % (utils2.make_day(paused_at, use_words=False))
label = '[%s] %s ' % (pause_label, label)
liz.setLabel(label)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False, totalItems=0)
content_type = CONTENT_TYPES.EPISODES if section == SECTIONS.TV else CONTENT_TYPES.MOVIES
kodi.set_view(content_type, True)
kodi.end_of_directory()
@url_dispatcher.register(MODES.DELETE_BOOKMARK, ['bookmark_id'])
def delete_bookmark(bookmark_id):
trakt_api.delete_bookmark(bookmark_id)
kodi.notify(msg=i18n('bookmark_deleted'))
kodi.refresh_container()
@url_dispatcher.register(MODES.SHOW_VIEWS)
def show_views():
for content_type in ['movies', 'tvshows', 'seasons', 'episodes', 'files']:
kodi.create_item({'mode': MODES.BROWSE_VIEW, 'content_type': content_type}, i18n('set_default_x_view') % (content_type.capitalize()),
thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'))
kodi.end_of_directory()
@url_dispatcher.register(MODES.BROWSE_VIEW, ['content_type'])
def browse_view(content_type):
kodi.create_item({'mode': MODES.SET_VIEW, 'content_type': content_type}, i18n('set_view_instr') % (content_type.capitalize()), thumb=utils2.art('settings.png'),
fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
kodi.set_view(content_type, True)
kodi.end_of_directory()
@url_dispatcher.register(MODES.SET_VIEW, ['content_type'])
def set_default_view(content_type):
current_view = kodi.get_current_view()
if current_view:
kodi.set_setting('%s_view' % (content_type), current_view)
view_name = xbmc.getInfoLabel('Container.Viewmode')
kodi.notify(msg=i18n('view_set') % (content_type.capitalize(), view_name))
@url_dispatcher.register(MODES.BROWSE_URLS)
def browse_urls():
urls = db_connection.get_all_urls(order_matters=True)
kodi.create_item({'mode': MODES.FLUSH_CACHE}, '***%s***' % (i18n('delete_cache')), thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
for url in urls:
if url[1]:
label = '%s (%s)' % (url[0], url[1])
else:
label = url[0]
kodi.create_item({'mode': MODES.DELETE_URL, 'url': url[0], 'data': url[1]}, label, thumb=utils2.art('settings.png'), fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
kodi.set_content(CONTENT_TYPES.FILES)
kodi.end_of_directory()
@url_dispatcher.register(MODES.DELETE_URL, ['url'], ['data'])
def delete_url(url, data=''):
db_connection.delete_cached_url(url, data)
kodi.refresh_container()
@url_dispatcher.register(MODES.RES_SETTINGS)
def resolver_settings():
urlresolver.display_settings()
@url_dispatcher.register(MODES.ADDON_SETTINGS)
def addon_settings():
kodi.show_settings()
@url_dispatcher.register(MODES.AUTH_TRAKT)
def auth_trakt():
utils.auth_trakt(Trakt_API, kodi.Translations(strings.STRINGS))
#@url_dispatcher.register(MODES.INSTALL_THEMES)
#def install_themepak():
# xbmc.executebuiltin('RunPlugin(plugin://script.salts.themepak)')
#@url_dispatcher.register(MODES.INSTALL_CACHE)
#def install_cache():
# xbmc.executebuiltin('RunPlugin(plugin://script.module.image_cache)')
@url_dispatcher.register(MODES.REPAIR_URLRESOLVER)
def repair_urlresolver():
try:
path = os.path.join(kodi.translate_path('special://home'), 'addons', 'script.module.urlresolver')
shutil.rmtree(path)
dlg = xbmcgui.Dialog()
dlg.ok(i18n('repair_urlresolver'), i18n('repair_line_1'))
except:
xbmc.executebuiltin('RunPlugin(plugin://script.module.urlresolver)')
@url_dispatcher.register(MODES.RESET_BASE_URL)
def reset_base_url():
with kodi.WorkingDialog():
utils2.reset_base_url()
kodi.notify(msg=i18n('reset_complete'))
@url_dispatcher.register(MODES.AUTO_CONF)
def auto_conf():
gui_utils.do_auto_config()
def add_section_lists(section):
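    # Add the lists pinned to this section's main menu. The '<section>_main' setting is a
    # pipe-separated list of personal list slugs or slug@username entries for other users' lists.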
main_list = []
main_str = kodi.get_setting('%s_main' % (section))
if main_str:
main_list = main_str.split('|')
other_dict = dict(('%s@%s' % (item[1], item[0]), item) for item in db_connection.get_other_lists(section))
if TOKEN:
lists_dict = dict((user_list['ids']['slug'], user_list) for user_list in trakt_api.get_lists())
for list_str in main_list:
if '@' not in list_str:
if TOKEN:
fake_list = {'name': list_str, 'ids': {'slug': list_str}}
user_list = lists_dict.get(list_str, fake_list)
add_list_item(section, user_list)
else:
other_list = other_dict.get(list_str, list(reversed(list_str.split('@'))))
#add_other_list_item(MODES.BROWSE, section, other_list)
def add_refresh_item(queries, label, thumb, fanart):
refresh_queries = {'mode': MODES.FORCE_REFRESH, 'refresh_mode': queries['mode']}
if 'section' in queries: refresh_queries.update({'section': queries['section']})
menu_items = [(i18n('force_refresh'), 'RunPlugin(%s)' % (kodi.get_plugin_url(refresh_queries)))]
kodi.create_item(queries, label, thumb=thumb, fanart=fanart, is_folder=True, menu_items=menu_items)
def add_search_item(queries, label, thumb, clear_mode):
menu_queries = {'mode': clear_mode, 'section': queries['section']}
menu_items = [(i18n('clear_all') % (label), 'RunPlugin(%s)' % (kodi.get_plugin_url(menu_queries)))]
kodi.create_item(queries, label, thumb=thumb, fanart=utils2.art('fanart.jpg'), is_folder=True, menu_items=menu_items)
@url_dispatcher.register(MODES.FORCE_REFRESH, ['refresh_mode'], ['section', 'slug', 'username'])
def force_refresh(refresh_mode, section=None, slug=None, username=None):
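    # Re-fetch the data behind the given mode with cached=False so the next
    # rendering of that view uses fresh results instead of the local cache.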
kodi.notify(msg=i18n('forcing_refresh'))
logger.log('Forcing refresh for mode: |%s|%s|%s|%s|' % (refresh_mode, section, slug, username), log_utils.LOGDEBUG)
now = datetime.datetime.now()
offset = int(kodi.get_setting('calendar-day'))
start_date = now + datetime.timedelta(days=offset)
start_date = datetime.datetime.strftime(start_date, '%Y-%m-%d')
if refresh_mode == MODES.SHOW_COLLECTION:
trakt_api.get_collection(section, cached=False)
elif refresh_mode == MODES.SHOW_PROGRESS:
try:
workers, _progress = get_progress(cached=False)
finally:
try: worker_pool.reap_workers(workers, None)
except: pass
elif refresh_mode == MODES.MY_CAL:
trakt_api.get_my_calendar(start_date, 8, cached=False)
elif refresh_mode == MODES.CAL:
trakt_api.get_calendar(start_date, 8, cached=False)
elif refresh_mode == MODES.PREMIERES:
trakt_api.get_premieres(start_date, 8, cached=False)
elif refresh_mode == MODES.SHOW_LIST:
get_list(section, slug, username, cached=False)
elif refresh_mode == MODES.SHOW_WATCHLIST:
get_list(section, WATCHLIST_SLUG, username, cached=False)
elif refresh_mode == MODES.MANAGE_SUBS:
slug = kodi.get_setting('%s_sub_slug' % (section))
if slug:
get_list(section, slug, username, cached=False)
elif refresh_mode == MODES.LIKED_LISTS:
trakt_api.get_liked_lists(cached=False)
elif refresh_mode == MODES.SHOW_REWATCH:
try:
workers, _rewatches = get_rewatches(cached=False)
finally:
try: worker_pool.reap_workers(workers, None)
except: pass
else:
logger.log('Force refresh on unsupported mode: |%s|' % (refresh_mode), log_utils.LOGWARNING)
return
logger.log('Force refresh complete: |%s|%s|%s|%s|' % (refresh_mode, section, slug, username), log_utils.LOGDEBUG)
kodi.notify(msg=i18n('force_refresh_complete'))
@url_dispatcher.register(MODES.MOSTS, ['section'])
def mosts_menu(section):
modes = [(MODES.PLAYED, 'most_played_%s'), (MODES.WATCHED, 'most_watched_%s'), (MODES.COLLECTED, 'most_collected_%s')]
for mode in modes:
for period in ['weekly', 'monthly', 'all']:
kodi.create_item({'mode': mode[0], 'section': section, 'period': period}, i18n(mode[1] % (period)), thumb=utils2.art('%s.png' % (mode[1] % (period))), fanart=utils2.art('fanart.jpg'))
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
@url_dispatcher.register(MODES.PLAYED, ['mode', 'section', 'period'], ['page'])
@url_dispatcher.register(MODES.WATCHED, ['mode', 'section', 'period'], ['page'])
@url_dispatcher.register(MODES.COLLECTED, ['mode', 'section', 'period'], ['page'])
def browse_mosts(mode, section, period, page=1):
if mode == MODES.PLAYED:
items = trakt_api.get_most_played(section, period, page)
elif mode == MODES.WATCHED:
items = trakt_api.get_most_watched(section, period, page)
elif mode == MODES.COLLECTED:
items = trakt_api.get_most_collected(section, period, page)
make_dir_from_list(section, items, query={'mode': mode, 'section': section, 'period': period}, page=page)
@url_dispatcher.register(MODES.SCRAPERS)
def scraper_settings():
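    # List every relevant scraper in its sort order with a color-coded failure count
    # (green = none, dark red = close to the auto-disable limit) and context-menu
    # actions to move, toggle or reset each one.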
scrapers = salts_utils.relevant_scrapers(None, True, True)
if kodi.get_setting('toggle_enable') == 'true':
label = '**%s**' % (i18n('enable_all_scrapers'))
else:
label = '**%s**' % (i18n('disable_all_scrapers'))
kodi.create_item({'mode': MODES.TOGGLE_ALL}, label, thumb=utils2.art('scraper.png'), fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
COLORS = ['green', 'limegreen', 'greenyellow', 'yellowgreen', 'yellow', 'orange', 'darkorange', 'orangered', 'red', 'darkred']
fail_limit = int(kodi.get_setting('disable-limit'))
cur_failures = utils2.get_failures()
for i, cls in enumerate(scrapers):
name = cls.get_name()
label = '%s (Provides: %s)' % (name, str(list(cls.provides())).replace("'", ""))
if not utils2.scraper_enabled(name):
label = '[COLOR darkred]%s[/COLOR]' % (label)
toggle_label = i18n('enable_scraper')
else:
toggle_label = i18n('disable_scraper')
failures = cur_failures.get(cls.get_name(), 0)
if failures == -1:
failures = 'N/A'
index = 0
else:
index = min([(int(failures) * (len(COLORS) - 1) / fail_limit), len(COLORS) - 1])
label = '%s. %s [COLOR %s][FL: %s][/COLOR]:' % (i + 1, label, COLORS[index], failures)
menu_items = []
if i > 0:
queries = {'mode': MODES.MOVE_SCRAPER, 'name': name, 'direction': DIRS.UP, 'other': scrapers[i - 1].get_name()}
menu_items.append([i18n('move_up'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))])
if i < len(scrapers) - 1:
queries = {'mode': MODES.MOVE_SCRAPER, 'name': name, 'direction': DIRS.DOWN, 'other': scrapers[i + 1].get_name()}
menu_items.append([i18n('move_down'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))])
queries = {'mode': MODES.MOVE_TO, 'name': name}
menu_items.append([i18n('move_to'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))])
queries = {'mode': MODES.RESET_FAILS, 'name': name}
menu_items.append([i18n('reset_fails'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))])
queries = {'mode': MODES.RESET_REL_URLS, 'name': name}
menu_items.append([i18n('reset_rel_urls'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))])
queries = {'mode': MODES.TOGGLE_SCRAPER, 'name': name}
menu_items.append([toggle_label, 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))])
queries = {'mode': MODES.TOGGLE_SCRAPER, 'name': name}
kodi.create_item(queries, label, thumb=utils2.art('scraper.png'), fanart=utils2.art('fanart.jpg'), is_folder=False,
is_playable=False, menu_items=menu_items, replace_menu=True)
kodi.set_content(CONTENT_TYPES.FILES)
kodi.end_of_directory()
@url_dispatcher.register(MODES.RESET_REL_URLS, ['name'])
def reset_rel_urls(name):
db_connection.clear_scraper_related_urls(name)
kodi.notify(msg=i18n('scraper_url_reset') % (name))
@url_dispatcher.register(MODES.RESET_FAILS, ['name'])
def reset_fails(name):
failures = utils2.get_failures()
failures[name] = 0
utils2.store_failures(failures)
kodi.refresh_container()
@url_dispatcher.register(MODES.MOVE_TO, ['name'])
def move_to(name):
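    # Prompt for a new 1-based position. Sort keys run 0, -1, -2, ... from first to
    # last, so position N maps to key 1 - N and the keys in between are shifted.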
dialog = xbmcgui.Dialog()
sort_key = salts_utils.make_source_sort_key()
new_pos = dialog.numeric(0, i18n('new_pos') % (len(sort_key)))
if new_pos:
new_pos = int(new_pos)
old_key = sort_key[name]
new_key = -new_pos + 1
if (new_pos <= 0 or new_pos > len(sort_key)) or old_key == new_key:
return
for key in sort_key:
this_key = sort_key[key]
# moving scraper up
if new_key > old_key:
# move everything between the old and new down
if this_key > old_key and this_key <= new_key:
sort_key[key] -= 1
# moving scraper down
else:
# move everything between the old and new up
                if this_key >= new_key and this_key < old_key:
sort_key[key] += 1
sort_key[name] = new_key
kodi.set_setting('source_sort_order', utils2.make_source_sort_string(sort_key))
kodi.refresh_container()
@url_dispatcher.register(MODES.MOVE_SCRAPER, ['name', 'direction', 'other'])
def move_scraper(name, direction, other):
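    # Move a scraper one step up or down by nudging its sort key and its neighbour's.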
sort_key = salts_utils.make_source_sort_key()
if direction == DIRS.UP:
sort_key[name] += 1
sort_key[other] -= 1
elif direction == DIRS.DOWN:
sort_key[name] -= 1
sort_key[other] += 1
kodi.set_setting('source_sort_order', utils2.make_source_sort_string(sort_key))
kodi.refresh_container()
@url_dispatcher.register(MODES.TOGGLE_ALL)
def toggle_scrapers():
cur_toggle = kodi.get_setting('toggle_enable')
scrapers = salts_utils.relevant_scrapers(None, True, True)
for scraper in scrapers:
kodi.set_setting('%s-enable' % (scraper.get_name()), cur_toggle)
new_toggle = 'false' if cur_toggle == 'true' else 'true'
kodi.set_setting('toggle_enable', new_toggle)
kodi.refresh_container()
@url_dispatcher.register(MODES.TOGGLE_SCRAPER, ['name'])
def toggle_scraper(name):
if utils2.scraper_enabled(name):
setting = 'false'
else:
setting = 'true'
kodi.set_setting('%s-enable' % (name), setting)
kodi.refresh_container()
@url_dispatcher.register(MODES.TRENDING, ['section'], ['page'])
def browse_trending(section, page=1):
list_data = trakt_api.get_trending(section, page)
make_dir_from_list(section, list_data, query={'mode': MODES.TRENDING, 'section': section}, page=page)
@url_dispatcher.register(MODES.POPULAR, ['section'], ['page'])
def browse_popular(section, page=1):
list_data = trakt_api.get_popular(section, page)
make_dir_from_list(section, list_data, query={'mode': MODES.POPULAR, 'section': section}, page=page)
@url_dispatcher.register(MODES.ANTICIPATED, ['section'], ['page'])
def browse_anticipated(section, page=1):
list_data = trakt_api.get_anticipated(section, page)
make_dir_from_list(section, list_data, query={'mode': MODES.ANTICIPATED, 'section': section}, page=page)
@url_dispatcher.register(MODES.RECENT, ['section'], ['page'])
def browse_recent(section, page=1):
now = datetime.datetime.now()
start_date = now - datetime.timedelta(days=7)
start_date = datetime.datetime.strftime(start_date, '%Y-%m-%d')
list_data = trakt_api.get_recent(section, start_date, page)
make_dir_from_list(section, list_data, query={'mode': MODES.RECENT, 'section': section}, page=page)
@url_dispatcher.register(MODES.RECOMMEND, ['section'])
def browse_recommendations(section):
list_data = trakt_api.get_recommendations(section)
make_dir_from_list(section, list_data)
@url_dispatcher.register(MODES.SHOW_HISTORY, ['section'], ['page'])
def show_history(section, page=1):
section_params = utils2.get_section_params(section)
history = trakt_api.get_history(section, full=True, page=page)
totalItems = len(history)
for item in history:
if section == SECTIONS.MOVIES:
item['movie']['watched'] = True
liz, liz_url = make_item(section_params, item['movie'])
else:
show = item['show']
item['episode']['watched'] = True
menu_items = []
queries = {'mode': MODES.SEASONS, 'trakt_id': show['ids']['trakt'], 'title': show['title'], 'year': show['year'], 'tvdb_id': show['ids']['tvdb']}
menu_items.append((i18n('browse_seasons'), 'Container.Update(%s)' % (kodi.get_plugin_url(queries))),)
liz, liz_url = make_episode_item(show, item['episode'], show_subs=False, menu_items=menu_items)
label = liz.getLabel()
label = '%s - %s' % (show['title'], label)
liz.setLabel(label)
label = liz.getLabel()
watched_at = time.strftime('%Y-%m-%d', time.localtime(utils.iso_2_utc(item['watched_at'])))
header = '[COLOR deeppink]%s[/COLOR]' % (utils2.make_day(watched_at, use_words=False))
label = '[%s] %s' % (header, label)
liz.setLabel(label)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False, totalItems=totalItems)
if page and totalItems >= int(kodi.get_setting('list_size')):
query = {'mode': MODES.SHOW_HISTORY, 'section': section, 'page': int(page) + 1}
label = '%s >>' % (i18n('next_page'))
kodi.create_item(query, label, thumb=utils2.art('nextpage.png'), fanart=utils2.art('fanart.jpg'), is_folder=True)
content_type = CONTENT_TYPES.EPISODES if section == SECTIONS.TV else CONTENT_TYPES.MOVIES
kodi.set_view(content_type, True)
kodi.end_of_directory()
@url_dispatcher.register(MODES.MY_CAL, ['mode'], ['start_date'])
@url_dispatcher.register(MODES.CAL, ['mode'], ['start_date'])
@url_dispatcher.register(MODES.PREMIERES, ['mode'], ['start_date'])
def browse_calendar(mode, start_date=None):
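    # The default start date is today plus the 'calendar-day' offset setting;
    # each calendar mode fetches an 8-day window from that date.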
if start_date is None:
now = datetime.datetime.now()
offset = int(kodi.get_setting('calendar-day'))
start_date = now + datetime.timedelta(days=offset)
start_date = datetime.datetime.strftime(start_date, '%Y-%m-%d')
if mode == MODES.MY_CAL:
days = trakt_api.get_my_calendar(start_date, 8)
elif mode == MODES.CAL:
days = trakt_api.get_calendar(start_date, 8)
elif mode == MODES.PREMIERES:
days = trakt_api.get_premieres(start_date, 8)
make_dir_from_cal(mode, start_date, days)
@url_dispatcher.register(MODES.MY_LISTS, ['section'])
def browse_lists(section):
lists = trakt_api.get_lists()
lists.insert(0, {'name': 'watchlist', 'ids': {'slug': salts_utils.WATCHLIST_SLUG}})
total_items = len(lists)
for user_list in lists:
add_list_item(section, user_list, total_items)
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
def add_list_item(section, user_list, total_items=0):
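    # Directory entry for one of the user's own trakt lists, with context-menu actions to
    # use it as the favorites/subscription/rewatch list, import the collection into it,
    # force a refresh, or pin it to the section's main menu.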
ids = user_list['ids']
menu_items = []
queries = {'mode': MODES.SET_FAV_LIST, 'slug': ids['slug'], 'section': section}
menu_items.append((i18n('set_fav_list'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SET_SUB_LIST, 'slug': ids['slug'], 'section': section}
menu_items.append((i18n('set_sub_list'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SET_REWATCH_LIST, 'slug': ids['slug'], 'section': SECTIONS.TV}
menu_items.append((i18n('set_rewatch_list'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.COPY_LIST, 'slug': COLLECTION_SLUG, 'section': section, 'target_slug': ids['slug']}
menu_items.append((i18n('import_collection'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.FORCE_REFRESH, 'refresh_mode': MODES.SHOW_LIST, 'section': section, 'slug': ids['slug']}
menu_items.append((i18n('force_refresh'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if ids['slug'] != salts_utils.WATCHLIST_SLUG:
if ids['slug'] in kodi.get_setting('%s_main' % (section)).split('|'):
label = i18n('remove_from_main')
action = 'remove'
else:
label = i18n('add_to_main')
action = 'add'
queries = {'mode': MODES.TOGGLE_TO_MENU, 'action': action, 'section': section, 'slug': ids['slug']}
menu_items.append((label, 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SHOW_LIST, 'section': section, 'slug': ids['slug']}
kodi.create_item(queries, user_list['name'], thumb=utils2.art('list.png'), fanart=utils2.art('fanart.jpg'), is_folder=True,
total_items=total_items, menu_items=menu_items, replace_menu=False)
@url_dispatcher.register(MODES.LIKED_LISTS, ['section'], ['page'])
def browse_liked_lists(section, page=1):
liked_lists = trakt_api.get_liked_lists(page=page)
total_items = len(liked_lists)
for liked_list in liked_lists:
list_item = (liked_list['list']['user']['username'], liked_list['list']['ids']['slug'])
add_other_list_item(MODES.LIKED_LISTS, section, list_item, total_items)
query = {'mode': MODES.LIKED_LISTS, 'section': section}
if query and page and total_items >= int(kodi.get_setting('list_size')):
query['page'] = int(page) + 1
label = '%s >>' % (i18n('next_page'))
kodi.create_item(query, label, thumb=utils2.art('nextpage.png'), fanart=utils2.art('fanart.jpg'), is_folder=True)
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
@url_dispatcher.register(MODES.OTHER_LISTS, ['section'])
def browse_other_lists(section):
kodi.create_item({'mode': MODES.ADD_OTHER_LIST, 'section': section}, i18n('add_other_list'), thumb=utils2.art('add_other.png'),
fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
lists = db_connection.get_other_lists(section)
total_items = len(lists)
for other_list in lists:
add_other_list_item(MODES.OTHER_LISTS, section, other_list, total_items)
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
def add_other_list_item(mode, section, other_list, total_items=0):
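    # Directory entry for another user's trakt list. The list header is fetched to get
    # its name; if that fails the slug is shown and the item links back to the lists menu.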
try:
header = trakt_api.get_list_header(other_list[1], other_list[0], bool(TOKEN))
except (TraktNotFoundError, TraktAuthError) as e:
logger.log('List Access Failure: %s' % (e), log_utils.LOGWARNING)
header = None
if header:
if len(other_list) >= 3 and other_list[2]:
name = other_list[2]
else:
name = header['name']
else:
name = other_list[1]
menu_items = []
if header:
queries = {'mode': MODES.FORCE_REFRESH, 'refresh_mode': MODES.SHOW_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0]}
menu_items.append((i18n('force_refresh'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.COPY_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0]}
menu_items.append((i18n('copy_to_my_list'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
list_str = '%s@%s' % (other_list[1], other_list[0])
if list_str in kodi.get_setting('%s_main' % (section)).split('|'):
label = i18n('remove_from_main')
action = 'remove'
else:
label = i18n('add_to_main')
action = 'add'
queries = {'mode': MODES.TOGGLE_TO_MENU, 'action': action, 'section': section, 'slug': other_list[1], 'username': other_list[0]}
menu_items.append((label, 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if mode == MODES.OTHER_LISTS:
queries = {'mode': MODES.ADD_OTHER_LIST, 'section': section, 'username': other_list[0]}
menu_items.append((i18n('add_more_from') % (other_list[0]), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.REMOVE_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0]}
menu_items.append((i18n('remove_list'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.RENAME_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0], 'name': name}
menu_items.append((i18n('rename_list'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if header:
queries = {'mode': MODES.SHOW_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0]}
else:
queries = {'mode': MODES.OTHER_LISTS, 'section': section}
label = '[[COLOR blue]%s[/COLOR]] %s' % (other_list[0], name)
kodi.create_item(queries, label, thumb=utils2.art('list.png'), fanart=utils2.art('fanart.jpg'), is_folder=True, total_items=total_items, menu_items=menu_items, replace_menu=True)
@url_dispatcher.register(MODES.TOGGLE_TO_MENU, ['action', 'section', 'slug'], ['username'])
def toggle_to_menu(action, section, slug, username=None):
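    # Add or remove a list (slug or slug@username) from the pipe-separated
    # '<section>_main' setting consumed by add_section_lists().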
if username is None:
list_str = slug
else:
list_str = '%s@%s' % (slug, username)
setting = '%s_main' % (section)
main_str = kodi.get_setting(setting)
if main_str:
main_list = main_str.split('|')
else:
main_list = []
if action == 'add':
main_list.append(list_str)
else:
for i, item in enumerate(main_list):
if item == list_str:
del main_list[i]
break
main_str = '|'.join(main_list)
kodi.set_setting(setting, main_str)
kodi.refresh_container()
@url_dispatcher.register(MODES.REMOVE_LIST, ['section', 'username', 'slug'])
def remove_list(section, username, slug):
db_connection.delete_other_list(section, username, slug)
kodi.refresh_container()
@url_dispatcher.register(MODES.RENAME_LIST, ['section', 'slug', 'username', 'name'])
def rename_list(section, slug, username, name):
new_name = kodi.get_keyboard(i18n('new_name_heading'), name)
if new_name is not None:
db_connection.rename_other_list(section, username, slug, new_name)
kodi.refresh_container()
@url_dispatcher.register(MODES.ADD_OTHER_LIST, ['section'], ['username'])
def add_other_list(section, username=None):
if username is None:
username = kodi.get_keyboard(i18n('username_list_owner'))
if username is not None:
slug = pick_list(None, section, username)
if slug:
db_connection.add_other_list(section, username, slug)
kodi.refresh_container()
def get_rewatches(cached=True):
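    # Pair every show on the configured rewatch list with its next rewatch episode.
    # Progress is requested in parallel through a worker pool; returns (workers, rewatches)
    # so the caller is responsible for reaping the workers.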
rewatches = []
workers = []
list_data = get_list(SECTIONS.TV, kodi.get_setting('rewatch_slug'))
if list_data is not None:
begin = time.time()
history = dict((item['show']['ids']['trakt'], item) for item in trakt_api.get_watched(SECTIONS.TV, cached=cached))
list_order = dict((item['ids']['trakt'], i) for i, item in enumerate(list_data))
timeout = max_timeout = int(kodi.get_setting('trakt_timeout'))
try:
wp = worker_pool.WorkerPool()
list_size = len(list_data)
shows = {}
for show in list_data:
trakt_id = show['ids']['trakt']
plays = utils2.make_plays(history.get(trakt_id, {}))
wp.request(salts_utils.parallel_get_progress, [trakt_id, cached, None])
shows[trakt_id] = {'show': show, 'plays': plays}
while len(rewatches) < list_size:
try:
logger.log('Waiting on progress - Timeout: %s' % (timeout), log_utils.LOGDEBUG)
progress = wp.receive(timeout)
trakt_id = progress['trakt']
                    next_episode = utils2.get_next_rewatch(trakt_id, shows[trakt_id]['plays'], progress)
show = shows[trakt_id]['show']
logger.log('Next Rewatch: %s (%s) - %s - %s' % (show['title'], show['year'], trakt_id, next_episode), log_utils.LOGDEBUG)
if next_episode:
rewatch = {'episode': next_episode}
rewatch.update(shows[trakt_id])
rewatches.append(rewatch)
if max_timeout > 0:
timeout = max_timeout - (time.time() - begin)
if timeout < 0: timeout = 0
except worker_pool.Empty:
logger.log('Get Progress Process Timeout', log_utils.LOGWARNING)
timeout = True
break
else:
logger.log('All progress results received', log_utils.LOGDEBUG)
timeout = False
if timeout:
timeout_msg = i18n('progress_timeouts') % (list_size - len(rewatches), list_size)
kodi.notify(msg=timeout_msg, duration=5000)
logger.log(timeout_msg, log_utils.LOGWARNING)
rewatches.sort(key=lambda x: list_order[x['show']['ids']['trakt']])
finally:
workers = wp.close()
return workers, rewatches
@url_dispatcher.register(MODES.SHOW_REWATCH)
def show_rewatch():
slug = kodi.get_setting('rewatch_slug')
if not slug:
kodi.create_item({'mode': MODES.PICK_REWATCH_LIST, 'section': SECTIONS.TV}, i18n('pick_rewatch_list'), is_folder=False, is_playable=False)
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
else:
try:
workers, rewatches = get_rewatches()
total_items = len(rewatches)
for rewatch in rewatches:
show = rewatch['show']
trakt_id = show['ids']['trakt']
plays = rewatch['plays']
next_episode = rewatch['episode']
episode = trakt_api.get_episode_details(trakt_id, next_episode['season'], next_episode['episode'])
episode['watched'] = plays.get(next_episode['season'], {}).get(next_episode['episode'], 0) > 0
menu_items = []
queries = {'mode': MODES.SEASONS, 'trakt_id': trakt_id, 'title': show['title'], 'year': show['year'], 'tvdb_id': show['ids']['tvdb']}
menu_items.append((i18n('browse_seasons'), 'Container.Update(%s)' % (kodi.get_plugin_url(queries))),)
label, new_method = utils2.get_next_rewatch_method(trakt_id)
queries = {'mode': MODES.MANAGE_REWATCH, 'trakt_id': trakt_id, 'new_method': new_method}
menu_items.append((label, 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if episode['watched']:
queries = {'mode': MODES.TOGGLE_WATCHED, 'section': SECTIONS.TV, 'season': episode['season'], 'episode': episode['number'], 'watched': True}
queries.update(utils2.show_id(show))
menu_items.append((i18n('mark_as_watched'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
liz, liz_url = make_episode_item(show, episode, show_subs=False, menu_items=menu_items)
label = liz.getLabel()
label = '%s - %s' % (show['title'], label)
liz.setLabel(label)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False, totalItems=total_items)
kodi.set_content(CONTENT_TYPES.EPISODES)
kodi.end_of_directory(cache_to_disc=False)
finally:
try: worker_pool.reap_workers(workers, None)
except UnboundLocalError: pass
@url_dispatcher.register(MODES.MANAGE_REWATCH, ['trakt_id', 'new_method'])
def manage_rewatch(trakt_id, new_method):
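    # Move the show between the least-watched/most-watched override lists that
    # control how its next rewatch episode is chosen.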
min_list = utils2.get_min_rewatch_list()
max_list = utils2.get_max_rewatch_list()
if new_method == REWATCH_METHODS.LEAST_WATCHED:
if trakt_id not in min_list: min_list.append(trakt_id)
if trakt_id in max_list: max_list.remove(trakt_id)
elif new_method == REWATCH_METHODS.MOST_WATCHED:
if trakt_id in min_list: min_list.remove(trakt_id)
if trakt_id not in max_list: max_list.append(trakt_id)
else:
if trakt_id in min_list: min_list.remove(trakt_id)
if trakt_id in max_list: max_list.remove(trakt_id)
kodi.set_setting('rewatch_min_list', '|'.join(min_list))
kodi.set_setting('rewatch_max_list', '|'.join(max_list))
kodi.refresh_container()
@url_dispatcher.register(MODES.SHOW_LIST, ['section', 'slug'], ['username'])
def show_list(section, slug, username=None):
items = get_list(section, slug, username)
if items is not None:
make_dir_from_list(section, items, slug)
@url_dispatcher.register(MODES.SHOW_WATCHLIST, ['section'])
def show_watchlist(section):
show_list(section, salts_utils.WATCHLIST_SLUG)
@url_dispatcher.register(MODES.SHOW_COLLECTION, ['section'])
def show_collection(section):
items = trakt_api.get_collection(section)
sort_key = int(kodi.get_setting('sort_collection'))
if sort_key == 1:
items.reverse()
elif sort_key == 2:
items.sort(key=lambda x: utils2.title_key(x['title']))
elif sort_key == 3:
items.sort(key=lambda x: x['year'])
# hack aired_episodes to override w/ collected_episodes to workaround trakt.tv cache issue
if section == SECTIONS.TV:
for item in items:
collected_episodes = len([e for s in item['seasons'] if s['number'] != 0 for e in s['episodes']])
logger.log('%s/%s: Collected: %s - Aired: %s' % (item['ids']['trakt'], item['ids']['slug'], collected_episodes, item['aired_episodes']), log_utils.LOGDEBUG)
if collected_episodes > item['aired_episodes']:
item['aired_episodes'] = collected_episodes
make_dir_from_list(section, items, COLLECTION_SLUG)
def get_progress(cached=True):
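    # Build the "My Next Episodes" data: pull watched history (plus the watchlist if enabled),
    # skip hidden shows and cached ended/completed shows, then fetch per-show progress through
    # a worker pool. Results are cached for 15 minutes, but only when no request timed out.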
if cached:
in_cache, result = db_connection.get_cached_function(get_progress.__name__, cache_limit=15 * 60)
if in_cache:
return [], utils2.sort_progress(result, sort_order=SORT_MAP[int(kodi.get_setting('sort_progress'))])
workers = []
episodes = []
with kodi.ProgressDialog(i18n('discover_mne'), background=True) as pd:
begin = time.time()
timeout = max_timeout = int(kodi.get_setting('trakt_timeout'))
pd.update(0, line1=i18n('retr_history'))
progress_list = trakt_api.get_watched(SECTIONS.TV, full=True, noseasons=True, cached=cached)
if kodi.get_setting('include_watchlist_next') == 'true':
pd.update(5, line1=i18n('retr_watchlist'))
watchlist = trakt_api.show_watchlist(SECTIONS.TV)
watchlist = [{'show': item} for item in watchlist]
progress_list += watchlist
pd.update(10, line1=i18n('retr_hidden'))
hidden = set([item['show']['ids']['trakt'] for item in trakt_api.get_hidden_progress(cached=cached)])
shows = {}
filter_list = set(utils2.get_progress_skip_list())
force_list = set(utils2.get_force_progress_list())
use_exclusion = kodi.get_setting('use_cached_exclusion') == 'true'
progress_size = len(progress_list)
try:
wp = worker_pool.WorkerPool(max_workers=50)
for i, show in enumerate(progress_list):
trakt_id = show['show']['ids']['trakt']
# skip hidden shows
if trakt_id in hidden:
continue
# skip cached ended 100% shows
if use_exclusion and str(trakt_id) in filter_list and str(trakt_id) not in force_list:
logger.log('Skipping %s (%s) as cached MNE ended exclusion' % (trakt_id, show['show']['title']), log_utils.LOGDEBUG)
continue
percent = (i + 1) * 25 / progress_size + 10
pd.update(percent, line1=i18n('req_progress') % (show['show']['title']))
wp.request(salts_utils.parallel_get_progress, [trakt_id, cached, .08])
shows[trakt_id] = show['show']
total_shows = len(shows)
progress_count = 0
while progress_count < total_shows:
try:
logger.log('Waiting for Progress - Timeout: %s' % (timeout), log_utils.LOGDEBUG)
progress = wp.receive(timeout)
progress_count += 1
trakt_id = progress['trakt']
show = shows[trakt_id]
percent = (progress_count * 65 / total_shows) + 35
pd.update(percent, line1=i18n('rec_progress') % (show['title']))
if 'next_episode' in progress and progress['next_episode']:
episode = {'show': show, 'episode': progress['next_episode']}
episode['last_watched_at'] = progress['last_watched_at']
episode['percent_completed'] = (progress['completed'] * 100) / progress['aired'] if progress['aired'] > 0 else 0
episode['completed'] = progress['completed']
episodes.append(episode)
else:
ended = show['status'] and show['status'].upper() == 'ENDED'
completed = progress['completed'] == progress['aired']
if ended and completed and str(trakt_id) not in filter_list and str(trakt_id) not in force_list:
logger.log('Adding %s (%s) (%s - %s) to MNE exclusion list' % (trakt_id, show['title'], progress['completed'], progress['aired']), log_utils.LOGDEBUG)
manage_progress_cache(ACTIONS.ADD, progress['trakt'])
if max_timeout > 0:
timeout = max_timeout - (time.time() - begin)
if timeout < 0: timeout = 0
except worker_pool.Empty:
logger.log('Get Progress Process Timeout', log_utils.LOGWARNING)
timeout = True
break
else:
logger.log('All progress results received', log_utils.LOGDEBUG)
timeout = False
finally:
workers = wp.close()
if timeout:
timeouts = total_shows - progress_count
timeout_msg = i18n('progress_timeouts') % (timeouts, total_shows)
kodi.notify(msg=timeout_msg, duration=5000)
logger.log(timeout_msg, log_utils.LOGWARNING)
else:
# only cache the results if all results were successful
db_connection.cache_function(get_progress.__name__, result=episodes)
return workers, utils2.sort_progress(episodes, sort_order=SORT_MAP[int(kodi.get_setting('sort_progress'))])
@url_dispatcher.register(MODES.SHOW_PROGRESS)
def show_progress():
try:
workers, progress = get_progress()
for episode in progress:
logger.log('Episode: Sort Keys: Tile: |%s| Last Watched: |%s| Percent: |%s%%| Completed: |%s|' % (episode['show']['title'], episode['last_watched_at'], episode['percent_completed'], episode['completed']), log_utils.LOGDEBUG)
first_aired_utc = utils.iso_2_utc(episode['episode']['first_aired'])
if kodi.get_setting('show_unaired_next') == 'true' or first_aired_utc <= time.time():
show = episode['show']
date = utils2.make_day(utils2.make_air_date(episode['episode']['first_aired']))
if kodi.get_setting('mne_time') != '0':
date_time = '%s@%s' % (date, utils2.make_time(first_aired_utc, 'mne_time'))
else:
date_time = date
menu_items = []
queries = {'mode': MODES.SEASONS, 'trakt_id': show['ids']['trakt'], 'title': show['title'], 'year': show['year'], 'tvdb_id': show['ids']['tvdb']}
menu_items.append((i18n('browse_seasons'), 'Container.Update(%s)' % (kodi.get_plugin_url(queries))),)
liz, liz_url = make_episode_item(show, episode['episode'], show_subs=False, menu_items=menu_items)
label = liz.getLabel()
label = '[[COLOR deeppink]%s[/COLOR]] %s - %s' % (date_time, show['title'], label)
liz.setLabel(label)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
kodi.set_content(CONTENT_TYPES.EPISODES)
kodi.end_of_directory(cache_to_disc=False)
finally:
try: worker_pool.reap_workers(workers, None)
except UnboundLocalError: pass
@url_dispatcher.register(MODES.MANAGE_SUBS, ['section'])
def manage_subscriptions(section):
slug = kodi.get_setting('%s_sub_slug' % (section))
if slug:
next_run = salts_utils.get_next_run(MODES.UPDATE_SUBS)
label = i18n('update_subs')
if kodi.get_setting('auto-' + MODES.UPDATE_SUBS) == 'true':
color = 'green'
run_str = next_run.strftime("%Y-%m-%d %I:%M:%S %p")
else:
color = 'red'
run_str = i18n('disabled')
kodi.create_item({'mode': MODES.UPDATE_SUBS, 'section': section}, label % (color, run_str), thumb=utils2.art('update_subscriptions.png'),
fanart=utils2.art('fanart.jpg'), is_folder=False, is_playable=False)
if section == SECTIONS.TV:
kodi.create_item({'mode': MODES.CLEAN_SUBS}, i18n('cleanup_subs'), thumb=utils2.art('clean_up.png'), fanart=utils2.art('fanart.jpg'),
is_folder=False, is_playable=False)
show_pickable_list(slug, i18n('pick_sub_list'), MODES.PICK_SUB_LIST, section)
@url_dispatcher.register(MODES.SHOW_FAVORITES, ['section'])
def show_favorites(section):
slug = kodi.get_setting('%s_fav_slug' % (section))
show_pickable_list(slug, i18n('pick_fav_list'), MODES.PICK_FAV_LIST, section)
@url_dispatcher.register(MODES.PICK_SUB_LIST, ['mode', 'section'])
@url_dispatcher.register(MODES.PICK_FAV_LIST, ['mode', 'section'])
@url_dispatcher.register(MODES.PICK_REWATCH_LIST, ['mode', 'section'])
def pick_list(mode, section, username=None):
slug, _name = utils.choose_list(Trakt_API, kodi.Translations(strings.STRINGS), username)
if slug:
if mode == MODES.PICK_FAV_LIST:
set_list(MODES.SET_FAV_LIST, slug, section)
elif mode == MODES.PICK_SUB_LIST:
set_list(MODES.SET_SUB_LIST, slug, section)
elif mode == MODES.PICK_REWATCH_LIST:
set_list(MODES.SET_REWATCH_LIST, slug, SECTIONS.TV)
else:
return slug
kodi.refresh_container()
@url_dispatcher.register(MODES.SET_SUB_LIST, ['mode', 'slug', 'section'])
@url_dispatcher.register(MODES.SET_FAV_LIST, ['mode', 'slug', 'section'])
@url_dispatcher.register(MODES.SET_REWATCH_LIST, ['mode', 'slug', 'section'])
def set_list(mode, slug, section):
if mode == MODES.SET_FAV_LIST:
setting = '%s_fav_slug' % (section)
elif mode == MODES.SET_SUB_LIST:
setting = '%s_sub_slug' % (section)
elif mode == MODES.SET_REWATCH_LIST:
setting = 'rewatch_slug'
kodi.set_setting(setting, slug)
@url_dispatcher.register(MODES.SEARCH, ['section'])
def search(section, search_text=None): # @UnusedVariable
section_params = utils2.get_section_params(section)
heading = '%s %s' % (i18n('search'), section_params['label_plural'])
search_text = kodi.get_keyboard(heading)
if search_text == '':
kodi.notify(msg=i18n('blank_searches'), duration=5000)
elif search_text is not None:
salts_utils.keep_search(section, search_text)
queries = {'mode': MODES.SEARCH_RESULTS, 'section': section, 'query': search_text}
plugin_url = kodi.get_plugin_url(queries)
kodi.update_container(plugin_url)
@url_dispatcher.register(MODES.RECENT_SEARCH, ['section'])
def recent_searches(section):
section_params = utils2.get_section_params(section)
head = int(kodi.get_setting('%s_search_head' % (section)))
for i in reversed(range(0, SEARCH_HISTORY)):
index = (i + head + 1) % SEARCH_HISTORY
search_text = db_connection.get_setting('%s_search_%s' % (section, index))
if not search_text:
break
menu_items = []
menu_queries = {'mode': MODES.SAVE_SEARCH, 'section': section, 'query': search_text}
menu_items.append((i18n('save_search'), 'RunPlugin(%s)' % (kodi.get_plugin_url(menu_queries))),)
menu_queries = {'mode': MODES.DELETE_RECENT, 'section': section, 'index': index}
menu_items.append((i18n('remove_from_recent'), 'RunPlugin(%s)' % (kodi.get_plugin_url(menu_queries))),)
queries = {'mode': MODES.SEARCH_RESULTS, 'section': section, 'query': search_text}
label = '[%s %s] %s' % (section_params['label_single'], i18n('search'), search_text)
kodi.create_item(queries, label, thumb=utils2.art(section_params['search_img']), fanart=utils2.art('fanart.png'), is_folder=True, menu_items=menu_items)
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
@url_dispatcher.register(MODES.SAVED_SEARCHES, ['section'])
def saved_searches(section):
section_params = utils2.get_section_params(section)
for search in db_connection.get_searches(section, order_matters=True):
menu_items = []
refresh_queries = {'mode': MODES.DELETE_SEARCH, 'search_id': search[0]}
menu_items.append((i18n('delete_search'), 'RunPlugin(%s)' % (kodi.get_plugin_url(refresh_queries))),)
queries = {'mode': MODES.SEARCH_RESULTS, 'section': section, 'query': search[1]}
label = '[%s %s] %s' % (section_params['label_single'], i18n('search'), search[1])
kodi.create_item(queries, label, thumb=utils2.art(section_params['search_img']), fanart=utils2.art('fanart.png'), is_folder=True, menu_items=menu_items)
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
@url_dispatcher.register(MODES.CLEAR_RECENT, ['section'])
def clear_recent(section):
for i in range(0, SEARCH_HISTORY):
db_connection.set_setting('%s_search_%s' % (section, i), '')
kodi.notify(msg=i18n('recent_cleared'), duration=2500)
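# Recent searches are stored as a fixed-size ring of '<section>_search_<slot>' settings, with
# '<section>_search_head' pointing at the most recently used slot; delete_recent() below shifts the
# surviving entries around the ring to close the gap and then blanks the freed slot.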
@url_dispatcher.register(MODES.DELETE_RECENT, ['section', 'index'])
def delete_recent(section, index):
index = int(index)
head = int(kodi.get_setting('%s_search_head' % (section)))
logger.log('Head is: %s' % (head), log_utils.LOGDEBUG)
for i in range(SEARCH_HISTORY, 0, -1):
pos = (i - 1 + index) % SEARCH_HISTORY
last_pos = (pos + 1) % SEARCH_HISTORY
if pos == head:
break
search_text = db_connection.get_setting('%s_search_%s' % (section, pos))
logger.log('Moving %s to position %s' % (search_text, last_pos), log_utils.LOGDEBUG)
db_connection.set_setting('%s_search_%s' % (section, last_pos), search_text)
logger.log('Blanking position %s' % (last_pos), log_utils.LOGDEBUG)
db_connection.set_setting('%s_search_%s' % (section, last_pos), '')
kodi.refresh_container()
@url_dispatcher.register(MODES.SAVE_SEARCH, ['section', 'query'])
def save_search(section, query):
db_connection.save_search(section, query)
@url_dispatcher.register(MODES.DELETE_SEARCH, ['search_id'])
def delete_search(search_id):
db_connection.delete_search(search_id)
kodi.refresh_container()
@url_dispatcher.register(MODES.CLEAR_SAVED, ['section'])
def clear_saved(section):
for search in db_connection.get_searches(section):
db_connection.delete_search(search[0])
kodi.notify(msg=i18n('saved_cleared'), duration=2500)
@url_dispatcher.register(MODES.SEARCH_RESULTS, ['section', 'query'], ['page'])
def search_results(section, query, page=1):
results = trakt_api.search(section, query, page)
make_dir_from_list(section, results, query={'mode': MODES.SEARCH_RESULTS, 'section': section, 'query': query}, page=page)
@url_dispatcher.register(MODES.SEASONS, ['trakt_id', 'title', 'year'], ['tvdb_id'])
def browse_seasons(trakt_id, title, year, tvdb_id=None):
seasons = sorted(trakt_api.get_seasons(trakt_id), key=lambda x: x['number'])
info = {}
if TOKEN:
progress = trakt_api.get_show_progress(trakt_id, hidden=True, specials=True)
info = utils2.make_seasons_info(progress)
total_items = len(seasons)
for season in seasons:
if kodi.get_setting('show_season0') == 'true' or season['number'] != 0:
season_info = info.get(str(season['number']), {'season': season['number']})
liz = make_season_item(season, season_info, trakt_id, title, year, tvdb_id)
queries = {'mode': MODES.EPISODES, 'trakt_id': trakt_id, 'season': season['number'], 'random': time.time()}
kodi.add_item(queries, liz, is_folder=True, total_items=total_items)
kodi.set_view(CONTENT_TYPES.SEASONS, True)
kodi.end_of_directory()
@url_dispatcher.register(MODES.EPISODES, ['trakt_id', 'season'])
def browse_episodes(trakt_id, season):
show = trakt_api.get_show_details(trakt_id)
episodes = trakt_api.get_episodes(trakt_id, season)
if TOKEN:
progress = trakt_api.get_show_progress(trakt_id, hidden=True, specials=True)
episodes = utils2.make_episodes_watched(episodes, progress)
totalItems = len(episodes)
now = time.time()
for episode in episodes:
utc_air_time = utils.iso_2_utc(episode['first_aired'])
if kodi.get_setting('show_unaired') == 'true' or utc_air_time <= now:
if kodi.get_setting('show_unknown') == 'true' or utc_air_time:
liz, liz_url = make_episode_item(show, episode)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False, totalItems=totalItems)
kodi.set_view(CONTENT_TYPES.EPISODES, True)
kodi.end_of_directory()
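# get_sources() fans the scrape out over a WorkerPool (one request per relevant scraper) and drains
# the results against a timeout budget counted down from the 'source_timeout' setting. Scrapers that
# never answer are recorded as failures; hitting the 'source_results' cap stops collection early
# without penalising whatever is still outstanding.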
@url_dispatcher.register(MODES.GET_SOURCES, ['mode', 'video_type', 'title', 'year', 'trakt_id'], ['season', 'episode', 'ep_title', 'ep_airdate'])
@url_dispatcher.register(MODES.SELECT_SOURCE, ['mode', 'video_type', 'title', 'year', 'trakt_id'], ['season', 'episode', 'ep_title', 'ep_airdate'])
@url_dispatcher.register(MODES.DOWNLOAD_SOURCE, ['mode', 'video_type', 'title', 'year', 'trakt_id'], ['season', 'episode', 'ep_title', 'ep_airdate'])
@url_dispatcher.register(MODES.AUTOPLAY, ['mode', 'video_type', 'title', 'year', 'trakt_id'], ['season', 'episode', 'ep_title', 'ep_airdate'])
def get_sources(mode, video_type, title, year, trakt_id, season='', episode='', ep_title='', ep_airdate=''):
cool_down_active = kodi.get_setting('cool_down') == 'true'
# if not salts_utils.is_salts() or cool_down_active:
# kodi.notify(msg=i18n('playback_limited'))
# return False
timeout = max_timeout = int(kodi.get_setting('source_timeout'))
if max_timeout == 0: timeout = None
max_results = int(kodi.get_setting('source_results'))
begin = time.time()
fails = set()
counts = {}
video = ScraperVideo(video_type, title, year, trakt_id, season, episode, ep_title, ep_airdate)
active = False if kodi.get_setting('pd_force_disable') == 'true' else True
cancelled = False
with kodi.ProgressDialog(i18n('getting_sources'), utils2.make_progress_msg(video), active=active) as pd:
try:
wp = worker_pool.WorkerPool()
scrapers = salts_utils.relevant_scrapers(video_type)
total_scrapers = len(scrapers)
for i, cls in enumerate(scrapers):
if pd.is_canceled(): return False
scraper = cls(max_timeout)
wp.request(salts_utils.parallel_get_sources, [scraper, video])
progress = i * 25 / total_scrapers
pd.update(progress, line2=i18n('requested_sources_from') % (cls.get_name()))
fails.add(cls.get_name())
counts[cls.get_name()] = 0
hosters = []
line3 = ''  # make sure line3 exists even if the first receive() times out or the dialog is cancelled
result_count = 0
while result_count < total_scrapers:
try:
logger.log('Waiting on sources - Timeout: %s' % (timeout), log_utils.LOGDEBUG)
result = wp.receive(timeout)
result_count += 1
hoster_count = len(result['hosters'])
counts[result['name']] = hoster_count
logger.log('Got %s Source Results from %s' % (hoster_count, result['name']), log_utils.LOGDEBUG)
progress = (result_count * 75 / total_scrapers) + 25
hosters += result['hosters']
fails.remove(result['name'])
if pd.is_canceled():
cancelled = True
break
if len(fails) > 5:
line3 = i18n('remaining_over') % (len(fails), total_scrapers)
else:
line3 = i18n('remaining_under') % (', '.join([name for name in fails]))
pd.update(progress, line2=i18n('received_sources_from') % (hoster_count, len(hosters), result['name']), line3=line3)
if max_results > 0 and len(hosters) >= max_results:
logger.log('Exceeded max results: %s/%s' % (max_results, len(hosters)), log_utils.LOGDEBUG)
fails = set()
break
if max_timeout > 0:
timeout = max_timeout - (time.time() - begin)
if timeout < 0: timeout = 0
except worker_pool.Empty:
logger.log('Get Sources Scraper Timeouts: %s' % (', '.join(fails)), log_utils.LOGWARNING)
break
else:
logger.log('All source results received', log_utils.LOGDEBUG)
finally:
workers = wp.close()
try:
timeout_msg = ''
if not cancelled:
utils2.record_failures(fails, counts)
timeouts = len(fails)
if timeouts > 4:
timeout_msg = i18n('scraper_timeout') % (timeouts, total_scrapers)
elif timeouts > 0:
timeout_msg = i18n('scraper_timeout_list') % ('/'.join([name for name in fails]))
if not hosters:
logger.log('No Sources found for: |%s|' % (video), log_utils.LOGWARNING)
msg = i18n('no_sources')
msg += ' (%s)' % timeout_msg if timeout_msg else ''
kodi.notify(msg=msg, duration=5000)
return False
if timeout_msg:
kodi.notify(msg=timeout_msg, duration=7500)
if not fails: line3 = ' '
pd.update(100, line2=i18n('applying_source_filters'), line3=line3)
hosters = utils2.filter_exclusions(hosters)
hosters = utils2.filter_quality(video_type, hosters)
hosters = apply_urlresolver(hosters)
if kodi.get_setting('enable_sort') == 'true':
SORT_KEYS['source'] = salts_utils.make_source_sort_key()
hosters.sort(key=utils2.get_sort_key)
else:
random.shuffle(hosters)
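# Promote anything returned by the local scraper to the front of the list so local copies are
# always offered first, regardless of the sort/shuffle above.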
local_hosters = []
for i, item in enumerate(hosters):
if isinstance(item['class'], local_scraper.Scraper):
local_hosters.append(item)
hosters[i] = None
hosters = local_hosters + [item for item in hosters if item is not None]
finally:
workers = worker_pool.reap_workers(workers)
try:
if not hosters:
logger.log('No Usable Sources found for: |%s|' % (video), log_utils.LOGDEBUG)
msg = ' (%s)' % timeout_msg if timeout_msg else ''
kodi.notify(msg=i18n('no_useable_sources') % (msg), duration=5000)
return False
pseudo_tv = xbmcgui.Window(10000).getProperty('PseudoTVRunning').lower()
if pseudo_tv == 'true' or (mode == MODES.GET_SOURCES and kodi.get_setting('auto-play') == 'true') or mode == MODES.AUTOPLAY:
auto_play_sources(hosters, video_type, trakt_id, season, episode)
else:
plugin_name = xbmc.getInfoLabel('Container.PluginName')
if kodi.get_setting('source-win') == 'Dialog':
stream_url, direct = pick_source_dialog(hosters)
return play_source(mode, stream_url, direct, video_type, trakt_id, season, episode)
else:
pick_source_dir(mode, hosters, video_type, trakt_id, season, episode)
finally:
try: worker_pool.reap_workers(workers, None)
except UnboundLocalError: pass
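# apply_urlresolver() returns the hoster list untouched when both 'filter_unusable' and
# 'show_debrid' are off. Otherwise it drops indirect hosters whose host urlresolver cannot handle
# (when filtering is on) and tags the survivors with the universal debrid resolvers that support
# their host, memoising per-host results in unk_hosts/known_hosts/debrid_hosts.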
def apply_urlresolver(hosters):
filter_unusable = kodi.get_setting('filter_unusable') == 'true'
show_debrid = kodi.get_setting('show_debrid') == 'true'
if not filter_unusable and not show_debrid:
return hosters
debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True) if resolver.isUniversal()]
filtered_hosters = []
debrid_hosts = {}
unk_hosts = {}
known_hosts = {}
for hoster in hosters:
if 'direct' in hoster and hoster['direct'] is False and hoster['host']:
host = hoster['host']
if filter_unusable:
if host in unk_hosts:
# logger.log('Unknown Hit: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
unk_hosts[host] += 1
continue
elif host in known_hosts:
# logger.log('Known Hit: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
known_hosts[host] += 1
filtered_hosters.append(hoster)
else:
hmf = urlresolver.HostedMediaFile(host=host, media_id='12345678901') # use dummy media_id to force host validation
if hmf:
# logger.log('Known Miss: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
known_hosts[host] = known_hosts.get(host, 0) + 1
filtered_hosters.append(hoster)
else:
# logger.log('Unknown Miss: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
unk_hosts[host] = unk_hosts.get(host, 0) + 1
continue
else:
filtered_hosters.append(hoster)
if host in debrid_hosts:
# logger.log('Debrid cache found for %s: %s' % (host, debrid_hosts[host]), log_utils.LOGDEBUG)
hoster['debrid'] = debrid_hosts[host]
else:
temp_resolvers = [resolver.name[:3].upper() for resolver in debrid_resolvers if resolver.valid_url('', host)]
# logger.log('%s supported by: %s' % (host, temp_resolvers), log_utils.LOGDEBUG)
debrid_hosts[host] = temp_resolvers
if temp_resolvers:
hoster['debrid'] = temp_resolvers
else:
filtered_hosters.append(hoster)
logger.log('Discarded Hosts: %s' % (sorted(unk_hosts.items(), key=lambda x: x[1], reverse=True)), log_utils.LOGDEBUG)
return filtered_hosters
@url_dispatcher.register(MODES.RESOLVE_SOURCE, ['mode', 'class_url', 'direct', 'video_type', 'trakt_id', 'class_name'], ['season', 'episode'])
@url_dispatcher.register(MODES.DIRECT_DOWNLOAD, ['mode', 'class_url', 'direct', 'video_type', 'trakt_id', 'class_name'], ['season', 'episode'])
def resolve_source(mode, class_url, direct, video_type, trakt_id, class_name, season='', episode=''):
for cls in salts_utils.relevant_scrapers(video_type):
if cls.get_name() == class_name:
scraper_instance = cls()
break
else:
logger.log('Unable to locate scraper with name: %s' % (class_name), log_utils.LOGWARNING)
return False
hoster_url = scraper_instance.resolve_link(class_url)
if mode == MODES.DIRECT_DOWNLOAD:
kodi.end_of_directory()
return play_source(mode, hoster_url, direct, video_type, trakt_id, season, episode)
@url_dispatcher.register(MODES.PLAY_TRAILER, ['stream_url'])
def play_trailer(stream_url):
xbmc.Player().play(stream_url)
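# download_subtitles() looks the show up via SRT_Scraper, prompts for a subtitle when auto-pick is
# disabled and several matches exist, and returns the downloaded subtitle path (None when nothing
# is available or selected).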
def download_subtitles(language, title, year, season, episode):
srt_scraper = SRT_Scraper()
tvshow_id = srt_scraper.get_tvshow_id(title, year)
if tvshow_id is None:
return
subs = srt_scraper.get_episode_subtitles(language, tvshow_id, season, episode)
sub_labels = [utils2.format_sub_label(sub) for sub in subs]
index = 0
if len(sub_labels) > 1 and kodi.get_setting('subtitle-autopick') == 'false':
dialog = xbmcgui.Dialog()
index = dialog.select(i18n('choose_subtitle'), sub_labels)
if subs and index > -1:
return srt_scraper.download_subtitle(subs[index]['url'])
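# play_source() resolves indirect hoster urls through urlresolver, looks up any saved resume point,
# stashes playback state in the salts.playing.* window properties (presumably read back once
# playback starts), pulls metadata/art from Trakt, then either downloads the stream or hands it to
# the player / setResolvedUrl() depending on the mode.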
def play_source(mode, hoster_url, direct, video_type, trakt_id, season='', episode=''):
if hoster_url is None:
if direct is not None:
kodi.notify(msg=i18n('resolve_failed') % (i18n('no_stream_found')), duration=7500)
return False
with kodi.WorkingDialog() as wd:
if direct:
logger.log('Treating hoster_url as direct: %s' % (hoster_url), log_utils.LOGDEBUG)
stream_url = hoster_url
else:
wd.update(25)
hmf = urlresolver.HostedMediaFile(url=hoster_url)
if not hmf:
logger.log('Indirect hoster_url not supported by urlresolver: %s' % (hoster_url), log_utils.LOGDEBUG)
stream_url = hoster_url
else:
try:
stream_url = hmf.resolve()
if not stream_url or not isinstance(stream_url, basestring):
try: msg = stream_url.msg
except: msg = hoster_url
raise Exception(msg)
except Exception as e:
try: msg = str(e)
except: msg = hoster_url
kodi.notify(msg=i18n('resolve_failed') % (msg), duration=7500)
return False
wd.update(50)
resume_point = 0
pseudo_tv = xbmcgui.Window(10000).getProperty('PseudoTVRunning').lower()
if pseudo_tv != 'true' and mode not in [MODES.DOWNLOAD_SOURCE, MODES.DIRECT_DOWNLOAD]:
if salts_utils.bookmark_exists(trakt_id, season, episode):
if salts_utils.get_resume_choice(trakt_id, season, episode):
resume_point = salts_utils.get_bookmark(trakt_id, season, episode)
logger.log('Resume Point: %s' % (resume_point), log_utils.LOGDEBUG)
with kodi.WorkingDialog() as wd:
from_library = xbmc.getInfoLabel('Container.PluginName') == ''
wd.update(50)
win = xbmcgui.Window(10000)
win.setProperty('salts.playing', 'True')
win.setProperty('salts.playing.trakt_id', str(trakt_id))
win.setProperty('salts.playing.season', str(season))
win.setProperty('salts.playing.episode', str(episode))
win.setProperty('salts.playing.library', str(from_library))
if resume_point > 0:
if kodi.get_setting('trakt_bookmark') == 'true':
win.setProperty('salts.playing.trakt_resume', str(resume_point))
else:
win.setProperty('salts.playing.salts_resume', str(resume_point))
art = {'thumb': '', 'fanart': ''}
info = {}
show_meta = {}
try:
if video_type == VIDEO_TYPES.EPISODE:
path = kodi.get_setting('tv-download-folder')
file_name = utils2.filename_from_title(trakt_id, VIDEO_TYPES.TVSHOW)
file_name = file_name % ('%02d' % int(season), '%02d' % int(episode))
ep_meta = trakt_api.get_episode_details(trakt_id, season, episode)
show_meta = trakt_api.get_show_details(trakt_id)
win.setProperty('script.trakt.ids', json.dumps(show_meta['ids']))
people = trakt_api.get_people(SECTIONS.TV, trakt_id) if kodi.get_setting('include_people') == 'true' else None
info = salts_utils.make_info(ep_meta, show_meta, people)
art = image_scraper.get_images(VIDEO_TYPES.EPISODE, show_meta['ids'], season, episode)
path = make_path(path, VIDEO_TYPES.TVSHOW, show_meta['title'], season=season)
file_name = utils2.filename_from_title(show_meta['title'], VIDEO_TYPES.TVSHOW)
file_name = file_name % ('%02d' % int(season), '%02d' % int(episode))
else:
path = kodi.get_setting('movie-download-folder')
file_name = utils2.filename_from_title(trakt_id, video_type)
movie_meta = trakt_api.get_movie_details(trakt_id)
win.setProperty('script.trakt.ids', json.dumps(movie_meta['ids']))
people = trakt_api.get_people(SECTIONS.MOVIES, trakt_id) if kodi.get_setting('include_people') == 'true' else None
info = salts_utils.make_info(movie_meta, people=people)
art = image_scraper.get_images(VIDEO_TYPES.MOVIE, movie_meta['ids'])
path = make_path(path, video_type, movie_meta['title'], movie_meta['year'])
file_name = utils2.filename_from_title(movie_meta['title'], video_type, movie_meta['year'])
except TransientTraktError as e:
logger.log('During Playback: %s' % (str(e)), log_utils.LOGWARNING) # just log warning if trakt calls fail and leave meta and art blank
wd.update(75)
if mode in [MODES.DOWNLOAD_SOURCE, MODES.DIRECT_DOWNLOAD]:
utils.download_media(stream_url, path, file_name, kodi.Translations(strings.STRINGS))
return True
with kodi.WorkingDialog() as wd:
wd.update(75)
if video_type == VIDEO_TYPES.EPISODE and utils2.srt_download_enabled() and show_meta:
srt_path = download_subtitles(kodi.get_setting('subtitle-lang'), show_meta['title'], show_meta['year'], season, episode)
if utils2.srt_show_enabled() and srt_path:
logger.log('Setting srt path: %s' % (srt_path), log_utils.LOGDEBUG)
win.setProperty('salts.playing.srt', srt_path)
listitem = xbmcgui.ListItem(path=stream_url, iconImage=art['thumb'], thumbnailImage=art['thumb'])
listitem.setProperty('fanart_image', art['fanart'])
try: listitem.setArt(art)
except: pass
listitem.setPath(stream_url)
listitem.setInfo('video', info)
wd.update(100)
if mode == MODES.RESOLVE_SOURCE or from_library or utils2.from_playlist():
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
else:
xbmc.Player().play(stream_url, listitem)
return True
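# auto_play_sources() tries each non-multi-part hoster in list order and returns as soon as
# play_source() succeeds; if every source fails the user is notified.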
def auto_play_sources(hosters, video_type, trakt_id, season, episode):
total_hosters = len(hosters)
active = False if kodi.get_setting('pd_force_disable') == 'true' else True
with kodi.ProgressDialog(i18n('trying_autoplay'), active=active) as pd:
prev = ''
for i, item in enumerate(hosters):
if item['multi-part']:
continue
percent = i * 100 / total_hosters
current = i18n('trying_source') % (item['quality'], item['host'], item['class'].get_name())
pd.update(percent, current, prev)
if pd.is_canceled(): return False
hoster_url = item['class'].resolve_link(item['url'])
logger.log('Auto Playing: %s' % (hoster_url), log_utils.LOGDEBUG)
if play_source(MODES.GET_SOURCES, hoster_url, item['direct'], video_type, trakt_id, season, episode):
return True
if pd.is_canceled(): return False
prev = i18n('failed_source') % (item['quality'], item['host'], item['class'].get_name())
else:
msg = i18n('all_sources_failed')
logger.log(msg, log_utils.LOGERROR)
kodi.notify(msg=msg, duration=5000)
def pick_source_dialog(hosters):
for item in hosters:
if item['multi-part']:
continue
item['label'] = utils2.format_source_label(item)
dialog = xbmcgui.Dialog()
index = dialog.select(i18n('choose_stream'), [item['label'] for item in hosters if 'label' in item])
if index > -1:
try:
hoster = hosters[index]
if hoster['url']:
hoster_url = hoster['class'].resolve_link(hoster['url'])
logger.log('Attempting to play url: %s as direct: %s from: %s' % (hoster_url, hoster['direct'], hoster['class'].get_name()), log_utils.LOGNOTICE)
return hoster_url, hoster['direct']
except Exception as e:
logger.log('Error (%s) while trying to resolve %s' % (str(e), hoster['url']), log_utils.LOGERROR)
return None, None
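# The "source directory" flow caches the scraped hosters in the database and re-enters the plugin
# via MODES.BUILD_SOURCE_DIR, which reads them back and renders one playable (or downloadable) list
# item per hoster.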
def pick_source_dir(prev_mode, hosters, video_type, trakt_id, season, episode):
db_connection.cache_sources(hosters)
queries = {'mode': MODES.BUILD_SOURCE_DIR, 'prev_mode': prev_mode, 'video_type': video_type, 'trakt_id': trakt_id, 'season': season, 'episode': episode}
plugin_url = kodi.get_plugin_url(queries)
kodi.update_container(plugin_url)
@url_dispatcher.register(MODES.BUILD_SOURCE_DIR, ['prev_mode', 'video_type', 'trakt_id'], ['season', 'episode'])
def build_source_dir(prev_mode, video_type, trakt_id, season='', episode=''):
if prev_mode == MODES.DOWNLOAD_SOURCE:
next_mode = MODES.DIRECT_DOWNLOAD
playable = False
else:
next_mode = MODES.RESOLVE_SOURCE
playable = True
scrapers = salts_utils.relevant_scrapers(video_type, as_dict=True)
hosters = db_connection.get_cached_sources()
hosters_len = len(hosters)
for item in hosters:
if item['name'] in scrapers:
item['class'] = scrapers[item['name']]()
else:
logger.log('Skipping hoster with unknown name: %s' % (item))
continue
if item['multi-part']:
continue
menu_items = []
item['label'] = utils2.format_source_label(item)
queries = {'mode': MODES.SET_VIEW, 'content_type': CONTENT_TYPES.FILES}
menu_items.append((i18n('set_as_sources_view'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if next_mode == MODES.RESOLVE_SOURCE:
queries = {'mode': MODES.DIRECT_DOWNLOAD, 'class_url': item['url'], 'direct': item['direct'], 'video_type': video_type, 'trakt_id': trakt_id,
'season': season, 'episode': episode, 'class_name': item['class'].get_name()}
menu_items.append((i18n('download_source'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': next_mode, 'class_url': item['url'], 'direct': item['direct'], 'video_type': video_type, 'trakt_id': trakt_id,
'season': season, 'episode': episode, 'class_name': item['class'].get_name()}
kodi.create_item(queries, utils2.cleanse_title(item['label']), is_folder=False, is_playable=playable, total_items=hosters_len, menu_items=menu_items)
kodi.set_view(CONTENT_TYPES.FILES, True)
kodi.end_of_directory()
@url_dispatcher.register(MODES.SET_URL_MANUAL, ['mode', 'video_type', 'title', 'year', 'trakt_id'], ['season', 'episode', 'ep_title', 'ep_airdate'])
@url_dispatcher.register(MODES.SET_URL_SEARCH, ['mode', 'video_type', 'title', 'year', 'trakt_id'], ['season', 'episode', 'ep_title', 'ep_airdate'])
def set_related_url(mode, video_type, title, year, trakt_id, season='', episode='', ep_title='', ep_airdate=''):
try:
video = ScraperVideo(video_type, title, year, trakt_id, season, episode, ep_title, ep_airdate)
workers, related_list = get_related_urls(video)
while True:
dialog = xbmcgui.Dialog()
if mode == MODES.SET_URL_SEARCH:
select_list = [('***%s' % (i18n('manual_search_all')))]
adjustment = 2
else:
adjustment = 1
select_list = []
select_list += ['***%s' % (i18n('rescrape_all'))]
select_list += [related['label'] for related in related_list]
index = dialog.select(i18n('url_to_change') % (video_type), select_list)
if index == 0:
if mode == MODES.SET_URL_SEARCH:
related_list = sru_search_all(video, related_list)
else:
related_list = reset_all_urls(video, related_list)
elif index == 1 and mode == MODES.SET_URL_SEARCH:
related_list = reset_all_urls(video, related_list)
elif index > adjustment - 1:
index = index - adjustment
if mode == MODES.SET_URL_MANUAL:
related = related_list[index]
heading = i18n('rel_url_at') % (video_type, related['name'])
new_url = kodi.get_keyboard(heading, related['url'])
if new_url is not None:
salts_utils.update_url(video, related['name'], related['url'], new_url)
kodi.notify(msg=i18n('rel_url_set') % (related['name']), duration=5000)
related['label'] = '[%s] %s' % (related['name'], new_url)
elif mode == MODES.SET_URL_SEARCH:
sru_search(video, related_list[index])
else:
break
finally:
try: worker_pool.reap_workers(workers, None)
except UnboundLocalError: pass
def sru_search_all(video, related_list):
blank_list = [related for related in related_list if not related['url']]
if not blank_list: return related_list
temp_title, temp_year, temp_season = get_search_fields(video.video_type, video.title, video.year, video.season)
timeout = max_timeout = int(kodi.get_setting('source_timeout'))
if max_timeout == 0: timeout = None
begin = time.time()
with kodi.ProgressDialog(i18n('set_related_url'), utils2.make_progress_msg(video)) as pd:
try:
wp = worker_pool.WorkerPool()
total_scrapers = len(blank_list)
for i, related in enumerate(blank_list):
logger.log('Searching for: |%s|%s|%s|%s|' % (related['name'], temp_title, temp_year, temp_season), log_utils.LOGDEBUG)
wp.request(salts_utils.parallel_search, [related['class'], video.video_type, temp_title, temp_year, temp_season])
progress = i * 50 / total_scrapers
pd.update(progress, line2=i18n('req_result') % (related['name']))
fails = set([item['name'] for item in blank_list])
result_count = 0
while result_count < total_scrapers:
try:
logger.log('Waiting for Urls - Timeout: %s' % (timeout), log_utils.LOGDEBUG)
results = wp.receive(timeout)
fails.remove(results['name'])
result_count += 1
logger.log('Got result: %s' % (results), log_utils.LOGDEBUG)
if results['results']:
for i, item in enumerate(related_list):
if item['name'] == results['name']:
first = results['results'][0]
salts_utils.update_url(video, item['name'], item['url'], first['url'])
item['url'] = first['url']
item['label'] = '[%s] %s' % (item['name'], first['url'])
progress = (result_count * 50 / total_scrapers) + 50
if len(fails) > 5:
line3 = i18n('remaining_over') % (len(fails), total_scrapers)
else:
line3 = i18n('remaining_under') % (', '.join(fails))
pd.update(progress, line2=i18n('recv_result') % (results['name']), line3=line3)
if max_timeout > 0:
timeout = max_timeout - (time.time() - begin)
if timeout < 0: timeout = 0
except worker_pool.Empty:
logger.log('Get Url Timeout', log_utils.LOGWARNING)
break
else:
logger.log('All source results received', log_utils.LOGDEBUG)
finally:
workers = wp.close()
salts_utils.record_sru_failures(fails, total_scrapers, related_list)
worker_pool.reap_workers(workers, None)
return related_list
def reset_all_urls(video, related_list):
for related in related_list:
salts_utils.update_url(video, related['name'], related['url'], '')
try:
workers, related_list = get_related_urls(video)
return related_list
finally:
try: worker_pool.reap_workers(workers, None)
except UnboundLocalError: pass
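# get_related_urls() mirrors the get_sources() pattern: one parallel_get_url request per relevant
# scraper, results drained against a countdown timeout, and unanswered scrapers recorded via
# record_sru_failures(). The worker list is returned so the caller can reap the workers.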
def get_related_urls(video):
timeout = max_timeout = int(kodi.get_setting('source_timeout'))
if max_timeout == 0: timeout = None
begin = time.time()
with kodi.ProgressDialog(i18n('set_related_url'), utils2.make_progress_msg(video)) as pd:
try:
wp = worker_pool.WorkerPool()
scrapers = salts_utils.relevant_scrapers(video.video_type, order_matters=True)
related_list = []
total_scrapers = len(scrapers)
for i, cls in enumerate(scrapers):
scraper = cls(max_timeout)
wp.request(salts_utils.parallel_get_url, [scraper, video])
related_list.append({'class': scraper, 'url': '', 'name': cls.get_name(), 'label': '[%s]' % (cls.get_name())})
progress = i * 50 / total_scrapers
pd.update(progress, line2=i18n('req_result') % (cls.get_name()))
fails = set([item['name'] for item in related_list])
result_count = 0
while result_count < total_scrapers:
try:
logger.log('Waiting for Urls - Timeout: %s' % (timeout), log_utils.LOGDEBUG)
result = wp.receive(timeout)
result_count += 1
logger.log('Got result: %s' % (result), log_utils.LOGDEBUG)
for i, item in enumerate(related_list):
if item['name'] == result['name']:
related_list[i] = result
fails.remove(result['name'])
progress = (result_count * 50 / total_scrapers) + 50
if len(fails) > 5:
line3 = i18n('remaining_over') % (len(fails), total_scrapers)
else:
line3 = i18n('remaining_under') % (', '.join(fails))
pd.update(progress, line2=i18n('recv_result') % (result['name']), line3=line3)
if max_timeout > 0:
timeout = max_timeout - (time.time() - begin)
if timeout < 0: timeout = 0
except worker_pool.Empty:
logger.log('Get Url Timeout', log_utils.LOGWARNING)
break
else:
logger.log('All source results received', log_utils.LOGDEBUG)
finally:
workers = wp.close()
salts_utils.record_sru_failures(fails, total_scrapers, related_list)
return workers, related_list
def sru_search(video, related):
temp_title, temp_year, temp_season = video.title, video.year, video.season
while True:
dialog = xbmcgui.Dialog()
choices = [i18n('manual_search'), '[COLOR green]%s[/COLOR]' % (i18n('force_no_match'))]
try:
logger.log('Searching for: |%s|%s|%s|' % (temp_title, temp_year, temp_season), log_utils.LOGDEBUG)
results = related['class'].search(video.video_type, temp_title, temp_year, temp_season)
choices += ['%s (%s)' % (result['title'], result['year']) if result['year'] else result['title'] for result in results]
results_index = dialog.select(i18n('select_related'), choices)
if results_index == 0:
temp_title, temp_year, temp_season = get_search_fields(video.video_type, temp_title, temp_year, temp_season)
elif results_index >= 1:
if results_index == 1:
salts_utils.update_url(video, related['name'], related['url'], FORCE_NO_MATCH)
related['label'] = '[%s] [COLOR green]%s[/COLOR]' % (related['name'], i18n('force_no_match'))
else:
salts_utils.update_url(video, related['name'], related['url'], results[results_index - 2]['url'])
related['label'] = '[%s] %s' % (related['name'], results[results_index - 2]['url'])
kodi.notify(msg=i18n('rel_url_set') % (related['name']), duration=5000)
break
else:
break
except NotImplementedError:
logger.log('%s Scraper does not support searching.' % (related['class'].get_name()), log_utils.LOGDEBUG)
kodi.notify(msg=i18n('scraper_no_search'), duration=5000)
break
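# get_search_fields() pre-fills the keyboard with 'Title (Year) Season N' and parses the edited
# text back into title/year/season using the regexes below.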
def get_search_fields(video_type, search_title, search_year, search_season):
text = search_title
if search_year: text = '%s (%s)' % (text, search_year)
if video_type == VIDEO_TYPES.SEASON and search_season:
text += ' Season %s' % (search_season)
search_text = kodi.get_keyboard(i18n('enter_search'), text)
if search_text is not None:
match = re.match(r'(.*?)\(?(\d{4})\)?', search_text)
if match:
search_title, search_year = match.groups()
search_title = search_title.strip()
else:
search_title = search_text
search_year = ''
match = re.search(r'Season\s+(\d+)', search_text)
try: search_season = match.group(1)
except: search_season = ''
return search_title, search_year, search_season
@url_dispatcher.register(MODES.RATE, ['section', 'id_type', 'show_id'], ['season', 'episode'])
def rate_media(section, id_type, show_id, season='', episode=''):
# disabled until fixes for rating are made in official addon
if id_type == 'imdb' and xbmc.getCondVisibility('System.HasAddon(script.trakt)'):
run = 'RunScript(script.trakt, action=rate, media_type=%s, remoteid=%s'
if section == SECTIONS.MOVIES:
run = (run + ')') % ('movie', show_id)
else:
if season and episode:
run = (run + ', season=%s, episode=%s)') % ('episode', show_id, season, episode)
elif season:
run = (run + ', season=%s)') % ('season', show_id, season)
else:
run = (run + ')') % ('show', show_id)
xbmc.executebuiltin(run)
else:
item = {id_type: show_id}
while True:
rating = kodi.get_keyboard(i18n('enter_rating'))
if rating is not None:
rating = rating.lower()
if rating in ['unrate'] + [str(i) for i in range(1, 11)]:
break
else:
return
if rating == 'unrate': rating = None
trakt_api.rate(section, item, rating, season, episode)
@url_dispatcher.register(MODES.EDIT_TVSHOW_ID, ['title'], ['year'])
def edit_tvshow_id(title, year=''):
tvshow_id = SRT_Scraper().get_tvshow_id(title, year)
new_id = kodi.get_keyboard(i18n('input_tvshow_id'), tvshow_id)
if new_id is not None:
db_connection.set_related_url(VIDEO_TYPES.TVSHOW, title, year, SRT_SOURCE, new_id)
@url_dispatcher.register(MODES.REM_FROM_LIST, ['slug', 'section', 'id_type', 'show_id'])
def remove_from_list(slug, section, id_type, show_id):
item = {'type': TRAKT_SECTIONS[section][:-1], id_type: show_id}
remove_many_from_list(section, item, slug)
kodi.refresh_container()
def remove_many_from_list(section, items, slug):
if slug == utils.WATCHLIST_SLUG:
response = trakt_api.remove_from_watchlist(section, items)
else:
response = trakt_api.remove_from_list(section, slug, items)
return response
@url_dispatcher.register(MODES.ADD_TO_COLL, ['mode', 'section', 'id_type', 'show_id'])
@url_dispatcher.register(MODES.REM_FROM_COLL, ['mode', 'section', 'id_type', 'show_id'])
def manage_collection(mode, section, id_type, show_id):
item = {id_type: show_id}
if mode == MODES.ADD_TO_COLL:
trakt_api.add_to_collection(section, item)
msg = i18n('item_to_collection')
else:
trakt_api.remove_from_collection(section, item)
msg = i18n('item_from_collection')
kodi.notify(msg=msg)
kodi.refresh_container()
@url_dispatcher.register(MODES.ADD_TO_LIST, ['section', 'id_type', 'show_id'], ['slug'])
def add_to_list(section, id_type, show_id, slug=None):
response = add_many_to_list(section, {id_type: show_id}, slug)
if response is not None:
kodi.notify(msg=i18n('item_to_list'))
def add_many_to_list(section, items, slug=None):
if not slug:
result = utils.choose_list(Trakt_API, kodi.Translations(strings.STRINGS))
if result:
slug, _name = result
if slug == utils.WATCHLIST_SLUG:
response = trakt_api.add_to_watchlist(section, items)
elif slug:
response = trakt_api.add_to_list(section, slug, items)
else:
response = None
return response
@url_dispatcher.register(MODES.COPY_LIST, ['section', 'slug'], ['username', 'target_slug'])
def copy_list(section, slug, username=None, target_slug=None):
if slug == COLLECTION_SLUG:
items = trakt_api.get_collection(section)
else:
items = trakt_api.show_list(slug, section, username)
copy_items = []
for item in items:
query = utils2.show_id(item)
copy_item = {'type': TRAKT_SECTIONS[section][:-1], query['id_type']: query['show_id']}
copy_items.append(copy_item)
response = add_many_to_list(section, copy_items, target_slug)
if response:
added = sum(response['added'].values())
exists = sum(response['existing'].values())
not_found = sum([len(item) for item in response['not_found'].values()])
kodi.notify(msg=i18n('list_copied') % (added, exists, not_found), duration=5000)
@url_dispatcher.register(MODES.TOGGLE_TITLE, ['trakt_id'])
def toggle_title(trakt_id):
trakt_id = str(trakt_id)
filter_list = utils2.get_force_title_list()
if trakt_id in filter_list:
del filter_list[filter_list.index(trakt_id)]
else:
filter_list.append(trakt_id)
filter_str = '|'.join(filter_list)
kodi.set_setting('force_title_match', filter_str)
kodi.refresh_container()
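# The progress "skip" and "force include" lists are stored as pipe-delimited trakt ids in the
# 'progress_skip_cache' and 'force_include_progress' settings: ADD puts a show on the skip list,
# REMOVE takes it off and forces it back into the progress view.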
@url_dispatcher.register(MODES.MANAGE_PROGRESS, ['action', 'trakt_id'])
def manage_progress_cache(action, trakt_id):
trakt_id = str(trakt_id)
filter_list = utils2.get_progress_skip_list()
force_list = utils2.get_force_progress_list()
filtered = trakt_id in filter_list
forced = trakt_id in force_list
if action == ACTIONS.REMOVE and filtered:
del filter_list[filter_list.index(trakt_id)]
force_list.append(trakt_id)
elif action == ACTIONS.ADD and not filtered and not forced:
filter_list.append(trakt_id)
filter_str = '|'.join(filter_list)
kodi.set_setting('progress_skip_cache', filter_str)
force_str = '|'.join(force_list)
kodi.set_setting('force_include_progress', force_str)
if action == ACTIONS.REMOVE:
kodi.refresh_container()
@url_dispatcher.register(MODES.TOGGLE_WATCHED, ['section', 'id_type', 'show_id'], ['watched', 'season', 'episode'])
def toggle_watched(section, id_type, show_id, watched=True, season='', episode=''):
logger.log('In Watched: |%s|%s|%s|%s|%s|%s|' % (section, id_type, show_id, season, episode, watched), log_utils.LOGDEBUG)
item = {id_type: show_id}
trakt_api.set_watched(section, item, season, episode, watched)
w_str = i18n('watched') if watched else i18n('unwatched')
kodi.notify(msg=i18n('marked_as') % (w_str), duration=5000)
kodi.refresh_container()
@url_dispatcher.register(MODES.URL_EXISTS, ['trakt_id'])
def toggle_url_exists(trakt_id):
trakt_id = str(trakt_id)
show_str = kodi.get_setting('exists_list')
if show_str:
show_list = show_str.split('|')
else:
show_list = []
if trakt_id in show_list:
show_list.remove(trakt_id)
else:
show_list.append(trakt_id)
show_str = '|'.join(show_list)
kodi.set_setting('exists_list', show_str)
kodi.refresh_container()
@url_dispatcher.register(MODES.UPDATE_SUBS)
def update_subscriptions():
logger.log('Updating Subscriptions', log_utils.LOGDEBUG)
active = kodi.get_setting(MODES.UPDATE_SUBS + '-notify') == 'true'
with kodi.ProgressDialog(kodi.get_name(), line1=i18n('updating_subscriptions'), background=True, active=active) as pd:
update_strms(SECTIONS.TV, pd)
if kodi.get_setting('include_movies') == 'true':
update_strms(SECTIONS.MOVIES, pd)
if kodi.get_setting('library-update') == 'true':
xbmc.executebuiltin('UpdateLibrary(video)')
if kodi.get_setting('cleanup-subscriptions') == 'true':
clean_subs()
now = datetime.datetime.now()
db_connection.set_setting('%s-last_run' % MODES.UPDATE_SUBS, now.strftime("%Y-%m-%d %H:%M:%S.%f"))
if active and kodi.get_setting('auto-' + MODES.UPDATE_SUBS) == 'true':
kodi.notify(msg=i18n('next_update') % (float(kodi.get_setting(MODES.UPDATE_SUBS + '-interval'))), duration=5000)
kodi.refresh_container()
def update_strms(section, dialog=None):
section_params = utils2.get_section_params(section)
slug = kodi.get_setting('%s_sub_slug' % (section))
if not slug:
return
elif slug == utils.WATCHLIST_SLUG:
items = trakt_api.show_watchlist(section)
else:
items = trakt_api.show_list(slug, section)
length = len(items)
for i, item in enumerate(items):
percent_progress = (i + 1) * 100 / length
title = re.sub(r'\s+\(\d{4}\)$', '', item['title'])
dialog.update(percent_progress, '%s %s: %s (%s)' % (i18n('updating'), section, title, item['year']))
try:
add_to_library(section_params['video_type'], item['title'], item['year'], item['ids']['trakt'])
except Exception as e:
logger.log('Subscription Update Exception: |%s|%s|%s|%s| - %s' % (section_params['video_type'], item['title'], item['year'], item['ids']['trakt'], e), log_utils.LOGDEBUG)
@url_dispatcher.register(MODES.CLEAN_SUBS)
def clean_subs():
slug = kodi.get_setting('TV_sub_slug')
if not slug:
return
elif slug == utils.WATCHLIST_SLUG:
items = trakt_api.show_watchlist(SECTIONS.TV)
else:
items = trakt_api.show_list(slug, SECTIONS.TV)
del_items = []
for item in items:
show = trakt_api.get_show_details(item['ids']['trakt'])
if show['status'].upper() in ['ENDED', 'CANCELED', 'CANCELLED']:
show_id = utils2.show_id(item)
del_items.append({show_id['id_type']: show_id['show_id']})
if del_items:
if slug == utils.WATCHLIST_SLUG:
trakt_api.remove_from_watchlist(SECTIONS.TV, del_items)
else:
trakt_api.remove_from_list(SECTIONS.TV, slug, del_items)
@url_dispatcher.register(MODES.REFRESH_IMAGES, ['video_type', 'ids'], ['season', 'episode'])
def refresh_images(video_type, ids, season='', episode=''):
ids = json.loads(ids)
images = image_scraper.get_images(video_type, ids, season, episode)
salts_utils.clear_thumbnails(images)
image_scraper.clear_cache(video_type, ids, season, episode)
image_scraper.get_images(video_type, ids, season, episode, cached=False)
kodi.refresh_container()
@url_dispatcher.register(MODES.FLUSH_CACHE)
def flush_cache():
dlg = xbmcgui.Dialog()
ln1 = i18n('flush_cache_line1')
ln2 = i18n('flush_cache_line2')
ln3 = ''
yes = i18n('keep')
no = i18n('delete')
if dlg.yesno(i18n('flush_web_cache'), ln1, ln2, ln3, yes, no):
with kodi.WorkingDialog() as wd:
start = None
while not xbmc.abortRequested:
days_left = db_connection.prune_cache(prune_age=0)
if start is None: start = days_left
if days_left:
wd.update(100 * (start - days_left) / start)
else:
# call flush_cache at the end to trigger vacuum for SQLITE
wd.update(100)
db_connection.flush_cache()
break
kodi.refresh_container()
@url_dispatcher.register(MODES.FLUSH_IMAGES)
def flush_image_cache():
dlg = xbmcgui.Dialog()
ln1 = i18n('flush_image_line1')
ln2 = i18n('flush_image_line2')
ln3 = ''
yes = i18n('keep')
no = i18n('delete')
if dlg.yesno(i18n('flush_image_cache'), ln1, ln2, ln3, yes, no):
with kodi.WorkingDialog():
db_connection.flush_image_cache()
kodi.notify(msg=i18n('flush_complete'))
@url_dispatcher.register(MODES.PRUNE_CACHE)
def prune_cache():
monitor = xbmc.Monitor()
while not monitor.abortRequested():
if xbmc.getInfoLabel('Container.PluginName') != kodi.get_id():
if not db_connection.prune_cache():
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
db_connection.set_setting('%s-last_run' % (MODES.PRUNE_CACHE), now)
logger.log('Prune Completed Successfully @ %s' % (now), log_utils.LOGDEBUG)
break
else:
logger.log('SALTS Active... Busy... Postponing [%s]' % (MODES.PRUNE_CACHE), log_utils.LOGDEBUG)
if monitor.waitForAbort(30):  # waitForAbort() takes its timeout in seconds
break
@url_dispatcher.register(MODES.RESET_DB)
def reset_db():
if db_connection.reset_db():
message = i18n('db_reset_success')
else:
message = i18n('db_on_sqlite')
kodi.notify(msg=message)
@url_dispatcher.register(MODES.EXPORT_DB)
def export_db():
try:
dialog = xbmcgui.Dialog()
export_path = dialog.browse(0, i18n('select_export_dir'), 'files').encode('utf-8')
if export_path:
export_path = kodi.translate_path(export_path)
export_filename = kodi.get_keyboard(i18n('enter_export_name'), 'export.csv')
if export_filename is not None:
export_file = export_path + export_filename
db_connection.export_from_db(export_file)
kodi.notify(header=i18n('export_successful'), msg=i18n('exported_to') % (export_file), duration=5000)
except Exception as e:
logger.log('Export Failed: %s' % (e), log_utils.LOGERROR)
kodi.notify(header=i18n('export'), msg=i18n('export_failed'))
@url_dispatcher.register(MODES.IMPORT_DB)
def import_db():
try:
dialog = xbmcgui.Dialog()
import_file = dialog.browse(1, i18n('select_import_file'), 'files').encode('utf-8')
if import_file:
import_file = kodi.translate_path(import_file)
db_connection.import_into_db(import_file)
kodi.notify(header=i18n('import_success'), msg=i18n('imported_from') % (import_file))
except Exception as e:
logger.log('Import Failed: %s' % (e), log_utils.LOGERROR)
kodi.notify(header=i18n('import'), msg=i18n('import_failed'))
@url_dispatcher.register(MODES.ADD_TO_LIBRARY, ['video_type', 'title', 'year', 'trakt_id'])
def man_add_to_library(video_type, title, year, trakt_id):
try:
if video_type == VIDEO_TYPES.MOVIE and year:
msg = '%s (%s)' % (title, year)
else:
msg = title
add_to_library(video_type, title, year, trakt_id)
except Exception as e:
kodi.notify(msg=i18n('not_added_to_lib') % (msg, e), duration=5000)
return
kodi.notify(msg=i18n('added_to_lib') % (msg), duration=5000)
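# add_to_library() writes .strm files under the configured library folders: one per episode for
# shows (honouring the specials/unaired/exclude_local settings) and one per movie. Depending on
# create_nfo (0 = never, 1 = only when the folder name may not scrape, 2 = always) an .nfo pointing
# at tvdb/tmdb/imdb is written alongside.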
def add_to_library(video_type, title, year, trakt_id):
logger.log('Creating .strm for |%s|%s|%s|%s|' % (video_type, title, year, trakt_id), log_utils.LOGDEBUG)
scraper = local_scraper.Scraper()
exclude_local = kodi.get_setting('exclude_local') == 'true'
create_nfo = int(kodi.get_setting('create_nfo')) # 0 = None | 1 = Won't scrape | 2 = All
if video_type == VIDEO_TYPES.TVSHOW:
save_path = kodi.get_setting('tvshow-folder')
save_path = kodi.translate_path(save_path)
show = trakt_api.get_show_details(trakt_id)
show['title'] = re.sub(r' \(\d{4}\)$', '', show['title'])  # strip off year if it's part of show title
seasons = trakt_api.get_seasons(trakt_id)
include_unknown = kodi.get_setting('include_unknown') == 'true'
if not seasons:
logger.log('No Seasons found for %s (%s)' % (show['title'], show['year']), log_utils.LOGERROR)
else:
if create_nfo > 0:
show_path = make_path(save_path, video_type, show['title'], show['year'])
if ((create_nfo == 1) and (show['title'] not in show_path)) or create_nfo == 2:
write_nfo(show_path, video_type, show['ids'])
for season in seasons:
season_num = season['number']
if kodi.get_setting('include_specials') == 'true' or season_num != 0:
episodes = trakt_api.get_episodes(trakt_id, season_num)
for episode in episodes:
ep_num = episode['number']
air_date = utils2.make_air_date(episode['first_aired'])
if exclude_local:
ep_url = scraper.get_url(ScraperVideo(VIDEO_TYPES.EPISODE, title, year, trakt_id, season_num, ep_num, episode['title'], air_date))
if ep_url and ep_url != FORCE_NO_MATCH:
continue
if utils2.show_requires_source(trakt_id):
require_source = True
else:
if (episode['first_aired'] is not None and utils.iso_2_utc(episode['first_aired']) <= time.time()) or (include_unknown and episode['first_aired'] is None):
require_source = False
else:
continue
filename = utils2.filename_from_title(show['title'], video_type) + '.strm'
filename = filename % ('%02d' % int(season_num), '%02d' % int(ep_num))
final_path = os.path.join(make_path(save_path, video_type, show['title'], show['year'], season=season_num), filename)
strm_string = kodi.get_plugin_url({'mode': MODES.GET_SOURCES, 'video_type': VIDEO_TYPES.EPISODE, 'title': show['title'], 'year': year, 'season': season_num,
'episode': ep_num, 'trakt_id': trakt_id, 'ep_title': episode['title'], 'ep_airdate': air_date})
write_strm(strm_string, final_path, VIDEO_TYPES.EPISODE, show['title'], show['year'], trakt_id, season_num, ep_num, require_source=require_source)
elif video_type == VIDEO_TYPES.MOVIE:
if exclude_local:
movie_url = scraper.get_url(ScraperVideo(video_type, title, year, trakt_id))
if movie_url and movie_url != FORCE_NO_MATCH:
raise Exception(i18n('local_exists'))
save_path = kodi.get_setting('movie-folder')
save_path = kodi.translate_path(save_path)
if create_nfo > 0:
movie_path = make_path(save_path, video_type, title, year)
if ((create_nfo == 1) and (title not in movie_path)) or create_nfo == 2:
movie = trakt_api.get_movie_details(trakt_id)
write_nfo(movie_path, video_type, movie['ids'])
strm_string = kodi.get_plugin_url({'mode': MODES.GET_SOURCES, 'video_type': video_type, 'title': title, 'year': year, 'trakt_id': trakt_id})
filename = utils2.filename_from_title(title, VIDEO_TYPES.MOVIE, year) + '.strm'
final_path = os.path.join(make_path(save_path, video_type, title, year), filename)
write_strm(strm_string, final_path, VIDEO_TYPES.MOVIE, title, year, trakt_id, require_source=kodi.get_setting('require_source') == 'true')
def make_path(base_path, video_type, title, year='', season=''):
show_folder = re.sub(r'[^\w\-_\. ]', '_', title)
show_folder = '%s (%s)' % (show_folder, year) if year else show_folder
path = os.path.join(base_path, show_folder)
if (video_type == VIDEO_TYPES.TVSHOW) and season:
path = os.path.join(path, 'Season %s' % (season))
return path
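# nfo_url() builds the single-url .nfo contents, preferring the TVDB id, then TMDB, then IMDB.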
def nfo_url(video_type, ids):
tvdb_url = 'http://thetvdb.com/?tab=series&id=%s'
tmdb_url = 'https://www.themoviedb.org/%s/%s'
imdb_url = 'http://www.imdb.com/title/%s/'
if 'tvdb' in ids:
return tvdb_url % (str(ids['tvdb']))
elif 'tmdb' in ids:
if video_type == VIDEO_TYPES.TVSHOW:
media_string = 'tv'
else:
media_string = 'movie'
return tmdb_url % (media_string, str(ids['tmdb']))
elif 'imdb' in ids:
return imdb_url % (str(ids['imdb']))
else:
return ''
def write_nfo(path, video_type, meta_ids):
nfo_string = nfo_url(video_type, meta_ids)
if nfo_string:
filename = video_type.lower().replace(' ', '') + '.nfo'
path = os.path.join(path, filename)
path = xbmc.makeLegalFilename(path)
if not xbmcvfs.exists(os.path.dirname(path)):
try:
try: xbmcvfs.mkdirs(os.path.dirname(path))
except: os.makedirs(os.path.dirname(path))
except Exception as e:
logger.log('Failed to create directory %s: %s' % (path, str(e)), log_utils.LOGERROR)
old_nfo_string = ''
try:
f = xbmcvfs.File(path, 'r')
old_nfo_string = f.read()
f.close()
except: pass
if nfo_string != old_nfo_string:
try:
logger.log('Writing nfo: %s' % nfo_string, log_utils.LOGDEBUG)
file_desc = xbmcvfs.File(path, 'w')
file_desc.write(nfo_string)
file_desc.close()
except Exception as e:
logger.log('Failed to create .nfo file (%s): %s' % (path, e), log_utils.LOGERROR)
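# write_strm() creates the target directory if needed and only rewrites the .strm when its contents
# differ from what is already on disk; with require_source set, the file is only written if some
# scraper actually has a url for the video.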
def write_strm(stream, path, video_type, title, year, trakt_id, season='', episode='', require_source=False):
path = xbmc.makeLegalFilename(path)
if not xbmcvfs.exists(os.path.dirname(path)):
try:
try: xbmcvfs.mkdirs(os.path.dirname(path))
except: os.makedirs(os.path.dirname(path))
except Exception as e:
logger.log('Failed to create directory %s: %s' % (path, str(e)), log_utils.LOGERROR)
try:
f = xbmcvfs.File(path, 'r')
old_strm_string = f.read()
f.close()
except:
old_strm_string = ''
# print "Old String: %s; New String %s" %(old_strm_string,strm_string)
# string will be blank if file doesn't exist or is blank
if stream != old_strm_string:
try:
if not require_source or salts_utils.url_exists(ScraperVideo(video_type, title, year, trakt_id, season, episode)):
logger.log('Writing strm: %s' % stream, log_utils.LOGDEBUG)
file_desc = xbmcvfs.File(path, 'w')
file_desc.write(stream)
file_desc.close()
else:
logger.log('No strm written for |%s|%s|%s|%s|%s|' % (video_type, title, year, season, episode), log_utils.LOGWARNING)
except Exception as e:
logger.log('Failed to create .strm file (%s): %s' % (path, e), log_utils.LOGERROR)
def show_pickable_list(slug, pick_label, pick_mode, section):
if not slug:
kodi.create_item({'mode': pick_mode, 'section': section}, pick_label, is_folder=False, is_playable=False)
kodi.set_content(CONTENT_TYPES.ADDONS)
kodi.end_of_directory()
else:
show_list(section, slug)
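# make_dir_from_list() renders one directory item per show/movie. When authenticated, watched
# history (and, for non-collection lists, the collection) is pulled from Trakt so items can be
# flagged watched / in-collection, and a "next page" item is appended when the list looks full.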
def make_dir_from_list(section, list_data, slug=None, query=None, page=None):
section_params = utils2.get_section_params(section)
watched = {}
in_collection = {}
if TOKEN:
for item in trakt_api.get_watched(section):
if section == SECTIONS.MOVIES:
watched[item['movie']['ids']['trakt']] = item['plays'] > 0
else:
watched[item['show']['ids']['trakt']] = len([e for s in item['seasons'] if s['number'] != 0 for e in s['episodes']])
if slug == COLLECTION_SLUG:
in_collection = dict.fromkeys([show['ids']['trakt'] for show in list_data], True)
else:
collection = trakt_api.get_collection(section, full=False)
in_collection = dict.fromkeys([show['ids']['trakt'] for show in collection], True)
total_items = len(list_data)
for show in list_data:
menu_items = []
show_id = utils2.show_id(show)
trakt_id = show['ids']['trakt']
if slug and slug != COLLECTION_SLUG:
queries = {'mode': MODES.REM_FROM_LIST, 'slug': slug, 'section': section}
queries.update(show_id)
menu_items.append((i18n('remove_from_list'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
sub_slug = kodi.get_setting('%s_sub_slug' % (section))
if TOKEN and sub_slug:
if sub_slug != slug:
queries = {'mode': MODES.ADD_TO_LIST, 'section': section_params['section'], 'slug': sub_slug}
queries.update(show_id)
menu_items.append((i18n('subscribe'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
elif section == SECTIONS.TV:
if utils2.show_requires_source(trakt_id):
label = i18n('require_aired_only')
else:
label = i18n('require_page_only')
queries = {'mode': MODES.URL_EXISTS, 'trakt_id': trakt_id}
menu_items.append((label, 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if section == SECTIONS.MOVIES:
show['watched'] = watched.get(trakt_id, False)
else:
try:
logger.log('%s/%s: Watched: %s - Aired: %s' % (trakt_id, show['ids']['slug'], watched.get(trakt_id, 'NaN'), show['aired_episodes']), log_utils.LOGDEBUG)
show['watched'] = watched[trakt_id] >= show['aired_episodes']
show['watched_count'] = watched[trakt_id]
except: show['watched'] = False
show['in_collection'] = in_collection.get(trakt_id, False)
liz, liz_url = make_item(section_params, show, menu_items)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=section_params['folder'], totalItems=total_items)
if query and page and total_items >= int(kodi.get_setting('list_size')):
query['page'] = int(page) + 1
label = '%s >>' % (i18n('next_page'))
kodi.create_item(query, label, thumb=utils2.art('nextpage.png'), fanart=utils2.art('fanart.jpg'), is_folder=True)
kodi.set_view(section_params['content_type'], True)
kodi.end_of_directory()
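# make_dir_from_cal() builds a one-week calendar view: previous/next week navigation wraps the
# episodes, watched flags come from the Trakt history, premieres (1x1) are highlighted in green,
# and episodes outside the requested week are skipped.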
def make_dir_from_cal(mode, start_date, days):
start_date = utils2.to_datetime(start_date, '%Y-%m-%d')
last_week = start_date - datetime.timedelta(days=7)
next_week = start_date + datetime.timedelta(days=7)
last_str = datetime.datetime.strftime(last_week, '%Y-%m-%d')
next_str = datetime.datetime.strftime(next_week, '%Y-%m-%d')
label = '<< %s' % (i18n('previous_week'))
kodi.create_item({'mode': mode, 'start_date': last_str}, label, thumb=utils2.art('previous.png'), fanart=utils2.art('fanart.jpg'), is_folder=True)
watched = {}
if TOKEN:
watched_history = trakt_api.get_watched(SECTIONS.TV)
for item in watched_history:
trakt_id = item['show']['ids']['trakt']
watched[trakt_id] = {}
for season in item['seasons']:
watched[trakt_id][season['number']] = {}
for episode in season['episodes']:
watched[trakt_id][season['number']][episode['number']] = True
totalItems = len(days)
for item in days:
episode = item['episode']
show = item['show']
utc_secs = utils.iso_2_utc(episode['first_aired'])
show_date = datetime.date.fromtimestamp(utc_secs)
try: episode['watched'] = watched[show['ids']['trakt']][episode['season']][episode['number']]
except: episode['watched'] = False
if show_date < start_date.date():
logger.log('Skipping show date |%s| before start: |%s|' % (show_date, start_date.date()), log_utils.LOGDEBUG)
continue
elif show_date >= next_week.date():
logger.log('Stopping because show date |%s| >= end: |%s|' % (show_date, next_week.date()), log_utils.LOGDEBUG)
break
date = utils2.make_day(datetime.date.fromtimestamp(utc_secs).isoformat())
if kodi.get_setting('calendar_time') != '0':
date_time = '%s@%s' % (date, utils2.make_time(utc_secs, 'calendar_time'))
else:
date_time = date
menu_items = []
queries = {'mode': MODES.SEASONS, 'trakt_id': show['ids']['trakt'], 'title': show['title'], 'year': show['year'], 'tvdb_id': show['ids']['tvdb']}
menu_items.append((i18n('browse_seasons'), 'Container.Update(%s)' % (kodi.get_plugin_url(queries))),)
liz, liz_url = make_episode_item(show, episode, show_subs=False, menu_items=menu_items)
label = liz.getLabel()
label = '[[COLOR deeppink]%s[/COLOR]] %s - %s' % (date_time, show['title'], label)
if episode['season'] == 1 and episode['number'] == 1:
label = '[COLOR green]%s[/COLOR]' % (label)
liz.setLabel(label)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False, totalItems=totalItems)
label = '%s >>' % (i18n('next_week'))
kodi.create_item({'mode': mode, 'start_date': next_str}, label, thumb=utils2.art('next.png'), fanart=utils2.art('fanart.jpg'), is_folder=True)
kodi.set_content(CONTENT_TYPES.EPISODES)
kodi.end_of_directory()
def make_season_item(season, info, trakt_id, title, year, tvdb_id):
label = '%s %s' % (i18n('season'), season['number'])
ids = {'trakt': trakt_id, 'tvdb': tvdb_id}
art = image_scraper.get_images(VIDEO_TYPES.SEASON, ids, season['number'])
liz = utils.make_list_item(label, season, art)
logger.log('Season Info: %s' % (info), log_utils.LOGDEBUG)
info['mediatype'] = 'season'
liz.setInfo('video', info)
menu_items = []
if 'playcount' in info and info['playcount']:
watched = False
label = i18n('mark_as_unwatched')
else:
watched = True
label = i18n('mark_as_watched')
if TOKEN:
queries = {'mode': MODES.RATE, 'section': SECTIONS.TV, 'season': season['number'], 'id_type': 'trakt', 'show_id': trakt_id}
menu_items.append((i18n('rate_on_trakt'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.TOGGLE_WATCHED, 'section': SECTIONS.TV, 'season': season['number'], 'id_type': 'trakt', 'show_id': trakt_id, 'watched': watched}
menu_items.append((label, 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SET_VIEW, 'content_type': CONTENT_TYPES.SEASONS}
menu_items.append((i18n('set_as_season_view'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.REFRESH_IMAGES, 'video_type': VIDEO_TYPES.SEASON, 'ids': json.dumps(ids), 'season': season['number']}
menu_items.append((i18n('refresh_images'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SET_URL_SEARCH, 'video_type': VIDEO_TYPES.SEASON, 'title': title, 'year': year, 'trakt_id': trakt_id, 'season': season['number']}
menu_items.append((i18n('set_rel_url_search'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SET_URL_MANUAL, 'video_type': VIDEO_TYPES.SEASON, 'title': title, 'year': year, 'trakt_id': trakt_id, 'season': season['number']}
menu_items.append((i18n('set_rel_url_manual'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
liz.addContextMenuItems(menu_items, replaceItems=True)
return liz
def make_episode_item(show, episode, show_subs=True, menu_items=None):
# logger.log('Make Episode: Show: %s, Episode: %s, Show Subs: %s' % (show, episode, show_subs), log_utils.LOGDEBUG)
# logger.log('Make Episode: Episode: %s' % (episode), log_utils.LOGDEBUG)
if menu_items is None: menu_items = []
    show['title'] = re.sub(r' \(\d{4}\)$', '', show['title'])
if episode['title'] is None:
label = '%sx%s' % (episode['season'], episode['number'])
else:
label = '%sx%s %s' % (episode['season'], episode['number'], episode['title'])
if 'first_aired' in episode: utc_air_time = utils.iso_2_utc(episode['first_aired'])
try: time_str = time.asctime(time.localtime(utc_air_time))
except: time_str = i18n('unavailable')
logger.log('First Aired: Title: %s S/E: %s/%s fa: %s, utc: %s, local: %s' %
(show['title'], episode['season'], episode['number'], episode['first_aired'], utc_air_time, time_str), log_utils.LOGDEBUG)
if kodi.get_setting('unaired_indicator') == 'true' and (not episode['first_aired'] or utc_air_time > time.time()):
label = '[I][COLOR chocolate]%s[/COLOR][/I]' % (label)
if show_subs and utils2.srt_indicators_enabled():
srt_scraper = SRT_Scraper()
language = kodi.get_setting('subtitle-lang')
tvshow_id = srt_scraper.get_tvshow_id(show['title'], show['year'])
if tvshow_id is not None:
srts = srt_scraper.get_episode_subtitles(language, tvshow_id, episode['season'], episode['number'])
else:
srts = []
label = utils2.format_episode_label(label, episode['season'], episode['number'], srts)
meta = salts_utils.make_info(episode, show)
art = image_scraper.get_images(VIDEO_TYPES.EPISODE, show['ids'], episode['season'], episode['number'])
liz = utils.make_list_item(label, meta, art)
liz.setInfo('video', meta)
air_date = ''
if episode['first_aired']:
air_date = utils2.make_air_date(episode['first_aired'])
queries = {'mode': MODES.GET_SOURCES, 'video_type': VIDEO_TYPES.EPISODE, 'title': show['title'], 'year': show['year'], 'season': episode['season'], 'episode': episode['number'],
'ep_title': episode['title'], 'ep_airdate': air_date, 'trakt_id': show['ids']['trakt'], 'random': time.time()}
liz_url = kodi.get_plugin_url(queries)
queries = {'video_type': VIDEO_TYPES.EPISODE, 'title': show['title'], 'year': show['year'], 'season': episode['season'], 'episode': episode['number'],
'ep_title': episode['title'], 'ep_airdate': air_date, 'trakt_id': show['ids']['trakt']}
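    # Context-menu entry for the "other" playback mode: with auto-play on we
    # add a manual "select source" entry (shown as a dialog or a directory,
    # per the source-win setting); with auto-play off we add an "auto-play"
    # entry instead.  Either way it is inserted at the top of the context menu.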
if kodi.get_setting('auto-play') == 'true':
queries['mode'] = MODES.SELECT_SOURCE
label = i18n('select_source')
if kodi.get_setting('source-win') == 'Dialog':
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
else:
runstring = 'Container.Update(%s)' % kodi.get_plugin_url(queries)
else:
queries['mode'] = MODES.AUTOPLAY
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
label = i18n('auto-play')
menu_items.insert(0, (label, runstring),)
if kodi.get_setting('show_download') == 'true':
queries = {'mode': MODES.DOWNLOAD_SOURCE, 'video_type': VIDEO_TYPES.EPISODE, 'title': show['title'], 'year': show['year'], 'season': episode['season'], 'episode': episode['number'],
'ep_title': episode['title'], 'ep_airdate': air_date, 'trakt_id': show['ids']['trakt']}
if kodi.get_setting('source-win') == 'Dialog':
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
else:
runstring = 'Container.Update(%s)' % kodi.get_plugin_url(queries)
menu_items.append((i18n('download_source'), runstring),)
show_id = utils2.show_id(show)
queries = {'mode': MODES.ADD_TO_LIST, 'section': SECTIONS.TV}
queries.update(show_id)
menu_items.append((i18n('add_show_to_list'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if episode.get('watched', False):
watched = False
label = i18n('mark_as_unwatched')
else:
watched = True
label = i18n('mark_as_watched')
queries = {'mode': MODES.REFRESH_IMAGES, 'video_type': VIDEO_TYPES.EPISODE, 'ids': json.dumps(show['ids']), 'season': episode['season'], 'episode': episode['number']}
menu_items.append((i18n('refresh_images'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if TOKEN:
show_id = utils2.show_id(show)
queries = {'mode': MODES.RATE, 'section': SECTIONS.TV, 'season': episode['season'], 'episode': episode['number']}
# favor imdb_id for ratings to work with official trakt addon
if show['ids'].get('imdb'):
queries.update({'id_type': 'imdb', 'show_id': show['ids']['imdb']})
else:
queries.update(show_id)
menu_items.append((i18n('rate_on_trakt'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.TOGGLE_WATCHED, 'section': SECTIONS.TV, 'season': episode['season'], 'episode': episode['number'], 'watched': watched}
queries.update(show_id)
menu_items.append((label, 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SET_URL_SEARCH, 'video_type': VIDEO_TYPES.TVSHOW, 'title': show['title'], 'year': show['year'], 'trakt_id': show['ids']['trakt']}
menu_items.append((i18n('set_rel_show_url_search'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SET_URL_SEARCH, 'video_type': VIDEO_TYPES.SEASON, 'title': show['title'], 'year': show['year'], 'trakt_id': show['ids']['trakt'], 'season': episode['season']}
menu_items.append((i18n('set_rel_season_url_search'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SET_URL_MANUAL, 'video_type': VIDEO_TYPES.EPISODE, 'title': show['title'], 'year': show['year'], 'season': episode['season'],
'episode': episode['number'], 'ep_title': episode['title'], 'ep_airdate': air_date, 'trakt_id': show['ids']['trakt']}
menu_items.append((i18n('set_rel_url_manual'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
liz.addContextMenuItems(menu_items, replaceItems=True)
return liz, liz_url
def make_item(section_params, show, menu_items=None):
if menu_items is None: menu_items = []
if not isinstance(show['title'], basestring): show['title'] = ''
    show['title'] = re.sub(r' \(\d{4}\)$', '', show['title'])
label = '%s (%s)' % (show['title'], show['year'])
trakt_id = show['ids']['trakt']
art = image_scraper.get_images(section_params['video_type'], show['ids'])
if kodi.get_setting('include_people') == 'true':
people = trakt_api.get_people(section_params['section'], trakt_id)
cast = salts_utils.make_cast(show['ids'], people)
else:
people = None
cast = None
liz = utils.make_list_item(label, show, art, cast)
liz.setProperty('trakt_id', str(trakt_id))
info = salts_utils.make_info(show, people=people)
# mix-in cast in liz metadata if the setCast method doesn't exist
if cast and getattr(liz, 'setCast', None) is None:
info['castandrole'] = info['cast'] = [(person['name'], person['role']) for person in cast]
if 'TotalEpisodes' in info:
liz.setProperty('TotalEpisodes', str(info['TotalEpisodes']))
liz.setProperty('WatchedEpisodes', str(info['WatchedEpisodes']))
liz.setProperty('UnWatchedEpisodes', str(info['UnWatchedEpisodes']))
if section_params['section'] == SECTIONS.TV:
queries = {'mode': section_params['next_mode'], 'trakt_id': trakt_id, 'title': show['title'], 'year': show['year'], 'tvdb_id': show['ids']['tvdb']}
info['TVShowTitle'] = info['title']
else:
queries = {'mode': section_params['next_mode'], 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'trakt_id': trakt_id}
queries['random'] = time.time()
liz.setInfo('video', info)
liz_url = kodi.get_plugin_url(queries)
queries = {'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'trakt_id': trakt_id}
if section_params['next_mode'] == MODES.GET_SOURCES:
if kodi.get_setting('auto-play') == 'true':
queries['mode'] = MODES.SELECT_SOURCE
label = i18n('select_source')
if kodi.get_setting('source-win') == 'Dialog':
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
else:
runstring = 'Container.Update(%s)' % kodi.get_plugin_url(queries)
else:
queries['mode'] = MODES.AUTOPLAY
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
label = i18n('auto-play')
menu_items.insert(0, (label, runstring),)
if section_params['next_mode'] == MODES.GET_SOURCES and kodi.get_setting('show_download') == 'true':
queries = {'mode': MODES.DOWNLOAD_SOURCE, 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'trakt_id': trakt_id}
if kodi.get_setting('source-win') == 'Dialog':
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
else:
runstring = 'Container.Update(%s)' % kodi.get_plugin_url(queries)
menu_items.append((i18n('download_source'), runstring),)
if TOKEN:
show_id = utils2.show_id(show)
if show.get('in_collection', False):
queries = {'mode': MODES.REM_FROM_COLL, 'section': section_params['section']}
queries.update(show_id)
menu_items.append((i18n('remove_from_collection'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
else:
queries = {'mode': MODES.ADD_TO_COLL, 'section': section_params['section']}
queries.update(show_id)
menu_items.append((i18n('add_to_collection'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.ADD_TO_LIST, 'section': section_params['section']}
queries.update(show_id)
menu_items.append((i18n('add_to_list'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.RATE, 'section': section_params['section']}
# favor imdb_id for ratings to work with official trakt addon
if show['ids'].get('imdb'):
queries.update({'id_type': 'imdb', 'show_id': show['ids']['imdb']})
else:
queries.update(show_id)
menu_items.append((i18n('rate_on_trakt'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.ADD_TO_LIBRARY, 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'trakt_id': trakt_id}
menu_items.append((i18n('add_to_library'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.REFRESH_IMAGES, 'video_type': section_params['video_type'], 'ids': json.dumps(show['ids'])}
menu_items.append((i18n('refresh_images'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if TOKEN:
if show.get('watched', False):
watched = False
label = i18n('mark_as_unwatched')
else:
watched = True
label = i18n('mark_as_watched')
if watched or section_params['section'] == SECTIONS.MOVIES:
queries = {'mode': MODES.TOGGLE_WATCHED, 'section': section_params['section'], 'watched': watched}
queries.update(show_id)
menu_items.append((label, 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if section_params['section'] == SECTIONS.TV and kodi.get_setting('enable-subtitles') == 'true':
queries = {'mode': MODES.EDIT_TVSHOW_ID, 'title': show['title'], 'year': show['year']}
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
menu_items.append((i18n('set_addicted_tvshowid'), runstring,))
if section_params['section'] == SECTIONS.TV:
if str(trakt_id) in utils2.get_progress_skip_list():
queries = {'mode': MODES.MANAGE_PROGRESS, 'action': ACTIONS.REMOVE, 'trakt_id': trakt_id}
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
menu_items.append((i18n('include_in_mne'), runstring,))
else:
if str(trakt_id) in utils2.get_force_title_list():
label = i18n('use_def_ep_matching')
else:
label = i18n('use_ep_title_match')
queries = {'mode': MODES.TOGGLE_TITLE, 'trakt_id': trakt_id}
runstring = 'RunPlugin(%s)' % kodi.get_plugin_url(queries)
menu_items.append((label, runstring,))
queries = {'mode': MODES.SET_URL_SEARCH, 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'trakt_id': trakt_id}
menu_items.append((i18n('set_rel_url_search'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
queries = {'mode': MODES.SET_URL_MANUAL, 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'trakt_id': trakt_id}
menu_items.append((i18n('set_rel_url_manual'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
if len(menu_items) < 10 and 'trailer' in info:
queries = {'mode': MODES.PLAY_TRAILER, 'stream_url': info['trailer']}
menu_items.insert(-3, (i18n('play_trailer'), 'RunPlugin(%s)' % (kodi.get_plugin_url(queries))),)
liz.addContextMenuItems(menu_items, replaceItems=True)
liz.setProperty('resumetime', str(0))
liz.setProperty('totaltime', str(1))
return liz, liz_url
def get_list(section, slug, username=None, cached=True):
if slug == utils.WATCHLIST_SLUG:
items = trakt_api.show_watchlist(section, cached=cached)
else:
try:
items = trakt_api.show_list(slug, section, username, auth=bool(TOKEN), cached=cached)
except TraktNotFoundError:
msg = i18n('list_not_exist') % (slug)
kodi.notify(msg=msg, duration=5000)
logger.log(msg, log_utils.LOGWARNING)
return
return items
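# Usage sketch for get_list (the slug and username here are hypothetical):
#     get_list(SECTIONS.TV, utils.WATCHLIST_SLUG)              # your watchlist
#     get_list(SECTIONS.MOVIES, 'oscar-winners', 'some_user')  # another user's list
# Returns None (after notifying and logging a warning) when trakt reports
# that the list does not exist.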
def main(argv=None):
if sys.argv: argv = sys.argv
queries = kodi.parse_query(sys.argv[2])
logger.log('Version: |%s| Queries: |%s|' % (kodi.get_version(), queries), log_utils.LOGNOTICE)
logger.log('Args: |%s|' % (argv), log_utils.LOGNOTICE)
# don't process params that don't match our url exactly. (e.g. plugin://plugin.video.1channel/extrafanart)
plugin_url = 'plugin://%s/' % (kodi.get_id())
if argv[0] != plugin_url:
return
try:
global db_connection
db_connection = DB_Connection()
mode = queries.get('mode', None)
url_dispatcher.dispatch(mode, queries)
except (TransientTraktError, TraktError, TraktAuthError) as e:
logger.log(str(e), log_utils.LOGERROR)
kodi.notify(msg=str(e), duration=5000)
except DatabaseRecoveryError as e:
logger.log('Attempting DB recovery due to Database Error: %s' % (e), log_utils.LOGWARNING)
db_connection.attempt_db_recovery()
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -731,917,117,904,936,000 | 50.453526 | 236 | 0.602577 | false |
mathandy/svgtree | rings4rings.py | 1 | 48738 | from misc4rings import (Theta_Tstar, normalLineAtT_toInner_intersects_withOuter,
isDegenerateSegment, isCCW, areaEnclosed,
isApproxClosedPath, pathXpathIntersections,
remove_degenerate_segments)
import options4rings as opt
from andysmod import boolset
from andysSVGpathTools import (path2str, printPath, pathT2tseg, cropPath,
reversePath, cubPoints, minRadius, maxRadius,
trimSeg, segt2PathT, reverseSeg,
closestPointInPath, concatPaths,
lineXlineIntersections)
from svgpathtools import parse_path, Path, Line, CubicBezier, disvg, wsvg
from copy import deepcopy as copyobject
from operator import itemgetter
disvg = disvg if opt.try_to_open_svgs_in_browser else wsvg
def sortby(x, k):
return sorted(x, key=itemgetter(k))
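# Quick sketch of sortby with made-up pairs: it sorts on the k-th field.
#     sortby([('b', 2), ('a', 1)], 0)  ->  [('a', 1), ('b', 2)]
#     sortby([('b', 2), ('a', 1)], 1)  ->  [('a', 1), ('b', 2)]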
class Ring(object):
def __init__(self, path_string, color, brook_tag, rad, path, xml=None):
self.string = str(path_string)
self.xml = xml # original xml string the this ring came from in input svg
self.center = rad.origin
self.rad = rad
self.color = color
self.brook_tag = brook_tag
self._path = path
self.path_before_removing_intersections = None
if opt.save_path_length_in_pickle:
#Calculate lengths of segments in path so this info gets stored in
# pickle file (time consuming)
self.path._calc_lengths()
self.minR = minRadius(self.center,self.path)
self.maxR = maxRadius(self.center,self.path)
self.isAbove = set()
self.isBelow = set()
self.sort_index = None # flattened sort index (unique numerical)
self.psort_index = None # partial sort index (alphanumeric string)
self.wasClosed = None # records the closure before removing intersections (shouldn't ever differ from isClosed())
self.svgname = None
self.nL2bdry_a = None # Path(Line(curve_pt,bdry_pt))
self.nL2bdry_b = None # Path(Line(curve_pt,bdry_pt))
self.pathAroundBdry = None # closed path given by path+l2b1+bdry_path+l2b0
@property
def path(self):
return self._path
@path.setter
def path(self, new_path):
self._path = new_path
self.minR = minRadius(self.center, self._path)
self.maxR = maxRadius(self.center, self._path)
self.string = path2str(new_path)
def findLines2Bdry(self,bdry_rectangle):
((T_a,S2_a),(T_b,S2_b)) = find_intersection_of_rectangle_with_paths_outward_pointing_normal_line_bundle(self.path,bdry_rectangle)
self.nL2bdry_a = Path(Line(self.path.point(T_a),bdry_rectangle.point(S2_a)))
self.nL2bdry_b = Path(Line(self.path.point(T_b),bdry_rectangle.point(S2_b)))
def path_around_bdry(self,bdry_path):
if not self.pathAroundBdry:
raise Exception("The following normalLines don't work... the don't even attach to endpoints")
nL0,seg0,t0 = normalLineAtT_toInner_intersects_withOuter(self.nL2bdry_a,self.path,bdry_path,self.center)
nL1,seg1,t1 = normalLineAtT_toInner_intersects_withOuter(self.nL2bdry_b,self.path,bdry_path,self.center)
l2b0 = Path(reverseSeg(nL0))
l2b1 = Path(nL1)
T0 = segt2PathT(bdry_path,seg0,t0)
T1 = segt2PathT(bdry_path,seg1,t1)
inters = lineXlineIntersections(nL0,nL1)
if not inters:
bdry_part = reversePath(cropPath(bdry_path,T0,T1))
elif len(inters)==1:
bdry_part = reversePath(cropPath(bdry_path,T1,T0))
else:
raise Exception("This case should never be reached.")
self.pathAroundBdry = concatPaths([self.path,l2b1,bdry_part,l2b0])
return self.pathAroundBdry
def record_wasClosed(self):
self.wasClosed = self.isApproxClosedRing()
def isApproxClosedRing(self):
return abs(self.path[-1].end - self.path[0].start) < opt.tol_isApproxClosedPath
def isClosed(self):
return self.path[-1].end == self.path[0].start
def endpt(self):
return self.path[-1].end
def startpt(self):
return self.path[0].start
def fixClosure(self):
#Remove degenerate segments
for i,seg in enumerate(self.path):
if seg.start==seg.end:
del self.path[i]
#Close the ring
if (abs(self.endpt() - self.startpt()) < opt.tol_isApproxClosedPath and
self.path.length() > opt.appropriate_ring_length_minimum):
self.path[-1].end = self.path[0].start
# def __repr__(self):
    # return '<Ring object of color = %s, Brook_tag = %s, minR = %s, maxR = %s>' %(self.color,self.brook_tag,self.minR, self.maxR)
def updatePath(self, new_path):
if self.path_before_removing_intersections is None:
self.path_before_removing_intersections = self.path
self.path = new_path
def __eq__(self, other):
if not isinstance(other, Ring):
return NotImplemented
if self.path != other.path or self.string!=other.string:
return False
return True
def __ne__(self, other):
if not isinstance(other, Ring):
return NotImplemented
return not self == other
def point(self, pos):
return self.path.point(pos)
def parseCCW(self):
orig_path = parse_path(self.string)
#fix degenerate segments here
for i,seg in enumerate(orig_path):
if abs(seg.start-seg.end) < 1:
del orig_path[i]
orig_path[i].start = orig_path[i-1].end
if isCCW(orig_path,self.center):
return orig_path
else:
return reversePath(orig_path)
def aveR(self):
# return aveRadius_path(self.path,self.center)
return "Not Implimented"
def area(self):
if not self.isClosed():
raise Exception("Area of ring object can can only be measured with this function if it is a closed (complete) ring. You must make it into an incomplete ring object and give a completed_path.")
return areaEnclosed(self.path)
# def info(self, cp_index):
# ###### "complete ring index, complete?, inner BrookID, outer BrookID, inner color, outer color, area, area Ignoring IRs, averageRadius, minR, maxR, IRs contained"
# return str(cp_index) + "," + "True" + ".... sorry not implimented yet"
class IncompleteRing(object):
def __init__(self, ring):
self.ring = ring
self.innerCR_ring = None
self.outerCR_ring = None
self.completed_path = Path()
self.overlap0 = False #This is related to a deprecated piece of code and must be False.
self.overlap1 = False #This is related to a deprecated piece of code and must be False.
self.corrected_start = None #set in case of overlap (point, seg,t) where seg is a segment in self and seg(t)=point
self.corrected_end = None #set in case of overlap (point, seg,t) where seg is a segment in self and seg(t)=point
self.ir_start = self.ring.point(0)
self.ir_end = self.ring.point(1)
self.up_ladders = []
self.down_ladder0 = None #(irORcr0,T0) ~ startpoint down-ladder on this ir and (and T-val on connecting ring it connects at - irORcr0 can be incompleteRing object or completeRing object)
self.down_ladder1 = None
self.transect0fails = [] #records IRs are "below" self, but failed to provide a transect to self.ir_start
self.transect1fails = [] #records IRs are "below" self, but failed to provide a transect to self.ir_end
self.transect0found = False
self.transect1found = False
self.isCore = False
self.ORring = self.ring
# def __repr__(self):
# return '<IncompleteRing based on ring = %s>' %self.ring
def __eq__(self, other):
if not isinstance(other, IncompleteRing):
return NotImplemented
if self.ring != other.ring:
return False
return True
def __ne__(self, other):
if not isinstance(other, CompleteRing):
return NotImplemented
return not self == other
def set_inner(self, ring):
self.innerCR_ring = ring
def set_outer(self, ring):
self.outerCR_ring = ring
def sortUpLadders(self):
self.up_ladders = sortby(self.up_ladders,1)
self.up_ladders.reverse()
    # this is my newer cleaned-up version, but I think I broke it (7-19-16)
# def addSegsToCP(self, segs, tol_closure=opt.tol_isApproxClosedPath):
# """input a list of segments to append to self.completed_path
# this function will stop adding segs if a seg endpoint is near the
# completed_path startpoint"""
# if len(segs)==0:
# raise Exception("No segs given to insert")
#
# # Iterate through segs to check if segments join together nicely
# # and (fix them if need be and) append them to completed_path
# for seg in segs:
# # This block checks if cp is (nearly) closed.
# # If so, closes it with a Line, and returns the fcn
# if len(self.completed_path)!=0:
# cp_start, cp_end = self.completed_path[0].start, self.completed_path[-1].end
# if abs(cp_start - cp_end) < tol_closure:
# if cp_start==cp_end:
# # then do nothing else and return
# return
# else:
# # then close completed_path with a line and return
# self.completed_path.append(Line(cp_start, cp_end))
# return
#
# elif seg.start != self.completed_path[-1].end:
# # then seg does not start at the end of completed_path,
# # fix it then add it on
# current_endpoint = self.completed_path[-1].end
# if abs(seg.start - current_endpoint) < tol_closure:
# # then seg is slightly off from current end of
# # completed_path, fix seg and insert it into
# # completed_path
# if isinstance(seg, CubicBezier):
# P0, P1, P2, P3 = seg.bpoints()
# newseg = CubicBezier(current_endpoint, P1, P2, P3)
# elif isinstance(seg, Line):
# newseg = Line(current_endpoint, seg.end)
# else:
# raise Exception('Path segment is neither Line '
# 'object nor CubicBezier object.')
# self.completed_path.insert(len(self.completed_path), newseg)
# else:
# raise Exception("Segment being added to path does not "
# "start at path endpoint.")
# else:
# # then seg does not need to be fixed, so go ahead and insert it
# self.completed_path.insert(len(self.completed_path), seg)
def addSegsToCP(self, segs, tol_closure=opt.tol_isApproxClosedPath):
#input a list of segments to append to self.completed_path
#this function will stop adding segs if a seg endpoint is near the completed_path startpoint
if len(segs)==0:
raise Exception("No segs given to insert")
#Iterate through segs to check if segments join together nicely
#and (fix them if need be and) append them to completed_path
for seg in segs:
#This block checks if cp is (nearly) closed.
#If so, closes it with a Line, and returns the fcn
if len(self.completed_path)!=0:
cp_start, cp_end = self.completed_path[0].start, self.completed_path[-1].end
if abs(cp_start - cp_end) < tol_closure:
if cp_start==cp_end:
#then do nothing else and return
return
else:
#then close completed_path with a line and return
self.completed_path.append(Line(cp_start,cp_end))
return
if len(self.completed_path)!=0 and seg.start != self.completed_path[-1].end:
#then seg does not start at the end of completed_path, fix it then add it on
current_endpoint = self.completed_path[-1].end
if abs(seg.start - current_endpoint) < tol_closure:
#then seg is slightly off from current end of completed_path, fix seg and insert it into completed_path
if isinstance(seg,CubicBezier):
P0,P1,P2,P3 = cubPoints(seg)
newseg = CubicBezier(current_endpoint,P1,P2,P3)
elif isinstance(seg,Line):
newseg = Line(current_endpoint,seg.end)
else:
raise Exception('Path segment is neither Line object nor CubicBezier object.')
self.completed_path.insert(len(self.completed_path),newseg)
else:
raise Exception("Segment being added to path does not start at path endpoint.")
else:
#then seg does not need to be fixed, so go ahead and insert it
self.completed_path.insert(len(self.completed_path),seg)
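    # Illustrative use of addSegsToCP (hypothetical Line segments, not from
    # real ring data), assuming `ir` is an IncompleteRing being completed:
    #     ir.addSegsToCP([Line(0+0j, 1+0j), Line(1+0j, 1+1j)])
    # Each segment must begin (within tol_closure) where completed_path
    # currently ends; once the path's end comes back within tol_closure of
    # its start, the method closes it with a Line and stops adding segments.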
def addConnectingPathToCP(self, connecting_path, seg0, t0, seg1, t1):
# first find orientation by checking whether t0 is closer to start or end.
T0, T1 = segt2PathT(connecting_path, seg0, t0), segt2PathT(connecting_path, seg1, t1)
i0, i1 = connecting_path.index(seg0), connecting_path.index(seg1)
first_seg = reverseSeg(trimSeg(seg1, 0, t1))
last_seg = reverseSeg(trimSeg(seg0, t0, 1))
if T0 > T1: # discontinuity between intersection points
if isApproxClosedPath(connecting_path):
middle_segs = [reverseSeg(connecting_path[i1-i]) for i in range(1, (i1-i0) % len(connecting_path))]
else:
raise Exception("ir jumps another ir's gap. This case is not "
"implimented yet")
elif T0 < T1: # discontinuity NOT between intersection points
middle_segs = [reverseSeg(connecting_path[i1+i0-i]) for i in range(i0 + 1, i1)]
else:
raise Exception("T0=T1, this means there's a bug in either "
"pathXpathIntersections fcn or "
"trimAndAddTransectsBeforeCompletion fcn")
# first seg
if isDegenerateSegment(first_seg):
tmpSeg = copyobject(middle_segs.pop(0))
tmpSeg.start = first_seg.start
first_seg = tmpSeg
if first_seg.end == self.completed_path[0].start:
self.completed_path.insert(0,first_seg)
else:
printPath(first_seg)
printPath(last_seg)
printPath(connecting_path)
raise Exception("first_seg is set up wrongly")
# middle segs
self.addSegsToCP(middle_segs)
# last seg
if isDegenerateSegment(last_seg):
middle_segs[-1].end = last_seg.end
else:
self.addSegsToCP([last_seg])
def trimAndAddTransectsBeforeCompletion(self):
# Retrieve transect endpoints if necessary
(irORcr0, T0), (irORcr1, T1) = self.down_ladder0, self.down_ladder1
tr0_start_pt = irORcr0.ORring.point(T0)
tr1_end_pt = irORcr1.ORring.point(T1)
if not self.overlap0:
# then no overlap at start, add transect0 to beginning of
# connected path (before the ir itself)
i0 = -1
startSeg = Line(tr0_start_pt, self.ir_start)
else:
# overlap at start, trim the first seg in the ir (don't connect
# anything, just trim)
i0 = self.ring.path.index(self.corrected_start[1])
startSeg = trimSeg(self.corrected_start[1], self.corrected_start[2],1)
if not self.overlap1:
# then no overlap at end to add transect1 to connected path
# (append to end of the ir)
i1 = len(self.ring.path)
endSeg = Line(self.ir_end, tr1_end_pt)
else:
# overlap at end, trim the last seg in the ir (don't connect
# anything, just trim)
i1 = self.ring.path.index(self.corrected_end[1])
endSeg = trimSeg(self.corrected_end[1], 0, self.corrected_end[2])
# first seg
if isDegenerateSegment(startSeg):
tmpSeg = copyobject(self.ring.path[i0 + 1])
tmpSeg.start = startSeg.start
startSeg = tmpSeg
i0 += 1
self.addSegsToCP([startSeg])
else:
self.addSegsToCP([startSeg])
# middle segs
if i0 + 1 != i1:
self.addSegsToCP([self.ring.path[i] for i in range(i0+1, i1)])
# last seg
if isDegenerateSegment(endSeg):
self.completed_path[-1].end = endSeg.end
else:
self.addSegsToCP([endSeg])
def irpoint(self, pos):
return self.ring.point(pos)
def area(self):
if not isinstance(self.completed_path,Path):
return "Fail"
if (self.completed_path is None or not isApproxClosedPath(self.completed_path)):
# raise Exception("completed_path not complete. Distance between start and end: %s"%abs(self.completed_path.point(0) - self.completed_path.point(1)))
return "Fail"
return areaEnclosed(self.completed_path)
def type(self, colordict):
for (key, val) in colordict.items():
if self.ring.color == val:
return key
else:
raise Exception("Incomplete Ring color not in colordict... you shouldn't have gotten this far. Bug detected.")
# def info(self,cp_index):
# ###### "complete ring index, complete?, inner BrookID, outer BrookID, inner color, outer color, area, area Ignoring IRs, averageRadius, minR, maxR, IRs contained"
# return str(cp_index) + "," + "Incomplete"+"," + "N/A" + ", " + self.ring.brook_tag + "," + "N/A" + ", " + self.ring.color +"," + str(self.area()) +", "+ "N/A"+","+str(self.ring.aveR())+","+str(self.ring.minR)+","+str(self.ring.maxR)+","+"N/A"
def info(self, cp_index, colordict):
###### "complete ring index, type, # of IRs contained, minR, maxR, aveR, area, area Ignoring IRs"
return str(cp_index)+","+self.type(colordict)+","+"N/A"+","+str(self.ring.minR)+","+ str(self.ring.maxR)+","+str(self.ring.aveR())+","+str(self.area())+","+"N/A"
def followPathBackwards2LadderAndUpDown(self, irORcr, T0):
"""irORcr is the path being followed, self is the IR to be completed
returns (traveled_path,irORcr_new,t_new) made from the part of irORcr's
path before T0 (and after ladder) plus the line from ladder (the first
one that is encountered)"""
rds = remove_degenerate_segments
irORcr_path = irORcr.ORring.path
thetaprekey = Theta_Tstar(T0)
thetakey = lambda lad: thetaprekey.distfcn(lad[1])
sorted_upLadders = sorted(irORcr.up_ladders, key=thetakey)
if isinstance(irORcr, CompleteRing):
ir_new, T = sorted_upLadders[0]
if T != T0:
reversed_path_followed = reversePath(cropPath(irORcr_path, T, T0))
else: # this happens when up and down ladder are at same location
reversed_path_followed = Path()
# add the ladder to reversed_path_followed
if (irORcr, T) == ir_new.down_ladder0:
if not ir_new.overlap0:
ladder = Line(irORcr_path.point(T), ir_new.irpoint(0))
reversed_path_followed.append(ladder)
T_ir_new = 0
else:
T_ir_new = segt2PathT(ir_new.ring.path,
ir_new.corrected_start[1],
ir_new.corrected_start[2])
elif (irORcr, T) == ir_new.down_ladder1:
if not ir_new.overlap1:
ladder = Line(irORcr_path.point(T), ir_new.irpoint(1))
reversed_path_followed.append(ladder)
T_ir_new = 1
else:
T_ir_new = segt2PathT(ir_new.ring.path,
ir_new.corrected_end[1],
ir_new.corrected_end[2])
else:
raise Exception("this case shouldn't be reached, mistake in "
"logic or didn't set downladder somewhere.")
return rds(reversed_path_followed), ir_new, T_ir_new
else: # current ring to follow to ladder is incomplete ring
irORcr_path = irORcr.ring.path
for ir_new, T in sorted_upLadders:
if T < T0: # Note: always following path backwards
reversed_path_followed = irORcr_path.cropped(T, T0).reversed()
if (irORcr, T) == ir_new.down_ladder0:
if not ir_new.overlap0:
ladder = Line(irORcr_path.point(T), ir_new.irpoint(0))
reversed_path_followed.append(ladder)
T_ir_new = 0
else:
T_ir_new = segt2PathT(ir_new.ring.path,
ir_new.corrected_start[1],
ir_new.corrected_start[2])
elif (irORcr, T) == ir_new.down_ladder1:
if not ir_new.overlap1:
ladder = Line(irORcr_path.point(T), ir_new.irpoint(1))
reversed_path_followed.append(ladder)
T_ir_new = 1
else:
T_ir_new = segt2PathT(ir_new.ring.path,
ir_new.corrected_end[1],
ir_new.corrected_end[2])
else:
tmp_mes = ("this case shouldn't be reached, mistake "
"in logic or didn't set downladder "
"somewhere.")
raise Exception(tmp_mes)
return rds(reversed_path_followed), ir_new, T_ir_new
# none of the upladder were between 0 and T0,
# so use downladder at 0
else:
(irORcr_new, T_new) = irORcr.down_ladder0
irORcr_new_path = irORcr_new.ORring.path
###Should T0==0 ever?
if T0 != 0:
reversed_path_followed = irORcr.ring.path.cropped(0, T0).reversed()
else:
reversed_path_followed = Path()
if irORcr.overlap0 == False:
ladder = Line(irORcr_path.point(0), irORcr_new_path.point(T_new))
reversed_path_followed.append(ladder)
return rds(reversed_path_followed), irORcr_new, T_new
def findMiddleOfConnectingPath(self):
        # creates a path starting at the end of down_ladder1, going to the nearest (with t > t0) up-ladder (or, if there is no up-ladder, the down-ladder at the end), and repeating until reaching the bottom of down_ladder0
maxIts = 1000 #### Tolerance
traveled_path = Path()
iters = 0
(irORcr_new,T_new) = self.down_ladder1
doneyet = False
while iters < maxIts and not doneyet:
iters =iters+ 1
# ##DEBUG sd;fjadsfljkjl;
# if self.ring.path[0].start == (260.778+153.954j):
# from misc4rings import dis
# from svgpathtools import Line
# p2d=[self.completed_path,
# self.down_ladder0[0].ORring.path,
# self.down_ladder1[0].ORring.path]
# clrs = ['blue','green','red']
# if iters>1:
# p2d.append(Path(*traveled_path))
# clrs.append('black')
# lad0a = self.down_ladder0[0].ORring.path.point(self.down_ladder0[1])
# lad0b = self.ORring.path[0].start
# lad0 = Line(lad0a,lad0b)
# lad1a = self.ORring.path[-1].end
# lad1b = self.down_ladder1[0].ORring.path.point(self.down_ladder1[1])
# lad1 = Line(lad1a,lad1b)
# dis(p2d,clrs,lines=[lad0,lad1])
# print abs(lad0.start-lad0.end)
# print abs(lad1.start-lad1.end)
# bla=1
# ##end of DEBUG sd;fjadsfljkjl;
traveled_path_part, irORcr_new, T_new = self.followPathBackwards2LadderAndUpDown(irORcr_new, T_new)
for seg in traveled_path_part:
traveled_path.append(seg)
if irORcr_new == self:
return traveled_path
# if irORcr_new == self.down_ladder0[0]:
# doneyet = True
# irORcr_new_path = irORcr_new.ORring.path
# if T_new != self.down_ladder0[1]:
# for seg in reversePath(cropPath(irORcr_new_path,self.down_ladder0[1],T_new)):
# traveled_path.append(seg)
# break
if (irORcr_new, T_new) == self.down_ladder0:
return traveled_path
if iters >= maxIts-1:
raise Exception("findMiddleOfConnectingPath reached maxIts")
return traveled_path
def hardComplete(self, tol_closure=opt.tol_isApproxClosedPath):
self.trimAndAddTransectsBeforeCompletion()
meatOf_connecting_path = self.findMiddleOfConnectingPath() ###this is a Path object
self.addSegsToCP(meatOf_connecting_path)
cp_start,cp_end = self.completed_path[0].start, self.completed_path[-1].end
#check newly finished connecting_path is closed
if abs(cp_start - cp_end) >= tol_closure:
raise Exception("Connecting path should be finished but is not closed.")
#test for weird .point() bug where .point(1) != end
if (abs(cp_start - self.completed_path.point(0)) >= tol_closure or
abs(cp_end - self.completed_path.point(1)) >= tol_closure):
self.completed_path = parse_path(path2str(self.completed_path))
raise Exception("weird .point() bug where .point(1) != end... I just added this check in on 3-5-15, so maybe if this happens it doesn't even matter. Try removing this code-block... or change svgpathtools.Path.point() method to return one or the other.")
def findTransect2endpointFromInnerPath_normal(self,irORcr_innerPath,innerPath,T_range,Tpf,endBin):
        # Tpf: If the T0 transect intersects self and the T1 transect does not, then Tpf should be True; otherwise it should be False.
#Note: If this endpoint's transect is already found, then this function returns (False,False,False,False)
#Returns: (irORcr,nL,seg_irORcr,t_irORcr) where irORcr is the inner path that the transect, nL, leaves from and seg_irORcr and t_irORcr correspond to innerPath and nL points from seg_irORcr.point(t_irORcr) to the desired endpoint
#Note: irORcr will differ from irORcr_innerPath in the case where irORcr_innerPath admits a transect but this transect intersects with a (less-inner) previously failed path. The less-inner path is then output.
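        # Bisection sketch: T0 and T1 bracket the parameter on innerPath where
        # the outward normal line stops (or starts) reaching self.ring.path;
        # Tpf says whether the T0 end is currently the passing end.  Each pass
        # tests the midpoint's normal line and keeps one passing and one
        # failing end, until innerPath.point(T0) and innerPath.point(T1) are
        # within tol_isApproxClosedPath of each other.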
(T0,T1) = T_range
if T1<T0:
if T1==0:
T1=1
else:
Exception("I don't think T_range should ever have T0>T1. Check over findTransect2endpointsFromInnerPath_normal to see if this is acceptable.")
if endBin == 0 and self.transect0found:
return False, False, False, False
elif endBin == 1 and self.transect1found:
return False, False, False, False
elif endBin not in {0,1}:
raise Exception("endBin not a binary - so there is a typo somewhere when calling this fcn")
if irORcr_innerPath.isCore:
return irORcr_innerPath, Line(irORcr_innerPath.inner.path.point(0), self.irpoint(endBin)), irORcr_innerPath.inner.path[0], 0 # make transect from center (actually path.point(0)) to the endpoint
maxIts = 100 ##### tolerance
its = 0
while (abs(innerPath.point(T0) - innerPath.point(T1)) >= opt.tol_isApproxClosedPath
and its <= maxIts):
its += 1
T = float((T0+T1))/2
center = self.ring.center
nL = normalLineAtT_toInner_intersects_withOuter(T,innerPath,self.ring.path,center)[0] #check if transect from innerPath.point(T) intersects with outerPath
if Tpf: #p x f
if nL != False: #p p f
T0 = T
else: #p f f
T1 = T
else: #f x p
if nL != False: #f p p
T1 = T
else: #f f p
T0 = T
# ###DEBUG asdfkljhjdkdjjjdkkk
# if self.ORring.point(0)==(296.238+285.506j):
# from misc4rings import dis
# print "endBin=%s\nT0=%s\nT=%s\nT1=%s\n"%(endBin,T0,T,T1)
# if isNear(innerPath.point(T0),innerPath.point(T1)):
# print "Exit Criterion Met!!!"
# if nL==False:
# nLtmp = normalLineAtT_toInner_intersects_withOuter(T,innerPath,self.ring.path,center,'debug')[0]
# else:
# nLtmp = nL
# dis([innerPath,self.ring.path,Path(nLtmp)],['green','red','blue'],nodes=[center,innerPath.point(T0),innerPath.point(T1)],node_colors=['blue','green','red'])
# bla=1
# ###end of DEBUG asdfkljhjdkdjjjdkkk
if its>=maxIts:
raise Exception("while loop for finding transect by bisection reached maxIts without terminating")
if nL != False: #x p x
t_inner, seg_inner = pathT2tseg(innerPath, T)
else: #x f x
if Tpf: #p f f
nL = normalLineAtT_toInner_intersects_withOuter(T0,innerPath,self.ring.path,center)[0]
(t_inner,seg_inner) = pathT2tseg(innerPath,T0)
else: #f f p
nL = normalLineAtT_toInner_intersects_withOuter(T1,innerPath,self.ring.path,center)[0]
(t_inner,seg_inner) = pathT2tseg(innerPath,T1)
transect_info = (irORcr_innerPath,nL,seg_inner,t_inner)
###Important Note: check that transect does not go through any other rings while headed to its target endpoint (Andy has note explaining this "7th case")
###If this intersection does happen... just cut off the line at the intersection point - this leads to transect not being normal to the ring it emanates from.
if endBin == 0:
failed_IRs_2check = self.transect0fails
else:
failed_IRs_2check = self.transect1fails
keyfcn = lambda x: x.ORring.sort_index
failed_IRs_2check = sorted(failed_IRs_2check,key=keyfcn)
tr_line = transect_info[1]
exclusions = [] #used to check the line from closest_pt to endpoint doesn't intersect
num_passed = 0
run_again = True
while run_again:
run_again = False
for idx,fIR in enumerate(failed_IRs_2check):
num_passed +=1
if idx in exclusions:
continue
intersectionList = pathXpathIntersections(Path(tr_line),fIR.ring.path)
if len(intersectionList) == 0:
continue
else:
if len(intersectionList) > 1:
print("Warning: Transect-FailedPath intersection check returned multiple intersections. This is possible, but should be very rare.")
# (seg_tl, seg_fIR, t_tl, t_fIR) = intersectionList[0]
t_fIR,seg_fIR = closestPointInPath(self.ORring.point(endBin),fIR.ring.path)[1:3]
fIR_closest_pt = seg_fIR.point(t_fIR)
if endBin:
new_nonNormal_transect = Line(self.ring.path[-1].end,fIR_closest_pt)
else:
new_nonNormal_transect = Line(fIR_closest_pt,self.ring.path[0].start)
transect_info = (fIR,new_nonNormal_transect,seg_fIR,t_fIR)
exclusions += range(idx+1)
run_again = True
break
return transect_info
def findTransects2endpointsFromInnerPath_normal(self,irORcr_innerPath,innerPath):
"""Finds transects to both endpoints (not just that specified by
endBin - see outdated description below)
Note: This fcn will attempt to find transects for endpoints where the
transects have not been found and will return (False, False, False)
for those that have.
Returns: (irORcr,nL,seg_irORcr,t_irORcr) where irORcr is the inner
path that the transect, nL, leaves from and seg_irORcr and t_irORcr
correspond to innerPath and nL points from seg_irORcr.point(t_irORcr)
to the desired endpoint.
Note: irORcr will differ from irORcr_innerPath in the case where
irORcr_innerPath admits a transect but this transect intersects with a
(less-inner) previously failed path. The less-inner path is then
output."""
#Outdated Instructions
#This fcn is meant to find the transect line from (and normal to) the
# inner path that goes through OuterPt. It does this using the
# bisection method.
#INPUT: innerPath and outerPath are Path objects, center is a point
# representing the core, endBin specifies which end point in outerPath
# we hope to find the transect headed towards (so must be a 0 or a 1)
#OUTPUT: Returns (transect_Line,inner_seg,inner_t) where normal_line
# is the transverse Line object normal to innerPath intersecting
# outerPath at outerPt or, if such a line does not exist, returns
# (False,False,False,False)
# inner_seg is the segment of innerPath that this normal transect line
# begins at, s.t. seg.point(inner_t) = transect_Line.point(0)
outerPath = self.ring.path
center = self.ring.center
tol_numberDetectionLines_transectLine_normal = 20 ##### tolerance
N = tol_numberDetectionLines_transectLine_normal
# if self.transect0found and not self.transect1found:
# return (False,False,False,False), self.findTransect2endpointFromInnerPath_normal(innerPath)
# if not self.transect0found and self.transect1found:
# return self.findTransect2endpoint0FromInnerPath_normal(innerPath), (False,False,False,False)
# if self.transect0found and self.transect1found:
# raise Exception("Both transects already found... this is a bug.")
# For a visual explanation of the following code block and the six
# cases, see Andy's "Gap Analysis of Transects to Endpoints"
# check if transect from innerPath.point(0) intersect with outerPath
nL_from0, seg_from0, t_from0 = normalLineAtT_toInner_intersects_withOuter(0, innerPath, outerPath, center)
if isApproxClosedPath(innerPath):
(nL_from1,seg_from1,t_from1) = (nL_from0,seg_from0,t_from0)
else:
#check if transect from innerPath.point(1) intersect with outerPath
nL_from1, seg_from1, t_from1 = normalLineAtT_toInner_intersects_withOuter(1, innerPath, outerPath, center)
#Case: TF
if nL_from0 and not nL_from1:
return (False,False,False,False), self.findTransect2endpointFromInnerPath_normal(irORcr_innerPath,innerPath,(0,1),True,1)
#Case: FT
if (not nL_from0) and nL_from1:
return self.findTransect2endpointFromInnerPath_normal(irORcr_innerPath,innerPath,(0,1),False,0), (False,False,False,False)
# determine All, None, or Some (see notes in notebook on this agorithm
# for explanation)
max_pass_Tk = 0
min_pass_Tk = 1
max_fail_Tk = 0
min_fail_Tk = 1
somePass = False
someFail = False
dT = float(1)/(N-1)
for k in range(1,N-1):
Tk = k*dT
nLk, outer_segk, outer_tk = normalLineAtT_toInner_intersects_withOuter(Tk, innerPath, outerPath, center)
if nLk != False:
somePass = True
if Tk > max_pass_Tk:
max_pass_Tk = Tk
# max_pass = (nLk,outer_segk,outer_tk)
if Tk < min_pass_Tk:
min_pass_Tk = Tk
# min_pass = (nLk,outer_segk,outer_tk)
else:
someFail = True
if Tk > max_fail_Tk:
max_fail_Tk = Tk
# max_fail = (nLk,outer_segk,outer_tk)
if Tk < min_fail_Tk:
min_fail_Tk = Tk
# min_fail = (nLk,outer_segk,outer_tk)
if somePass and someFail:
#Case: TT & only some pass [note: TT & some iff TT & T0>T1]
if nL_from0 and nL_from1:
Trange0 = (max_fail_Tk, (max_fail_Tk + dT)%1)
Tpf0 = False
Trange1 = ((min_fail_Tk - dT)%1, min_fail_Tk)
Tpf1 = True
#Case: FF & only some pass
elif (not nL_from0) and (not nL_from1):
Trange0 = ((min_pass_Tk - dT)%1, min_pass_Tk)
Tpf0 = False
Trange1 = (max_pass_Tk, (max_pass_Tk + dT)%1)
Tpf1 = True
for Ttestindex,T2test in enumerate(Trange0 + Trange1): #debugging only
if T2test>1 or T2test < 0:
print(Ttestindex)
print(T2test)
raise Exception()
args = irORcr_innerPath, innerPath, Trange0, Tpf0, 0
tmp1 = self.findTransect2endpointFromInnerPath_normal(*args)
args = irORcr_innerPath, innerPath, Trange1, Tpf1, 1
tmp2 = self.findTransect2endpointFromInnerPath_normal(*args)
return tmp1, tmp2
#Cases: (TT & all) or (FF & none) [note: TT & all iff TT & T0<T1]
else:
return (False, False, False, False), (False, False, False, False)
class CompleteRing(object): # this must remain fast to initialize
def __init__(self, innerRing, outerRing, *internalRings):
self.inner = innerRing
self.outer = outerRing
self.ir_boolset = boolset(internalRings)
self.up_ladders = []
self.isCore = False
self.ORring = self.inner
def sortUpLadders(self):
self.up_ladders = sortby(self.up_ladders,1)
self.up_ladders.reverse()
def sortIRs(self):
self.ir_boolset = sorted(self.ir_boolset, key = lambda ir: ir.ring.sort_index)
def completeIncompleteRings(self):
"""This fcn takes each ir included in self and makes a closed path to
use for its area computation."""
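        # Rough outline: for each incomplete ring (inner-most first), search
        # progressively more-inner rings for normal transects to each of its
        # two endpoints, recording every hit as an up-ladder on the ring that
        # was hit and a down-ladder on the incomplete ring itself; once every
        # incomplete ring has both down-ladders, hardComplete() stitches the
        # transects, the ring, and pieces of the rings below into a closed
        # completed_path.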
# make sure ir_boolset is sorted (by sort index found in topological sort)
self.sortIRs()
if len(self.ir_boolset) == 0:
return
# iterate through IRs to complete them one by one, inner-most to outer-most
for i,ir in enumerate(self.ir_boolset):
# try all more-inner IRs (and the inner CR) starting with
# most-outer among them - this is for finding transects.
# Note: #poten[j] = ir_boolset[j-1].ring for j=1,...,i
potential_rings = [self.inner] + [x.ring for x in self.ir_boolset[0:i]]
# Find transects from the endpoints of ir to the next most-Outer
# of the more-inner acceptable rings
# Note: the ir's are sorted, but need to make sure each transect
# is well-defined (i.e. check the potential connecting ir does in
# fact travel "below" that endpoint)
# Note: findTransects fcn used below will return
# (False, False, False, False) for any transects already found
            nextRing2Try_index = i  # note this is right, not i-1, because potential_rings has self.inner at the beginning
while not (ir.transect0found and ir.transect1found) and nextRing2Try_index >= 0:
nextRing2Try = potential_rings[nextRing2Try_index]
if nextRing2Try_index == 0:
irORcr_2Try = self
else:
irORcr_2Try = self.ir_boolset[nextRing2Try_index-1]
tmp = ir.findTransects2endpointsFromInnerPath_normal(irORcr_2Try, nextRing2Try.path)
(irORcr0, tL0, seg0, t0), (irORcr1, tL1, seg1, t1) = tmp
# check if nextRing2Try is has a transect going to the
# startpoint, if so we're done with this endpoint
if tL0 != False:
# ring.path.point(T0) is where the transect, tL, meets
# this ring
T0 = segt2PathT(irORcr0.ORring.path,seg0,t0)
# record up-ladder on the ring the transect connects to
# (and t-val it connects at)
irORcr0.up_ladders.append((ir,T0))
# record startpoint down-ladder on this ir and (and t-val
# on connecting ring it connects at)
ir.down_ladder0 = (irORcr0,T0)
if not ir.transect0found:
ir.transect0found = True
else:
ir.transect0fails.append(irORcr_2Try)
# check if nextRing2Try has a transect going to the endpoint,
# if so we're done with this endpoint
if tL1 != False:
# ring.path.point(T0) is where the transect, tL, meets
# this ring
T1 = segt2PathT(irORcr1.ORring.path, seg1, t1)
# record up-ladder on the ring the transect connects to
# (and t-val it connects at)
irORcr1.up_ladders.append((ir, T1))
# record startpoint down_ladder on this ir and (and t-val
# on connecting ring it connects at)
ir.down_ladder1 = (irORcr1, T1)
if not ir.transect1found:
ir.transect1found = True
else:
ir.transect1fails.append(irORcr_2Try)
# unsuccessful while-loop termination conditions
if (nextRing2Try_index == 0 and
not (ir.transect0found and ir.transect1found)):
printPath(ir.ring.path)
print(i)
colors = ['blue']*(len(self.ir_boolset)+2) + ['red']
paths2disp = ([self.inner.path] +
[x.ring.path for x in self.ir_boolset] +
[self.outer.path] +
[ir.ring.path])
disvg(paths2disp, colors)
raise Exception("Acceptable more-inner ring could not be "
"found.")
else:
nextRing2Try_index -= 1
# Now that all up/down ladders are set in this CR, iterate through IRs
# again and create completed_path for each
for ir in self.ir_boolset:
try:
ir.hardComplete()
except:
from options4rings import colordict
# highlight ir in output SVG containing troubled section
# (see area4rings)
ir.ring.color = colordict['safe1']
raise
# #record this info to use after done with all IRs in this self
# ir_info_list.append((ir,info0,info1))
# ir_info_list.append((ir,ring0,irORcr0,seg0,t0,ring1,irORcr1,seg1,t1))
# #now use this recorded info to complete the incomplete rings
# for ir_item in ir_info_list:
# (ir,info0,info1) = ir_item
# (ring0,irORcr0,seg0,t0),(ring1,irORcr1,seg1,t1) = info0,info1
# #create a new ring made of transect0 -> ir -> trans1-> partial ring from (closest path - make sure to transverse in correct direction)
# if ring0 == ring1:
# # try:
# ir.ezComplete_ir(ring0.path,seg0,t0,seg1,t1)
# # except Exception as e:
# # ir.completed_path = e
# else:
# self.hardComplete_ir(ir,irORcr0,ring0.path,t0,T0,irORcr0,ring1.path,t1, T1)
# ir.completed_path = "There is an incomplete ring whose starting point is closest to a different ring than its ending point is closest to. Handling this case is functionality Andy has yet to add, please let him know."
# raise Exception("There is an incomplete ring whose starting point is closest to a different ring than its ending point is closest to. Handling this case is functionality Andy has yet to add, please let him know.")
def minR(self):
return self.inner.minR
def maxR(self):
return self.outer.maxR
def aveR(self):
return "Not Implemented"
def add(self, value):
self.ir_boolset.booladd(value)
# def __repr__(self):
# return '<CompleteRing containing %s incomplete rings: radius range = [%s,%s]>' %(len(self.ir_boolset),self.minR(),self.maxR())
def __eq__(self, other):#just checks that inner ring is the same
if not isinstance(other, CompleteRing):
return NotImplemented
if self.inner!=other.inner:
return False
return True
def __ne__(self, other):
if not isinstance(other, CompleteRing):
return NotImplemented
return not self == other
def areaIgnoringIRs(self):
return areaEnclosed(self.outer.path) - areaEnclosed(self.inner.path)
def area(self):
for iring in self.ir_boolset:
if (not isinstance(iring.completed_path, Path)) or (iring.area() == "Fail"):
return "Fail"
return self.areaIgnoringIRs() - sum([iring.area() for iring in self.ir_boolset])
def type(self, colordict):
for (key,val) in colordict.items():
if self.inner.color == val:
innerkey = key
if self.outer.color == val:
outerkey = key
try:
return innerkey + "-" + outerkey
except:
raise Exception("inner ring color or outer ring color not in "
"colordict... you shouldn't have gotten this "
"far. In other words, bug detected.")
# def info(self, cp_index):
# ###### "complete ring index, complete?, inner BrookID, outer BrookID, inner color, outer color, area, area Ignoring IRs, averageRadius, minR, maxR, IRs contained"
# return str(cp_index) + "," + "True" + self.inner.brook_tag + ", " + self.outer.brook_tag + "," + self.inner.color + ", " + self.outer.color +"," + str(self.area()) +", "+ str(self.areaIgnoringIRs())+","+str(self.aveR())+","+str(self.minR)+","+str(self.maxR)+","+str(len(self.ir_boolset))
def info(self,cp_index,colordict):
###### "complete ring index, type, # of IRs contained, minR, maxR, aveR, area, area Ignoring IRs"
outputString = str(cp_index)+","+self.type(colordict)+","+str(len(self.ir_boolset))+","+str(self.minR())+","+str(self.maxR())+","+str(self.aveR())+","+str(self.area())+","+str(self.areaIgnoringIRs())
for ir in self.ir_boolset:
outputString += "\n"+ir.info(cp_index,colordict)
return outputString
class CP_BoolSet(boolset):
def cpUpdate(self,new_cp):
for cp in self:
if cp == new_cp:
for ir in new_cp.ir_boolset:
cp.ir_boolset.booladd(ir)
return
self.append(new_cp) | mit | 2,867,992,715,726,004,000 | 48.330972 | 296 | 0.569843 | false |
roadmapper/ansible | lib/ansible/modules/network/nxos/nxos_snmp_traps.py | 18 | 7641 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snmp_traps
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP traps.
description:
- Manages SNMP traps configurations.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- This module works at the group level for traps. If you need to only
enable/disable 1 specific trap within a group, use the M(nxos_command)
module.
- Be aware that you can set a trap only for an enabled feature.
options:
group:
description:
- Case sensitive group.
required: true
choices: ['aaa', 'bfd', 'bgp', 'bridge', 'callhome', 'cfs', 'config',
'eigrp', 'entity', 'feature-control', 'generic', 'hsrp', 'license',
'link', 'lldp', 'mmode', 'ospf', 'pim', 'rf', 'rmon', 'snmp',
'storm-control', 'stpx', 'switchfabric', 'syslog', 'sysmgr', 'system',
'upgrade', 'vtp', 'all']
state:
description:
- Manage the state of the resource.
required: false
default: enabled
choices: ['enabled','disabled']
'''
EXAMPLES = '''
# ensure lldp trap configured
- nxos_snmp_traps:
group: lldp
state: enabled
# ensure lldp trap is not configured
- nxos_snmp_traps:
group: lldp
state: disabled
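# illustrative only: disable every supported trap group in one task
# (groups whose underlying feature is not enabled are left untouched, and
# bfd is skipped automatically on platforms that do not support it)
- nxos_snmp_traps:
    group: all
    state: disabled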
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: "snmp-server enable traps lldp ;"
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.network.nxos.nxos import get_capabilities
from ansible.module_utils.basic import AnsibleModule
def get_platform_id(module):
info = get_capabilities(module).get('device_info', {})
return (info.get('network_os_platform', ''))
def execute_show_command(command, module):
command = {
'command': command,
'output': 'text',
}
return run_commands(module, command)
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
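# e.g. flatten_list(['a', ['b', 'c'], 'd']) -> ['a', 'b', 'c', 'd']
# (only one level of nesting is flattened, which is all the callers build)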
def get_snmp_traps(group, module):
body = execute_show_command('show run snmp all', module)[0].split('\n')
resource = {}
feature_list = ['aaa', 'bfd', 'bgp', 'bridge', 'callhome', 'cfs', 'config',
'eigrp', 'entity', 'feature-control', 'generic', 'hsrp',
'license', 'link', 'lldp', 'mmode', 'ospf', 'pim',
'rf', 'rmon', 'snmp', 'storm-control', 'stpx',
'switchfabric', 'syslog', 'sysmgr', 'system', 'upgrade',
'vtp']
if 'all' in group and 'N3K-C35' in get_platform_id(module):
module.warn("Platform does not support bfd traps; bfd ignored for 'group: all' request")
feature_list.remove('bfd')
for each in feature_list:
for line in body:
if each == 'ospf':
# ospf behaves differently when routers are present
if 'snmp-server enable traps ospf' == line:
resource[each] = True
break
else:
if 'enable traps {0}'.format(each) in line:
if 'no ' in line:
resource[each] = False
break
else:
resource[each] = True
for each in feature_list:
if resource.get(each) is None:
# on some platforms, the 'no' cmd does not
# show up and so check if the feature is enabled
body = execute_show_command('show run | inc feature', module)[0]
if 'feature {0}'.format(each) in body:
resource[each] = False
find = resource.get(group, None)
    if group == 'all':
return resource
elif find is not None:
trap_resource = {group: find}
return trap_resource
else:
# if 'find' is None, it means that 'group' is a
# currently disabled feature.
return {}
def get_trap_commands(group, state, existing, module):
commands = []
enabled = False
disabled = False
if group == 'all':
if state == 'disabled':
for feature in existing:
if existing[feature]:
trap_command = 'no snmp-server enable traps {0}'.format(feature)
commands.append(trap_command)
elif state == 'enabled':
for feature in existing:
if existing[feature] is False:
trap_command = 'snmp-server enable traps {0}'.format(feature)
commands.append(trap_command)
else:
if group in existing:
if existing[group]:
enabled = True
else:
disabled = True
if state == 'disabled' and enabled:
commands.append(['no snmp-server enable traps {0}'.format(group)])
elif state == 'enabled' and disabled:
commands.append(['snmp-server enable traps {0}'.format(group)])
else:
module.fail_json(msg='{0} is not a currently '
'enabled feature.'.format(group))
return commands
def main():
argument_spec = dict(
state=dict(choices=['enabled', 'disabled'], default='enabled'),
group=dict(choices=['aaa', 'bfd', 'bgp', 'bridge', 'callhome', 'cfs', 'config',
'eigrp', 'entity', 'feature-control', 'generic', 'hsrp',
'license', 'link', 'lldp', 'mmode', 'ospf', 'pim',
'rf', 'rmon', 'snmp', 'storm-control', 'stpx',
'switchfabric', 'syslog', 'sysmgr', 'system', 'upgrade',
'vtp', 'all'],
required=True),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
results = {'changed': False, 'commands': [], 'warnings': warnings}
group = module.params['group'].lower()
state = module.params['state']
existing = get_snmp_traps(group, module)
commands = get_trap_commands(group, state, existing, module)
cmds = flatten_list(commands)
if cmds:
results['changed'] = True
if not module.check_mode:
load_config(module, cmds)
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,242,341,957,177,713,000 | 31.514894 | 96 | 0.574663 | false |
somic/paasta | paasta_tools/frameworks/task_store.py | 1 | 8406 | import copy
import json
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from kazoo.client import KazooClient
from kazoo.exceptions import BadVersionError
from kazoo.exceptions import NodeExistsError
from kazoo.exceptions import NoNodeError
from kazoo.protocol.states import ZnodeStat
from paasta_tools.utils import _log
class MesosTaskParametersIsImmutableError(Exception):
pass
_SelfT = TypeVar('_SelfT', bound='MesosTaskParameters')
class MesosTaskParameters(object):
health: Any
mesos_task_state: str
is_draining: bool
is_healthy: bool
offer: Any
resources: Any
def __init__(
self,
health=None,
mesos_task_state=None,
is_draining=None,
is_healthy=None,
offer=None,
resources=None,
):
self.__dict__['health'] = health
self.__dict__['mesos_task_state'] = mesos_task_state
self.__dict__['is_draining'] = is_draining
self.__dict__['is_healthy'] = is_healthy
self.__dict__['offer'] = offer
self.__dict__['resources'] = resources
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "%s(\n %s)" % (type(self).__name__, ',\n '.join(["%s=%r" % kv for kv in self.__dict__.items()]))
def __setattr__(self, name, value):
raise MesosTaskParametersIsImmutableError()
def __delattr__(self, name):
raise MesosTaskParametersIsImmutableError()
def merge(self: _SelfT, **kwargs) -> 'MesosTaskParameters':
"""Return a merged MesosTaskParameters object, where attributes in other take precedence over self."""
new_dict = copy.deepcopy(self.__dict__)
new_dict.update(kwargs)
return MesosTaskParameters(**new_dict)
@classmethod
def deserialize(cls: Type[_SelfT], serialized_params: Union[str, bytes]) -> _SelfT:
return cls(**json.loads(serialized_params))
def serialize(self):
return json.dumps(self.__dict__).encode('utf-8')
class TaskStore(object):
def __init__(self, service_name, instance_name, framework_id, system_paasta_config):
self.service_name = service_name
self.instance_name = instance_name
self.framework_id = framework_id
self.system_paasta_config = system_paasta_config
def get_task(self, task_id: str) -> MesosTaskParameters:
"""Get task data for task_id. If we don't know about task_id, return None"""
raise NotImplementedError()
def get_all_tasks(self) -> Dict[str, MesosTaskParameters]:
"""Returns a dictionary of task_id -> MesosTaskParameters for all known tasks."""
raise NotImplementedError()
def overwrite_task(self, task_id: str, params: MesosTaskParameters) -> None:
raise NotImplementedError()
def add_task_if_doesnt_exist(self, task_id: str, **kwargs) -> None:
"""Add a task if it does not already exist. If it already exists, do nothing."""
if self.get_task(task_id) is not None:
return
else:
self.overwrite_task(task_id, MesosTaskParameters(**kwargs))
def update_task(self, task_id: str, **kwargs) -> MesosTaskParameters:
existing_task = self.get_task(task_id)
if existing_task:
merged_params = existing_task.merge(**kwargs)
else:
merged_params = MesosTaskParameters(**kwargs)
self.overwrite_task(task_id, merged_params)
return merged_params
def garbage_collect_old_tasks(self, max_dead_task_age: float) -> None:
# TODO: call me.
# TODO: implement in base class.
raise NotImplementedError()
def close(self):
pass
class DictTaskStore(TaskStore):
def __init__(self, service_name, instance_name, framework_id, system_paasta_config):
self.tasks: Dict[str, MesosTaskParameters] = {}
super(DictTaskStore, self).__init__(service_name, instance_name, framework_id, system_paasta_config)
def get_task(self, task_id: str) -> MesosTaskParameters:
return self.tasks.get(task_id)
def get_all_tasks(self) -> Dict[str, MesosTaskParameters]:
"""Returns a dictionary of task_id -> MesosTaskParameters for all known tasks."""
return dict(self.tasks)
def overwrite_task(self, task_id: str, params: MesosTaskParameters) -> None:
# serialize/deserialize to make sure the returned values are the same format as ZKTaskStore.
self.tasks[task_id] = MesosTaskParameters.deserialize(params.serialize())
class ZKTaskStore(TaskStore):
def __init__(self, service_name, instance_name, framework_id, system_paasta_config):
super(ZKTaskStore, self).__init__(service_name, instance_name, framework_id, system_paasta_config)
self.zk_hosts = system_paasta_config.get_zk_hosts()
# For some reason, I could not get the code suggested by this SO post to work to ensure_path on the chroot.
# https://stackoverflow.com/a/32785625/25327
# Plus, it just felt dirty to modify instance attributes of a running connection, especially given that
# KazooClient.set_hosts() doesn't allow you to change the chroot. Must be for a good reason.
chroot = 'task_store/%s/%s/%s' % (service_name, instance_name, framework_id)
temp_zk_client = KazooClient(hosts=self.zk_hosts)
temp_zk_client.start()
temp_zk_client.ensure_path(chroot)
temp_zk_client.stop()
temp_zk_client.close()
self.zk_client = KazooClient(hosts='%s/%s' % (self.zk_hosts, chroot))
self.zk_client.start()
self.zk_client.ensure_path('/')
def close(self):
self.zk_client.stop()
self.zk_client.close()
def get_task(self, task_id: str) -> MesosTaskParameters:
params, stat = self._get_task(task_id)
return params
def _get_task(self, task_id: str) -> Tuple[MesosTaskParameters, ZnodeStat]:
"""Like get_task, but also returns the ZnodeStat that self.zk_client.get() returns """
try:
data, stat = self.zk_client.get('/%s' % task_id)
return MesosTaskParameters.deserialize(data), stat
except NoNodeError:
return None, None
except json.decoder.JSONDecodeError:
_log(
service=self.service_name,
instance=self.instance_name,
level='debug',
component='deploy',
line='Warning: found non-json-decodable value in zookeeper for task %s: %s' % (task_id, data),
)
return None, None
def get_all_tasks(self):
all_tasks = {}
for child_path in self.zk_client.get_children('/'):
task_id = self._task_id_from_zk_path(child_path)
params = self.get_task(task_id)
# sometimes there are bogus child ZK nodes. Ignore them.
if params is not None:
all_tasks[task_id] = params
return all_tasks
def update_task(self, task_id: str, **kwargs):
retry = True
while retry:
retry = False
existing_task, stat = self._get_task(task_id)
zk_path = self._zk_path_from_task_id(task_id)
if existing_task:
merged_params = existing_task.merge(**kwargs)
try:
self.zk_client.set(zk_path, merged_params.serialize(), version=stat.version)
except BadVersionError:
retry = True
else:
merged_params = MesosTaskParameters(**kwargs)
try:
self.zk_client.create(zk_path, merged_params.serialize())
except NodeExistsError:
retry = True
return merged_params
def overwrite_task(self, task_id: str, params: MesosTaskParameters, version=-1) -> None:
try:
self.zk_client.set(self._zk_path_from_task_id(task_id), params.serialize(), version=version)
except NoNodeError:
self.zk_client.create(self._zk_path_from_task_id(task_id), params.serialize())
def _zk_path_from_task_id(self, task_id: str) -> str:
return '/%s' % task_id
def _task_id_from_zk_path(self, zk_path: str) -> str:
return zk_path.lstrip('/')
| apache-2.0 | -8,859,340,874,580,179,000 | 35.547826 | 117 | 0.623007 | false |
GregWatson/PyVeri | VeriParser/PreProcess.py | 1 | 12739 | ##################################################
#
# PreProcess - class to perform preprcoessing on a raw source file
#
##################################################
import ParserError, re
from SourceText import SourceText
from VMacro import VMacro
from ParserHelp import *
import sys
''' Preprocess a source verilog file:
- strip comments but preserve lines.
- process `include "filename" commands: insert file into text but
track line numbers back to original file.
- process `define and corresponding use of a `define'd macro.
(Note: macros may invoke other macros, and macros can have params)
- process `undef to undefine a macro (remove it from table of known macros)
See detailed comments at end of file.
'''
class PreProcess(SourceText):
def __init__(self):
super(PreProcess, self).__init__();
self.text = [] # list of source text, no CR at EOL
def preprocess_text(self,debug=0):
''' Preprocess self.text. Return error if one occurs, else 0.
'''
if debug: print "Initial text code is\n", self.text
err_code = self.strip_comments(self.text)
if (err_code): return err_code
if debug:
print "After strip comments code is\n", self.text
err_code = self.preprocess_include_and_define()
if (err_code): return err_code
return 0
@staticmethod
def strip_comments(text):
'''
text: List of verilog text
Text lines must not end in CR (should have been stripped).
Output: error code (0=ok)
Effect: text is modified in-place.
\ '''
NONE = 0
IN_LONG = 1
state = NONE
for line_num, line in enumerate(text):
start = 0
while ( start < len(line) ) :
if state == NONE:
# Look for either // or /*
sc_ix = line.find('//',start) # short comment location
lc_ix = line.find('/*',start) # long comment location
# If we have neither then we are done for this line.
if ( sc_ix == -1 and lc_ix == -1 ) :
text[line_num] = line
break
s_ix = line.find('"',start) # double quote that starts a string.
# see which comes first
sc_first = ( sc_ix >= 0 ) and \
( ( s_ix == -1 or ( s_ix > sc_ix ) ) and
( lc_ix == -1 ) or ( lc_ix > sc_ix ) )
if sc_first:
line = line[0:sc_ix]
text[line_num] = line
break
# now check string ("....")
string_first = ( s_ix >= 0 ) and \
( lc_ix == -1 or ( lc_ix > s_ix ) )
if string_first:
# string starts before any comment. Advance to next "
s_ix = line.find('"',s_ix+1)
if ( s_ix == - 1 ) : # no closing " - error in verilog. ignore it here.
text[line_num] = line
break
# if char before " is \ then the " doesnt count (it's \")
while s_ix != -1 and line[s_ix - 1] == '\\' :
s_ix = line.find('"',s_ix+1)
if ( s_ix == - 1 ) : # no closing " - error in verilog. ignore it here.
text[line_num] = line
break
start = s_ix + 1
if (start >= len(line) ) :
text[line_num] = line
break
continue
# Must be a long comment.
# If the long comment ends this line then we strip it out
# and go round again.
e_ix = line.find('*/',lc_ix+2)
if e_ix >= 0:
line = line[0:lc_ix] + line[e_ix+2:]
start = lc_ix
if ( start >= len(line) ):
text[line_num] = line
break
else: # didnt see end of comment - must run on.
line = line[0:lc_ix]
text[line_num] = line
state = IN_LONG
break
else: # state is IN_LONG - look for first */ that ends the comment.
lc_ix = line.find('*/')
if lc_ix == -1 : # did not see */ in this line - comment continuing
text[line_num] = ''
break
# found */
line = line[lc_ix+2:]
if len(line) == 0:
text[line_num] = ''
state = NONE
if state == IN_LONG:
return ParserError.ERR_UNTERMINATED_COMMENT
else:
return 0
def add_macro(self, macro_text, line_num, filename):
''' macro_text is a text string defined in filename at line line_num.
Create macro object and add it to list of known macros.
Do not interpolate any `macro used in the definition - they are
interpolated when macro is used.
'''
macro = VMacro(macro_text, line_num, filename)
if macro.name in self.macros:
orig_macro = self.macros[macro.name]
print "WARNING: redefined macro '%s' in file %s at line %d." \
% (macro.name, macro.filename, macro.line_num)
print " Prev defined in file %s at line %d." \
% (orig_macro.filename, orig_macro.line_num)
self.macros[macro.name] = macro
if self.debug: print "Added macro:", macro
def undef_macro(self, macro_name, line_num, filename):
''' undef the specified macro name '''
if macro_name not in self.macros:
print "WARNING: Macro not previously defined: `undef macro '%s' in file %s at line %d." \
% (macro_name, filename, line_num)
return
if self.debug: print "Undef'd Macro '%s' in file %s at line %d." \
% (macro_name, filename, line_num)
del self.macros[macro_name]
def do_macro_substitution(self, line, line_num, filename):
''' Do a single macro substitution (if any).
Note: assumes that first backtick is a macro subst - i.e.
if it was a `include or something else then you better
have dealt with it already.
Returns modified line.
'''
tick_pos = line.find('`')
if tick_pos == -1: return line
# get macro name (after the tick)
(err, macro_name) = get_simple_identifier_at_offset(line, tick_pos+1)
if err:
ParserError.report_syntax_err(ParserError.SE_ID_EXPECTED_AFTER_TICK, line_num, filename)
if macro_name not in self.macros:
ParserError.report_syntax_err(ParserError.SE_MACRO_NOT_DEFINED, line_num, filename)
# Must differentiate between macro with args and without.
# A macro with args must be followed by '(' immediately after name.
nxt_pos = tick_pos + len(macro_name) + 1
if nxt_pos >= len(line) or line[nxt_pos] != '(': # simple macro (no args)
line = line[0:tick_pos] + self.macros[macro_name].text + line[nxt_pos:]
else: # macro with args
(err, arg_end_pos, argL) = get_comma_sep_exprs_from_balanced_string(line, nxt_pos)
if err:
ParserError.report_syntax_err(err, line_num, filename)
macro = self.macros[macro_name]
# get the original macro body, replacing formal params with actual args.
(err,new_body) = macro.subst_args_in_body(argL)
if err: ParserError.report_syntax_err(err, line_num, filename)
line = line[0:tick_pos] + new_body + line[arg_end_pos+1:]
return line
def insert_include_file(self, inc_file, index):
''' insert the specified `include file in self.text at the
location specified by index.
Remove the old line (the `include line)
Return err or 0
'''
if (self.debug): print "DBG: Including `include file '%s'" % inc_file
(err, new_text) = self.load_text_from_file_and_strip_CR(inc_file)
if err: return err
err = self.strip_comments(new_text)
if not err: # replace this line with new text into self.text
err = self.insert_source_from_string_array_to_line(new_text, inc_file, index)
if err: return err
self.delete_text_range( first=index+len(new_text) )
if (self.debug):
print "DBG: after including file",inc_file,"text is now:"
self.print_text()
return 0
def preprocess_include_and_define(self):
''' self.text already stripped of comments.
Process `include lines as well as `define macros.
Process macro instantiations (replace them with their definitions).
Process `undef
Modifies self.text in place.
returns 0 or error_num
'''
pat_keywords = re.compile(r'(?:`timescale)')
pat_include = re.compile(r'`include\s*"([^"]+)"')
pat_define = re.compile(r'`define\s+(.+)')
pat_undef = re.compile(r'`undef\s+([a-zA-Z_][\w_$]*)')
text_ix = 0
while text_ix < len(self.text):
line = self.text[text_ix]
line_num = self.original_line_num[text_ix]
filename = self.original_file_list[self.original_file_idx[text_ix]]
while line.find("`") != -1:
# Ignore keywords
match = pat_keywords.search(line)
if match: # it's a keyword other than `include, `define etc.
break
# Look for `include "filename". If found then read the text from that
# file, strip comments, and insert it into self.text, replacing the
# current `include line.
match = pat_include.search(line)
if match: # it's a `include.
inc_file = match.group(1)
err = self.insert_include_file( inc_file, text_ix )
if err: return err
line = self.text[text_ix] # this line has changed. process it again
else:
# Look for `define
match = pat_define.search(line)
if match: # it's a `define, so add the macro
def_text = [ match.group(1) ]
self.text[text_ix] = '' # remove text.
while def_text[-1].endswith('\\'): # macro continues to next line
def_text[-1] = def_text[-1].rstrip('\\')
text_ix += 1
if text_ix >= len(self.text): return ParserError.ERR_UNTERMINATED_MACRO
def_text.append(self.text[text_ix])
self.text[text_ix] = '' # remove text.
macro = ''.join(def_text)
self.add_macro( macro, line_num, filename )
break
else: # look for undef
match = pat_undef.search(line)
if match: # it's a `undef so delete the macro
macro_name = match.group(1)
self.undef_macro(macro_name, line_num, filename )
self.text[text_ix] = '' # remove text.
break
# Not a keyword, so check for macro substitution.
else:
self.text[text_ix] = self.do_macro_substitution(line, line_num, filename)
line = self.text[text_ix] # this line has changed. process it again
text_ix +=1
return 0
'''
Remove all comments (long and short) but preserve resulting blank lines:
Watch out for comment chars starting in strings - ignore them.
Once in a long comment (/* ... */) we ignore everything until we see */
Short comment ( // ) is to end of line.
'''
| mit | -2,614,302,628,154,292,000 | 39.699681 | 105 | 0.488578 | false |
SLAMon/SLAMon | slamon/afm/tests/testing_routes_tests.py | 2 | 5058 | from slamon.afm.routes.testing import testing_routes # Shows as unused but is actually required for routes
from slamon.afm.afm_app import app
from slamon.afm.tests.agent_routes_tests import AFMTest
from webtest import TestApp
import jsonschema
class TestDevRoutes(AFMTest):
task_list_response_schema = {
'type': 'object',
'properties': {
'tasks': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'task_id': {
'type': 'string',
'pattern': '^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'
},
'task_type': {
'type': 'string'
},
'task_version': {
'type': 'integer'
},
'task_data': {
'type': 'string'
}
},
'required': ['task_id', 'task_type', 'task_version']
}
}
},
'required': ['tasks'],
'additionalProperties': False
}
@staticmethod
def test_post_task_non_json():
test_app = TestApp(app)
assert test_app.post('/testing/tasks', expect_errors=True).status_int == 400
assert test_app.post('/testing/tasks/', expect_errors=True).status_int == 400
@staticmethod
def test_post_task_empty():
test_app = TestApp(app)
assert test_app.post_json('/testing/tasks', {}, expect_errors=True).status_int == 400
assert test_app.post_json('/testing/tasks/', {}, expect_errors=True).status_int == 400
@staticmethod
def test_post_task_invalid():
test_app = TestApp(app)
# Invalid type
assert test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_type': 5,
'task_version': 1
}, expect_errors=True).status_int == 400
# Invalid uuid
assert test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e541',
'task_type': 'test-task-1',
'task_version': 1
}, expect_errors=True).status_int == 400
# Invalid version
assert test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_type': 'test-task-1',
'task_version': 'test version'
}, expect_errors=True).status_int == 400
@staticmethod
def test_post_task():
test_app = TestApp(app)
test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'test_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_type': 'wait',
'task_version': 1
})
test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546014',
'test_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_type': 'test-task-1',
'task_version': 1
})
@staticmethod
def test_post_task_with_data():
test_app = TestApp(app)
test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'test_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_type': 'wait',
'task_version': 1,
'task_data': {}
})
test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546014',
'test_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_type': 'test-task-1',
'task_version': 1,
'task_data': {'test': 'value'}
})
@staticmethod
def test_post_task_duplicate():
test_app = TestApp(app)
test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'test_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_type': 'wait',
'task_version': 1
})
# Try to post 1st task again
assert test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'test_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_type': 'test-task-1',
'task_version': 1
}, expect_errors=True).status_int == 400
@staticmethod
def test_get_tasks():
test_app = TestApp(app)
test_app.post_json('/testing/tasks', {
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'test_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
'task_type': 'wait',
'task_version': 1
})
resp = test_app.get('/testing/tasks')
jsonschema.validate(resp.json, TestDevRoutes.task_list_response_schema)
| apache-2.0 | -4,732,729,330,317,374,000 | 34.125 | 118 | 0.507513 | false |
systers/hyperkitty | example_project/wsgi.py | 2 | 1173 | """
WSGI config for HyperKitty project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/wsgi/
"""
import os
# import sys
# import site
# For some unknown reason, sometimes mod_wsgi fails to set the python paths to
# the virtualenv, with the 'python-path' option. You can do it here too.
#
# # Remember original sys.path.
# prev_sys_path = list(sys.path)
# # Add here, for the settings module
# site.addsitedir(os.path.abspath(os.path.dirname(__file__)))
# # Add the virtualenv
# venv = os.path.join(os.path.abspath(os.path.dirname(__file__)),
# '..', 'lib', 'python2.6', 'site-packages')
# site.addsitedir(venv)
# # Reorder sys.path so new directories at the front.
# new_sys_path = []
# for item in list(sys.path):
# if item not in prev_sys_path:
# new_sys_path.append(item)
# sys.path.remove(item)
# sys.path[:0] = new_sys_path
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
application = get_wsgi_application()
| gpl-3.0 | -6,744,704,900,840,194,000 | 29.868421 | 78 | 0.685422 | false |
tswast/google-cloud-python | vision/google/cloud/vision_v1p3beta1/gapic/product_search_client_config.py | 4 | 4551 | config = {
"interfaces": {
"google.cloud.vision.v1p3beta1.ProductSearch": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000,
}
},
"methods": {
"CreateProductSet": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"ListProductSets": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"GetProductSet": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"UpdateProductSet": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"DeleteProductSet": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"CreateProduct": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"ListProducts": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"GetProduct": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"UpdateProduct": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"DeleteProduct": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"CreateReferenceImage": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"DeleteReferenceImage": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"ListReferenceImages": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"GetReferenceImage": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"AddProductToProductSet": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"RemoveProductFromProductSet": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"ListProductsInProductSet": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"ImportProductSets": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
},
}
}
}
| apache-2.0 | 3,453,082,347,446,821,000 | 39.274336 | 67 | 0.3931 | false |
margguo/python-ivi | ivi/agilent/agilent6038A.py | 7 | 1739 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent603xA import *
class agilent6038A(agilent603xA):
"Agilent 6038A IVI DC power supply driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '6038A')
super(agilent6038A, self).__init__(*args, **kwargs)
self._output_count = 1
self._output_spec = [
{
'range': {
'P60V': (61.425, 10.2375)
},
'ovp_max': 63.0,
'voltage_max': 61.425,
'current_max': 10.2375
}
]
| mit | -7,735,010,441,181,392,000 | 33.78 | 77 | 0.67165 | false |
number7/budgetminder | GData/Tests/GDataTestHTTPServer.py | 2 | 9502 | #!/usr/bin/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple server for testing the Objective-C GData Framework
This http server is for use by GDataServiceTest.m in testing
both authentication and object retrieval.
Requests to the path /accounts/ClientLogin are assumed to be
for login; other requests are for object retrieval
"""
import string
import cgi
import time
import os
import sys
import re
import mimetypes
import socket
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from optparse import OptionParser
class ServerTimeoutException(Exception):
pass
class HTTPTimeoutServer(HTTPServer):
"""HTTP server for testing network requests.
This server will throw an exception if it receives no connections for
several minutes. We use this to ensure that the server will be cleaned
up if something goes wrong during the unit testing.
"""
def get_request(self):
self.socket.settimeout(120.0)
result = None
while result is None:
try:
result = self.socket.accept()
except socket.timeout:
raise ServerTimeoutException
result[0].settimeout(None)
return result
class SimpleServer(BaseHTTPRequestHandler):
"""HTTP request handler for testing GData network requests.
This is an implementation of a request handler for BaseHTTPServer,
specifically designed for GData service code usage.
Normal requests for GET/POST/PUT simply retrieve the file from the
supplied path, starting in the current directory. A cookie called
TestCookie is set by the response header, with the value of the filename
requested.
DELETE requests always succeed.
Appending ?status=n results in a failure with status value n.
Paths ending in .auth have the .auth extension stripped, and must have
an authorization header of "GoogleLogin auth=GoodAuthToken" to succeed.
Paths ending in .authsub have the .authsub extension stripped, and must have
an authorization header of "AuthSub token=GoodAuthSubToken" to succeed.
Successful results have a Last-Modified header set; if that header's value
("thursday") is supplied in a request's "If-Modified-Since" header, the
result is 304 (Not Modified).
Requests to /accounts/ClientLogin will fail if supplied with a body
containing Passwd=bad. If they contain logintoken and logincaptcha values,
those must be logintoken=CapToken&logincaptcha=good to succeed.
"""
def do_GET(self):
self.doAllRequests()
def do_POST(self):
self.doAllRequests()
def do_PUT(self):
self.doAllRequests()
def do_DELETE(self):
self.doAllRequests()
def doAllRequests(self):
# This method handles all expected incoming requests
#
# Requests to path /accounts/ClientLogin are assumed to be for signing in
#
# Other paths are for retrieving a local xml file. An .auth appended
# to an xml file path will require authentication (meaning the Authorization
# header must be present with the value "GoogleLogin auth=GoodAuthToken".)
# Delete commands succeed but return no data.
#
# GData override headers are supported.
#
# Any auth password is valid except "bad", which will fail, and "captcha",
# which will fail unless the authentication request's post string includes
# "logintoken=CapToken&logincaptcha=good"
# We will use a readable default result string since it should never show up
# in output
resultString = "default GDataTestServer result\n";
resultStatus = 0
headerType = "text/plain"
postString = ""
modifiedDate = "thursday" # clients should treat dates as opaque, generally
# auth queries and some GData queries include post data
postLength = int(self.headers.getheader("Content-Length", "0"));
if postLength > 0:
postString = self.rfile.read(postLength)
ifModifiedSince = self.headers.getheader("If-Modified-Since", "");
# retrieve the auth header; require it if the file path ends
# with the string ".auth" or ".authsub"
authorization = self.headers.getheader("Authorization", "")
if self.path.endswith(".auth"):
if authorization != "GoogleLogin auth=GoodAuthToken":
self.send_error(401,"Unauthorized: %s" % self.path)
return
self.path = self.path[:-5] # remove the .auth at the end
if self.path.endswith(".authsub"):
if authorization != "AuthSub token=GoodAuthSubToken":
self.send_error(401,"Unauthorized: %s" % self.path)
return
self.path = self.path[:-8] # remove the .authsub at the end
overrideHeader = self.headers.getheader("X-HTTP-Method-Override", "")
httpCommand = self.command
if httpCommand == "POST" and len(overrideHeader) > 0:
httpCommand = overrideHeader
try:
if self.path.endswith("/accounts/ClientLogin"):
#
# it's a sign-in attempt; it's good unless the password is "bad" or
# "captcha"
#
# use regular expression to find the password
password = ""
searchResult = re.search("(Passwd=)([^&\n]*)", postString)
if searchResult:
password = searchResult.group(2)
if password == "bad":
resultString = "Error=BadAuthentication\n"
resultStatus = 403
elif password == "captcha":
logintoken = ""
logincaptcha = ""
# use regular expressions to find the captcha token and answer
searchResult = re.search("(logintoken=)([^&\n]*)", postString);
if searchResult:
logintoken = searchResult.group(2)
searchResult = re.search("(logincaptcha=)([^&\n]*)", postString);
if searchResult:
logincaptcha = searchResult.group(2)
# if the captcha token is "CapToken" and the answer is "good"
# then it's a valid sign in
if (logintoken == "CapToken") and (logincaptcha == "good"):
resultString = "SID=GoodSID\nLSID=GoodLSID\nAuth=GoodAuthToken\n"
resultStatus = 200
else:
# incorrect captcha token or answer provided
resultString = ("Error=CaptchaRequired\nCaptchaToken=CapToken\n"
"CaptchaUrl=CapUrl\n")
resultStatus = 403
else:
# valid username/password
resultString = "SID=GoodSID\nLSID=GoodLSID\nAuth=GoodAuthToken\n"
resultStatus = 200
elif httpCommand == "DELETE":
#
# it's an object delete; read and return empty data
#
resultString = ""
resultStatus = 200
headerType = "text/plain"
else:
# queries that have something like "?status=456" should fail with the
# status code
searchResult = re.search("(status=)([0-9]+)", self.path)
if searchResult:
status = searchResult.group(2)
self.send_error(int(status),
"Test HTTP server status parameter: %s" % self.path)
return
# if the client gave us back our modified date, then say there's no
# change in the response
if ifModifiedSince == modifiedDate:
self.send_response(304) # Not Modified
return
else:
#
# it's an object fetch; read and return the XML file
#
f = open("." + self.path)
resultString = f.read()
f.close()
resultStatus = 200
fileTypeInfo = mimetypes.guess_type("." + self.path)
headerType = fileTypeInfo[0] # first part of the tuple is mime type
self.send_response(resultStatus)
self.send_header("Content-type", headerType)
self.send_header("Last-Modified", modifiedDate)
# set TestCookie to equal the file name requested
cookieValue = os.path.basename("." + self.path)
self.send_header('Set-Cookie', 'TestCookie=%s' % cookieValue)
self.end_headers()
self.wfile.write(resultString)
except IOError:
self.send_error(404,"File Not Found: %s" % self.path)
def main():
try:
parser = OptionParser()
parser.add_option("-p", "--port", dest="port", help="Port to run server on",
type="int", default="80")
parser.add_option("-r", "--root", dest="root", help="Where to root server",
default=".")
(options, args) = parser.parse_args()
os.chdir(options.root)
server = HTTPTimeoutServer(("127.0.0.1", options.port), SimpleServer)
sys.stdout.write("started GDataTestServer.py...");
sys.stdout.flush();
server.serve_forever()
except KeyboardInterrupt:
print "^C received, shutting down server"
server.socket.close()
except ServerTimeoutException:
print "Too long since the last request, shutting down server"
server.socket.close()
if __name__ == "__main__":
main()
| mit | -7,799,563,558,175,543,000 | 33.933824 | 80 | 0.656072 | false |
jtk54/spinnaker | testing/citest/spinnaker_testing/base_scenario_support.py | 2 | 4469 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for platform-specific support in SpinnakerTestScenario."""
import logging
import threading
class BaseScenarioPlatformSupport(object):
"""Interface for adding a specific platform to SpinnakerTestScenario."""
@property
def platform_name(self):
"""Returns the platform name bound at construction."""
return self.__platform_name
@property
def scenario(self):
"""Returns the scenario instance bound at construction."""
return self.__scenario
@property
def observer(self):
"""Returns the default observer for this platform as configured.
Raises:
This will throw an exception if the observer is not available for
whatever reason. The reason may vary depending on the platform.
"""
with self.__lock:
if self.__observer is None:
logger = logging.getLogger(__name__)
logger.info('Initializing observer for "%s"', self.__platform_name)
try:
self.__observer = self._make_observer()
except:
logger.exception('Failed to create observer for "%s"',
self.__platform_name)
raise
return self.__observer
@classmethod
def init_bindings_builder(cls, scenario_class, builder, defaults):
"""Mediates to the specific methods in this interface.
This is not intended to be overriden further. Instead, override
the remaining methods.
Args:
scenario_class: [class spinnaker_testing.SpinnakerTestScenario]
builder: [citest.base.ConfigBindingsBuilder]
defaults: [dict] Default binding value overrides.
This is used to initialize the default commandline parameters.
"""
    cls.add_commandline_parameters(scenario_class, builder, defaults)
@classmethod
def add_commandline_parameters(cls, scenario_class, builder, defaults):
"""Adds commandline arguments to the builder.
Args:
scenario_class: [class spinnaker_testing.SpinnakerTestScenario]
builder: [citest.base.ConfigBindingsBuilder]
defaults: [dict] Default binding value overrides.
This is used to initialize the default commandline parameters.
"""
raise NotImplementedError('{0} not implemented'.format(cls))
def __init__(self, platform_name, scenario):
"""Constructor.
This ensures the local bindings for:
SPINNAKER_<platform>_ACCOUNT
SPINNAKER_<platform>_ENABLED
where <platform> is the platform_name or OS for openstack.
It will use the scenario's deployed configuration if available and needed
so that these variables will correspond to the agent's target.
The default ACCOUNT is the configured primary account.
Args:
platform_name: [string] Identifies which platform this is.
This should be the name used in the Spinnaker "provider".
scenario: [SpinnakerTestScenario] The scenario being supported.
"""
self.__lock = threading.Lock()
self.__observer = None
self.__scenario = scenario
self.__platform_name = platform_name
test_platform_key = platform_name if platform_name != 'openstack' else 'os'
bindings = scenario.bindings
agent = scenario.agent
account_key = 'spinnaker_{0}_account'.format(test_platform_key)
if not bindings.get(account_key):
bindings[account_key] = agent.deployed_config.get(
'providers.{0}.primaryCredentials.name'.format(platform_name))
enabled_key = 'spinnaker_{0}_enabled'.format(test_platform_key)
if bindings.get(enabled_key, None) is None:
bindings[enabled_key] = agent.deployed_config.get(
'providers.{0}.enabled'.format(platform_name))
def _make_observer(self):
"""Hook for specialized classes to instantiate their observer.
This method is called internally as needed when accessing the
observer property.
"""
    raise NotImplementedError('{0} not implemented'.format(type(self)))
| apache-2.0 | 3,706,213,101,113,180,700 | 35.333333 | 79 | 0.703066 | false |
fluentpython/example-code | 03-dict-set/support/container_perftest.py | 7 | 1460 | """
Container ``in`` operator performance test
"""
import sys
import timeit
SETUP = '''
import array
selected = array.array('d')
with open('selected.arr', 'rb') as fp:
selected.fromfile(fp, {size})
if {container_type} is dict:
haystack = dict.fromkeys(selected, 1)
else:
haystack = {container_type}(selected)
if {verbose}:
print(type(haystack), end=' ')
print('haystack: %10d' % len(haystack), end=' ')
needles = array.array('d')
with open('not_selected.arr', 'rb') as fp:
needles.fromfile(fp, 500)
needles.extend(selected[::{size}//500])
if {verbose}:
print(' needles: %10d' % len(needles), end=' ')
'''
TEST = '''
found = 0
for n in needles:
if n in haystack:
found += 1
if {verbose}:
print(' found: %10d' % found)
'''
def test(container_type, verbose):
MAX_EXPONENT = 7
for n in range(3, MAX_EXPONENT + 1):
size = 10**n
setup = SETUP.format(container_type=container_type,
size=size, verbose=verbose)
test = TEST.format(verbose=verbose)
tt = timeit.repeat(stmt=test, setup=setup, repeat=5, number=1)
print('|{:{}d}|{:f}'.format(size, MAX_EXPONENT + 1, min(tt)))
if __name__=='__main__':
if '-v' in sys.argv:
sys.argv.remove('-v')
verbose = True
else:
verbose = False
if len(sys.argv) != 2:
print('Usage: %s <container_type>' % sys.argv[0])
else:
test(sys.argv[1], verbose)
| mit | 2,378,384,408,277,433,000 | 25.545455 | 70 | 0.583562 | false |
hiuwo/acq4 | acq4/analysis/tools/Utility.py | 1 | 37603 | """
Utils.py - general utility routines
- power spectrum
- elliptical filtering
- handling very long input lines for dictionaries
- general measurement routines for traces (mean, std, spikes, etc)
"declassed", 7/28/09 p. manis
Use as:
import Utility as Utils
then call Utils.xxxxx()
"""
# January, 2009
# Paul B. Manis, Ph.D.
# UNC Chapel Hill
# Department of Otolaryngology/Head and Neck Surgery
# Supported by NIH Grants DC000425-22 and DC004551-07 to PBM.
# Copyright Paul Manis, 2009
#
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, re, os
import numpy
import numpy.ma as ma
#import numpy.linalg.lstsq
import scipy.fftpack as spFFT
import scipy.signal as spSignal
from sets import Set
from random import sample
debugFlag = False
def setDebug(debug=False):
    global debugFlag # update the module-level flag, not a local variable
    if debug:
        debugFlag = True
    else:
        debugFlag = False
def pSpectrum(data=None, samplefreq=44100):
npts = len(data)
# we should window the data here
if npts == 0:
print "? no data in pSpectrum"
return
# pad to the nearest higher power of 2
(a,b) = numpy.frexp(npts)
if a <= 0.5:
        b = b - 1 # npts is already 2**(b-1), an exact power of 2, so no padding is needed
npad = 2**b -npts
if debugFlag:
print "npts: %d npad: %d npad+npts: %d" % (npts, npad, npad+npts)
padw = numpy.append(data, numpy.zeros(npad))
npts = len(padw)
sigfft = spFFT.fft(padw)
    nUniquePts = int(numpy.ceil((npts+1)/2.0)) # int so it can be used as a slice index
sigfft = sigfft[0:nUniquePts]
spectrum = abs(sigfft)
spectrum = spectrum / float(npts) # scale by the number of points so that
# the magnitude does not depend on the length
# of the signal or on its sampling frequency
spectrum = spectrum**2 # square it to get the power
spmax = numpy.amax(spectrum)
spectrum = spectrum + 1e-12*spmax
# multiply by two (see technical document for details)
# odd nfft excludes Nyquist point
if npts % 2 > 0: # we've got odd number of points fft
spectrum[1:len(spectrum)] = spectrum[1:len(spectrum)] * 2
else:
spectrum[1:len(spectrum) -1] = spectrum[1:len(spectrum) - 1] * 2 # we've got even number of points fft
freqAzero = numpy.arange(0, nUniquePts, 1.0) * (samplefreq / npts)
return(spectrum, freqAzero)
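# Minimal usage sketch for pSpectrum(); the sampling rate, test tone and expected peak
# below are illustrative assumptions, not values used elsewhere in this module.
def _example_pSpectrum():
    fs = 1000.0 # assumed sampling rate, Hz
    t = numpy.arange(0, 1.0, 1.0/fs)
    sig = numpy.sin(2.0*numpy.pi*50.0*t) # 50 Hz test tone
    (power, freqs) = pSpectrum(sig, samplefreq=fs)
    print "peak near %.1f Hz" % freqs[numpy.argmax(power)] # expect roughly 50 Hz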
def sinefit(x, y, F):
""" LMS fit of a sine wave with period T to the data in x and y
aka "cosinor" analysis.
"""
npar = 2
w = 2.0 * numpy.pi * F
A = numpy.zeros((len(x), npar), float)
A[:,0] = numpy.sin(w*x)
A[:,1] = numpy.cos(w*x)
    (p, residuals, rank, s) = numpy.linalg.lstsq(A, y)
Amplitude = numpy.sqrt(p[0]**2+p[1]**2)
Phase = numpy.arctan2(p[1],p[0]) # better check this...
# yest=Amplitude*cos(w*x+Phase) # estimated y
#
# f=numpy.sum((yest-numpy.mean(y)).^2)/numpy.sum((y-yest).^2)*(length(y)-3)/2
# P=1-fcdf(f,2,length(y)-3);
return (Amplitude, Phase)
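# Sketch of a cosinor fit with sinefit(); the test frequency and sine/cosine weights
# are made up for illustration only.
def _example_sinefit():
    F = 2.0 # assumed frequency, Hz
    x = numpy.arange(0, 2.0, 0.001)
    y = 1.5*numpy.sin(2.0*numpy.pi*F*x) + 0.7*numpy.cos(2.0*numpy.pi*F*x)
    (amp, phase) = sinefit(x, y, F)
    print "amplitude %.3f phase %.3f rad" % (amp, phase) # amp should be near sqrt(1.5**2 + 0.7**2)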
def sinefit_precalc(x, y, A):
""" LMS fit of a sine wave with period T to the data in x and y
aka "cosinor" analysis.
assumes that A (in sinefit) is precalculated
"""
    (p, residuals, rank, s) = numpy.linalg.lstsq(A, y)
Amplitude = numpy.sqrt(p[0]**2+p[1]**2)
Phase = numpy.arctan2(p[1],p[0]) # better check this...
# yest=Amplitude*cos(w*x+Phase) # estimated y
#
# f=numpy.sum((yest-numpy.mean(y)).^2)/numpy.sum((y-yest).^2)*(length(y)-3)/2
# P=1-fcdf(f,2,length(y)-3);
return (Amplitude, Phase)
def savitzky_golay(data, kernel = 11, order = 4):
"""
applies a Savitzky-Golay filter
input parameters:
- data => data as a 1D numpy array
    - kernel => a positive integer > 2*order giving the kernel size
    - order => order of the polynomial
    returns smoothed data as a numpy array
    invoke like:
    smoothed = savitzky_golay(<rough>, [kernel = value], [order = value])
"""
try:
kernel = abs(int(kernel))
order = abs(int(order))
except ValueError, msg:
raise ValueError("kernel and order have to be of type int (floats will be converted).")
if kernel % 2 != 1 or kernel < 1:
raise TypeError("kernel size must be a positive odd number, was: %d" % kernel)
if kernel < order + 2:
raise TypeError("kernel is to small for the polynomals\nshould be > order + 2")
# a second order polynomal has 3 coefficients
order_range = range(order+1)
half_window = (kernel -1) // 2
b = numpy.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
# since we don't want the derivative, else choose [1] or [2], respectively
m = numpy.linalg.pinv(b).A[0]
window_size = len(m)
half_window = (window_size-1) // 2
# precompute the offset values for better performance
offsets = range(-half_window, half_window+1)
offset_data = zip(offsets, m)
smooth_data = list()
# temporary data, with padded zeros (since we want the same length after smoothing)
#data = numpy.concatenate((numpy.zeros(half_window), data, numpy.zeros(half_window)))
# temporary data, with padded first/last values (since we want the same length after smoothing)
firstval=data[0]
lastval=data[len(data)-1]
data = numpy.concatenate((numpy.zeros(half_window)+firstval, data, numpy.zeros(half_window)+lastval))
for i in range(half_window, len(data) - half_window):
value = 0.0
for offset, weight in offset_data:
value += weight * data[i + offset]
smooth_data.append(value)
return numpy.array(smooth_data)
# filter signal with elliptical filter
def SignalFilter(signal, LPF, HPF, samplefreq):
if debugFlag:
print "sfreq: %f LPF: %f HPF: %f" % (samplefreq, LPF, HPF)
flpf = float(LPF)
fhpf = float(HPF)
sf = float(samplefreq)
sf2 = sf/2
wp = [fhpf/sf2, flpf/sf2]
ws = [0.5*fhpf/sf2, 2*flpf/sf2]
if debugFlag:
print "signalfilter: samplef: %f wp: %f, %f ws: %f, %f lpf: %f hpf: %f" % (
sf, wp[0], wp[1], ws[0], ws[1], flpf, fhpf)
filter_b,filter_a=spSignal.iirdesign(wp, ws,
gpass=1.0,
gstop=60.0,
ftype="ellip")
msig = numpy.mean(signal)
signal = signal - msig
w=spSignal.lfilter(filter_b, filter_a, signal) # filter the incoming signal
signal = signal + msig
if debugFlag:
print "sig: %f-%f w: %f-%f" % (numpy.amin(signal), numpy.amax(signal), numpy.amin(w), numpy.amax(w))
return(w)
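# Hedged band-pass example for SignalFilter(); the sample rate and corner frequencies
# are assumptions chosen only to show the argument order (signal, LPF, HPF, samplefreq).
def _example_SignalFilter():
    fs = 10000.0 # assumed sampling rate, Hz
    t = numpy.arange(0, 1.0, 1.0/fs)
    sig = numpy.sin(2.0*numpy.pi*100.0*t) + numpy.sin(2.0*numpy.pi*2000.0*t)
    return SignalFilter(sig, LPF=500.0, HPF=20.0, samplefreq=fs) # keep roughly the 20-500 Hz band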
# filter with Butterworth low pass, using time-causal lfilter
def SignalFilter_LPFButter(signal, LPF, samplefreq, NPole = 8):
flpf = float(LPF)
sf = float(samplefreq)
wn = [flpf/(sf/2.0)]
b, a = spSignal.butter(NPole, wn, btype='low', output='ba')
zi = spSignal.lfilter_zi(b,a)
out, zo = spSignal.lfilter(b, a, signal, zi=zi*signal[0])
return(numpy.array(out))
# filter with Butterworth high pass, using time-causal lfilter
def SignalFilter_HPFButter(signal, HPF, samplefreq, NPole = 8):
flpf = float(HPF)
sf = float(samplefreq)
wn = [flpf/(sf/2.0)]
b, a = spSignal.butter(NPole, wn, btype='high', output='ba')
zi = spSignal.lfilter_zi(b,a)
out, zo = spSignal.lfilter(b, a, signal, zi=zi*signal[0])
return(numpy.array(out))
# filter signal with low-pass Bessel
def SignalFilter_LPFBessel(signal, LPF, samplefreq, NPole = 8, reduce = False):
""" Low pass filter a signal, possibly reducing the number of points in the
data array.
    signal: a numpy array of dim = 1, 2 or 3. The "last" dimension is filtered.
    LPF: low pass filter frequency, in Hz
    samplefreq: sampling frequency (points/second)
NPole: number of poles in the filter.
reduce: Flag that controls whether the resulting data is subsampled or not
"""
if debugFlag:
print "sfreq: %f LPF: %f HPF: %f" % (samplefreq, LPF)
flpf = float(LPF)
sf = float(samplefreq)
wn = [flpf/(sf/2.0)]
reduction = 1
if reduce:
if LPF <= samplefreq/2.0:
reduction = int(samplefreq/LPF)
if debugFlag is True:
print "signalfilter: samplef: %f wn: %f, lpf: %f, NPoles: %d " % (
sf, wn, flpf, NPole)
filter_b,filter_a=spSignal.bessel(
NPole,
wn,
btype = 'low',
output = 'ba')
if signal.ndim == 1:
sm = numpy.mean(signal)
w=spSignal.lfilter(filter_b, filter_a, signal-sm) # filter the incoming signal
w = w + sm
if reduction > 1:
w = spSignal.resample(w, reduction)
return(w)
if signal.ndim == 2:
sh = numpy.shape(signal)
for i in range(0, numpy.shape(signal)[0]):
sm = numpy.mean(signal[i,:])
w1 = spSignal.lfilter(filter_b, filter_a, signal[i,:]-sm)
w1 = w1 + sm
            if reduction > 1:
w1 = spSignal.resample(w1, reduction)
if i == 0:
w = numpy.empty((sh[0], numpy.shape(w1)[0]))
w[i,:] = w1
return w
if signal.ndim == 3:
sh = numpy.shape(signal)
for i in range(0, numpy.shape(signal)[0]):
for j in range(0, numpy.shape(signal)[1]):
sm = numpy.mean(signal[i,j,:])
w1 = spSignal.lfilter(filter_b, filter_a, signal[i,j,:]-sm)
w1 = w1 + sm
                if reduction > 1:
w1 = spSignal.resample(w1, reduction)
if i == 0 and j == 0:
w = numpy.empty((sh[0], sh[1], numpy.shape(w1)[0]))
w[i,j,:] = w1
return(w)
if signal.ndim > 3:
print "Error: signal dimesions of > 3 are not supported (no filtering applied)"
return signal
# do an eval on a long line (longer than 512 characters)
# assumes input is a dictionary (as a string) that is too long
# parses by breaking the string down and then reconstructing each element
#
def long_Eval(line):
inpunct = False
sp = ''
u={}
i = 0
inpunct = 0
colonFound = False
inquote = False
for c in line:
if c is '{':
continue
if (c is ',' or c is '}') and colonFound and not inpunct and not inquote: # separator is ','
r = eval('{%s}' % sp)
u[r.keys()[0]] = r[r.keys()[0]]
colonFound = False
sp = ''
continue
sp = sp + c
if c is ':':
colonFound = True
continue
if c is '(' or c is '[' :
inpunct += 1
continue
if c is ')' or c is ']':
inpunct -= 1
continue
if c is "'" and inquote:
inquote = False
continue
if c is "'" and not inquote:
            inquote = True
return u
# long_Eval()
#
# routine to flatten an array/list.
#
def flatten(l, ltypes=(list, tuple)):
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
if not len(l):
break
else:
l[i:i+1] = list(l[i])
i += 1
return l
# flatten()
def unique(seq, keepstr=True):
t = type(seq)
if t in (str, unicode):
t = (list, ''.join)[bool(keepstr)]
seen = []
return t(c for c in seq if not (c in seen or seen.append(c)))
######################
# Frequently used analysis routines
######################
def _rollingSum(data, n):
d1 = data.copy()
d1[1:] += d1[:-1] # integrate
d2 = numpy.empty(len(d1) - n + 1, dtype=data.dtype)
d2[0] = d1[n-1] # copy first point
d2[1:] = d1[n:] - d1[:-n] # subtract the rest
return d2
# routine to find all the local maxima
def local_maxima(data, span=10, sign=1):
from scipy.ndimage import minimum_filter
from scipy.ndimage import maximum_filter
data = numpy.asarray(data)
print 'data size: ', data.shape
if sign <= 0: # look for minima
maxfits = minimum_filter(data, size=span, mode="wrap")
else:
maxfits = maximum_filter(data, size=span, mode="wrap")
print 'maxfits shape: ', maxfits.shape
maxima_mask = numpy.where(data == maxfits)
good_indices = numpy.arange(len(data))[maxima_mask]
print 'len good index: ', len(good_indices)
good_fits = data[maxima_mask]
order = good_fits.argsort()
return good_indices[order], good_fits[order]
def clementsBekkers(data, template, threshold=1.0, minpeakdist=15, t=None):
D = data.view(numpy.ndarray)
T = template.view(numpy.ndarray)
N = len(T)
window = numpy.ones(N)
sumT = T.sum()
sumT2 = (T**2).sum()
sumD = _rollingSum(D, N)
sumD2 = _rollingSum(D**2, N)
sumTD = numpy.correlate(D, T, mode='valid')
scale = (sumTD - sumT * sumD /N) / (sumT2 - sumT**2 /N)
offset = (sumD - scale * sumT) /N
SSE = sumD2 + scale**2 * sumT2 + N * offset**2 - 2 * (scale*sumTD + offset*sumD - scale*offset*sumT)
error = numpy.sqrt(SSE / (N-1))
sf = scale/error
# isolate events from the sf signal
a=sf*numpy.where(sf >= threshold, 1, 0)
(evp, eva) = local_maxima(a, span=minpeakdist, sign=1)
# now clean it up
    u = numpy.where(eva > 0.0)
    if t is None: # no time base supplied, so report event start times as sample indices
        t = numpy.arange(len(D))
    t_start = t[evp[u]]
d_start = eva[evp[u]]
return (t_start, d_start) # just return the list of the starts
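# Sketch of Clements-Bekkers template matching on synthetic data; the event shape,
# noise level, threshold and time base are invented for illustration and are not
# defaults of this module.
def _example_clementsBekkers():
    dt = 0.1e-3 # assumed sample interval, s
    tb = numpy.arange(0, 5e-3, dt)
    template = -(1.0 - numpy.exp(-tb/0.2e-3)) * numpy.exp(-tb/1.5e-3) # EPSC-like shape
    data = numpy.random.normal(0.0, 0.05, 10000)
    data[2000:2000+len(template)] += 3.0*template # bury one event in the noise
    t = numpy.arange(len(data))*dt
    return clementsBekkers(data, template, threshold=3.0, t=t)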
def RichardsonSilberberg(data, tau, time = None):
D = data.view(numpy.ndarray)
rn = tau*numpy.diff(D) + D[:-2,:]
    rn = savitzky_golay(rn, kernel = 11, order = 4) # this module defines savitzky_golay (lower case)
    if time is not None:
        vn = rn - tau*savitzky_golay(numpy.diff(D), kernel = 11, order = 4)
return(rn, vn);
else:
return rn
def findspikes(xin, vin, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False, debug=False):
""" findspikes identifies the times of action potential in the trace v, with the
times in t. An action potential is simply timed at the first point that exceeds
the threshold... or is the peak.
4/1/11 - added peak mode
if mode is none or schmitt, we work as in the past.
if mode is peak, we return the time of the peak of the AP instead
7/15/11 - added interpolation flag
if True, the returned time is interpolated, based on a spline fit
if False, the returned time is just taken as the data time.
2012/10/9: Removed masked arrays and forced into ndarray from start
(metaarrays were really slow...)
"""
# if debug:
# # this does not work with pyside...
# import matplotlib
# matplotlib.use('Qt4Agg')
# import pylab
# from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# from matplotlib.figure import Figure
#
# #MP.rcParams['interactive'] = False
st=numpy.array([])
spk = []
if xin is None:
return(st, spk)
xt = xin.view(numpy.ndarray)
v = vin.view(numpy.ndarray)
if t1 is not None and t0 is not None:
it0 = int(t0/dt)
it1 = int(t1/dt)
        xt = xt[it0:it1]
        v = v[it0:it1]
# if debug:
# f = pylab.figure(1)
# print "xt: ", xt
# print "v: ", v
# pylab.plot(numpy.array(xt), v, 'k-')
# pylab.draw()
# pylab.show()
dv = numpy.diff(v, axis=0) # compute slope
dv /= dt
st=numpy.array([])
spk = []
spv = numpy.where(v > thresh)[0].tolist() # find points above threshold
sps = numpy.where(dv > 0.0)[0].tolist() # find points where slope is positive
sp = list(Set.intersection(Set(spv),Set(sps))) # intersection defines putative spikes
sp.sort() # make sure all detected events are in order (sets is unordered)
sp = tuple(sp) # convert to tuple
if sp is ():
return(st, spk) # nothing detected
dx = 1
mingap = int(0.0005/dt) # 0.5 msec between spikes (a little unphysiological...)
# normal operating mode is fixed voltage threshold
# for this we need to just get the FIRST positive crossing,
if mode is 'schmitt':
sthra = list(numpy.where(numpy.diff(sp) > mingap))
sthr = [sp[x] for x in sthra[0]] # bump indices by 1
for k in sthr:
x = xt[k-1:k+1]
y = v[k-1:k+1]
if interpolate:
dx = 0
m = (y[1]-y[0])/dt # local slope
b = y[0]-(x[0]*m)
s0 = (thresh-b)/m
else:
s0 = x[1]
st = numpy.append(st, x[1])
elif mode is 'peak':
pkwidth = 1.0e-3 # in same units as dt - usually msec
kpkw = int(pkwidth/dt)
z = (numpy.array(numpy.where(numpy.diff(spv) > 1)[0])+1).tolist()
z.insert(0, 0) # first element in spv is needed to get starting AP
spk = []
for k in z:
zk = spv[k]
spkp = numpy.argmax(v[zk:zk+kpkw])+zk # find the peak position
x = xt[spkp-1:spkp+2]
y = v[spkp-1:spkp+2]
if interpolate:
try:
# mimic Igor FindPeak routine with B = 1
m1 = (y[1]-y[0])/dt # local slope to left of peak
b1 = y[0]-(x[0]*m1)
m2 = (y[2]-y[1])/dt # local slope to right of peak
b2 = y[1]-(x[1]*m2)
mprime = (m2-m1)/dt # find where slope goes to 0 by getting the line
bprime = m2-((dt/2.0)*mprime)
st = numpy.append(st, -bprime/mprime+x[1])
spk.append(spkp)
except:
continue
else:
st = numpy.append(st, x[1]) # always save the first one
spk.append(spkp)
return(st, spk)
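# Usage sketch for findspikes (illustrative values; t and dt in the same units):
#   t = numpy.arange(0, 100.0, 0.1)
#   st, spk = findspikes(t, v, 0.0, dt=0.1, mode='schmitt', interpolate=False)
# 'st' holds the detected spike times; 'spk' holds peak indices and is only
# populated when mode='peak'.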
# getSpikes returns a dictionary with keys that are record numbers, each with values
# that are the array of spike times in the spike window.
# data is studied from the "axis", and only ONE block should be in the selection.
# thresh sets the spike threshold.
def getSpikes(x, y, axis, tpts, tdel=0, thresh=0, selection = None, refractory=1.0, mode='schmitt', interpolate = False):
if selection is None: # really means whatever is displayed/selected
selected = numpy.arange(0, numpy.shape(y)[0]).astype(int).tolist()
else:
selected = selection
splist = {}
if y.ndim == 3:
for r in selected:
splist[r] = findspikes(x[tpts], y[r, axis, tpts], thresh, dt=refractory, mode=mode, interpolate=interpolate)
else:
splist = findspikes(x[tpts], y[tpts], thresh, dt=refractory, mode=mode, interpolate=interpolate)
return(splist)
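# Usage sketch for getSpikes (hypothetical 3-D block y[record, axis, time]):
#   tpts = range(0, len(t))
#   splist = getSpikes(t, y, 0, tpts, thresh=0.0, refractory=1.0, mode='schmitt')
# splist[r] holds the findspikes() result for record r (the spike times in the window).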
# return a measurement made on a block of traces
# within the window t0-t1, on the data "axis", and according to the selected mode
def measureTrace(x, y, t0 = 0, t1 = 10, thisaxis = 0, mode='mean', selection = None, threshold = 0):
result = numpy.array([])
if selection is None: # whooops
return
else:
selected = selection
if numpy.ndim(y) == 4: # we have multiple block
for i in range(0, len(y)):
d = y[i][selected[i],thisaxis,:] # get data for this block
for j in range(0, numpy.shape(d)[0]):
if isinstance(threshold, int):
thr = threshold
else:
thr = threshold[j]
(m1, m2) = measure(mode, x[i], d[j,:], t0, t1, thresh= thr)
result = numpy.append(result, m1)
else:
d = y[selected,thisaxis,:] # get data for this block
for j in range(0, numpy.shape(d)[0]):
if isinstance(threshold, int):
thr = threshold
else:
thr = threshold[j]
(m1, m2) = measure(mode, x, d[j,:], t0, t1, thresh= thr)
result = numpy.append(result, m1)
return(result)
def measureTrace2(x, y, t0 = 0, t1 = 10, thisaxis = 0, mode='mean', threshold = 0):
"""
Simplified version that just expects a 2-d array for y, nothing fancy
"""
result = numpy.array([])
d = y.T # get data for this block
for j in range(0, numpy.shape(d)[0]):
if isinstance(threshold, int):
thr = threshold
else:
thr = threshold[j]
(m1, m2) = measure(mode, x, d[j][:], t0, t1, thresh= thr)
result = numpy.append(result, m1)
return(result)
def measure(mode, x, y, x0, x1, thresh = 0):
""" return the a measure of y in the window x0 to x1
"""
xt = x.view(numpy.ndarray) # strip Metaarray stuff -much faster!
v = y.view(numpy.ndarray)
xm = ma.masked_outside(xt, x0, x1).T
ym = ma.array(v, mask = ma.getmask(xm))
if mode == 'mean':
r1 = ma.mean(ym)
r2 = ma.std(ym)
if mode == 'max' or mode == 'maximum':
r1 = ma.max(ym)
r2 = xm[ma.argmax(ym)]
if mode == 'min' or mode == 'minimum':
r1 = ma.min(ym)
r2 = xm[ma.argmin(ym)]
if mode == 'median':
r1 = ma.median(ym)
r2 = 0
if mode == 'p2p': # peak to peak
r1 = ma.ptp(ym)
r2 = 0
if mode == 'std': # standard deviation
r1 = ma.std(ym)
r2 = 0
if mode == 'var': # variance
r1 = ma.var(ym)
r2 = 0
if mode == 'cumsum': # cumulative sum
r1 = ma.cumsum(ym) # Note: returns an array
r2 = 0
    if mode == 'anom': # anomalies = difference from average
r1 = ma.anom(ym) # returns an array
r2 = 0
if mode == 'sum':
r1 = ma.sum(ym)
r2 = 0
if mode == 'area' or mode == 'charge':
r1 = ma.sum(ym)/(ma.max(xm)-ma.min(xm))
r2 = 0
if mode == 'latency': # return first point that is > threshold
sm = ma.nonzero(ym > thresh)
r1 = -1 # use this to indicate no event detected
r2 = 0
if ma.count(sm) > 0:
r1 = sm[0][0]
r2 = len(sm[0])
if mode == 'count':
r1 = ma.count(ym)
r2 = 0
if mode == 'maxslope':
        return(0, 0)  # 'maxslope' is short-circuited here; the code below is unreachable
slope = numpy.array([])
win = ma.flatnotmasked_contiguous(ym)
st = int(len(win)/20) # look over small ranges
        for k in win: # move through the slope measurement window
tb = range(k-st, k+st) # get tb array
            # newa = numpy.array(self.dat[i][j, thisaxis, tb])  # disabled: 'self', 'i' and 'j' are undefined in this module-level function and 'newa' was unused
ppars = numpy.polyfit(x[tb], ym[tb], 1) # do a linear fit - smooths the slope measures
slope = numpy.append(slope, ppars[0]) # keep track of max slope
r1 = numpy.amax(slope)
r2 = numpy.argmax(slope)
return(r1, r2)
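# Usage sketch for measure() (window limits x0, x1 in the same units as x):
#   r1, r2 = measure('mean', x, y, 0.0, 50.0)              # mean and std within the window
#   r1, r2 = measure('min', x, y, 0.0, 50.0)               # minimum and the time it occurs
#   r1, r2 = measure('latency', x, y, 0.0, 50.0, thresh=0) # first suprathreshold index (-1 if none), count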
def mask(x, xm, x0, x1):
if numpy.ndim(xm) != 1:
print "utility.mask(): array to used to derive mask must be 1D"
return(numpy.array([]))
xmask = ma.masked_outside(xm, x0, x1)
tmask =ma.getmask(xmask)
if numpy.ndim(x) == 1:
xnew = ma.array(x, mask=tmask)
return(xnew.compressed())
if numpy.ndim(x) == 2:
for i in range(0, numpy.shape(x)[0]):
xnew= ma.array(x[i,:], mask=tmask)
xcmp = ma.compressed(xnew)
if i == 0:
print ma.shape(xcmp)[0]
print numpy.shape(x)[0]
xout = numpy.zeros((numpy.shape(x)[0], ma.shape(xcmp)[0]))
xout[i,:] = xcmp
return(xout)
else:
print "Utility.Mask: dimensions of input arrays are not acceptable"
return(numpy.array([]))
def clipdata(y, xm, x0, x1):
mx = ma.getdata(mask(xm, xm, x0, x1))
my = ma.getdata(mask(y, xm, x0, x1))
return(mx, my)
def count_spikes(spk):
""" mostly protection for an older error in the findspikes routine, but
now it should be ok to just get the first element of the shape """
shspk = numpy.shape(spk)
if len(shspk) == 0:
nspk = 0
elif shspk[0] == 0:
nspk = 0
else:
nspk = shspk[0]
return(nspk)
def analyzeIV(t, V, I, tw, thr):
""" analyze a set of voltage records (IV), with spike threshold
tw is a list of [tdelay, tdur, tssw], where tdelay is the delay to
the start of the step, tdur is the duration of the step, and tssw is
the duration of the steady-state window prior to the end of the
step
thr is the threshold that will be used for spike detection.
Returns:
a dictionary with:
vmin
vss
i for vmin and vss
spike count
ispk
    eventually should also include time constant measures, and adaptation ratio
"""
ntraces = numpy.shape(V)[0]
vss = []
vmin = []
vm = []
ic = []
nspikes = []
ispikes = []
tmin = []
fsl = []
fisi = []
for j in range(0, ntraces):
ts = tw[0]
te = tw[1]
td = tw[2]
ssv = measure('mean', t, V[j,:], te-td, te)
ssi = measure('mean', t, I[j,:], te-td, te)
rvm = measure('mean', t, V[j,:], 0.0, ts-1.0)
minv = measure('min', t, V[j,:], ts, te)
spk = findspikes(t, V[j,:], thr, t0=ts, t1=te)
nspikes.append(count_spikes(spk)) # build spike list
ispikes.append(ssi[0])
if nspikes[-1] >= 1:
fsl.append(spk[0])
else:
fsl.append(None)
if nspikes[-1] >= 2:
fisi.append(spk[1]-spk[0])
else:
fisi.append(None)
vm.append(rvm[0])
if ssi[0] < 0.0: # just for hyperpolarizing pulses...
ic.append(ssi[0])
vss.append(ssv[0]) # get steady state voltage
vmin.append(minv[0]) # and min voltage
tmin.append(minv[1]) # and min time
return({'I': numpy.array(ic), 'Vmin': numpy.array(vmin), 'Vss': numpy.array(vss),
'Vm': numpy.array(vm), 'Tmin': numpy.array(tmin),
'Ispike': numpy.array(ispikes), 'Nspike': numpy.array(nspikes),
'FSL': numpy.array(fsl), 'FISI': numpy.array(fisi)})
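# Usage sketch for analyzeIV (tw = [tdelay, tdur, tssw] as in the docstring; values illustrative):
#   res = analyzeIV(t, V, I, [5.0, 100.0, 20.0], 0.0)
# The returned numpy arrays can be plotted directly, e.g. res['I'] against res['Vss']
# for the steady-state IV relation, or res['Ispike'] against res['Nspike'] for firing rate.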
import os, sys, types, re, fnmatch, itertools
class ScriptError(Exception): pass
def ffind(path, shellglobs=None, namefs=None, relative=True):
"""
Finds files in the directory tree starting at 'path' (filtered by
Unix shell-style wildcards ('shellglobs') and/or the functions in
the 'namefs' sequence).
The parameters are as follows:
- path: starting path of the directory tree to be searched
- shellglobs: an optional sequence of Unix shell-style wildcards
that are to be applied to the file *names* found
- namefs: an optional sequence of functions to be applied to the
file *paths* found
- relative: a boolean flag that determines whether absolute or
relative paths should be returned
    Please note that the shell wildcards work in a cumulative fashion,
i.e. each of them is applied to the full set of file *names* found.
Conversely, all the functions in 'namefs'
* only get to see the output of their respective predecessor
function in the sequence (with the obvious exception of the
first function)
* are applied to the full file *path* (whereas the shell-style
wildcards are only applied to the file *names*)
Returns a sequence of paths for files found.
"""
if not os.access(path, os.R_OK):
raise ScriptError("cannot access path: '%s'" % path)
fileList = [] # result list
try:
for dir, subdirs, files in os.walk(path):
if shellglobs:
matched = []
for pattern in shellglobs:
filterf = lambda s: fnmatch.fnmatchcase(s, pattern)
matched.extend(filter(filterf, files))
fileList.extend(['%s%s%s' % (dir, os.sep, f) for f in matched])
else:
fileList.extend(['%s%s%s' % (dir, os.sep, f) for f in files])
if not relative: fileList = map(os.path.abspath, fileList)
if namefs:
for ff in namefs: fileList = filter(ff, fileList)
except Exception, e: raise ScriptError(str(e))
return(fileList)
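# Usage sketch for ffind (patterns as described in the docstring; paths illustrative):
#   pyfiles = ffind('.', shellglobs=('*.py',))                                   # relative paths
#   bigfiles = ffind('/data', namefs=(lambda p: os.path.getsize(p) > 1e6,), relative=False)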
def seqparse(sequence):
""" parse the list of the format:
12;23/10 etc... like nxtrec in datac
now also parses matlab functions and array formats, using eval
first arg is starting number for output array
second arg is final number
/ indicates the skip arg type
basic: /n means skip n : e.g., 1;10/2 = 1,3,5,7,9
special: /##:r means randomize order (/##rn means use seed n for randomization)
special: /##:l means spacing of elements is logarithmic
special: /##:s means spacing is logarithmic, and order is randomized. (/##sn means use seed n for randomization)
special: /:a## means alternate with a number
multiple sequences are returned in a list... just like single sequences...
3 ways for list to be structured:
    1. standard datac record parses. List is enclosed between single quotes
2. matlab : (array) operator expressions. [0:10:100], for example
3. matlab functions (not enclosed in quotes). Each function generates a new list
note that matlab functions and matrices are treated identically
Updated 9/07/2000, 11/13/2000, 4/7/2004 (arbitrary matlab function argument with '=')
converted to python 3/2/2009
Paul B. Manis, Ph.D.
[email protected]
"""
seq=[]
target=[]
    sequence = sequence.replace(' ', '') # remove all spaces - nice to read, not needed to calculate
sequence = str(sequence) #make sure we have a nice string
    (seq2, sep, remain) = sequence.partition('&') # find and return nested sequences
while seq2 is not '':
try:
(oneseq, onetarget) = recparse(seq2)
seq.append(oneseq)
target.append(onetarget)
except:
pass
        (seq2, sep, remain) = remain.partition('&') # find and return nested sequences
return (seq, target)
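# Usage sketch for seqparse, using the docstring's own notation:
#   seqs, targets = seqparse('1;10/2')         # one sequence: 1, 3, 5, 7, 9 (empty target)
#   seqs, targets = seqparse('amp:0;100/5*n')  # five steps spanning 0..100, target 'amp' (hypothetical name)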
def recparse(cmdstr):
""" function to parse basic word unit of the list - a;b/c or the like
syntax is:
[target:]a;b[/c][*n]
where:
target is a parameter target identification (if present)
the target can be anything - a step, a duration, a level....
        it just needs to be in a form that will be interpreted by the PyStim
sequencer.
a, b and c are numbers
n, if present *n implies a "mode"
such as linear, log, randomized, etc.
"""
recs=[]
target=[]
seed=0
skip = 1.0
(target, sep, rest) = cmdstr.partition(':') # get the target
if rest is '':
rest = target # no : found, so no target designated.
target=''
(sfn, sep, rest1) = rest.partition(';')
(sln, sep, rest2) = rest1.partition('/')
(sskip, sep, mo) = rest2.partition('*') # look for mode
fn = float(sfn)
ln = float(sln)
skip = float(sskip)
ln = ln + 0.01*skip
# print "mo: %s" % (mo)
if mo is '': # linear spacing; skip is size of step
recs=eval('arange(%f,%f,%f)' % (fn, ln, skip))
if mo.find('l') >= 0: # log spacing; skip is length of result
recs=eval('logspace(log10(%f),log10(%f),%f)' % (fn, ln, skip))
if mo.find('t') >= 0: # just repeat the first value
recs = eval('%f*[1]' % (fn))
if mo.find('n') >= 0: # use the number of steps, not the step size
        if skip == 1.0:
sk = (ln - fn)
else:
sk = eval('(%f-%f)/(%f-1.0)' % (ln, fn, skip))
recs=eval('arange(%f,%f,%f)' % (fn, ln, sk))
if mo.find('r') >= 0: # randomize the result
        if recs == []:
recs=eval('arange(%f,%f,%f)' % (fn, ln, skip))
recs = sample(recs, len(recs))
if mo.find('a') >= 0: # alternation - also test for a value after that
(arg, sep, value) = mo.partition('a') # is there anything after the letter?
if value is '':
value = 0.0
else:
value = float(value)
val = eval('%f' % (value))
c = [val]*len(recs)*2 # double the length of the sequence
c[0:len(c):2] = recs # fill the alternate positions with the sequence
recs = c # copy back
return((recs, target))
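# Mode-suffix sketch for recparse, following the docstring:
#   '0;100/5*n'  -> treat /5 as a step count rather than a step size
#   '1;1000/4*l' -> four log-spaced values between 1 and 1000
#   '1;10/1*r'   -> generate the sequence, then randomize its order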
def makeRGB(ncol = 16, minc = 32, maxc = 216):
"""
ncol = 16 # number of color spaces
minc = 32 # limit color range
maxc = 216
"""
subd = int((maxc - minc)/ncol)
numpy.random.seed(1)
RGB = [[]]
for r in range(minc, maxc, subd):
for g in range(minc, maxc, subd):
for b in range(minc, maxc, subd):
RGB.append(numpy.array([r,g,b]))
#print "# of colors: ", len(self.RGB)
rgb_order = numpy.random.permutation(len(RGB)) # randomize the order
RGB = [RGB[x] for x in rgb_order]
return RGB
###############################################################################
#
# main entry
#
# If this file is called directly, then provide tests of some of the routines.
if __name__ == "__main__":
from optparse import OptionParser
import matplotlib.pylab as MP
MP.rcParams['interactive'] = False
parser=OptionParser() # command line options
parser.add_option("-d", action="store_true", dest="dictionary", default=False)
parser.add_option("-s", action="store_true", dest="sinefit", default=False)
parser.add_option("-f", action="store_true", dest="findspikes", default=False)
parser.add_option("-c", action="store_true", dest="cb", default=False)
argsin = sys.argv[1:]
if argsin is not None:
(options, args) = parser.parse_args(argsin)
else:
(options, args) = parser.parse_args()
if options.dictionary:
d="{'CN_Dur': 100.0, 'PP_LP': 16000.0, 'ST_Dur': 50.0, 'Trials': 24.0, 'PP_HP': 8000.0, 'CN_Mode': 0, 'ITI_Var': 5.0, 'PP_GapFlag': False, 'PS_Dur': 50.0, 'ST_Level': 80.0, 'PP_Mode': 2, 'WavePlot': True, 'PP_Dur': 50.0, 'Analysis_LPF': 500.0, 'CN_Level': 70.0, 'NHabTrials': 2.0, 'PP_Notch_F2': 14000.0, 'PP_Notch_F1': 12000.0, 'StimEnable': True, 'PP_OffLevel': 0.0, 'Analysis_HPF': 75.0, 'CN_Var': 10.0, 'Analysis_Start': -100.0, 'ITI': 20.0, 'PP_Level': 90.0, 'Analysis_End': 100.0, 'PP_Freq': 4000.0, 'PP_MultiFreq': 'linspace(2.0,32.0,4.0)'} "
di = long_Eval(d)
print 'The dictionary is: ',
print di
if options.cb: # test clements bekkers
# first generate some events
t = numpy.arange(0, 1000.0, 0.1)
ta = numpy.arange(0, 50.0, 0.1)
events = numpy.zeros(t.shape)
events[[50,100,250,350, 475, 525, 900, 1500, 2800, 5000, 5200, 7000, 7500],] = 1
tau1 = 3
alpha = 1.0 * (ta/tau1) * numpy.exp(1 - ta/tau1)
sig = spSignal.fftconvolve(events, alpha, mode='full')
sig = sig[0:len(t)]+numpy.random.normal(0, 0.25, len(t))
f = MP.figure()
MP.plot(t, sig, 'r-')
MP.plot(t, events, 'k-')
# now call the finding routine, using the exact template (!)
(t_start, d_start) = clementsBekkers(sig, alpha, threshold=0.5, minpeakdist=15)
MP.plot(t_start, d_start, 'bs')
MP.show()
if options.findspikes: # test the findspikes routine
dt = 0.1
t = numpy.arange(0, 100, dt)
v = numpy.zeros_like(t)-60.0
p = range(20, 900, 50)
p1 = range(19,899,50)
p2 = range(21,901,50)
v[p] = 20.0
v[p1] = 15.0
v[p2] = -20.0
sp = findspikes(t, v, 0.0, dt = dt, mode = 'schmitt', interpolate = False)
print 'findSpikes'
print 'sp: ', sp
f = MP.figure(1)
MP.plot(t, v, 'ro-')
si = (numpy.floor(sp/dt))
print 'si: ', si
spk = []
for k in si:
spk.append(numpy.argmax(v[k-1:k+1])+k)
MP.plot(sp, v[spk], 'bs')
MP.ylim((0, 25))
MP.draw()
MP.show()
exit()
print "getSpikes"
y=[]*5
for j in range(0,1):
d = numpy.zeros((5,1,len(v)))
for k in range(0, 5):
p = range(20*k, 500, 50 + int(50.0*(k/2.0)))
vn = v.copy()
vn[p] = 20.0
d[k, 0, :] = numpy.array(vn) # load up the "spike" array
y.append(d)
tpts = range(0, len(t)) # numpy.arange(0, len(t)).astype(int).tolist()
#def findspikes(x, v, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False):
for k in range(0, len(y)):
sp = getSpikes(t, y[k], 0, tpts, tdel=0, thresh=0, selection = None, interpolate = True)
print 'r: %d' % k, 'sp: ', sp
# test the sine fitting routine
if options.sinefit:
from numpy.random import normal
F = 1.0/8.0
phi = 0.2
A = 2.0
t = numpy.arange(0.0, 60.0, 1.0/7.5)
# check over a range of values (is phase correct?)
for phi in numpy.arange(-2.0*numpy.pi, 2.0*numpy.pi, numpy.pi/8.0):
y = A * numpy.sin(2.*numpy.pi*t*F+phi) + normal(0.0, 0.5, len(t))
(a, p) = sinefit(t, y, F)
print "A: %f a: %f phi: %f p: %f" % (A, a, phi, p)
| mit | 8,074,997,900,721,364,000 | 35.975418 | 557 | 0.568705 | false |
ecreall/nova-ideo | novaideo/views/user_management/see_registrations.py | 1 | 2583 | # Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from substanced.util import Batch
from dace.util import getSite
from dace.objectofcollaboration.principal.util import get_current
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView
from novaideo.utilities.util import render_listing_objs
from novaideo.content.processes.user_management.behaviors import (
SeeRegistrations)
from novaideo.content.novaideo_application import (
NovaIdeoApplication)
from novaideo.core import BATCH_DEFAULT_SIZE
from novaideo import _
from novaideo.views.filter import find_entities
from novaideo.content.interface import IPreregistration
CONTENTS_MESSAGES = {
'0': _(u"""No registration found"""),
'1': _(u"""One registration found"""),
'*': _(u"""${number} registrations found""")
}
@view_config(
name='seeregistrations',
context=NovaIdeoApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class SeeRegistrationsView(BasicView):
title = _('Registrations')
name = 'seeregistrations'
behaviors = [SeeRegistrations]
template = 'novaideo:views/novaideo_view_manager/templates/search_result.pt'
viewid = 'seeregistrations'
wrapper_template = 'novaideo:views/templates/simple_wrapper.pt'
css_class = 'panel-transparent'
def update(self):
self.execute(None)
user = get_current()
objects = find_entities(
user=user,
interfaces=[IPreregistration],
sort_on='release_date')
batch = Batch(
objects, self.request,
default_size=BATCH_DEFAULT_SIZE)
batch.target = "#results_registrations"
len_result = batch.seqlen
index = str(len_result)
if len_result > 1:
index = '*'
self.title = _(CONTENTS_MESSAGES[index],
mapping={'number': len_result})
result_body, result = render_listing_objs(
self.request, batch, user)
values = {
'bodies': result_body,
'length': len_result,
'batch': batch,
}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{SeeRegistrations: SeeRegistrationsView})
| agpl-3.0 | -6,852,239,687,660,419,000 | 30.888889 | 80 | 0.663957 | false |
x2Ident/x2Ident | mitmproxy/test/netlib/http/test_response.py | 2 | 4795 | from __future__ import absolute_import, print_function, division
import email
import time
from netlib.http import Headers
from netlib.http import Response
from netlib.http.cookies import CookieAttrs
from netlib.tutils import raises, tresp
from .test_message import _test_passthrough_attr, _test_decoded_attr
class TestResponseData(object):
def test_init(self):
with raises(ValueError):
tresp(headers="foobar")
assert isinstance(tresp(headers=()).headers, Headers)
class TestResponseCore(object):
"""
Tests for builtins and the attributes that are directly proxied from the data structure
"""
def test_repr(self):
response = tresp()
assert repr(response) == "Response(200 OK, unknown content type, 7b)"
response.content = None
assert repr(response) == "Response(200 OK, no content)"
def test_make(self):
r = Response.make()
assert r.status_code == 200
assert r.content == b""
Response.make(content=b"foo")
Response.make(content="foo")
with raises(TypeError):
Response.make(content=42)
r = Response.make(headers=[(b"foo", b"bar")])
assert r.headers["foo"] == "bar"
r = Response.make(headers=({"foo": "baz"}))
assert r.headers["foo"] == "baz"
with raises(TypeError):
Response.make(headers=42)
def test_status_code(self):
_test_passthrough_attr(tresp(), "status_code")
def test_reason(self):
_test_decoded_attr(tresp(), "reason")
class TestResponseUtils(object):
"""
Tests for additional convenience methods.
"""
def test_get_cookies_none(self):
resp = tresp()
resp.headers = Headers()
assert not resp.cookies
def test_get_cookies_empty(self):
resp = tresp()
resp.headers = Headers(set_cookie="")
assert not resp.cookies
def test_get_cookies_simple(self):
resp = tresp()
resp.headers = Headers(set_cookie="cookiename=cookievalue")
result = resp.cookies
assert len(result) == 1
assert "cookiename" in result
assert result["cookiename"] == ("cookievalue", CookieAttrs())
def test_get_cookies_with_parameters(self):
resp = tresp()
cookie = "cookiename=cookievalue;domain=example.com;expires=Wed Oct 21 16:29:41 2015;path=/; HttpOnly"
resp.headers = Headers(set_cookie=cookie)
result = resp.cookies
assert len(result) == 1
assert "cookiename" in result
assert result["cookiename"][0] == "cookievalue"
attrs = result["cookiename"][1]
assert len(attrs) == 4
assert attrs["domain"] == "example.com"
assert attrs["expires"] == "Wed Oct 21 16:29:41 2015"
assert attrs["path"] == "/"
assert attrs["httponly"] is None
def test_get_cookies_no_value(self):
resp = tresp()
resp.headers = Headers(set_cookie="cookiename=; Expires=Thu, 01-Jan-1970 00:00:01 GMT; path=/")
result = resp.cookies
assert len(result) == 1
assert "cookiename" in result
assert result["cookiename"][0] == ""
assert len(result["cookiename"][1]) == 2
def test_get_cookies_twocookies(self):
resp = tresp()
resp.headers = Headers([
[b"Set-Cookie", b"cookiename=cookievalue"],
[b"Set-Cookie", b"othercookie=othervalue"]
])
result = resp.cookies
assert len(result) == 2
assert "cookiename" in result
assert result["cookiename"] == ("cookievalue", CookieAttrs())
assert "othercookie" in result
assert result["othercookie"] == ("othervalue", CookieAttrs())
def test_set_cookies(self):
resp = tresp()
resp.cookies["foo"] = ("bar", {})
assert len(resp.cookies) == 1
assert resp.cookies["foo"] == ("bar", CookieAttrs())
def test_refresh(self):
r = tresp()
n = time.time()
r.headers["date"] = email.utils.formatdate(n)
pre = r.headers["date"]
r.refresh(n)
assert pre == r.headers["date"]
r.refresh(n + 60)
d = email.utils.parsedate_tz(r.headers["date"])
d = email.utils.mktime_tz(d)
# Weird that this is not exact...
assert abs(60 - (d - n)) <= 1
cookie = "MOO=BAR; Expires=Tue, 08-Mar-2011 00:20:38 GMT; Path=foo.com; Secure"
r.headers["set-cookie"] = cookie
r.refresh()
# Cookie refreshing is tested in test_cookies, we just make sure that it's triggered here.
assert cookie != r.headers["set-cookie"]
| gpl-3.0 | -1,903,286,615,273,040,000 | 31.767606 | 111 | 0.582273 | false |
hguemar/cinder | cinder/tests/api/contrib/test_volume_type_access.py | 8 | 13569 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from cinder.api.contrib import volume_type_access as type_access
from cinder.api.v2 import types as types_api_v2
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.api import fakes
def generate_type(type_id, is_public):
return {
'id': type_id,
'name': u'test',
'deleted': False,
'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
'updated_at': None,
'deleted_at': None,
'is_public': bool(is_public)
}
VOLUME_TYPES = {
'0': generate_type('0', True),
'1': generate_type('1', True),
'2': generate_type('2', False),
'3': generate_type('3', False)}
PROJ1_UUID = '11111111-1111-1111-1111-111111111111'
PROJ2_UUID = '22222222-2222-2222-2222-222222222222'
PROJ3_UUID = '33333333-3333-3333-3333-333333333333'
ACCESS_LIST = [{'volume_type_id': '2', 'project_id': PROJ2_UUID},
{'volume_type_id': '2', 'project_id': PROJ3_UUID},
{'volume_type_id': '3', 'project_id': PROJ3_UUID}]
def fake_volume_type_get(context, id, inactive=False, expected_fields=None):
vol = VOLUME_TYPES[id]
if expected_fields and 'projects' in expected_fields:
vol['projects'] = [a['project_id']
for a in ACCESS_LIST if a['volume_type_id'] == id]
return vol
def _has_type_access(type_id, project_id):
for access in ACCESS_LIST:
if access['volume_type_id'] == type_id and \
access['project_id'] == project_id:
return True
return False
def fake_volume_type_get_all(context, inactive=False, filters=None):
if filters is None or filters['is_public'] is None:
return VOLUME_TYPES
res = {}
for k, v in VOLUME_TYPES.iteritems():
if filters['is_public'] and _has_type_access(k, context.project_id):
res.update({k: v})
continue
if v['is_public'] == filters['is_public']:
res.update({k: v})
return res
class FakeResponse(object):
obj = {'volume_type': {'id': '0'},
'volume_types': [
{'id': '0'},
{'id': '2'}]}
def attach(self, **kwargs):
pass
class FakeRequest(object):
environ = {"cinder.context": context.get_admin_context()}
def cached_resource_by_id(self, resource_id, name=None):
return VOLUME_TYPES[resource_id]
class VolumeTypeAccessTest(test.TestCase):
def setUp(self):
super(VolumeTypeAccessTest, self).setUp()
self.type_controller_v2 = types_api_v2.VolumeTypesController()
self.type_access_controller = type_access.VolumeTypeAccessController()
self.type_action_controller = type_access.VolumeTypeActionController()
self.req = FakeRequest()
self.context = self.req.environ['cinder.context']
self.stubs.Set(db, 'volume_type_get',
fake_volume_type_get)
self.stubs.Set(db, 'volume_type_get_all',
fake_volume_type_get_all)
def assertVolumeTypeListEqual(self, expected, observed):
self.assertEqual(len(expected), len(observed))
expected = sorted(expected, key=lambda item: item['id'])
observed = sorted(observed, key=lambda item: item['id'])
for d1, d2 in zip(expected, observed):
self.assertEqual(d1['id'], d2['id'])
def test_list_type_access_public(self):
"""Querying os-volume-type-access on public type should return 404."""
req = fakes.HTTPRequest.blank('/v2/fake/types/os-volume-type-access',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound,
self.type_access_controller.index,
req, '1')
def test_list_type_access_private(self):
expected = {'volume_type_access': [
{'volume_type_id': '2', 'project_id': PROJ2_UUID},
{'volume_type_id': '2', 'project_id': PROJ3_UUID}]}
result = self.type_access_controller.index(self.req, '2')
self.assertEqual(expected, result)
def test_list_with_no_context(self):
req = fakes.HTTPRequest.blank('/v2/flavors/fake/flavors')
def fake_authorize(context, target=None, action=None):
raise exception.PolicyNotAuthorized(action='index')
self.stubs.Set(type_access, 'authorize', fake_authorize)
self.assertRaises(exception.PolicyNotAuthorized,
self.type_access_controller.index,
req, 'fake')
def test_list_type_with_admin_default_proj1(self):
expected = {'volume_types': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types',
use_admin_context=True)
req.environ['cinder.context'].project_id = PROJ1_UUID
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_default_proj2(self):
expected = {'volume_types': [{'id': '0'}, {'id': '1'}, {'id': '2'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types',
use_admin_context=True)
req.environ['cinder.context'].project_id = PROJ2_UUID
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_true(self):
expected = {'volume_types': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=true',
use_admin_context=True)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_false(self):
expected = {'volume_types': [{'id': '2'}, {'id': '3'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=false',
use_admin_context=True)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_false_proj2(self):
expected = {'volume_types': [{'id': '2'}, {'id': '3'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=false',
use_admin_context=True)
req.environ['cinder.context'].project_id = PROJ2_UUID
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_none(self):
expected = {'volume_types': [{'id': '0'}, {'id': '1'}, {'id': '2'},
{'id': '3'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=none',
use_admin_context=True)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_default(self):
expected = {'volume_types': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types',
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_ispublic_true(self):
expected = {'volume_types': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=true',
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_ispublic_false(self):
expected = {'volume_types': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=false',
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_ispublic_none(self):
expected = {'volume_types': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=none',
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_show(self):
resp = FakeResponse()
self.type_action_controller.show(self.req, resp, '0')
self.assertEqual({'id': '0', 'os-volume-type-access:is_public': True},
resp.obj['volume_type'])
def test_detail(self):
resp = FakeResponse()
self.type_action_controller.detail(self.req, resp)
self.assertEqual(
[{'id': '0', 'os-volume-type-access:is_public': True},
{'id': '2', 'os-volume-type-access:is_public': False}],
resp.obj['volume_types'])
def test_create(self):
resp = FakeResponse()
self.type_action_controller.create(self.req, {}, resp)
self.assertEqual({'id': '0', 'os-volume-type-access:is_public': True},
resp.obj['volume_type'])
def test_add_project_access(self):
def stub_add_volume_type_access(context, type_id, project_id):
self.assertEqual('3', type_id, "type_id")
self.assertEqual(PROJ2_UUID, project_id, "project_id")
self.stubs.Set(db, 'volume_type_access_add',
stub_add_volume_type_access)
body = {'addProjectAccess': {'project': PROJ2_UUID}}
req = fakes.HTTPRequest.blank('/v2/fake/types/2/action',
use_admin_context=True)
result = self.type_action_controller._addProjectAccess(req, '3', body)
self.assertEqual(202, result.status_code)
def test_add_project_access_with_no_admin_user(self):
req = fakes.HTTPRequest.blank('/v2/fake/types/2/action',
use_admin_context=False)
body = {'addProjectAccess': {'project': PROJ2_UUID}}
self.assertRaises(exception.PolicyNotAuthorized,
self.type_action_controller._addProjectAccess,
req, '2', body)
def test_add_project_access_with_already_added_access(self):
def stub_add_volume_type_access(context, type_id, project_id):
raise exception.VolumeTypeAccessExists(volume_type_id=type_id,
project_id=project_id)
self.stubs.Set(db, 'volume_type_access_add',
stub_add_volume_type_access)
body = {'addProjectAccess': {'project': PROJ2_UUID}}
req = fakes.HTTPRequest.blank('/v2/fake/types/2/action',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPConflict,
self.type_action_controller._addProjectAccess,
req, '3', body)
def test_remove_project_access_with_bad_access(self):
def stub_remove_volume_type_access(context, type_id, project_id):
raise exception.VolumeTypeAccessNotFound(volume_type_id=type_id,
project_id=project_id)
self.stubs.Set(db, 'volume_type_access_remove',
stub_remove_volume_type_access)
body = {'removeProjectAccess': {'project': PROJ2_UUID}}
req = fakes.HTTPRequest.blank('/v2/fake/types/2/action',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound,
self.type_action_controller._removeProjectAccess,
req, '3', body)
def test_remove_project_access_with_no_admin_user(self):
req = fakes.HTTPRequest.blank('/v2/fake/types/2/action',
use_admin_context=False)
body = {'removeProjectAccess': {'project': PROJ2_UUID}}
self.assertRaises(exception.PolicyNotAuthorized,
self.type_action_controller._removeProjectAccess,
req, '2', body)
| apache-2.0 | -7,550,927,305,561,521,000 | 43.782178 | 78 | 0.570713 | false |
cgstudiomap/cgstudiomap | main/eggs/python_stdnum-1.2-py2.7.egg/stdnum/at/businessid.py | 3 | 2268 | # businessid.py - functions for handling Austrian company register numbers
#
# Copyright (C) 2015 Holvi Payment Services Oy
# Copyright (C) 2012, 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Austrian Company Register Numbers.
The Austrian company register number consists of digits followed by a single
letter, e.g. "122119m". Sometimes it is presented with preceding "FN", e.g.
"FN 122119m".
>>> validate('FN 122119m')
'122119m'
>>> validate('122119m')
'122119m'
>>> validate('m123123')
Traceback (most recent call last):
...
InvalidFormat: ...
>>> validate('abc')
Traceback (most recent call last):
...
InvalidFormat: ...
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace.
Preceding "FN" is also removed."""
number = clean(number, ' -./').strip()
if number.upper().startswith('FN'):
number = number[2:]
return number
def validate(number):
"""Checks to see if the number provided is a valid company register
number. This only checks the formatting."""
number = compact(number)
if not number[-1:].isalpha() or not number[:-1].isdigit():
raise InvalidFormat()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid company register
number. This only checks the formatting."""
try:
return bool(validate(number))
except ValidationError:
return False
| agpl-3.0 | -8,678,424,513,366,203,000 | 31.4 | 75 | 0.71649 | false |
bafana5/wKRApp | wKRApp/views.py | 1 | 3366 | from wKRApp import app
from flask import Flask, render_template, url_for, request, redirect, session, flash, g
from flask.ext.sqlalchemy import SQLAlchemy
from ipdb import set_trace
from functools import wraps
# import sqlite3
# config
import os
# app.config.from_object(os.environ['APP_SETTINGS'])
# app.config.from_object('config.DevelopmentConfig')
app.secret_key = "]\x9f\x85nj\xe3\xb4;\xea\xe3\xfb\xb2\xe1\x14I\xff\x16\x9f\xa6'\xa0,\x11\x92"
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/sample.db'
# create the sqlalchemy object
db = SQLAlchemy(app)
#
from models import Users
# login required decorator
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('index'))
return wrap
@app.route('/', methods=['GET', 'POST'])
def index():
error = None
if request.method == 'POST':
if request.form['username'] == 'Admin' or request.form['password'] == 'admin':
session['logged_in'] = True
flash('You were just logged in')
return redirect(url_for('admin'))
elif request.form['username'] == '' or request.form['password'] == '':
error = 'Invalid credentials. Please try again.'
else:
session['logged_in'] = True
flash('You were just logged in')
return redirect(url_for('team'))
return render_template('signin.html', error=error)
@app.route('/team')
@login_required
def team():
return render_template('team.html')
@app.route('/logout')
@login_required
def logout():
session.pop('logged_in', None)
flash('You were just logged out')
return redirect(url_for('index'))
@app.route('/kra', methods=['GET', 'POST'])
@login_required
def kra():
if request.method == 'POST':
return render_template('kra.html')
return render_template('kra.html')
@app.route('/admin')
@login_required
def admin():
users = db.session.query(Users).all()
# g.db = connect_db()
# db_object = g.db
# cur = db_object.execute('SELECT * from posts')
# posts = [dict(title=row[0], description=row[1]) for row in cur.fetchall()]
# g.db.close()
return render_template('admin.html', users=users)
@app.route('/users')
@login_required
def users():
return render_template('users.html')
@app.route('/user_roles')
@login_required
def user_roles():
return render_template('user_roles.html')
@app.route('/workflow')
@login_required
def workflow():
return render_template('workflow.html')
@app.route('/career_ladders')
@login_required
def career_ladders():
return render_template('career_ladders.html')
@app.route('/new_role', methods=['GET', 'POST'])
@login_required
def new_role():
if request.method == 'POST':
return render_template('new_role.html')
return render_template('new_role.html')
@app.route('/new_user', methods=['GET', 'POST'])
@login_required
def new_user():
if request.method == 'POST':
return render_template('new_user.html')
return render_template('new_user.html')
# def connect_db():
# BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# db_path = os.path.join(BASE_DIR + '\db', app.database)
# return sqlite3.connect(db_path)
| mit | -4,646,386,358,432,778,000 | 24.892308 | 94 | 0.641117 | false |
ua-snap/downscale | snap_scripts/baseline_climatologies/cru_cl20_climatology_preprocess_2km_OLD.py | 1 | 6667 | # # # PREPROCESS CRU CL20 1961-1990 CLIMATOLOGY DATA (http://www.cru.uea.ac.uk/cru/data/hrg/tmc)
# # author: Michael Lindgren ([email protected]) -- Sept. 2016
# # # #
import numpy as np
def xyz_to_grid( x, y, z, xi, yi, method='linear', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
matplotlib.mlab.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using `numpy.meshgrid` / `numpy.mgrid`
order (xi, yi)
method = 'linear' # hardwired currently due to multiprocessing bug with scipy griddata
'''
import numpy as np
from matplotlib.mlab import griddata
return griddata( x, y, z, xi, yi, interp=method ).astype( output_dtype )
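# Usage sketch for xyz_to_grid (points and grid must share a coordinate system;
# variable names are illustrative):
#   xi, yi = np.meshgrid(np.linspace(160, 300, 840), np.linspace(0, 90, 540))
#   grid = xyz_to_grid(lons, lats, values, xi, yi)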
def transform_from_latlon( lat, lon ):
''' simple way to make an affine transform from lats and lons coords '''
from affine import Affine
lat = np.asarray( lat )
lon = np.asarray( lon )
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
def regrid( x ):
return xyz_to_grid( **x )
if __name__ == '__main__':
import os, rasterio
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from pathos.mp_map import mp_map
# import argparse
# # parse the commandline arguments
# parser = argparse.ArgumentParser( description='preprocess CRU CL2.0 data to the AKCAN extent required by SNAP' )
# parser.add_argument( "-p", "--base_path", action='store', dest='base_path', type=str, help="path to parent directory with a subdirector(ies)y storing the data" )
# parser.add_argument( "-cru", "--cru_filename", action='store', dest='cru_filename', type=str, help="string path to the .tar.gz file location, downloaded from the CRU site" )
# parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="string abbreviated name of the variable being processed." )
# parser.add_argument( "-tr", "--template_raster_fn", action='store', dest='template_raster_fn', type=str, help="string path to a template raster dataset to match the CRU CL2.0 to." )
# # parse and unpack the args
# args = parser.parse_args()
# base_path = args.base_path
# cru_filename = args.cru_filename
# variable = args.variable
# template_raster_fn = args.template_raster_fn
# # # FOR TESTING # # # #
base_path = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/cru_cl20_test_remove'
cru_filename = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS20/grid_10min_pre.dat.gz'
variable = 'pre'
template_raster_fn = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/templates/akcan_2km/tas_mean_C_AR5_CCSM4_rcp26_01_2006.tif'
# # # # # # # # # # # # #
# build an output path to store the data generated with this script
cru_path = os.path.join( base_path, 'climatologies','cru_cl20','2km', variable )
if not os.path.exists( cru_path ):
os.makedirs( cru_path )
months = [ '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12' ]
colnames = [ 'lat', 'lon' ] + months
months_lookup = { count+1:month for count, month in enumerate( months ) }
cru_df = pd.read_csv( cru_filename, delim_whitespace=True, compression='gzip', header=None, names=colnames )
# manually flip to PCLL for interpolation
cru_df['lon'][ cru_df['lon'] < 0 ] = cru_df['lon'][ cru_df['lon'] < 0 ] + 360
cru_df['geometry'] = cru_df.apply( lambda x: Point( x.lon, x.lat), axis=1 )
cru_shp = gpd.GeoDataFrame( cru_df, geometry='geometry', crs={'init':'EPSG:4326'} )
# set bounds to interpolate over
# xmin, ymin, xmax, ymax = (0,-90, 360, 90)
xmin, ymin, xmax, ymax = (160, 0, 300, 90)
# multiply arcminutes in degree by 360(180) for 10' resolution
rows = 60 * ( ymax - ymin )
cols = 60 * ( xmax - xmin )
# build the output grid
x = np.linspace( xmin, xmax, cols )
y = np.linspace( ymin, ymax, rows )
xi, yi = np.meshgrid( x, y )
# yi = np.flipud(yi) # I think this is needed...
args_list = [ {'x':np.array(cru_df['lon']),'y':np.array(cru_df['lat']),'z':np.array(cru_df[month]),'xi':xi,'yi':yi} for month in months ]
# run interpolation in parallel
interped_grids = mp_map( regrid, args_list, nproc=12 )
# stack and give a proper nodata value
arr = np.array([ i.data for i in interped_grids ])
arr[ np.isnan(arr) ] = -9999
pcll_affine = transform_from_latlon( y, x )
meta = {'transform': pcll_affine,
'count': 1,
'crs': {'init':'epsg:4326'},
'driver': u'GTiff',
'dtype': 'float32',
'height': rows,
'nodata': -9999,
'width': cols,
'compress':'lzw'}
# set up a dir to toss the intermediate files into -- since we are using gdalwarp...
intermediate_path = os.path.join( cru_path, 'intermediates' )
if not os.path.exists( intermediate_path ):
os.makedirs( intermediate_path )
out_paths = []
for i in range( arr.shape[0] ):
output_filename = os.path.join( intermediate_path, '{}_cru_cl20_akcan_{}_1961-1990_PCLL.tif'.format( variable, months_lookup[ i+1 ] ) )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( arr[ i, ... ], 1 )
out_paths = out_paths + [ output_filename ]
# # template dataset
template_raster = rasterio.open( template_raster_fn )
resolution = template_raster.res
template_meta = template_raster.meta
template_meta.update( compress='lzw' )
a,b,c,d = template_raster.bounds
# FLIP IT BACK TO GREENWICH-CENTERED using gdalwarp... then to AKCAN 2km...
for fn in out_paths:
os.system( 'gdalwarp -co COMPRESS=LZW -overwrite -dstnodata -9999 -multi -t_srs EPSG:4326 -te -180 0 180 90 {} {}'.format( fn, fn.replace( 'PCLL', 'LL' ) ) )
final_fn = fn.replace( '_PCLL', '' )
final_fn = os.path.join( cru_path, os.path.basename(final_fn) )
if os.path.exists( final_fn ):
os.remove( final_fn )
mask = template_raster.read_masks( 1 ).astype( np.float32 )
with rasterio.open( final_fn, 'w', **template_meta ) as out:
out.write( np.empty_like( mask ), 1 )
os.system( 'gdalwarp -co COMPRESS=LZW -wo SOURCE_EXTRA=100 -multi -srcnodata -9999 -dstnodata -9999 {} {}'.format( fn.replace( 'PCLL', 'LL' ), final_fn ) )
# os.system( 'gdalwarp -overwrite -t_srs EPSG:3338 -co COMPRESS=LZW -wo SOURCE_EXTRA=100 -multi -srcnodata {} -dstnodata {} {} {}'.format( -9999, -9999, fn.replace( 'PCLL', 'LL' ), final_fn ) )
with rasterio.open( final_fn, 'r+' ) as rst:
arr = rst.read( 1 )
arr[ mask == 0 ] = -9999
rst.write( arr, 1 )
print( 'completed run of {}'.format( variable ) )
| mit | -7,133,843,686,221,857,000 | 42.292208 | 195 | 0.666717 | false |
jazkarta/edx-platform-for-isc | lms/djangoapps/instructor/tests/test_enrollment.py | 6 | 18109 | # -*- coding: utf-8 -*-
"""
Unit tests for instructor.enrollment methods.
"""
import json
import mock
from abc import ABCMeta
from courseware.models import StudentModule
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.translation import get_language
from django.utils.translation import override as override_language
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_MOCK_MODULESTORE
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from instructor.enrollment import (
EmailEnrollmentState,
enroll_email,
get_email_params,
reset_student_attempts,
send_beta_role_email,
unenroll_email,
render_message_to_string,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from submissions import api as sub_api
from student.models import anonymous_id_for_user
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class TestSettableEnrollmentState(TestCase):
""" Test the basis class for enrollment tests. """
def setUp(self):
self.course_key = SlashSeparatedCourseKey('Robot', 'fAKE', 'C-%-se-%-ID')
def test_mes_create(self):
"""
Test SettableEnrollmentState creation of user.
"""
mes = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
# enrollment objects
eobjs = mes.create_user(self.course_key)
ees = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(mes, ees)
class TestEnrollmentChangeBase(TestCase):
"""
Test instructor enrollment administration against database effects.
Test methods in derived classes follow a strict format.
`action` is a function which is run
the test will pass if `action` mutates state from `before_ideal` to `after_ideal`
"""
__metaclass__ = ABCMeta
def setUp(self):
self.course_key = SlashSeparatedCourseKey('Robot', 'fAKE', 'C-%-se-%-ID')
def _run_state_change_test(self, before_ideal, after_ideal, action):
"""
Runs a state change test.
`before_ideal` and `after_ideal` are SettableEnrollmentState's
`action` is a function which will be run in the middle.
`action` should transition the world from before_ideal to after_ideal
`action` will be supplied the following arguments (None-able arguments)
`email` is an email string
"""
# initialize & check before
print "checking initialization..."
eobjs = before_ideal.create_user(self.course_key)
before = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(before, before_ideal)
# do action
print "running action..."
action(eobjs.email)
# check after
print "checking effects..."
after = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(after, after_ideal)
class TestInstructorEnrollDB(TestEnrollmentChangeBase):
""" Test instructor.enrollment.enroll_email """
def test_enroll(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_again(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_again(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_autoenroll(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True,
)
action = lambda email: enroll_email(self.course_key, email, auto_enroll=True)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_change_autoenroll(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email, auto_enroll=False)
return self._run_state_change_test(before_ideal, after_ideal, action)
class TestInstructorUnenrollDB(TestEnrollmentChangeBase):
""" Test instructor.enrollment.unenroll_email """
def test_unenroll(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_notenrolled(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_disallow(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_norecord(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class TestInstructorEnrollmentStudentModule(TestCase):
""" Test student module manipulations. """
def setUp(self):
self.course_key = SlashSeparatedCourseKey('fake', 'course', 'id')
def test_reset_student_attempts(self):
user = UserFactory()
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
StudentModule.objects.create(student=user, course_id=self.course_key, module_state_key=msk, state=original_state)
# lambda to reload the module state from the database
module = lambda: StudentModule.objects.get(student=user, course_id=self.course_key, module_state_key=msk)
self.assertEqual(json.loads(module().state)['attempts'], 32)
reset_student_attempts(self.course_key, user, msk)
self.assertEqual(json.loads(module().state)['attempts'], 0)
def test_delete_student_attempts(self):
user = UserFactory()
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
StudentModule.objects.create(student=user, course_id=self.course_key, module_state_key=msk, state=original_state)
self.assertEqual(StudentModule.objects.filter(student=user, course_id=self.course_key, module_state_key=msk).count(), 1)
reset_student_attempts(self.course_key, user, msk, delete_module=True)
self.assertEqual(StudentModule.objects.filter(student=user, course_id=self.course_key, module_state_key=msk).count(), 0)
def test_delete_submission_scores(self):
user = UserFactory()
problem_location = self.course_key.make_usage_key('dummy', 'module')
# Create a student module for the user
StudentModule.objects.create(
student=user,
course_id=self.course_key,
module_state_key=problem_location,
state=json.dumps({})
)
# Create a submission and score for the student using the submissions API
student_item = {
'student_id': anonymous_id_for_user(user, self.course_key),
'course_id': self.course_key.to_deprecated_string(),
'item_id': problem_location.to_deprecated_string(),
'item_type': 'openassessment'
}
submission = sub_api.create_submission(student_item, 'test answer')
sub_api.set_score(submission['uuid'], 1, 2)
# Delete student state using the instructor dash
reset_student_attempts(
self.course_key, user, problem_location,
delete_module=True
)
# Verify that the student's scores have been reset in the submissions API
score = sub_api.get_score(student_item)
self.assertIs(score, None)
class EnrollmentObjects(object):
"""
Container for enrollment objects.
`email` - student email
`user` - student User object
`cenr` - CourseEnrollment object
`cea` - CourseEnrollmentAllowed object
Any of the objects except email can be None.
"""
def __init__(self, email, user, cenr, cea):
self.email = email
self.user = user
self.cenr = cenr
self.cea = cea
class SettableEnrollmentState(EmailEnrollmentState):
"""
Settable enrollment state.
Used for testing state changes.
SettableEnrollmentState can be constructed and then
a call to create_user will make objects which
correspond to the state represented in the SettableEnrollmentState.
"""
def __init__(self, user=False, enrollment=False, allowed=False, auto_enroll=False): # pylint: disable=super-init-not-called
self.user = user
self.enrollment = enrollment
self.allowed = allowed
self.auto_enroll = auto_enroll
def __eq__(self, other):
return self.to_dict() == other.to_dict()
def __neq__(self, other):
return not self == other
def create_user(self, course_id=None):
"""
Utility method to possibly create and possibly enroll a user.
Creates a state matching the SettableEnrollmentState properties.
Returns a tuple of (
email,
User, (optionally None)
CourseEnrollment, (optionally None)
CourseEnrollmentAllowed, (optionally None)
)
"""
# if self.user=False, then this will just be used to generate an email.
email = "[email protected]"
if self.user:
user = UserFactory()
email = user.email
if self.enrollment:
cenr = CourseEnrollment.enroll(user, course_id)
return EnrollmentObjects(email, user, cenr, None)
else:
return EnrollmentObjects(email, user, None, None)
elif self.allowed:
cea = CourseEnrollmentAllowed.objects.create(
email=email,
course_id=course_id,
auto_enroll=self.auto_enroll,
)
return EnrollmentObjects(email, None, None, cea)
else:
return EnrollmentObjects(email, None, None, None)
class TestSendBetaRoleEmail(TestCase):
"""
Test edge cases for `send_beta_role_email`
"""
def setUp(self):
self.user = UserFactory.create()
self.email_params = {'course': 'Robot Super Course'}
def test_bad_action(self):
bad_action = 'beta_tester'
error_msg = "Unexpected action received '{}' - expected 'add' or 'remove'".format(bad_action)
with self.assertRaisesRegexp(ValueError, error_msg):
send_beta_role_email(bad_action, self.user, self.email_params)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class TestGetEmailParams(ModuleStoreTestCase):
"""
Test what URLs the function get_email_params returns under different
production-like conditions.
"""
def setUp(self):
self.course = CourseFactory.create()
# Explicitly construct what we expect the course URLs to be
site = settings.SITE_NAME
self.course_url = u'https://{}/courses/{}/'.format(
site,
self.course.id.to_deprecated_string()
)
self.course_about_url = self.course_url + 'about'
self.registration_url = u'https://{}/register'.format(
site,
)
def test_normal_params(self):
# For a normal site, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
result = get_email_params(self.course, False)
self.assertEqual(result['auto_enroll'], False)
self.assertEqual(result['course_about_url'], self.course_about_url)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
def test_marketing_params(self):
# For a site with a marketing front end, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
result = get_email_params(self.course, True)
self.assertEqual(result['auto_enroll'], True)
# We should *not* get a course about url (LMS doesn't know what the marketing site URLs are)
self.assertEqual(result['course_about_url'], None)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
class TestRenderMessageToString(TestCase):
"""
Test that email templates can be rendered in a language chosen manually.
"""
def setUp(self):
self.subject_template = 'emails/enroll_email_allowedsubject.txt'
self.message_template = 'emails/enroll_email_allowedmessage.txt'
self.course = CourseFactory.create()
def get_email_params(self):
"""
Returns a dictionary of parameters used to render an email.
"""
email_params = get_email_params(self.course, True)
email_params["email_address"] = "[email protected]"
email_params["full_name"] = "Jean Reno"
return email_params
def get_subject_and_message(self, language):
"""
Returns the subject and message rendered in the specified language.
"""
return render_message_to_string(
self.subject_template,
self.message_template,
self.get_email_params(),
language=language
)
def test_subject_and_message_translation(self):
subject, message = self.get_subject_and_message('fr')
language_after_rendering = get_language()
you_have_been_invited_in_french = u"Vous avez été invité"
self.assertIn(you_have_been_invited_in_french, subject)
self.assertIn(you_have_been_invited_in_french, message)
self.assertEqual(settings.LANGUAGE_CODE, language_after_rendering)
def test_platform_language_is_used_for_logged_in_user(self):
with override_language('zh_CN'): # simulate a user login
subject, message = self.get_subject_and_message(None)
self.assertIn("You have been", subject)
self.assertIn("You have been", message)
| agpl-3.0 | -4,978,220,356,829,922,000 | 33.487619 | 128 | 0.62946 | false |
picolix/cf-php-bluemix | lib/httpd/extension.py | 14 | 1792 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def preprocess_commands(ctx):
return ((
'$HOME/.bp/bin/rewrite',
'"$HOME/httpd/conf"'),)
def service_commands(ctx):
return {
'httpd': (
'$HOME/httpd/bin/apachectl',
'-f "$HOME/httpd/conf/httpd.conf"',
'-k start',
'-DFOREGROUND')
}
def service_environment(ctx):
return {
'HTTPD_SERVER_ADMIN': ctx['ADMIN_EMAIL']
}
def compile(install):
print 'Installing HTTPD'
install.builder._ctx['PHP_FPM_LISTEN'] = '127.0.0.1:9000'
(install
.package('HTTPD')
.config()
.from_application('.bp-config/httpd')
.or_from_build_pack('defaults/config/httpd/{HTTPD_VERSION}')
.to('httpd/conf')
.rewrite()
.done()
.modules('HTTPD')
.filter_files_by_extension('.conf')
.find_modules_with_regex('^LoadModule .* modules/(.*).so$')
.from_application('httpd/conf')
.done())
return 0
| apache-2.0 | -8,392,346,827,973,793,000 | 31.581818 | 74 | 0.628906 | false |
josanvel/BazarPapeleriaLulita | CodigoBazarLulita/IngresarProducto.py | 1 | 4034 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ingresarproducto.ui'
#
# Created: Sun Mar 15 12:31:18 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_IngresarProducto(object):
def setupUi(self, IngresarProducto):
IngresarProducto.setObjectName(_fromUtf8("IngresarProducto"))
IngresarProducto.resize(500, 140)
IngresarProducto.setMaximumSize(QtCore.QSize(500, 140))
self.tblVwProductoStock = QtGui.QTableView(IngresarProducto)
self.tblVwProductoStock.setGeometry(QtCore.QRect(20, 10, 461, 81))
self.tblVwProductoStock.setMouseTracking(True)
self.tblVwProductoStock.setObjectName(_fromUtf8("tblVwProductoStock"))
self.tblVwProductoStock.horizontalHeader().setCascadingSectionResizes(True)
self.tblVwProductoStock.horizontalHeader().setSortIndicatorShown(True)
self.tblVwProductoStock.horizontalHeader().setStretchLastSection(True)
self.tblVwProductoStock.verticalHeader().setVisible(False)
self.btnAceptar = QtGui.QPushButton(IngresarProducto)
self.btnAceptar.setGeometry(QtCore.QRect(319, 100, 160, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Comic Sans MS"))
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.btnAceptar.setFont(font)
self.btnAceptar.setStyleSheet(_fromUtf8("Color:rgb(1, 137, 125);"))
self.btnAceptar.setObjectName(_fromUtf8("btnAceptar"))
self.btnCancelar = QtGui.QPushButton(IngresarProducto)
self.btnCancelar.setGeometry(QtCore.QRect(179, 100, 131, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Comic Sans MS"))
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.btnCancelar.setFont(font)
self.btnCancelar.setStyleSheet(_fromUtf8("Color:rgb(223, 15, 90);"))
self.btnCancelar.setObjectName(_fromUtf8("btnCancelar"))
self.txtCantidad = QtGui.QLineEdit(IngresarProducto)
self.txtCantidad.setGeometry(QtCore.QRect(20, 100, 121, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
self.txtCantidad.setFont(font)
self.txtCantidad.setText(_fromUtf8(""))
self.txtCantidad.setObjectName(_fromUtf8("txtCantidad"))
self.retranslateUi(IngresarProducto)
QtCore.QMetaObject.connectSlotsByName(IngresarProducto)
def retranslateUi(self, IngresarProducto):
IngresarProducto.setWindowTitle(_translate("IngresarProducto", "Ingresar Producto", None))
self.tblVwProductoStock.setToolTip(_translate("IngresarProducto", "<html><head/><body><p><span style=\" color:#ffffff;\">Detalle</span></p></body></html>", None))
self.btnAceptar.setToolTip(_translate("IngresarProducto", "<html><head/><body><p><span style=\" color:#ffffff;\">ACEPTAR</span></p></body></html>", None))
self.btnAceptar.setText(_translate("IngresarProducto", "ACEPTAR", None))
self.btnCancelar.setToolTip(_translate("IngresarProducto", "<html><head/><body><p><span style=\" color:#ffffff;\">CANCELAR</span></p></body></html>", None))
self.btnCancelar.setText(_translate("IngresarProducto", "CANCELAR", None))
self.txtCantidad.setToolTip(_translate("IngresarProducto", "<html><head/><body><p><span style=\" color:#ffffff;\">Cantidad producto</span></p></body></html>", None))
self.txtCantidad.setPlaceholderText(_translate("IngresarProducto", "Ingrese cantidad", None))
| gpl-2.0 | -6,358,320,857,863,521,000 | 50.063291 | 173 | 0.699058 | false |
msjoinder/EXIFzero | minimal_exif_writer.py | 1 | 16830 | """
Offers one class, MinimalExifWriter, which takes a jpg filename
in the constructor. Allows you to: remove exif section, add
image description, add copyright. Typical usage:
f = MinimalExifWriter('xyz.jpg')
f.newImageDescription('This is a photo of something very interesting!')
f.newCopyright('Jose Blow, All Rights Reserved', addYear = 1)
f.process()
Class methods:
newImageDescription(description)--will add Exif ImageDescription to file.
newCopyright(copyright, addSymbol = 0, addYear = 0)--will add Exif Copyright to file.
Will optionally prepend copyright symbol, or copyright symbol and current year.
removeExif()--will obliterate existing exif section.
process()--call after calling one or more of the above. Will remove existing exif
section, optionally saving some existing tags (see below), and insert a new exif
section with only three tags at most: description, copyright and date time original.
If removeExif() is not called, existing description (or new description if newImageDescription()
called), existing copyright (or new copyright if newCopyright() called) and existing
"DateTimeOriginal" (date/time picture taken) tags will be rewritten to the new
minimal exif section.
Run at comand line with no args to see command line usage.
Does not work on unix due to differences in mmap. Not sure what's up there--
don't need it on unix!
Brought to you by Megabyte Rodeo Software.
http://www.fetidcascade.com/pyexif.html
"""
# Written by Chris Stromberger, 10/2004. Public Domain.
# Last updated: 12/3/2004.
DUMP_TIFF = 0
VERBOSE = 0
if VERBOSE:
import binascii
import mmap
import sys
import minimal_exif_reader
#---------------------------------------------------------------------
class ExifFormatException(Exception):
pass
#---------------------------------------------------------------------------
class MinimalExifWriter:
SOI_MARKER = '\xff\xd8'
APP0_MARKER = '\xff\xe0'
APP1_MARKER = '\xff\xe1'
# Standard app0 segment that will work for all files. We hope.
# Based on http://www.funducode.com/freec/Fileformats/format3/format3b.htm.
APP0 = '\xff\xe0\x00\x10\x4a\x46\x49\x46\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00'
def __init__(self, filename):
self.filename = filename
self.removeExifSection = 0
self.description = None
self.copyright = None
self.dateTimeOriginal = None
#---------------------------------------------
def newImageDescription(self, description):
self.description = description
#---------------------------------------------
def newCopyright(self, copyright, addSymbol = 0, addYear = 0):
if addYear:
import time
year = time.localtime()[0]
self.copyright = "\xa9 %s %s" % (year, copyright)
elif addSymbol:
self.copyright = "\xa9 %s" % copyright
else:
self.copyright = copyright
#---------------------------------------------
def removeExif(self):
self.removeExifSection = 1
#---------------------------------------------
def process(self):
if not self.removeExifSection:
self.getExistingExifInfo()
if VERBOSE:
print self
import os
try:
fd = os.open(self.filename, os.O_RDWR)
except:
            sys.stderr.write('Unable to open "%s"\n' % self.filename)
return
self.m = mmap.mmap(fd, 0)
os.close(fd)
# We only add app0 if all we're doing is removing the exif section.
justRemovingExif = self.description is None and self.copyright is None and self.removeExifSection
if VERBOSE: print 'justRemovingExif=%s' % justRemovingExif
self.removeExifInfo(addApp0 = justRemovingExif)
if justRemovingExif:
self.m.close()
return
# Get here means we are adding new description and/or copyright.
self.removeApp0()
totalTagsToBeAdded = len(filter(None, (self.description, self.copyright, self.dateTimeOriginal)))
assert(totalTagsToBeAdded > 0)
# Layout will be: firstifd|description|copyright|exififd|datetime.
# First ifd will have tags: desc|copyright|subifd tag.
ifd = [self.twoBytesHexIntel(totalTagsToBeAdded)]
ifdEnd = ['\x00\x00\x00\x00']
NUM_TAGS_LEN = 2
TAG_LEN = 12
NEXT_IFD_OFFSET_LEN = 4
TIFF_HEADER_LENGTH = 8
ifdLength = NUM_TAGS_LEN + TAG_LEN * totalTagsToBeAdded + NEXT_IFD_OFFSET_LEN
# Subifd only has one tag.
SUBIFD_LENGTH = NUM_TAGS_LEN + TAG_LEN + NEXT_IFD_OFFSET_LEN
offsetToEndOfData = ifdLength + TIFF_HEADER_LENGTH
if self.description:
ifd.append(self.descriptionTag(len(self.description), offsetToEndOfData))
ifdEnd.append(self.description)
offsetToEndOfData += len(self.description)
if self.copyright:
ifd.append(self.copyrightTag(len(self.copyright), offsetToEndOfData))
ifdEnd.append(self.copyright)
offsetToEndOfData += len(self.copyright)
if self.dateTimeOriginal:
ifd.append(self.subIfdTag(offsetToEndOfData))
offsetToEndOfData += SUBIFD_LENGTH
ifdEnd.append(self.buildSubIfd(len(self.dateTimeOriginal), offsetToEndOfData))
ifdEnd.append(self.dateTimeOriginal)
app1 = self.buildApp1Section(ifd, ifdEnd)
self.addApp1(app1)
self.m.close()
#---------------------------------------------
# Build exif subifd with one tag for datetime (0x9003).
# Type is ascii (0x0002).
def buildSubIfd(self, lenDateTime, offsetToEndOfData):
return '\x01\x00\x03\x90\x02\x00%s%s\x00\x00\x00\x00' % (self.fourBytesHexIntel(lenDateTime), self.fourBytesHexIntel(offsetToEndOfData))
#---------------------------------------------
def getExistingExifInfo(self):
# Save off the old stuff.
try:
f = minimal_exif_reader.MinimalExifReader(self.filename)
except:
# Assume no existing exif info in the file. We
# don't care.
return
if not self.description:
self.description = f.imageDescription()
if not self.copyright:
self.copyright = f.copyright()
self.dateTimeOriginal = f.dateTimeOriginal()
if self.dateTimeOriginal:
# Restore ending nul.
if self.dateTimeOriginal[-1] != '\x00':
self.dateTimeOriginal += '\x00'
#---------------------------------------------------------------------------
def removeExifInfo(self, addApp0 = 1):
"""Remove the app1 section of the jpg. This removes all exif info and the exif
thumbnail. addApp0 should be 1 to add a minimal app0 section right after soi
to make it a legitimate jpg, I think (various image programs can read the file
without app0, but I think the standard requires one).
"""
# Read first bit of file to see if exif file.
self.m.seek(0)
if self.m.read(2) != self.SOI_MARKER:
self.m.close()
raise ExifFormatException("Missing SOI marker")
app0DataLength = 0
appMarker = self.m.read(2)
# See if there's an APP0 section, which sometimes appears.
if appMarker == self.APP0_MARKER:
if VERBOSE: print 'app0 found'
app0DataLength = ord(self.m.read(1)) * 256 + ord(self.m.read(1))
if VERBOSE: print 'app0DataLength: %s' % app0DataLength
# Back up 2 bytes to get the length bytes.
self.m.seek(-2, 1)
existingApp0 = self.m.read(app0DataLength)
appMarker = self.m.read(2)
if appMarker != self.APP1_MARKER:
# We don't care, we'll add our minimal app1 later.
return
exifHeader = self.m.read(8)
if VERBOSE: print 'exif header: %s' % binascii.hexlify(exifHeader)
if (exifHeader[2:6] != 'Exif' or
exifHeader[6:8] != '\x00\x00'):
self.m.close()
raise ExifFormatException("Malformed APP1")
app1Length = ord(exifHeader[0]) * 256 + ord(exifHeader[1])
if VERBOSE: print 'app1Length: %s' % app1Length
originalFileSize = self.m.size()
# Shift stuff just past app1 to overwrite app1.
# Start at app1 length bytes in + other bytes not incl in app1 length.
src = app1Length + len(self.SOI_MARKER) + len(self.APP1_MARKER)
if app0DataLength:
src += app0DataLength + len(self.APP0_MARKER)
dest = len(self.SOI_MARKER)
if addApp0:
if app0DataLength != 0:
# We'll re-add the existing app0.
dest += app0DataLength + len(self.APP0_MARKER)
else:
# Add our generic app0.
dest += len(self.APP0)
count = originalFileSize - app1Length - len(self.SOI_MARKER) - len(self.APP1_MARKER)
if app0DataLength:
count -= app0DataLength + len(self.APP0_MARKER)
if VERBOSE: print 'self.m.move(%s, %s, %s)' % (dest, src, count)
self.m.move(dest, src, count)
if addApp0:
if app0DataLength != 0:
self.m.resize(originalFileSize - app1Length - len(self.APP1_MARKER))
else:
self.m.seek(len(self.SOI_MARKER))
self.m.write(self.APP0)
try:
self.m.resize(originalFileSize - app1Length - len(self.APP1_MARKER) + len(self.APP0))
except:
                    pass  # ignore a failed resize here
else:
self.m.resize(originalFileSize - app1Length - len(self.APP1_MARKER))
#---------------------------------------------------------------------------
def removeApp0(self):
self.m.seek(0)
header = self.m.read(6)
if (header[0:2] != self.SOI_MARKER or
header[2:4] != self.APP0_MARKER):
if VERBOSE: print 'no app0 found: %s' % binascii.hexlify(header)
return
originalFileSize = self.m.size()
app0Length = ord(header[4]) * 256 + ord(header[5])
if VERBOSE: print 'app0Length:', app0Length
# Shift stuff to overwrite app0.
# Start at app0 length bytes in + other bytes not incl in app0 length.
src = app0Length + len(self.SOI_MARKER) + len(self.APP0_MARKER)
dest = len(self.SOI_MARKER)
count = originalFileSize - app0Length - len(self.SOI_MARKER) - len(self.APP0_MARKER)
self.m.move(dest, src, count)
if VERBOSE: print 'm.move(%s, %s, %s)' % (dest, src, count)
self.m.resize(originalFileSize - app0Length - len(self.APP0_MARKER))
#---------------------------------------------------------------------------
def addApp1(self, app1):
originalFileSize = self.m.size()
# Insert app1 section.
self.m.resize(originalFileSize + len(app1))
src = len(self.SOI_MARKER)
dest = len(app1) + len(self.SOI_MARKER)
count = originalFileSize - len(self.SOI_MARKER)
self.m.move(dest, src, count)
self.m.seek(len(self.SOI_MARKER))
self.m.write(app1)
#---------------------------------------------------------------------------
def fourBytesHexIntel(self, number):
return '%s%s%s%s' % (chr(number & 0x000000ff),
chr((number >> 8) & 0x000000ff),
chr((number >> 16) & 0x000000ff),
chr((number >> 24) & 0x000000ff))
#---------------------------------------------------------------------------
def twoBytesHexIntel(self, number):
return '%s%s' % (chr(number & 0x00ff),
chr((number >> 8) & 0x00ff))
#---------------------------------------------------------------------------
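    # The tag builders below each emit a 12-byte IFD entry: a 2-byte tag id,
    # a 2-byte type, a 4-byte count and a 4-byte value offset, all
    # little-endian to match the 'II' TIFF header assembled later.
    # 0x010e is ImageDescription, 0x8298 is Copyright and 0x8769 is the
    # Exif sub-IFD pointer; type 2 means ASCII data, type 4 means LONG.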
def descriptionTag(self, numChars, loc):
return self.asciiTag('\x0e\x01', numChars, loc)
#---------------------------------------------------------------------------
def copyrightTag(self, numChars, loc):
return self.asciiTag('\x98\x82', numChars, loc)
#---------------------------------------------------------------------------
def subIfdTag(self, loc):
return '\x69\x87\x04\x00\x01\x00\x00\x00%s' % self.fourBytesHexIntel(loc)
#---------------------------------------------------------------------------
def asciiTag(self, tag, numChars, loc):
"""Create ascii tag. Assumes description > 4 chars long."""
return '%s\x02\x00%s%s' % (tag, self.fourBytesHexIntel(numChars), self.fourBytesHexIntel(loc))
#---------------------------------------------------------------------------
def buildApp1Section(self, ifdPieces, ifdEndPieces):
"""Create the APP1 section of an exif jpg. Consists of exif header plus
tiff header + ifd and associated data."""
# Intel byte order, offset to first ifd will be 8.
tiff = 'II\x2a\x00\x08\x00\x00\x00%s%s' % (''.join(ifdPieces), ''.join(ifdEndPieces))
if DUMP_TIFF:
f = open('tiff.dump', 'wb')
f.write(tiff)
f.close()
app1Length = len(tiff) + 8
return '\xff\xe1%s%sExif\x00\x00%s' % (chr((app1Length >> 8) & 0x00ff), chr(app1Length & 0x00ff), tiff)
#---------------------------------------------------------------------------
def __str__(self):
return """filename: %(filename)s
removeExifSection: %(removeExifSection)s
description: %(description)s
copyright: %(copyright)s
dateTimeOriginal: %(dateTimeOriginal)s
""" % self.__dict__
#---------------------------------------------------------------------------
def usage(error = None):
"""Print command line usage and exit"""
if error:
print error
print
print """This program will remove exif info from an exif jpg, and can optionally
add the ImageDescription exif tag and/or the Copyright tag. But it will always remove
some or all existing exif info (depending on options--see below)!
So don't run this on your original images without a backup.
Options:
-h: shows this message.
-f <file>: jpg to process (required).
-x: remove exif info (including thumbnail).
-d <description or file>: remove exif info (including thumbnail) and then add exif
ImageDescription. Will save the existing copyright tag if present,
as well as the date time original tag (date & time photo taken),
unless -x also passed (-x always means remove all exif info).
It will attempt to open whatever is passed on the
command line as a file; if successful, the contents of the file
are added as the description, else the literal text on the
command line is used as the description.
-c <copyright or file>: remove exif info (including thumbnail) and then add exif
Copyright tag. Will save the existing image description tag if present,
as well as the date time original tag (date & time photo taken),
unless -x also passed (-x always means remove all exif info).
It will attempt to open whatever is passed on the command line as a file;
if successful, the contents of the file are added as the copyright,
else the literal text on the command line is used as the copyright.
-s: prepend copyright symbol to copyright.
-y: prepend copyright symbol and current year to copyright.
The image description and copyright must be > 4 characters long.
This software courtesy of Megabyte Rodeo Software."""
sys.exit(1)
#---------------------------------------------------------------------------
def parseArgs(args_):
import getopt
try:
opts, args = getopt.getopt(args_, "yshxd:f:c:")
except getopt.GetoptError:
usage()
filename = None
description = ''
copyright = ''
addCopyrightSymbol = 0
addCopyrightYear = 0
removeExif = 0
for o, a in opts:
if o == "-h":
usage()
if o == "-f":
filename = a
if o == "-d":
try:
f = open(a)
description = f.read()
f.close()
except:
description = a
if o == "-c":
try:
f = open(a)
copyright = f.read()
f.close()
except:
copyright = a
if o == '-x':
removeExif = 1
if o == '-s':
addCopyrightSymbol = 1
if o == '-y':
addCopyrightYear = 1
if filename is None:
usage('Missing jpg filename')
if description and (len(description) <= 4 or len(description) > 60000):
usage('Description too short or too long')
if copyright and (len(copyright) <= 4 or len(copyright) > 60000):
usage('Copyright too short or too long')
if not description and not copyright and not removeExif:
usage('Nothing to do!')
return filename, description, copyright, removeExif, addCopyrightSymbol, addCopyrightYear
#---------------------------------------------------------------------------
if __name__ == '__main__':
try:
filename, description, copyright, removeExif, addCopyrightSymbol, addCopyrightYear = parseArgs(sys.argv[1:])
f = MinimalExifWriter(filename)
if description:
f.newImageDescription(description)
if copyright:
f.newCopyright(copyright, addCopyrightSymbol, addCopyrightYear)
if removeExif:
f.removeExif()
f.process()
except ExifFormatException, ex:
sys.stderr.write("Exif format error: %s\n" % ex)
except SystemExit:
pass
except:
sys.stderr.write("Unable to process %s\n" % filename)
raise | gpl-3.0 | -8,163,865,281,723,483,000 | 35.58913 | 140 | 0.598515 | false |
jirikuncar/invenio | invenio/modules/search/searchext/units/citedexcludingselfcites.py | 7 | 1653 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Cited excluding self cites search unit."""
from intbitset import intbitset
def search_unit(query, f, m, wl=None):
"""Search for records with given citation count excluding self-cites.
Query usually looks like '10->23'.
"""
from invenio.modules.records.models import Record
from invenio.legacy.bibrank.citation_searcher import (
get_records_with_num_cites
)
numstr = '"{}"'.format(query)
# this is sort of stupid but since we may need to
# get the records that do _not_ have cites, we have to
# know the ids of all records, too
# but this is needed only if bsu_p is 0 or 0 or 0->0
allrecs = intbitset()
if query == 0 or query == "0" or \
query.startswith("0->") or query.endswith("->0"):
allrecs = Record.allids()
return get_records_with_num_cites(numstr, allrecs, exclude_selfcites=True)
| gpl-2.0 | -4,106,544,662,706,156,000 | 37.44186 | 78 | 0.701149 | false |
f03lipe/eMec-db-retriever | src/queryassembler.py | 1 | 4942 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
This module defines the QueryAssembler class.
Part of the pydbcollector package.
by @f03lipe, 2011-2012
"""
class QueryAssembler(object):
""" Assembles an SQL query to retrieve data from server.
This class defines default query snippets to be added to the queries,
as the __init__ arguments dictate. The two mapping objects are SELECT
and WHERE, which contain snippets of sql queries for SELECT and WHERE
clauses, respectively.
!!!!
This is not to be used as a full interface, as not every combination of
    SELECT and WHERE fields yields something meaningful or even error-free.
"""
SELECT = {
'qtde_de_cursos': "count (*) as qtde_cursos",
'nat_jurídica': "n.natureza_juridica as nat_jurídica",
'org_acad': "o.organizacao_academica as org_acad",
'estado': "i.uf as estado",
'modalidade': "m.nome as modalidade",
'vagas_turno': "c.vagas_totais_anuais as vagas_turno"
}
WHERE = {
'__base': "i.natid = n.id and i.instid = c.instid and c.modid = m.id and c.titid = t.id and i.orgid = o.id", # the basic condition that holds all tables together
'tecnológico': "t.nome = 'Tecnológico'",
'licenciatura': "t.nome = 'Licenciatura'",
'bacharelado': "t.nome = 'Bacharelado'",
'ensino_público':
"""((n.natureza_juridica = 'Pessoa Jurídica de Direito Público - Municipal') or (n.natureza_juridica = 'Pessoa Jurídica de Direito Público - Estadual') or
(n.natureza_juridica = 'Pessoa Jurídica de Direito Público - Federal'))""",
'ensino_privado':
"""((n.natureza_juridica = 'Pessoa Jurídica de Direito Privado - Sem fins lucrativos - Associação de Utilidade Pública') or (n.natureza_juridica = 'Privada sem fins lucrativos')
or (n.natureza_juridica = 'Pessoa Jurídica de Direito Privado - Sem fins lucrativos - Sociedade') or (n.natureza_juridica = 'Privada com fins lucrativos')
or (n.natureza_juridica = 'Pessoa Jurídica de Direito Privado - Com fins lucrativos - Sociedade Mercantil ou Comercial')
or (n.natureza_juridica = 'Pessoa Jurídica de Direito Privado - Com fins lucrativos - Associação de Utilidade Pública'))""",
'existia_no_ano':
"""(SUBSTRING(c.data_inicio FROM 7 FOR 10) < '{year}') and not (SUBSTRING(c.data_inicio FROM 7 FOR 10) = '') and
(SUBSTRING(c.data_inicio FROM 7 FOR 10) > '1900')""", # for incorrect entries
'UF': "(i.nome LIKE '%UNIVERSIDADE FEDERAL%')",
"UTF": "(SUBSTRING(i.nome FROM 1 FOR 32) = 'UNIVERSIDADE TECNOLÓGICA FEDERAL')",
'no norte': "(i.uf = 'AM' or i.uf = 'AC' or i.uf = 'PA' or i.uf = 'RO' or i.uf = 'RR' or i.uf = 'TO' or i.uf = 'AP')",
'no sul': "(i.uf = 'SC' or i.uf = 'PR' or i.uf = 'RS')",
'no nordeste': "(i.uf = 'AL' or i.uf = 'BA' or i.uf = 'CE' or i.uf = 'MA' or i.uf = 'PB' or i.uf = 'PE' or i.uf = 'PI' or i.uf = 'RN' or i.uf = 'SE')",
'no centro-oeste': "(i.uf = 'GO' or i.uf = 'MT' or i.uf = 'MS' or i.uf = 'DF')",
'no sudeste': "(i.uf = 'RJ' or i.uf = 'ES' or i.uf = 'SP' or i.uf = 'MG')",
'educação_presencial': "(m.nome = 'Educação Presencial')",
'educação_a_distância': "(m.nome = 'Educação a Distância')",
'municipal': "(n.natureza_juridica = 'Pessoa Jurídica de Direito Público - Municipal')",
'estadual': "(n.natureza_juridica = 'Pessoa Jurídica de Direito Público - Estadual')",
'federal': "(n.natureza_juridica = 'Pessoa Jurídica de Direito Público - Federal')"
}
def __init__(self, year, select, where=None, group_by=None, order_by=None):
# this here is a huge mess!! \\O
# /
# /\
self.query = ""
self.year = year
self.add_select(select)
self.add_from()
self.add_where(where)
self.add_group_by(group_by)
self.add_order_by(order_by)
def add_to_query(self, *pieces):
""" Add sql pieces to the query. """
if pieces[0].isupper():
self.query += "\n"
self.query += " ".join(pieces)+" "
def add_select(self, conditions):
""" Add the SELECT field. """
self.add_to_query("SELECT")
        assert 'qtde_de_cursos' in conditions, "select qtde_de_cursos"
self.add_to_query(', '.join(self.SELECT[c] for c in conditions))
def add_from(self):
""" Add the default FROM field. """
self.add_to_query("FROM curso c, instituicao i, titulacao t, modalidade m, organizacao o, natureza n")
def add_where(self, conditions):
""" Add WHERE field. """
assert conditions, "no conditions given"
conditions.append('__base') # add base condition
d = dict()
for c in conditions:
d[c] = self.WHERE[c]
if c == "existia_no_ano":
d[c] = d[c].format(year=self.year+1)
self.add_to_query("WHERE", ' and '.join(d.values()))
def add_group_by(self, clauses):
if clauses:
self.add_to_query("group by")
self.add_to_query(', '.join(clauses))
def add_order_by(self, clauses):
if clauses:
self.add_to_query("order by")
self.add_to_query(', '.join(clauses)) | mit | 5,283,218,452,178,943,000 | 38.556452 | 180 | 0.641313 | false |
cfarquhar/openstack-ansible | osa_toolkit/filesystem.py | 4 | 10521 | # Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2014, Kevin Carter <[email protected]>
# (c) 2015, Major Hayden <[email protected]>
#
import copy
import datetime
import json
import logging
import os
from osa_toolkit import dictutils as du
import tarfile
import yaml
logger = logging.getLogger('osa-inventory')
INVENTORY_FILENAME = 'openstack_inventory.json'
class MissingDataSource(Exception):
def __init__(self, *sources):
self.sources = sources
error_msg = "Could not read data sources: '{sources}'."
self.message = error_msg.format(sources=self.sources)
def __str__(self):
return self.message
def __repr__(self):
return self.message
def _get_search_paths(preferred_path=None, suffix=None):
"""Return a list of search paths, including the standard location
:param preferred_path: A search path to prefer to a standard location
:param suffix: Appended to the search paths, e.g. subdirectory or filename
:return: ``(list)`` Path strings to search
"""
search_paths = [
os.path.join(
'/etc', 'openstack_deploy'
),
]
if preferred_path is not None:
search_paths.insert(0, os.path.expanduser(preferred_path))
if suffix:
search_paths = [os.path.join(p, suffix) for p in search_paths]
return search_paths
def file_find(filename, preferred_path=None, raise_if_missing=True):
"""Return the path to an existing file, or False if no file is found.
If no file is found and raise_if_missing is True, MissingDataSource
will be raised.
The file lookup will be done in the following directories:
* ``preferred_path`` [Optional]
* ``/etc/openstack_deploy/``
:param filename: ``str`` Name of the file to find
:param preferred_path: ``str`` Additional directory to look in FIRST
:param raise_if_missing: ``bool`` Should a MissingDataSource be raised if
the file is not found
"""
search_paths = _get_search_paths(preferred_path, suffix=filename)
for file_candidate in search_paths:
if os.path.isfile(file_candidate):
return file_candidate
# The file was not found
if raise_if_missing:
raise MissingDataSource(search_paths)
else:
return False
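# Example of the lookup behaviour (paths shown are illustrative):
#   file_find('openstack_user_config.yml', preferred_path='~/my_deploy')
# looks for ~/my_deploy/openstack_user_config.yml first and then
# /etc/openstack_deploy/openstack_user_config.yml, raising MissingDataSource
# only if the file is absent and raise_if_missing is True.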
def dir_find(preferred_path=None, suffix=None, raise_if_missing=True):
"""Return the path to the user configuration files.
If no directory is found the system will exit.
The lookup will be done in the following directories:
* ``preferred_path`` [Optional]
* ``/etc/openstack_deploy/``
:param preferred_path: ``str`` Additional directory to look in FIRST
:param suffix: ``str`` Name of a subdirectory to find under standard paths
:param raise_if_missing: ``bool`` Should a MissingDataSource be raised if
the directory is not found.
"""
search_paths = _get_search_paths(preferred_path, suffix)
for f in search_paths:
if os.path.isdir(f):
return f
# The directory was not found
if raise_if_missing:
raise MissingDataSource(search_paths)
else:
return False
def _extra_config(user_defined_config, base_dir):
"""Discover new items in any extra directories and add the new values.
:param user_defined_config: ``dict``
:param base_dir: ``str``
"""
for root_dir, _, files in os.walk(base_dir):
for name in files:
if name.endswith(('.yml', '.yaml')):
with open(os.path.join(root_dir, name), 'rb') as f:
du.merge_dict(
user_defined_config,
yaml.safe_load(f.read()) or {}
)
logger.debug("Merged overrides from file {}".format(name))
def _make_backup(backup_path, source_file_path):
"""Create a backup of all previous inventory files as a tar archive
:param backup_path: where to store the backup file
:param source_file_path: path of file to backup
:return:
"""
inventory_backup_file = os.path.join(
backup_path,
'backup_openstack_inventory.tar'
)
with tarfile.open(inventory_backup_file, 'a') as tar:
basename = os.path.basename(source_file_path)
backup_name = _get_backup_name(basename)
tar.add(source_file_path, arcname=backup_name)
logger.debug("Backup written to {}".format(inventory_backup_file))
def _get_backup_name(basename):
"""Return a name for a backup file based on the time
:param basename: serves as prefix for the return value
:return: a name for a backup file based on current time
"""
utctime = datetime.datetime.utcnow()
utctime = utctime.strftime("%Y%m%d_%H%M%S")
return '{}-{}.json'.format(basename, utctime)
def write_hostnames(save_path, hostnames_ips):
"""Write a list of all hosts and their given IP addresses
NOTE: the file is saved in json format to a file with the name
``openstack_hostnames_ips.yml``
:param save_path: path to save the file to, will use default location if
None or an invalid path is provided
:param hostnames_ips: the list of all hosts and their IP addresses
"""
file_path = dir_find(save_path)
hostnames_ip_file = os.path.join(file_path, 'openstack_hostnames_ips.yml')
with open(hostnames_ip_file, 'wb') as f:
f.write(
json.dumps(
hostnames_ips,
indent=4,
separators=(',', ': '),
sort_keys=True
).encode('ascii')
)
def _load_from_json(filename, preferred_path=None, raise_if_missing=True):
"""Return a dictionary found in json format in a given file
:param filename: ``str`` Name of the file to read from
:param preferred_path: ``str`` Path to the json file to try FIRST
:param raise_if_missing: ``bool`` Should a MissingDataSource be raised if
the file is not found
:return ``(dict, str)`` Dictionary describing the JSON file contents or
False, and the fully resolved file name loaded or None
"""
target_file = file_find(filename, preferred_path, raise_if_missing)
dictionary = False
if target_file is not False:
with open(target_file, 'rb') as f_handle:
dictionary = json.loads(f_handle.read().decode('ascii'))
return dictionary, target_file
def load_inventory(preferred_path=None, default_inv=None, filename=None):
"""Create an inventory dictionary from the given source file or a default
inventory. If an inventory is found then a backup tarball is created
as well.
:param preferred_path: ``str`` Path to the inventory directory to try FIRST
:param default_inv: ``dict`` Default inventory skeleton
:return: ``(dict, str)`` Dictionary describing the JSON file contents or
``default_inv``, and the directory from which the inventory was loaded
or should have been loaded from.
"""
if filename:
inv_fn = filename
else:
inv_fn = INVENTORY_FILENAME
inventory, file_loaded = _load_from_json(inv_fn, preferred_path,
raise_if_missing=False)
if file_loaded is not False:
load_path = os.path.dirname(file_loaded)
else:
load_path = dir_find(preferred_path)
if inventory is not False:
logger.debug("Loaded existing inventory from {}".format(file_loaded))
_make_backup(load_path, file_loaded)
else:
logger.debug("No existing inventory, created fresh skeleton.")
inventory = copy.deepcopy(default_inv)
return inventory, load_path
def save_inventory(inventory_json, save_path):
"""Save an inventory dictionary
:param inventory_json: ``str`` String of JSON formatted inventory to store
:param save_path: ``str`` Path of the directory to save to
"""
if INVENTORY_FILENAME == save_path:
inventory_file = file_find(save_path)
else:
inventory_file = os.path.join(save_path, INVENTORY_FILENAME)
with open(inventory_file, 'wb') as f:
f.write(inventory_json.encode('ascii'))
logger.info("Inventory written")
def load_environment(config_path, environment):
"""Create an environment dictionary from config files
:param config_path: ``str`` path where the environment files are kept
:param environment: ``dict`` dictionary to populate with environment data
"""
# Load all YAML files found in the env.d directory
env_plugins = dir_find(config_path, 'env.d', raise_if_missing=False)
if env_plugins is not False:
_extra_config(user_defined_config=environment, base_dir=env_plugins)
logger.debug("Loaded environment from {}".format(config_path))
return environment
def load_user_configuration(config_path=None):
"""Create a user configuration dictionary from config files
:param config_path: ``str`` path where the configuration files are kept
"""
user_defined_config = dict()
# Load the user defined configuration file
user_config_file = file_find('openstack_user_config.yml',
preferred_path=config_path,
raise_if_missing=False)
if user_config_file is not False:
with open(user_config_file, 'rb') as f:
user_defined_config.update(yaml.safe_load(f.read()) or {})
# Load anything in a conf.d directory if found
base_dir = dir_find(config_path, 'conf.d', raise_if_missing=False)
if base_dir is not False:
_extra_config(user_defined_config, base_dir)
# Exit if no user_config was found and loaded
if not user_defined_config:
raise MissingDataSource(_get_search_paths(config_path) +
_get_search_paths(config_path, 'conf.d'))
logger.debug("User configuration loaded from: {}".format(user_config_file))
return user_defined_config
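# A sketch of how callers might combine these helpers (illustrative only;
# the real call sites live elsewhere in osa_toolkit):
#
#   user_config = load_user_configuration('/etc/openstack_deploy')
#   inventory, load_path = load_inventory('/etc/openstack_deploy',
#                                         default_inv={})
#   # ... update the inventory dict ...
#   save_inventory(json.dumps(inventory), load_path)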
| apache-2.0 | 8,111,415,517,427,129,000 | 32.4 | 79 | 0.652885 | false |
tempbottle/kwplayer | kuwo/TopList.py | 4 | 4653 |
# Copyright (C) 2013-2014 LiuLang <[email protected]>
# Use of this source code is governed by GPLv3 license that can be found
# in the LICENSE file.
import time
from gi.repository import GdkPixbuf
from gi.repository import Gtk
from kuwo import Config
_ = Config._
from kuwo import Net
from kuwo import Widgets
from kuwo.log import logger
class TopList(Gtk.Box):
'''TopList tab in notebook.'''
title = _('Top List')
def __init__(self, app):
super().__init__()
self.set_orientation(Gtk.Orientation.VERTICAL)
self.app = app
def first(self):
app = self.app
self.buttonbox = Gtk.Box(spacing=5)
self.pack_start(self.buttonbox, False, False, 0)
button_home = Gtk.Button(_('TopList'))
button_home.connect('clicked', self.on_button_home_clicked)
self.buttonbox.pack_start(button_home, False, False, 0)
self.label = Gtk.Label('')
self.buttonbox.pack_start(self.label, False, False, 0)
# checked, name, artist, album, rid, artistid, albumid
treeview_songs = Widgets.TreeViewSongs(app)
self.liststore_songs = treeview_songs.liststore
control_box = Widgets.ControlBox(self.liststore_songs, app)
self.buttonbox.pack_end(control_box, False, False, 0)
self.scrolled_nodes = Gtk.ScrolledWindow()
self.pack_start(self.scrolled_nodes, True, True, 0)
# logo, name, nid, info, tooltip
self.liststore_nodes = Gtk.ListStore(GdkPixbuf.Pixbuf, str, int,
str, str)
iconview_nodes = Widgets.IconView(self.liststore_nodes, tooltip=4)
iconview_nodes.connect('item_activated',
self.on_iconview_nodes_item_activated)
self.scrolled_nodes.add(iconview_nodes)
self.scrolled_songs = Gtk.ScrolledWindow()
self.pack_start(self.scrolled_songs, True, True, 0)
self.scrolled_songs.add(treeview_songs)
self.show_all()
self.buttonbox.hide()
self.scrolled_songs.hide()
def _on_get_nodes(info, error):
if error or not info or not info[0] or not info[1]:
                logger.error('_on_get_nodes(), info: %s, error: %s' %
(info, error))
return
nodes, total_pages = info
urls = []
tree_iters = []
for node in nodes:
tree_iter = self.liststore_nodes.append([
Config.ANONYMOUS_PIXBUF,
Widgets.unescape(node['name']),
int(node['sourceid']),
Widgets.unescape(node['info']),
Widgets.set_tooltip_with_song_tips(node['name'],
node['tips']),
])
urls.append(node['pic'])
tree_iters.append(tree_iter)
self.liststore_nodes.timestamp = time.time()
Net.async_call(Net.update_liststore_images, self.liststore_nodes, 0,
tree_iters, urls)
nid = 2
page = 0
Net.async_call(Net.get_nodes, nid, page, callback=_on_get_nodes)
def on_button_home_clicked(self, btn):
self.scrolled_nodes.show_all()
self.scrolled_songs.hide()
self.buttonbox.hide()
def on_iconview_nodes_item_activated(self, iconview, path):
model = iconview.get_model()
self.buttonbox.show_all()
self.label.set_label(model[path][1])
self.app.playlist.advise_new_playlist_name(model[path][1])
self.show_toplist_songs(model[path][2])
def show_toplist_songs(self, nid):
def _on_get_toplist_songs(songs, error):
if not songs or error:
logger.error('show_toplist_songs(), songs: %s, error: %s' %
(songs, error))
return
self.liststore_songs.clear()
for song in songs:
self.liststore_songs.append([
True,
Widgets.unescape(song['name']),
Widgets.unescape(song['artist']),
Widgets.unescape(song['album']),
int(song['id']),
int(song['artistid']),
int(song['albumid']),
song['formats'],
])
self.scrolled_nodes.hide()
self.scrolled_songs.show_all()
self.scrolled_songs.get_vscrollbar().set_value(0)
Net.async_call(Net.get_toplist_songs, nid,
callback=_on_get_toplist_songs)
| gpl-3.0 | 5,369,660,901,661,805,000 | 35.928571 | 80 | 0.552117 | false |
MatthieuDartiailh/eapii | eapii/visa/standards.py | 1 | 3643 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Copyright 2014 by Eapii Authors, see AUTHORS for more details.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENCE, distributed with this software.
#------------------------------------------------------------------------------
""" Module implementing standards compliant drivers.
The drivers defined in this module can be used as based class for instrument
implementing the standard.
This module has been inspired by the iec60488 module found in the slave
project.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from .visa_instrs import VisaMessageInstrument
from ..core.iprops.api import Bool, Register
EVENT_STATUS_BYTE = (
'operation complete',
'request control',
'query error',
'device dependent error',
'execution error',
'command error',
'user request',
'power on',
)
class IEC60488(VisaMessageInstrument):
""" Base class for instrument implementing the following commands.
Reporting Commands
- `*CLS` - Clears the data status structure.
- `*ESE` - Write the event status enable register.
- `*ESE?` - Query the event status enable register.
- `*ESR?` - Query the standard event status register.
- `*SRE` - Write the status enable register.
- `*SRE?` - Query the status enable register.
- `*STB` - Query the status register.
Internal operation commands
- `*IDN?` - Identification query.
- `*RST` - Perform a device reset.
- `*TST?` - Perform internal self-test.
Synchronization commands
- `*OPC` - Set operation complete flag high.
- `*OPC?` - Query operation complete flag.
- `*WAI` - Wait to continue.
"""
# =========================================================================
# --- IProperties
# =========================================================================
#: Event register recording the state of the different events.
event_status = Register('*ESR?', '*ESR {}', names=EVENT_STATUS_BYTE)
#: Register listing ofr which event notifications are enabled.
event_status_enable = Register('*ESE?', '*ESE {}', names=[None]*8)
#: Register listing ofr which event service requests are enabled.
service_request_enable = Register('*SRE?', '*SRE {}', names=[None]*8)
#: Flag signaling all pending operations are completed.
operation_complete = Bool('*OPC?', mapping={True: '1', False: '0'})
# =========================================================================
# --- Methods
# =========================================================================
def get_id(self):
"""Access the instrument identification."""
return self.query('*IDN?')
def clear_status(self):
"""Clears the status data structure."""
self.write('*CLS')
def complete_operation(self):
"""Sets the operation complete bit high of the event status byte."""
self.write('*OPC')
def reset(self):
"""Performs a device reset."""
self.write('*RST')
def test(self):
"""Performs a internal self-test and returns an integer in the range
-32767 to + 32767.
"""
return int(self.query('*TST?'))
def wait_to_continue(self):
"""Prevents the device from executing any further commands or queries
until the no operation flag is `True`.
"""
self.write('*WAI')
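# Minimal usage sketch (the resource name and the way it is passed depend on
# VisaMessageInstrument's constructor, so treat this as indicative only):
#
#   instr = IEC60488('GPIB0::12::INSTR')
#   print(instr.get_id())        # e.g. 'Maker,Model,1234,1.0'
#   instr.clear_status()
#   instr.reset()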
| bsd-3-clause | -7,927,440,676,249,917,000 | 34.028846 | 79 | 0.551743 | false |
looker/sentry | src/sentry/eventtypes/base.py | 4 | 1107 | from __future__ import absolute_import
from sentry.utils.strings import truncatechars, strip
class BaseEvent(object):
id = None
def __init__(self, data):
self.data = data
def has_metadata(self):
raise NotImplementedError
def get_metadata(self):
raise NotImplementedError
def to_string(self, metadata):
raise NotImplementedError
class DefaultEvent(BaseEvent):
key = 'default'
def has_metadata(self):
# the default event can always work
return True
def get_metadata(self):
# See GH-3248
message_interface = self.data.get(
'sentry.interfaces.Message', {
'message': self.data.get('message', ''),
}
)
message = strip(message_interface.get('formatted', message_interface['message']))
if not message:
title = '<unlabeled event>'
else:
title = truncatechars(message.splitlines()[0], 100)
return {
'title': title,
}
def to_string(self, metadata):
return metadata['title']
| bsd-3-clause | -47,770,865,815,998,790 | 23.065217 | 89 | 0.586269 | false |
163gal/Time-Line | libs_arm/wx/lib/analogclock/helpers.py | 9 | 27261 | # AnalogClock's base classes
# E. A. Tacao <e.a.tacao |at| estadao.com.br>
# http://j.domaindlx.com/elements28/wxpython/
# 15 Fev 2006, 22:00 GMT-03:00
# Distributed under the wxWidgets license.
from time import strftime, localtime
import math
import wx
from styles import *
#----------------------------------------------------------------------
_targets = [HOUR, MINUTE, SECOND]
#----------------------------------------------------------------------
class Element:
"""Base class for face, hands and tick marks."""
def __init__(self, idx=0, pos=None, size=None, offset=0, clocksize=None,
scale=1, rotate=False, kind=""):
self.idx = idx
self.pos = pos
self.size = size
self.offset = offset
self.clocksize = clocksize
self.scale = scale
self.rotate = rotate
self.kind = kind
self.text = None
self.angfac = [6, 30][self.kind == "hours"]
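    # angfac is the angular step in degrees between ticks: 360/60 = 6 for
    # minute/second marks, 360/12 = 30 for hour marks.  The two helpers
    # below convert between polar (magnitude, angle in degrees) and
    # rectangular coordinates; rotated ticks and the hands use them to spin
    # their polygons around the clock centre.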
def _pol2rect(self, m, t):
return m * math.cos(math.radians(t)), m * math.sin(math.radians(t))
def _rect2pol(self, x, y):
return math.hypot(x, y), math.degrees(math.atan2(y, x))
def DrawRotated(self, dc, offset=0):
pass
def DrawStraight(self, dc, offset=0):
pass
def Draw(self, dc, offset=0):
if self.rotate:
self.DrawRotated(dc, offset)
else:
self.DrawStraight(dc, offset)
def RecalcCoords(self, clocksize, centre, scale):
pass
def GetSize(self):
return self.size
def GetOffset(self):
return self.offset
def GetIsRotated(self, rotate):
return self.rotate
def GetMaxSize(self, scale=1):
return self.size * scale
def GetScale(self):
return self.scale
def GetPolygon(self):
return self.polygon
def SetPosition(self, pos):
self.pos = pos
def SetSize(self, size):
self.size = size
def SetOffset(self, offset):
self.offset = offset
def SetClockSize(self, clocksize):
self.clocksize = clocksize
def SetScale(self, scale):
self.scale = scale
def SetIsRotated(self, rotate):
self.rotate = rotate
def SetPolygon(self, polygon):
self.polygon = polygon
#----------------------------------------------------------------------
class ElementWithDyer(Element):
"""Base class for clock face and hands."""
def __init__(self, **kwargs):
self.dyer = kwargs.pop("dyer", Dyer())
Element.__init__(self, **kwargs)
def GetFillColour(self):
return self.dyer.GetFillColour()
def GetBorderColour(self):
return self.dyer.GetBorderColour()
def GetBorderWidth(self):
return self.dyer.GetBorderWidth()
def GetShadowColour(self):
return self.dyer.GetShadowColour()
def SetFillColour(self, colour):
self.dyer.SetFillColour(colour)
def SetBorderColour(self, colour):
self.dyer.SetBorderColour(colour)
def SetBorderWidth(self, width):
self.dyer.SetBorderWidth(width)
def SetShadowColour(self, colour):
self.dyer.SetShadowColour(colour)
#----------------------------------------------------------------------
class Face(ElementWithDyer):
"""Holds info about the clock face."""
def __init__(self, **kwargs):
ElementWithDyer.__init__(self, **kwargs)
def Draw(self, dc):
self.dyer.Select(dc)
dc.DrawCircle(self.pos.x, self.pos.y, self.radius)
def RecalcCoords(self, clocksize, centre, scale):
self.radius = min(clocksize.Get()) / 2. - self.dyer.width / 2.
self.pos = centre
#----------------------------------------------------------------------
class Hand(ElementWithDyer):
"""Holds info about a clock hand."""
def __init__(self, **kwargs):
self.lenfac = kwargs.pop("lenfac")
ElementWithDyer.__init__(self, **kwargs)
self.SetPolygon([[-1, 0], [0, -1], [1, 0], [0, 4]])
def Draw(self, dc, end, offset=0):
radius, centre, r = end
angle = math.degrees(r)
polygon = self.polygon[:]
vscale = radius / max([y for x, y in polygon])
for i, (x, y) in enumerate(polygon):
x *= self.scale * self.size
y *= vscale * self.lenfac
m, t = self._rect2pol(x, y)
polygon[i] = self._pol2rect(m, t - angle)
dc.DrawPolygon(polygon, centre.x + offset, centre.y + offset)
def RecalcCoords(self, clocksize, centre, scale):
self.pos = centre
self.scale = scale
#----------------------------------------------------------------------
class TickSquare(Element):
"""Holds info about a tick mark."""
def __init__(self, **kwargs):
Element.__init__(self, **kwargs)
def Draw(self, dc, offset=0):
width = height = self.size * self.scale
x = self.pos.x - width / 2.
y = self.pos.y - height / 2.
dc.DrawRectangle(x + offset, y + offset, width, height)
#----------------------------------------------------------------------
class TickCircle(Element):
"""Holds info about a tick mark."""
def __init__(self, **kwargs):
Element.__init__(self, **kwargs)
def Draw(self, dc, offset=0):
radius = self.size * self.scale / 2.
x = self.pos.x
y = self.pos.y
dc.DrawCircle(x + offset, y + offset, radius)
#----------------------------------------------------------------------
class TickPoly(Element):
"""Holds info about a tick mark."""
def __init__(self, **kwargs):
Element.__init__(self, **kwargs)
self.SetPolygon([[0, 1], [1, 0], [2, 1], [1, 5]])
def _calcPolygon(self):
width = max([x for x, y in self.polygon])
height = max([y for x, y in self.polygon])
tscale = self.size / max(width, height) * self.scale
polygon = [(x * tscale, y * tscale) for x, y in self.polygon]
width = max([x for x, y in polygon])
height = max([y for x, y in polygon])
return polygon, width, height
def DrawStraight(self, dc, offset=0):
polygon, width, height = self._calcPolygon()
x = self.pos.x - width / 2.
y = self.pos.y - height / 2.
dc.DrawPolygon(polygon, x + offset, y + offset)
def DrawRotated(self, dc, offset=0):
polygon, width, height = self._calcPolygon()
angle = 360 - self.angfac * (self.idx + 1)
r = math.radians(angle)
for i in range(len(polygon)):
m, t = self._rect2pol(*polygon[i])
t -= angle
polygon[i] = self._pol2rect(m, t)
x = self.pos.x - math.cos(r) * width / 2. - math.sin(r) * height / 2.
y = self.pos.y - math.cos(r) * height / 2. + math.sin(r) * width / 2.
dc.DrawPolygon(polygon, x + offset, y + offset)
#----------------------------------------------------------------------
class TickDecimal(Element):
"""Holds info about a tick mark."""
def __init__(self, **kwargs):
Element.__init__(self, **kwargs)
self.text = "%s" % (self.idx + 1)
def DrawStraight(self, dc, offset=0):
width, height = dc.GetTextExtent(self.text)
x = self.pos.x - width / 2.
y = self.pos.y - height / 2.
dc.DrawText(self.text, x + offset, y + offset)
def DrawRotated(self, dc, offset=0):
width, height = dc.GetTextExtent(self.text)
angle = 360 - self.angfac * (self.idx + 1)
r = math.radians(angle)
x = self.pos.x - math.cos(r) * width / 2. - math.sin(r) * height / 2.
y = self.pos.y - math.cos(r) * height / 2. + math.sin(r) * width / 2.
dc.DrawRotatedText(self.text, x + offset, y + offset, angle)
#----------------------------------------------------------------------
class TickRoman(TickDecimal):
"""Holds info about a tick mark."""
def __init__(self, **kwargs):
TickDecimal.__init__(self, **kwargs)
self.text = ["I","II","III","IV","V", \
"VI","VII","VIII","IX","X", \
"XI","XII","XIII","XIV","XV", \
"XVI","XVII","XVIII","XIX","XX", \
"XXI","XXII","XXIII","XXIV","XXV", \
"XXVI","XXVII","XXVIII","XXIX","XXX", \
"XXXI","XXXII","XXXIII","XXXIV","XXXV", \
"XXXVI","XXXVII","XXXVIII","XXXIX","XL", \
"XLI","XLII","XLIII","XLIV","XLV", \
"XLVI","XLVII","XLVIII","XLIX","L", \
"LI","LII","LIII","LIV","LV", \
"LVI","LVII","LVIII","LIX","LX"][self.idx]
#----------------------------------------------------------------------
class TickBinary(TickDecimal):
"""Holds info about a tick mark."""
def __init__(self, **kwargs):
TickDecimal.__init__(self, **kwargs)
def d2b(n, b=""):
while n > 0:
b = str(n % 2) + b; n = n >> 1
return b.zfill(4)
self.text = d2b(self.idx + 1)
#----------------------------------------------------------------------
class TickHex(TickDecimal):
"""Holds info about a tick mark."""
def __init__(self, **kwargs):
TickDecimal.__init__(self, **kwargs)
self.text = hex(self.idx + 1)[2:].upper()
#----------------------------------------------------------------------
class TickNone(Element):
"""Holds info about a tick mark."""
def __init__(self, **kwargs):
Element.__init__(self, **kwargs)
def Draw(self, dc, offset=0):
pass
#----------------------------------------------------------------------
class Dyer:
"""Stores info about colours and borders of clock Elements."""
def __init__(self, border=None, width=0, fill=None, shadow=None):
"""
self.border (wx.Colour) border colour
self.width (int) border width
self.fill (wx.Colour) fill colour
self.shadow (wx.Colour) shadow colour
"""
self.border = border or \
wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)
self.fill = fill or \
wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)
self.shadow = shadow or \
wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW)
self.width = width
def Select(self, dc, shadow=False):
"""Selects the current settings into the dc."""
if not shadow:
dc.SetPen(wx.Pen(self.border, self.width, wx.SOLID))
dc.SetBrush(wx.Brush(self.fill, wx.SOLID))
dc.SetTextForeground(self.fill)
else:
dc.SetPen(wx.Pen(self.shadow, self.width, wx.SOLID))
dc.SetBrush(wx.Brush(self.shadow, wx.SOLID))
dc.SetTextForeground(self.shadow)
def GetFillColour(self):
return self.fill
def GetBorderColour(self):
return self.border
def GetBorderWidth(self):
return self.width
def GetShadowColour(self):
return self.shadow
def SetFillColour(self, colour):
self.fill = colour
def SetBorderColour(self, colour):
self.border = colour
def SetBorderWidth(self, width):
self.width = width
def SetShadowColour(self, colour):
self.shadow = colour
#----------------------------------------------------------------------
class HandSet:
"""Manages the set of hands."""
def __init__(self, parent, h, m, s):
self.parent = parent
self.hands = [h, m, s]
self.radius = 1
self.centre = wx.Point(1, 1)
def _draw(self, dc, shadow=False):
ends = [int(x) for x in strftime("%I %M %S", localtime()).split()]
flags = [self.parent.clockStyle & flag \
for flag in self.parent.allHandStyles]
a_hand = self.hands[0]
if shadow:
offset = self.parent.shadowOffset * a_hand.GetScale()
else:
offset = 0
for i, hand in enumerate(self.hands):
# Is this hand supposed to be drawn?
if flags[i]:
idx = ends[i]
# Is this the hours hand?
if i == 0:
idx = idx * 5 + ends[1] / 12 - 1
# else prevent exceptions on leap seconds
elif idx <= 0 or idx > 60:
idx = 59
# and adjust idx offset for minutes and non-leap seconds
else:
idx = idx - 1
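                # Worked example (illustrative comment only): at 03:24 the hours
                # hand gets idx = 3 * 5 + 24 / 12 - 1 = 16, i.e. the same dial
                # position as minute 17, two fifths of the way from the 3 to the 4.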
angle = math.radians(180 - 6 * (idx + 1))
hand.dyer.Select(dc, shadow)
hand.Draw(dc, (self.radius, self.centre, angle), offset)
def Draw(self, dc):
if self.parent.clockStyle & SHOW_SHADOWS:
self._draw(dc, True)
self._draw(dc)
def RecalcCoords(self, clocksize, centre, scale):
self.centre = centre
[hand.RecalcCoords(clocksize, centre, scale) for hand in self.hands]
def SetMaxRadius(self, radius):
self.radius = radius
def GetSize(self, target):
r = []
for i, hand in enumerate(self.hands):
if _targets[i] & target:
r.append(hand.GetSize())
return tuple(r)
def GetFillColour(self, target):
r = []
for i, hand in enumerate(self.hands):
if _targets[i] & target:
r.append(hand.GetFillColour())
return tuple(r)
def GetBorderColour(self, target):
r = []
for i, hand in enumerate(self.hands):
if _targets[i] & target:
r.append(hand.GetBorderColour())
return tuple(r)
def GetBorderWidth(self, target):
r = []
for i, hand in enumerate(self.hands):
if _targets[i] & target:
r.append(hand.GetBorderWidth())
return tuple(r)
    def GetShadowColour(self):
        # All hands share the same shadow colour (see SetShadowColour), so no
        # target selection is needed here.
        r = []
        for hand in self.hands:
            r.append(hand.GetShadowColour())
        return tuple(r)
def SetSize(self, size, target):
for i, hand in enumerate(self.hands):
if _targets[i] & target:
hand.SetSize(size)
def SetFillColour(self, colour, target):
for i, hand in enumerate(self.hands):
if _targets[i] & target:
hand.SetFillColour(colour)
def SetBorderColour(self, colour, target):
for i, hand in enumerate(self.hands):
if _targets[i] & target:
hand.SetBorderColour(colour)
def SetBorderWidth(self, width, target):
for i, hand in enumerate(self.hands):
if _targets[i] & target:
hand.SetBorderWidth(width)
def SetShadowColour(self, colour):
for i, hand in enumerate(self.hands):
hand.SetShadowColour(colour)
#----------------------------------------------------------------------
class TickSet:
"""Manages a set of tick marks."""
def __init__(self, parent, **kwargs):
self.parent = parent
self.dyer = Dyer()
self.noe = {"minutes": 60, "hours": 12}[kwargs["kind"]]
self.font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
style = kwargs.pop("style")
self.kwargs = kwargs
self.SetStyle(style)
def _draw(self, dc, shadow=False):
dc.SetFont(self.font)
a_tick = self.ticks[0]
if shadow:
offset = self.parent.shadowOffset * a_tick.GetScale()
else:
offset = 0
clockStyle = self.parent.clockStyle
for idx, tick in self.ticks.items():
draw = False
# Are we a set of hours?
if self.noe == 12:
# Should we show all hours ticks?
if clockStyle & SHOW_HOURS_TICKS:
draw = True
# Or is this tick a quarter and should we show only quarters?
elif clockStyle & SHOW_QUARTERS_TICKS and not (idx + 1) % 3.:
draw = True
# Are we a set of minutes and minutes should be shown?
elif self.noe == 60 and clockStyle & SHOW_MINUTES_TICKS:
# If this tick occupies the same position of an hour/quarter
# tick, should we still draw it anyway?
if clockStyle & OVERLAP_TICKS:
draw = True
# Right, sir. I promise I won't overlap any tick.
else:
# Ensure that this tick won't overlap an hour tick.
if clockStyle & SHOW_HOURS_TICKS:
if (idx + 1) % 5.:
draw = True
# Ensure that this tick won't overlap a quarter tick.
elif clockStyle & SHOW_QUARTERS_TICKS:
if (idx + 1) % 15.:
draw = True
# We're not drawing quarters nor hours, so we can draw all
# minutes ticks.
else:
draw = True
if draw:
tick.Draw(dc, offset)
def Draw(self, dc):
if self.parent.clockStyle & SHOW_SHADOWS:
self.dyer.Select(dc, True)
self._draw(dc, True)
self.dyer.Select(dc)
self._draw(dc)
def RecalcCoords(self, clocksize, centre, scale):
a_tick = self.ticks[0]
size = a_tick.GetMaxSize(scale)
maxsize = size
# Try to find a 'good' max size for text-based ticks.
if a_tick.text is not None:
self.font.SetPointSize(size)
dc = wx.MemoryDC()
dc.SelectObject(wx.EmptyBitmap(*clocksize.Get()))
dc.SetFont(self.font)
maxsize = size
for tick in self.ticks.values():
maxsize = max(*(dc.GetTextExtent(tick.text) + (maxsize,)))
radius = self.radius = min(clocksize.Get()) / 2. - \
self.dyer.width / 2. - \
maxsize / 2. - \
a_tick.GetOffset() * scale - \
self.parent.shadowOffset * scale
# If we are a set of hours, the number of elements of this tickset is
# 12 and ticks are separated by a distance of 30 degrees;
# if we are a set of minutes, the number of elements of this tickset is
# 60 and ticks are separated by a distance of 6 degrees.
angfac = [6, 30][self.noe == 12]
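        # For instance (illustrative): with hour ticks, the i = 2 tick gets
        # deg = 180 - 30 * 3 = 90 below, which places it at
        # (centre.x + radius, centre.y) -- the 3 o'clock position on the dial.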
for i, tick in self.ticks.items():
tick.SetClockSize(clocksize)
tick.SetScale(scale)
deg = 180 - angfac * (i + 1)
angle = math.radians(deg)
x = centre.x + radius * math.sin(angle)
y = centre.y + radius * math.cos(angle)
tick.SetPosition(wx.Point(x, y))
def GetSize(self):
return self.kwargs["size"]
def GetFillColour(self):
return self.dyer.GetFillColour()
def GetBorderColour(self):
return self.dyer.GetBorderColour()
def GetBorderWidth(self):
return self.dyer.GetBorderWidth()
def GetPolygon(self):
        a_tick = self.ticks[0]
return a_tick.GetPolygon()
def GetFont(self):
return self.font
def GetOffset(self):
a_tick = self.ticks[0]
return a_tick.GetOffset()
def GetShadowColour(self):
return self.dyer.GetShadowColour()
def GetIsRotated(self):
a_tick = self.ticks[0]
return a_tick.GetIsRotated()
def GetStyle(self):
return self.style
def SetSize(self, size):
self.kwargs["size"] = size
[tick.SetSize(size) for tick in self.ticks.values()]
def SetFillColour(self, colour):
self.dyer.SetFillColour(colour)
def SetBorderColour(self, colour):
self.dyer.SetBorderColour(colour)
def SetBorderWidth(self, width):
self.dyer.SetBorderWidth(width)
def SetPolygon(self, polygon):
[tick.SetPolygon(polygon) for tick in self.ticks.values()]
def SetFont(self, font):
self.font = font
def SetOffset(self, offset):
self.kwargs["offset"] = offset
[tick.SetOffset(offset) for tick in self.ticks.values()]
def SetShadowColour(self, colour):
self.dyer.SetShadowColour(colour)
def SetIsRotated(self, rotate):
self.kwargs["rotate"] = rotate
[tick.SetIsRotated(rotate) for tick in self.ticks.values()]
def SetStyle(self, style):
self.style = style
tickclass = allTickStyles[style]
self.kwargs["rotate"] = self.parent.clockStyle & ROTATE_TICKS
self.ticks = {}
for i in range(self.noe):
self.kwargs["idx"] = i
self.ticks[i] = tickclass(**self.kwargs)
#----------------------------------------------------------------------
class Box:
"""Gathers info about the clock face and tick sets."""
def __init__(self, parent, Face, TicksM, TicksH):
self.parent = parent
self.Face = Face
self.TicksH = TicksH
self.TicksM = TicksM
def GetNiceRadiusForHands(self, centre):
a_tick = self.TicksM.ticks[0]
scale = a_tick.GetScale()
bw = max(self.TicksH.dyer.width / 2. * scale,
self.TicksM.dyer.width / 2. * scale)
mgt = self.TicksM.ticks[59]
my = mgt.pos.y + mgt.GetMaxSize(scale) + bw
hgt = self.TicksH.ticks[11]
hy = hgt.pos.y + hgt.GetMaxSize(scale) + bw
niceradius = centre.y - max(my, hy)
return niceradius
def Draw(self, dc):
[getattr(self, attr).Draw(dc) \
for attr in ["Face", "TicksM", "TicksH"]]
def RecalcCoords(self, size, centre, scale):
[getattr(self, attr).RecalcCoords(size, centre, scale) \
for attr in ["Face", "TicksH", "TicksM"]]
def GetTickSize(self, target):
r = []
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
r.append(tick.GetSize())
return tuple(r)
def GetTickFillColour(self, target):
r = []
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
r.append(tick.GetFillColour())
return tuple(r)
def GetTickBorderColour(self, target):
r = []
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
r.append(tick.GetBorderColour())
return tuple(r)
def GetTickBorderWidth(self, target):
r = []
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
r.append(tick.GetBorderWidth())
return tuple(r)
def GetTickPolygon(self, target):
r = []
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
r.append(tick.GetPolygon())
return tuple(r)
def GetTickFont(self, target):
r = []
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
r.append(tick.GetFont())
return tuple(r)
def GetIsRotated(self):
a_tickset = self.TicksH
return a_tickset.GetIsRotated()
def GetTickOffset(self, target):
r = []
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
r.append(tick.GetOffset())
return tuple(r)
def GetShadowColour(self):
a_tickset = self.TicksH
return a_tickset.GetShadowColour()
def GetTickStyle(self, target):
r = []
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
r.append(tick.GetStyle())
return tuple(r)
def SetTickSize(self, size, target):
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
tick.SetSize(size)
def SetTickFillColour(self, colour, target):
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
tick.SetFillColour(colour)
def SetTickBorderColour(self, colour, target):
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
tick.SetBorderColour(colour)
def SetTickBorderWidth(self, width, target):
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
tick.SetBorderWidth(width)
def SetTickPolygon(self, polygon, target):
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
tick.SetPolygon(polygon)
def SetTickFont(self, font, target):
fs = font.GetNativeFontInfoDesc()
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
tick.SetFont(wx.FontFromNativeInfoString(fs))
def SetIsRotated(self, rotate):
[getattr(self, attr).SetIsRotated(rotate) \
for attr in ["TicksH", "TicksM"]]
def SetTickOffset(self, offset, target):
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
tick.SetOffset(offset)
def SetShadowColour(self, colour):
for attr in ["TicksH", "TicksM"]:
tick = getattr(self, attr)
tick.SetShadowColour(colour)
def SetTickStyle(self, style, target):
for i, attr in enumerate(["TicksH", "TicksM"]):
if _targets[i] & target:
tick = getattr(self, attr)
tick.SetStyle(style)
#----------------------------------------------------------------------
# Relationship between styles and ticks class names.
allTickStyles = {TICKS_BINARY: TickBinary,
TICKS_CIRCLE: TickCircle,
TICKS_DECIMAL: TickDecimal,
TICKS_HEX: TickHex,
TICKS_NONE: TickNone,
TICKS_POLY: TickPoly,
TICKS_ROMAN: TickRoman,
TICKS_SQUARE: TickSquare}
#
##
### eof
| gpl-3.0 | -2,115,285,473,991,342,800 | 26.676142 | 79 | 0.512527 | false |
jakevdp/megaman | megaman/embedding/tests/test_isomap.py | 4 | 2327 | # LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import sys
import numpy as np
import scipy as sp
import scipy.sparse as sparse
from scipy.spatial.distance import squareform, pdist
from itertools import product
from sklearn import manifold, datasets
from sklearn.neighbors import NearestNeighbors
from numpy.testing import assert_array_almost_equal
import megaman.embedding.isomap as iso
import megaman.geometry.geometry as geom
from megaman.utils.eigendecomp import EIGEN_SOLVERS
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
        each column"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
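# For example (illustrative): columns [1, 2] and [-1, -2] differ only by a
# sign flip, so the check above treats them as equal within the tolerance.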
def test_isomap_with_sklearn():
N = 10
X, color = datasets.samples_generator.make_s_curve(N, random_state=0)
n_components = 2
n_neighbors = 3
knn = NearestNeighbors(n_neighbors + 1).fit(X)
    # Assign the geometry matrix to get the same answer since sklearn uses k-neighbors instead of radius-neighbors
g = geom.Geometry(X)
g.set_adjacency_matrix(knn.kneighbors_graph(X, mode = 'distance'))
# test Isomap with sklearn
sk_Y_iso = manifold.Isomap(n_neighbors, n_components, eigen_solver = 'arpack').fit_transform(X)
mm_Y_iso = iso.isomap(g, n_components)
assert(_check_with_col_sign_flipping(sk_Y_iso, mm_Y_iso, 0.05))
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
radius = 10
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = squareform(pdist(X))
g = geom.Geometry(adjacency_kwds = {'radius':radius})
for eigen_solver in EIGEN_SOLVERS:
clf = iso.Isomap(n_components = 2, eigen_solver = eigen_solver, geom=g)
clf.fit(X)
G_iso = squareform(pdist(clf.embedding_))
assert_array_almost_equal(G, G_iso)
| bsd-2-clause | 5,677,321,358,824,208,000 | 37.783333 | 115 | 0.658788 | false |
armab/st2contrib | packs/orion/tests/test_action_node_create.py | 1 | 4998 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import MagicMock
from orion_base_action_test_case import OrionBaseActionTestCase
from node_create import NodeCreate
from lib.utils import is_ip
__all__ = [
'NodeCreateTestCase'
]
class NodeCreateTestCase(OrionBaseActionTestCase):
__test__ = True
action_cls = NodeCreate
def test_run_is_ip_v4(self):
self.assertTrue(is_ip("172.16.0.1"))
self.assertTrue(is_ip("1762:0:0:0:0:B03:1:AF18"))
self.assertFalse(is_ip("172.16.0.300"))
self.assertFalse(is_ip("1762:%:0:0:0:B03:1:AF18"))
self.assertFalse(is_ip("server.example.com"))
self.assertFalse(is_ip("router1"))
self.assertFalse(is_ip("router:8080"))
def test_run_connect_fail(self):
action = self.setup_connect_fail()
self.assertRaises(ValueError,
action.run,
"router1",
"192.168.0.1",
"orion",
None,
"snmpv2",
"internal",
None,
"snmp")
def test_run_node_caption_exists(self):
action = self.setup_node_exists()
self.assertRaises(ValueError,
action.run,
"router1",
"192.168.0.1",
"orion",
None,
"snmpv2",
"internal",
None,
"snmp")
def test_run_node_ip_exists(self):
query_data = []
query_data.append(self.query_no_results)
query_data.append(self.query_npm_node)
query_data.append(self.query_ncm_node)
action = self.get_action_instance(config=self.full_config)
action.connect = MagicMock(return_value=True)
action.query = MagicMock(side_effect=query_data)
action.invoke = MagicMock(return_value=None)
action.create = MagicMock(return_value=None)
self.assertRaises(ValueError,
action.run,
"router2",
"192.168.0.1",
"orion",
None,
"snmpv2",
"internal",
None,
"snmp")
def test_run_poller_is_none(self):
expected = {'node_id': '6', 'platform': 'orion'}
query_data = self.query_no_results
action = self.get_action_instance(config=self.full_config)
action.connect = MagicMock(return_value=True)
action.query = MagicMock(return_value=query_data)
action.invoke = MagicMock(return_value=None)
action.get_engine_id = MagicMock(return_value=2)
action.create = MagicMock(
return_value="swis://orionr/Orion/Orion.Nodes/NodeID=6")
result = action.run("router2",
"192.168.0.1",
"orion",
None,
"snmpv2",
"internal",
None,
"snmp")
self.assertEqual(result, expected)
def test_run_node_additonal_poller(self):
expected = {'node_id': '6', 'platform': 'orion'}
query_data = [self.query_no_results,
self.query_no_results,
{'results': [{'EngineID': 2}]}]
action = self.get_action_instance(config=self.full_config)
action.connect = MagicMock(return_value=True)
action.query = MagicMock(side_effect=query_data)
action.invoke = MagicMock(return_value=None)
action.create = MagicMock(
return_value="swis://orionr/Orion/Orion.Nodes/NodeID=6")
result = action.run("router2",
"192.168.0.1",
"orion",
"additonal1",
"snmpv2",
"internal",
None,
"snmp")
self.assertEqual(result, expected)
| apache-2.0 | 6,106,616,948,749,239,000 | 35.217391 | 74 | 0.516607 | false |
PatidarWeb/poedit | deps/boost/tools/regression/xsl_reports/boost_wide_report.py | 29 | 29776 |
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import shutil
import codecs
import xml.sax.handler
import xml.sax.saxutils
import glob
import re
import os.path
import os
import string
import time
import sys
import ftplib
import utils
report_types = [ 'us', 'ds', 'ud', 'dd', 'l', 'p', 'i', 'n', 'ddr', 'dsr', 'udr', 'usr' ]
if __name__ == '__main__':
run_dir = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
else:
run_dir = os.path.abspath( os.path.dirname( sys.modules[ __name__ ].__file__ ) )
def map_path( path ):
return os.path.join( run_dir, path )
def xsl_path( xsl_file_name ):
return map_path( os.path.join( 'xsl/v2', xsl_file_name ) )
class file_info:
def __init__( self, file_name, file_size, file_date ):
self.name = file_name
self.size = file_size
self.date = file_date
def __repr__( self ):
return "name: %s, size: %s, date %s" % ( self.name, self.size, self.date )
#
# Find the mod time from unix format directory listing line
#
def get_date( words ):
date = words[ 5: -1 ]
t = time.localtime()
month_names = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ]
    year = time.localtime()[0] # If the year is not specified it is the current year
month = month_names.index( date[0] ) + 1
day = int( date[1] )
hours = 0
minutes = 0
if date[2].find( ":" ) != -1:
( hours, minutes ) = [ int(x) for x in date[2].split( ":" ) ]
else:
# there is no way to get seconds for not current year dates
year = int( date[2] )
return ( year, month, day, hours, minutes, 0, 0, 0, 0 )
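# Illustration (hypothetical listing line, not taken from a real server):
#   "-rw-r--r-- 1 ftp ftp 1234 Mar 3 12:45 results.zip"
# splits so that the date fields are ["Mar", "3", "12:45"] and get_date()
# returns (current_year, 3, 3, 12, 45, 0, 0, 0, 0).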
def list_ftp( f ):
# f is an ftp object
utils.log( "listing source content" )
lines = []
# 1. get all lines
f.dir( lambda x: lines.append( x ) )
# 2. split lines into words
word_lines = [ x.split( None, 8 ) for x in lines ]
# we don't need directories
result = [ file_info( l[-1], None, get_date( l ) ) for l in word_lines if l[0][0] != "d" ]
for f in result:
utils.log( " %s" % f )
return result
def list_dir( dir ):
utils.log( "listing destination content %s" % dir )
result = []
for file_path in glob.glob( os.path.join( dir, "*.zip" ) ):
if os.path.isfile( file_path ):
mod_time = time.localtime( os.path.getmtime( file_path ) )
mod_time = ( mod_time[0], mod_time[1], mod_time[2], mod_time[3], mod_time[4], mod_time[5], 0, 0, mod_time[8] )
# no size (for now)
result.append( file_info( os.path.basename( file_path ), None, mod_time ) )
for fi in result:
utils.log( " %s" % fi )
return result
def find_by_name( d, name ):
for dd in d:
if dd.name == name:
return dd
return None
def diff( source_dir_content, destination_dir_content ):
utils.log( "Finding updated files" )
result = ( [], [] ) # ( changed_files, obsolete_files )
for source_file in source_dir_content:
found = find_by_name( destination_dir_content, source_file.name )
if found is None: result[0].append( source_file.name )
elif time.mktime( found.date ) != time.mktime( source_file.date ): result[0].append( source_file.name )
else:
pass
for destination_file in destination_dir_content:
found = find_by_name( source_dir_content, destination_file.name )
if found is None: result[1].append( destination_file.name )
utils.log( " Updated files:" )
for f in result[0]:
utils.log( " %s" % f )
utils.log( " Obsolete files:" )
for f in result[1]:
utils.log( " %s" % f )
return result
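# Example (hypothetical): if the FTP site holds a.zip (newer) and b.zip while
# the local directory holds a.zip (older) and c.zip, diff() returns
# ( [ 'a.zip', 'b.zip' ], [ 'c.zip' ] ): both zips are (re)downloaded and
# c.zip is deleted.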
def _modtime_timestamp( file ):
return os.stat( file ).st_mtime
root_paths = []
def shorten( file_path ):
root_paths.sort( lambda x, y: cmp( len(y ), len( x ) ) )
for root in root_paths:
if file_path.lower().startswith( root.lower() ):
return file_path[ len( root ): ].replace( "\\", "/" )
return file_path.replace( "\\", "/" )
class action:
def __init__( self, file_path ):
self.file_path_ = file_path
self.relevant_paths_ = [ self.file_path_ ]
self.boost_paths_ = []
self.dependencies_ = []
self.other_results_ = []
def run( self ):
utils.log( "%s: run" % shorten( self.file_path_ ) )
__log__ = 2
for dependency in self.dependencies_:
if not os.path.exists( dependency ):
utils.log( "%s doesn't exists, removing target" % shorten( dependency ) )
self.clean()
return
if not os.path.exists( self.file_path_ ):
utils.log( "target doesn't exists, building" )
self.update()
return
dst_timestamp = _modtime_timestamp( self.file_path_ )
utils.log( " target: %s [%s]" % ( shorten( self.file_path_ ), dst_timestamp ) )
needs_updating = 0
utils.log( " dependencies:" )
for dependency in self.dependencies_:
dm = _modtime_timestamp( dependency )
update_mark = ""
if dm > dst_timestamp:
needs_updating = 1
utils.log( ' %s [%s] %s' % ( shorten( dependency ), dm, update_mark ) )
if needs_updating:
utils.log( "target needs updating, rebuilding" )
self.update()
return
else:
utils.log( "target is up-to-date" )
def clean( self ):
to_unlink = self.other_results_ + [ self.file_path_ ]
for result in to_unlink:
utils.log( ' Deleting obsolete "%s"' % shorten( result ) )
if os.path.exists( result ):
os.unlink( result )
class merge_xml_action( action ):
def __init__( self, source, destination, expected_results_file, failures_markup_file, tag ):
action.__init__( self, destination )
self.source_ = source
self.destination_ = destination
self.tag_ = tag
self.expected_results_file_ = expected_results_file
self.failures_markup_file_ = failures_markup_file
self.dependencies_.extend( [
self.source_
, self.expected_results_file_
, self.failures_markup_file_
]
)
self.relevant_paths_.extend( [ self.source_ ] )
self.boost_paths_.extend( [ self.expected_results_file_, self.failures_markup_file_ ] )
def update( self ):
def filter_xml( src, dest ):
class xmlgen( xml.sax.saxutils.XMLGenerator ):
def __init__( self, writer ):
xml.sax.saxutils.XMLGenerator.__init__( self, writer )
self.trimmed = 0
self.character_content = ""
def startElement( self, name, attrs):
self.flush()
xml.sax.saxutils.XMLGenerator.startElement( self, name, attrs )
def endElement( self, name ):
self.flush()
xml.sax.saxutils.XMLGenerator.endElement( self, name )
def flush( self ):
content = self.character_content
self.character_content = ""
self.trimmed = 0
xml.sax.saxutils.XMLGenerator.characters( self, content )
def characters( self, content ):
if not self.trimmed:
max_size = pow( 2, 16 )
self.character_content += content
if len( self.character_content ) > max_size:
self.character_content = self.character_content[ : max_size ] + "...\n\n[The content has been trimmed by the report system because it exceeds %d bytes]" % max_size
self.trimmed = 1
o = open( dest, "w" )
try:
gen = xmlgen( o )
xml.sax.parse( src, gen )
finally:
o.close()
return dest
utils.log( 'Merging "%s" with expected results...' % shorten( self.source_ ) )
try:
trimmed_source = filter_xml( self.source_, '%s-trimmed.xml' % os.path.splitext( self.source_ )[0] )
utils.libxslt(
utils.log
, trimmed_source
, xsl_path( 'add_expected_results.xsl' )
, self.file_path_
, {
"expected_results_file" : self.expected_results_file_
, "failures_markup_file": self.failures_markup_file_
, "source" : self.tag_
}
)
os.unlink( trimmed_source )
except Exception, msg:
utils.log( ' Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
if os.path.exists( self.file_path_ ):
os.unlink( self.file_path_ )
def _xml_timestamp( xml_path ):
class timestamp_reader( xml.sax.handler.ContentHandler ):
def startElement( self, name, attrs ):
if name == 'test-run':
self.timestamp = attrs.getValue( 'timestamp' )
raise self
try:
xml.sax.parse( xml_path, timestamp_reader() )
        raise Exception( 'Cannot extract timestamp from "%s". Invalid XML file format?' % xml_path )
except timestamp_reader, x:
return x.timestamp
class make_links_action( action ):
def __init__( self, source, destination, output_dir, tag, run_date, comment_file, failures_markup_file ):
action.__init__( self, destination )
self.dependencies_.append( source )
self.source_ = source
self.output_dir_ = output_dir
self.tag_ = tag
self.run_date_ = run_date
self.comment_file_ = comment_file
self.failures_markup_file_ = failures_markup_file
self.links_file_path_ = os.path.join( output_dir, 'links.html' )
def update( self ):
utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "output" ) )
utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "developer", "output" ) )
utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "user", "output" ) )
utils.log( ' Making test output files...' )
try:
utils.libxslt(
utils.log
, self.source_
, xsl_path( 'links_page.xsl' )
, self.links_file_path_
, {
'source': self.tag_
, 'run_date': self.run_date_
, 'comment_file': self.comment_file_
, 'explicit_markup_file': self.failures_markup_file_
}
)
except Exception, msg:
utils.log( ' Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
open( self.file_path_, "w" ).close()
class unzip_action( action ):
def __init__( self, source, destination, unzip_func ):
action.__init__( self, destination )
self.dependencies_.append( source )
self.source_ = source
self.unzip_func_ = unzip_func
def update( self ):
try:
utils.log( ' Unzipping "%s" ... into "%s"' % ( shorten( self.source_ ), os.path.dirname( self.file_path_ ) ) )
self.unzip_func_( self.source_, os.path.dirname( self.file_path_ ) )
except Exception, msg:
utils.log( ' Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
def ftp_task( site, site_path , destination ):
__log__ = 1
utils.log( '' )
utils.log( 'ftp_task: "ftp://%s/%s" -> %s' % ( site, site_path, destination ) )
utils.log( ' logging on ftp site %s' % site )
f = ftplib.FTP( site )
f.login()
utils.log( ' cwd to "%s"' % site_path )
f.cwd( site_path )
source_content = list_ftp( f )
source_content = [ x for x in source_content if re.match( r'.+[.](?<!log[.])zip', x.name ) and x.name.lower() != 'boostbook.zip' ]
destination_content = list_dir( destination )
d = diff( source_content, destination_content )
def synchronize():
for source in d[0]:
utils.log( 'Copying "%s"' % source )
result = open( os.path.join( destination, source ), 'wb' )
f.retrbinary( 'RETR %s' % source, result.write )
result.close()
mod_date = find_by_name( source_content, source ).date
m = time.mktime( mod_date )
os.utime( os.path.join( destination, source ), ( m, m ) )
for obsolete in d[1]:
utils.log( 'Deleting "%s"' % obsolete )
os.unlink( os.path.join( destination, obsolete ) )
utils.log( " Synchronizing..." )
__log__ = 2
synchronize()
f.quit()
def unzip_archives_task( source_dir, processed_dir, unzip_func ):
utils.log( '' )
utils.log( 'unzip_archives_task: unpacking updated archives in "%s" into "%s"...' % ( source_dir, processed_dir ) )
__log__ = 1
target_files = [ os.path.join( processed_dir, os.path.basename( x.replace( ".zip", ".xml" ) ) ) for x in glob.glob( os.path.join( source_dir, "*.zip" ) ) ] + glob.glob( os.path.join( processed_dir, "*.xml" ) )
actions = [ unzip_action( os.path.join( source_dir, os.path.basename( x.replace( ".xml", ".zip" ) ) ), x, unzip_func ) for x in target_files ]
for a in actions:
a.run()
def merge_xmls_task( source_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file, tag ):
utils.log( '' )
utils.log( 'merge_xmls_task: merging updated XMLs in "%s"...' % source_dir )
__log__ = 1
utils.makedirs( merged_dir )
target_files = [ os.path.join( merged_dir, os.path.basename( x ) ) for x in glob.glob( os.path.join( processed_dir, "*.xml" ) ) ] + glob.glob( os.path.join( merged_dir, "*.xml" ) )
actions = [ merge_xml_action( os.path.join( processed_dir, os.path.basename( x ) )
, x
, expected_results_file
, failures_markup_file
, tag ) for x in target_files ]
for a in actions:
a.run()
def make_links_task( input_dir, output_dir, tag, run_date, comment_file, extended_test_results, failures_markup_file ):
utils.log( '' )
utils.log( 'make_links_task: make output files for test results in "%s"...' % input_dir )
__log__ = 1
target_files = [ x + ".links" for x in glob.glob( os.path.join( input_dir, "*.xml" ) ) ] + glob.glob( os.path.join( input_dir, "*.links" ) )
actions = [ make_links_action( x.replace( ".links", "" )
, x
, output_dir
, tag
, run_date
, comment_file
, failures_markup_file
) for x in target_files ]
for a in actions:
a.run()
class xmlgen( xml.sax.saxutils.XMLGenerator ):
document_started = 0
def startDocument( self ):
if not self.document_started:
xml.sax.saxutils.XMLGenerator.startDocument( self )
self.document_started = 1
def merge_processed_test_runs( test_runs_dir, tag, writer ):
utils.log( '' )
utils.log( 'merge_processed_test_runs: merging processed test runs from %s into a single XML...' % test_runs_dir )
__log__ = 1
all_runs_xml = xmlgen( writer, encoding='utf-8' )
all_runs_xml.startDocument()
all_runs_xml.startElement( 'all-test-runs', {} )
files = glob.glob( os.path.join( test_runs_dir, '*.xml' ) )
for test_run in files:
#file_pos = writer.stream.tell()
file_pos = writer.tell()
try:
utils.log( ' Writing "%s" into the resulting XML...' % test_run )
xml.sax.parse( test_run, all_runs_xml )
except Exception, msg:
utils.log( ' Skipping "%s" due to errors (%s)' % ( test_run, msg ) )
#writer.stream.seek( file_pos )
#writer.stream.truncate()
writer.seek( file_pos )
writer.truncate()
all_runs_xml.endElement( 'all-test-runs' )
all_runs_xml.endDocument()
def execute_tasks(
tag
, user
, run_date
, comment_file
, results_dir
, output_dir
, reports
, warnings
, extended_test_results
, dont_collect_logs
, expected_results_file
, failures_markup_file
):
incoming_dir = os.path.join( results_dir, 'incoming', tag )
processed_dir = os.path.join( incoming_dir, 'processed' )
merged_dir = os.path.join( processed_dir, 'merged' )
if not os.path.exists( incoming_dir ):
os.makedirs( incoming_dir )
if not os.path.exists( processed_dir ):
os.makedirs( processed_dir )
if not os.path.exists( merged_dir ):
os.makedirs( merged_dir )
if not dont_collect_logs:
ftp_site = 'boost.cowic.de'
site_path = '/boost/do-not-publish-this-url/results/%s' % tag
ftp_task( ftp_site, site_path, incoming_dir )
unzip_archives_task( incoming_dir, processed_dir, utils.unzip )
merge_xmls_task( incoming_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file, tag )
make_links_task( merged_dir
, output_dir
, tag
, run_date
, comment_file
, extended_test_results
, failures_markup_file )
results_xml_path = os.path.join( output_dir, 'extended_test_results.xml' )
#writer = codecs.open( results_xml_path, 'w', 'utf-8' )
writer = open( results_xml_path, 'w' )
merge_processed_test_runs( merged_dir, tag, writer )
writer.close()
make_result_pages(
extended_test_results
, expected_results_file
, failures_markup_file
, tag
, run_date
, comment_file
, output_dir
, reports
, warnings
)
def make_result_pages(
extended_test_results
, expected_results_file
, failures_markup_file
, tag
, run_date
, comment_file
, output_dir
, reports
, warnings
):
utils.log( 'Producing the reports...' )
__log__ = 1
warnings_text = '+'.join( warnings )
if comment_file != '':
comment_file = os.path.abspath( comment_file )
links = os.path.join( output_dir, 'links.html' )
utils.makedirs( os.path.join( output_dir, 'output' ) )
for mode in ( 'developer', 'user' ):
utils.makedirs( os.path.join( output_dir, mode , 'output' ) )
issues = os.path.join( output_dir, 'developer', 'issues.html' )
if 'i' in reports:
utils.log( ' Making issues list...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'issues_page.xsl' )
, issues
, {
'source': tag
, 'run_date': run_date
, 'warnings': warnings_text
, 'comment_file': comment_file
, 'expected_results_file': expected_results_file
, 'explicit_markup_file': failures_markup_file
, 'release': "yes"
}
)
for mode in ( 'developer', 'user' ):
if mode[0] + 'd' in reports:
utils.log( ' Making detailed %s report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'result_page.xsl' )
, os.path.join( output_dir, mode, 'index.html' )
, {
'links_file': 'links.html'
, 'mode': mode
, 'source': tag
, 'run_date': run_date
, 'warnings': warnings_text
, 'comment_file': comment_file
, 'expected_results_file': expected_results_file
, 'explicit_markup_file' : failures_markup_file
}
)
for mode in ( 'developer', 'user' ):
if mode[0] + 's' in reports:
utils.log( ' Making summary %s report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'summary_page.xsl' )
, os.path.join( output_dir, mode, 'summary.html' )
, {
'mode' : mode
, 'source': tag
, 'run_date': run_date
, 'warnings': warnings_text
, 'comment_file': comment_file
, 'explicit_markup_file' : failures_markup_file
}
)
for mode in ( 'developer', 'user' ):
if mode[0] + 'dr' in reports:
utils.log( ' Making detailed %s release report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'result_page.xsl' )
, os.path.join( output_dir, mode, 'index_release.html' )
, {
'links_file': 'links.html'
, 'mode': mode
, 'source': tag
, 'run_date': run_date
, 'warnings': warnings_text
, 'comment_file': comment_file
, 'expected_results_file': expected_results_file
, 'explicit_markup_file' : failures_markup_file
, 'release': "yes"
}
)
for mode in ( 'developer', 'user' ):
if mode[0] + 'sr' in reports:
utils.log( ' Making summary %s release report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'summary_page.xsl' )
, os.path.join( output_dir, mode, 'summary_release.html' )
, {
'mode' : mode
, 'source': tag
, 'run_date': run_date
, 'warnings': warnings_text
, 'comment_file': comment_file
, 'explicit_markup_file' : failures_markup_file
, 'release': 'yes'
}
)
if 'e' in reports:
utils.log( ' Generating expected_results ...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'produce_expected_results.xsl' )
, os.path.join( output_dir, 'expected_results.xml' )
)
if 'n' in reports:
utils.log( ' Making runner comment files...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'runners.xsl' )
, os.path.join( output_dir, 'runners.html' )
)
shutil.copyfile(
xsl_path( 'html/master.css' )
, os.path.join( output_dir, 'master.css' )
)
fix_file_names( output_dir )
def fix_file_names( dir ):
"""
The current version of xslproc doesn't correctly handle
spaces. We have to manually go through the
result set and decode encoded spaces (%20).
"""
utils.log( 'Fixing encoded file names...' )
for root, dirs, files in os.walk( dir ):
for file in files:
if file.find( "%20" ) > -1:
new_name = file.replace( "%20", " " )
utils.rename(
utils.log
, os.path.join( root, file )
, os.path.join( root, new_name )
)
def build_xsl_reports(
locate_root_dir
, tag
, expected_results_file
, failures_markup_file
, comment_file
, results_dir
, result_file_prefix
, dont_collect_logs = 0
, reports = report_types
, warnings = []
, user = None
, upload = False
):
( run_date ) = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )
root_paths.append( locate_root_dir )
root_paths.append( results_dir )
bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )
output_dir = os.path.join( results_dir, result_file_prefix )
utils.makedirs( output_dir )
if expected_results_file != '':
expected_results_file = os.path.abspath( expected_results_file )
else:
expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )
extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
execute_tasks(
tag
, user
, run_date
, comment_file
, results_dir
, output_dir
, reports
, warnings
, extended_test_results
, dont_collect_logs
, expected_results_file
, failures_markup_file
)
if upload:
upload_dir = 'regression-logs/'
utils.log( 'Uploading results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
archive_name = '%s.tar.gz' % result_file_prefix
utils.tar(
os.path.join( results_dir, result_file_prefix )
, archive_name
)
utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
def accept_args( args ):
args_spec = [
'locate-root='
, 'tag='
, 'expected-results='
, 'failures-markup='
, 'comment='
, 'results-dir='
, 'results-prefix='
, 'dont-collect-logs'
, 'reports='
, 'user='
, 'upload'
, 'help'
]
options = {
'--comment': ''
, '--expected-results': ''
, '--failures-markup': ''
, '--reports': string.join( report_types, ',' )
, '--tag': None
, '--user': None
, 'upload': False
}
utils.accept_args( args_spec, args, options, usage )
if not options.has_key( '--results-dir' ):
options[ '--results-dir' ] = options[ '--locate-root' ]
if not options.has_key( '--results-prefix' ):
options[ '--results-prefix' ] = 'all'
return (
options[ '--locate-root' ]
, options[ '--tag' ]
, options[ '--expected-results' ]
, options[ '--failures-markup' ]
, options[ '--comment' ]
, options[ '--results-dir' ]
, options[ '--results-prefix' ]
, options.has_key( '--dont-collect-logs' )
, options[ '--reports' ].split( ',' )
, options[ '--user' ]
, options.has_key( '--upload' )
)
def usage():
print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
print '''
\t--locate-root the same as --locate-root in compiler_status
\t--tag the tag for the results (i.e. 'trunk')
\t--expected-results the file with the results to be compared with
\t the current run
\t--failures-markup the file with the failures markup
\t--comment an html comment file (will be inserted in the reports)
\t--results-dir the directory containing -links.html, -fail.html
\t files produced by compiler_status (by default the
\t same as specified in --locate-root)
\t--results-prefix the prefix of -links.html, -fail.html
\t files produced by compiler_status
\t--user SourceForge user name for a shell account
\t--upload upload reports to SourceForge
The following options are useful in debugging:
\t--dont-collect-logs dont collect the test logs
\t--reports produce only the specified reports
\t us - user summary
\t ds - developer summary
\t ud - user detailed
\t dd - developer detailed
\t l - links
\t p - patches
\t x - extended results file
\t i - issues
\t n - runner comment files
'''
def main():
build_xsl_reports( *accept_args( sys.argv[ 1 : ] ) )
if __name__ == '__main__':
main()
| mit | 4,577,968,203,933,966,300 | 34.65988 | 214 | 0.503728 | false |
michael-lazar/praw3 | tests/test_comments.py | 1 | 11603 | """Tests for Comment class."""
from __future__ import print_function, unicode_literals
import pickle
import mock
from praw import errors, helpers
from praw.objects import Comment, MoreComments
from .helper import OAuthPRAWTest, PRAWTest, betamax
class CommentTest(PRAWTest):
def betamax_init(self):
self.r.login(self.un, self.un_pswd, disable_warning=True)
self.subreddit = self.r.get_subreddit(self.sr)
@betamax()
def test_add_comment(self):
text = 'Unique comment: {0}'.format(self.r.modhash)
submission = next(self.subreddit.get_new())
comment = submission.add_comment(text)
self.assertEqual(comment.submission, submission)
self.assertEqual(comment.body, text)
@betamax()
def test_add_reply(self):
text = 'Unique reply: {0}'.format(self.r.modhash)
submission = self.first(self.subreddit.get_new(),
lambda submission: submission.num_comments > 0)
comment = submission.comments[0]
reply = comment.reply(text)
self.assertEqual(reply.parent_id, comment.fullname)
self.assertEqual(reply.body, text)
@betamax()
def test_edit(self):
comment = next(self.r.user.get_comments())
new_body = '{0}\n\n+Edit Text'.format(comment.body)
comment = comment.edit(new_body)
self.assertEqual(comment.body, new_body)
@betamax()
def test_front_page_comment_replies_are_none(self):
item = next(self.r.get_comments('all'))
self.assertEqual(item._replies, None)
@betamax()
def test_get_comments_permalink(self):
item = next(self.subreddit.get_comments())
self.assertTrue(item.id in item.permalink)
@betamax()
def test_inbox_comment_permalink(self):
item = self.first(self.r.get_inbox(),
lambda item: isinstance(item, Comment))
self.assertTrue(item.id in item.permalink)
@betamax()
def test_inbox_comment_replies_are_none(self):
comment = self.first(self.r.get_inbox(),
lambda item: isinstance(item, Comment))
self.assertEqual(comment._replies, None)
@betamax()
def test_save_comment(self):
comment = next(self.r.user.get_comments())
comment.save()
comment.refresh()
self.assertTrue(comment.saved)
self.first(self.r.user.get_saved(), lambda x: x == comment)
comment.unsave()
comment.refresh()
self.assertFalse(comment.saved)
self.assertFalse(comment in self.r.user.get_saved(params={'u': 1}))
@betamax()
def test_spambox_comments_replies_are_none(self):
sequence = self.r.get_subreddit(self.sr).get_spam()
comment = self.first(sequence,
lambda item: isinstance(item, Comment))
self.assertEqual(comment._replies, None)
@betamax()
def test_unicode_comment(self):
sub = next(self.subreddit.get_new())
text = 'Have some unicode: (\xd0, \xdd)'
comment = sub.add_comment(text)
self.assertEqual(text, comment.body)
@betamax()
def test_user_comment_permalink(self):
item = next(self.r.user.get_comments())
self.assertTrue(item.id in item.permalink)
@betamax()
def test_user_comment_replies_are_none(self):
comment = self.first(self.r.user.get_comments(),
lambda item: isinstance(item, Comment))
self.assertEqual(comment._replies, None)
def _test_pickling(self, protocol):
comment = next(self.r.user.get_comments())
with mock.patch('praw.BaseReddit.request_json') as request_json_func:
unpickled_comment = pickle.loads(pickle.dumps(comment, protocol))
self.assertEqual(comment, unpickled_comment)
self.assertEqual(request_json_func.called, 0)
@betamax()
def test_pickling_v0(self):
self._test_pickling(0)
@betamax()
def test_pickling_v1(self):
self._test_pickling(1)
@betamax()
def test_pickling_v2(self):
self._test_pickling(2)
@betamax()
def test_distinguish_and_sticky(self):
submission = next(self.subreddit.get_new())
text = 'Distinguished and/or stickied comment'
comment = submission.add_comment(text)
comment.distinguish()
comment.refresh()
self.assertEqual(comment.distinguished, 'moderator')
self.assertFalse(comment.stickied)
comment.distinguish(sticky=True)
comment.refresh()
self.assertEqual(comment.distinguished, 'moderator')
self.assertTrue(comment.stickied)
comment.undistinguish()
comment.refresh()
self.assertIsNone(comment.distinguished)
self.assertFalse(comment.stickied)
class MoreCommentsTest(PRAWTest):
def betamax_init(self):
self.r.login(self.un, self.un_pswd, disable_warning=True)
self.submission = self.r.get_submission(url=self.more_comments_url,
comment_limit=130)
@betamax()
def test_all_comments(self):
c_len = len(self.submission.comments)
flat = helpers.flatten_tree(self.submission.comments)
continue_items = [x for x in flat if isinstance(x, MoreComments) and
x.count == 0]
self.assertTrue(continue_items)
cf_len = len(flat)
saved = self.submission.replace_more_comments(threshold=2)
ac_len = len(self.submission.comments)
flat = helpers.flatten_tree(self.submission.comments)
acf_len = len(flat)
for item in continue_items:
self.assertTrue(item.id in [x.id for x in flat])
self.assertEqual(len(self.submission._comments_by_id), acf_len)
self.assertTrue(c_len < ac_len)
self.assertTrue(c_len < cf_len)
self.assertTrue(ac_len < acf_len)
self.assertTrue(cf_len < acf_len)
self.assertTrue(saved)
@betamax()
def test_comments_method(self):
item = self.first(self.submission.comments,
lambda item: isinstance(item, MoreComments))
self.assertTrue(item.comments())
class OAuthCommentTest(OAuthPRAWTest):
@betamax()
def test_raise_invalidcomment_oauth(self):
fullname = '{0}_{1}'.format(self.r.config.by_object[Comment],
self.comment_deleted_id)
comment = self.r.get_info(thing_id=fullname)
self.r.refresh_access_information(self.refresh_token['submit'])
self.assertRaises(errors.InvalidComment, comment.reply, 'test')
invalid_comment = errors.InvalidComment()
self.assertEqual(invalid_comment.ERROR_TYPE, str(invalid_comment))
@betamax()
def test_refresh_deleted_comment(self):
subreddit = self.r.get_subreddit(self.sr)
submission = next(subreddit.get_new())
self.r.refresh_access_information(self.refresh_token['submit'])
comment = submission.add_comment("Delete this")
self.r.refresh_access_information(self.refresh_token['edit'])
comment.delete()
self.r.refresh_access_information(self.refresh_token['read'])
self.assertWarnings(RuntimeWarning, comment.refresh)
comment.refresh()
self.assertEqual(comment.submission, submission)
self.assertEqual(comment.author, None)
self.assertEqual(comment.body, '[deleted]')
@betamax()
def test_refresh_removed_comment(self):
subreddit = self.r.get_subreddit(self.sr)
submission = next(subreddit.get_new())
self.r.refresh_access_information(self.refresh_token['submit'])
comment = submission.add_comment("Remove this")
self.r.refresh_access_information(self.refresh_token['modposts'])
comment.remove()
self.r.refresh_access_information(self.other_refresh_token['read'])
self.assertWarnings(RuntimeWarning, comment.refresh)
comment.refresh()
self.assertEqual(comment.submission, submission)
self.assertEqual(comment.author, None)
self.assertEqual(comment.body, '[removed]')
self.assertEqual(comment.replies, [])
@betamax()
def test_deleted_comment_refresh_from_inbox(self):
root_url = self.reply_warnings_url
root_comment = self.r.get_submission(root_url).comments[0]
self.r.refresh_access_information(self.refresh_token['submit'])
delete_needed = root_comment.reply('To be deleted then refreshed')
other_token = self.other_refresh_token['privatemessages']
self.r.refresh_access_information(other_token)
test_refresh = next(self.r.get_unread(limit=1))
self.r.refresh_access_information(self.refresh_token['edit'])
delete_needed.delete()
self.r.refresh_access_information(self.other_refresh_token['read'])
self.assertWarningsRegexp('was_comment', RuntimeWarning,
test_refresh.refresh)
self.assertTrue(hasattr(test_refresh, 'was_comment'))
@betamax()
def test_removed_comment_refresh_from_inbox(self):
root_url = self.reply_warnings_url
root_comment = self.r.get_submission(root_url).comments[0]
self.r.refresh_access_information(self.refresh_token['submit'])
remove_needed = root_comment.reply('To be removed then refreshed')
other_token = self.other_refresh_token['privatemessages']
self.r.refresh_access_information(other_token)
test_refresh = next(self.r.get_unread(limit=1))
self.r.refresh_access_information(self.refresh_token['modposts'])
remove_needed.remove()
self.r.refresh_access_information(self.other_refresh_token['read'])
self.assertWarningsRegexp('was_comment', RuntimeWarning,
test_refresh.refresh)
self.assertTrue(hasattr(test_refresh, 'was_comment'))
@betamax()
def test_deleted_comment_replies_in_inbox(self):
root_url = self.reply_warnings_url
root_comment = self.r.get_submission(root_url).comments[0]
self.r.refresh_access_information(self.refresh_token['submit'])
delete_needed = root_comment.reply('To be deleted')
other_token = self.other_refresh_token['privatemessages']
self.r.refresh_access_information(other_token)
test_replies = next(self.r.get_unread(limit=1))
self.r.refresh_access_information(self.refresh_token['edit'])
delete_needed.delete()
self.r.refresh_access_information(self.other_refresh_token['read'])
self.assertWarnings(RuntimeWarning, lambda x: x.replies,
test_replies)
self.assertEqual(test_replies.replies, [])
@betamax()
def test_removed_comment_replies_in_inbox(self):
root_url = self.reply_warnings_url
root_comment = self.r.get_submission(root_url).comments[0]
self.r.refresh_access_information(self.refresh_token['submit'])
remove_needed = root_comment.reply('To be removed')
other_token = self.other_refresh_token['privatemessages']
self.r.refresh_access_information(other_token)
test_replies = next(self.r.get_unread(limit=1))
self.r.refresh_access_information(self.refresh_token['modposts'])
remove_needed.remove()
self.r.refresh_access_information(self.other_refresh_token['read'])
self.assertWarnings(RuntimeWarning, lambda x: x.replies,
test_replies)
self.assertEqual(test_replies.replies, [])
| gpl-3.0 | -938,182,191,784,976,600 | 40.14539 | 79 | 0.64242 | false |
darren-wang/gl | glance/api/middleware/version_negotiation.py | 6 | 3713 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A filter middleware that inspects the requested URI for a version string
and/or Accept headers and attempts to negotiate an API controller to
return
"""
from oslo_config import cfg
from oslo_log import log as logging
from glance.api import versions
from glance.common import wsgi
from glance import i18n
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LW = i18n._LW
class VersionNegotiationFilter(wsgi.Middleware):
def __init__(self, app):
self.versions_app = versions.Controller()
super(VersionNegotiationFilter, self).__init__(app)
def process_request(self, req):
"""Try to find a version first in the accept header, then the URL"""
msg = _("Determining version of request: %(method)s %(path)s"
" Accept: %(accept)s")
args = {'method': req.method, 'path': req.path, 'accept': req.accept}
LOG.debug(msg % args)
# If the request is for /versions, just return the versions container
# TODO(bcwaldon): deprecate this behavior
if req.path_info_peek() == "versions":
return self.versions_app
accept = str(req.accept)
if accept.startswith('application/vnd.openstack.images-'):
LOG.debug("Using media-type versioning")
token_loc = len('application/vnd.openstack.images-')
req_version = accept[token_loc:]
else:
LOG.debug("Using url versioning")
# Remove version in url so it doesn't conflict later
req_version = self._pop_path_info(req)
try:
version = self._match_version_string(req_version)
except ValueError:
LOG.warn(_LW("Unknown version. Returning version choices."))
return self.versions_app
req.environ['api.version'] = version
req.path_info = ''.join(('/v', str(version), req.path_info))
LOG.debug("Matched version: v%d", version)
LOG.debug('new path %s', req.path_info)
return None
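    # Illustrative walk-through (assumed request, added comment): a GET with
    # "Accept: application/vnd.openstack.images-v2.1" yields req_version
    # "v2.1", which _match_version_string() maps to major version 2, so the
    # request path is rewritten to "/v2<original path>".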
def _match_version_string(self, subject):
"""
Given a string, tries to match a major and/or
minor version number.
:param subject: The string to check
:returns version found in the subject
:raises ValueError if no acceptable version could be found
"""
if subject in ('v1', 'v1.0', 'v1.1') and CONF.enable_v1_api:
major_version = 1
elif subject in ('v2', 'v2.0', 'v2.1', 'v2.2') and CONF.enable_v2_api:
major_version = 2
else:
raise ValueError()
return major_version
def _pop_path_info(self, req):
"""
'Pops' off the next segment of PATH_INFO, returns the popped
segment. Do NOT push it onto SCRIPT_NAME.
"""
path = req.path_info
if not path:
return None
while path.startswith('/'):
path = path[1:]
idx = path.find('/')
if idx == -1:
idx = len(path)
r = path[:idx]
req.path_info = path[idx:]
return r
| apache-2.0 | 8,677,422,790,592,900,000 | 33.06422 | 78 | 0.615675 | false |
plumgrid/plumgrid-nova | nova/filters.py | 9 | 3238 | # Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Filter support
"""
from nova import loadables
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class BaseFilter(object):
"""Base class for all filter classes."""
def _filter_one(self, obj, filter_properties):
"""Return True if it passes the filter, False otherwise.
Override this in a subclass.
"""
return True
def filter_all(self, filter_obj_list, filter_properties):
"""Yield objects that pass the filter.
        Can be overridden in a subclass, if you need to base filtering
decisions on all objects. Otherwise, one can just override
_filter_one() to filter a single object.
"""
for obj in filter_obj_list:
if self._filter_one(obj, filter_properties):
yield obj
# Set to true in a subclass if a filter only needs to be run once
# for each request rather than for each instance
run_filter_once_per_request = False
def run_filter_for_index(self, index):
"""Return True if the filter needs to be run for the "index-th"
instance in a request. Only need to override this if a filter
needs anything other than "first only" or "all" behaviour.
"""
if self.run_filter_once_per_request and index > 0:
return False
else:
return True
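# A minimal illustrative subclass (hypothetical, not part of nova itself):
#
#     class EnoughRamFilter(BaseFilter):
#         def _filter_one(self, obj, filter_properties):
#             # keep hosts that advertise at least the requested RAM
#             return obj.free_ram_mb >= filter_properties.get('ram_mb', 0)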
class BaseFilterHandler(loadables.BaseLoader):
"""Base class to handle loading filter classes.
This class should be subclassed where one needs to use filters.
"""
def get_filtered_objects(self, filter_classes, objs,
filter_properties, index=0):
list_objs = list(objs)
LOG.debug(_("Starting with %d host(s)"), len(list_objs))
for filter_cls in filter_classes:
cls_name = filter_cls.__name__
filter = filter_cls()
if filter.run_filter_for_index(index):
objs = filter.filter_all(list_objs,
filter_properties)
if objs is None:
LOG.debug(_("Filter %(cls_name)s says to stop filtering"),
{'cls_name': cls_name})
return
list_objs = list(objs)
LOG.debug(_("Filter %(cls_name)s returned "
"%(obj_len)d host(s)"),
{'cls_name': cls_name, 'obj_len': len(list_objs)})
if len(list_objs) == 0:
break
return list_objs
| apache-2.0 | 3,815,857,725,473,622,500 | 35.795455 | 78 | 0.605312 | false |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/third_party/dulwich/patch.py | 7 | 10594 | # patch.py -- For dealing with packed-style patches.
# Copyright (C) 2009-2013 Jelmer Vernooij <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) a later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Classes for dealing with git am-style patches.
These patches are basically unified diffs with some extra metadata tacked
on.
"""
from difflib import SequenceMatcher
import email.parser
import time
from dulwich.objects import (
Commit,
S_ISGITLINK,
)
FIRST_FEW_BYTES = 8000
def write_commit_patch(f, commit, contents, progress, version=None, encoding=None):
"""Write a individual file patch.
:param commit: Commit object
:param progress: Tuple with current patch number and total.
:return: tuple with filename and contents
"""
encoding = encoding or getattr(f, "encoding", "ascii")
if type(contents) is str:
contents = contents.encode(encoding)
(num, total) = progress
f.write(b"From " + commit.id + b" " + time.ctime(commit.commit_time).encode(encoding) + b"\n")
f.write(b"From: " + commit.author + b"\n")
f.write(b"Date: " + time.strftime("%a, %d %b %Y %H:%M:%S %Z").encode(encoding) + b"\n")
f.write(("Subject: [PATCH %d/%d] " % (num, total)).encode(encoding) + commit.message + b"\n")
f.write(b"\n")
f.write(b"---\n")
try:
import subprocess
p = subprocess.Popen(["diffstat"], stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
except (ImportError, OSError):
pass # diffstat not available?
else:
(diffstat, _) = p.communicate(contents)
f.write(diffstat)
f.write(b"\n")
f.write(contents)
f.write(b"-- \n")
if version is None:
from dulwich import __version__ as dulwich_version
f.write(b"Dulwich %d.%d.%d\n" % dulwich_version)
else:
f.write(version.encode(encoding) + b"\n")
def get_summary(commit):
"""Determine the summary line for use in a filename.
:param commit: Commit
:return: Summary string
"""
return commit.message.splitlines()[0].replace(" ", "-")
def unified_diff(a, b, fromfile, tofile, n=3):
"""difflib.unified_diff that doesn't write any dates or trailing spaces.
Based on the same function in Python2.6.5-rc2's difflib.py
"""
started = False
for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n):
if not started:
yield b'--- ' + fromfile + b'\n'
yield b'+++ ' + tofile + b'\n'
started = True
i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
sizes = "@@ -%d,%d +%d,%d @@\n" % (i1+1, i2-i1, j1+1, j2-j1)
yield sizes.encode('ascii')
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield b' ' + line
continue
if tag == 'replace' or tag == 'delete':
for line in a[i1:i2]:
if not line[-1:] == b'\n':
line += b'\n\\ No newline at end of file\n'
yield b'-' + line
if tag == 'replace' or tag == 'insert':
for line in b[j1:j2]:
if not line[-1:] == b'\n':
line += b'\n\\ No newline at end of file\n'
yield b'+' + line
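# Usage sketch (illustrative): both inputs are lists of byte lines and the file labels
# are byte strings; the generator yields the patch one byte line at a time.
#   old = [b"hello\n"]
#   new = [b"hello world\n"]
#   patch_lines = b"".join(unified_diff(old, new, b"a/greeting.txt", b"b/greeting.txt"))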
def is_binary(content):
"""See if the first few bytes contain any null characters.
:param content: Bytestring to check for binary content
"""
return b'\0' in content[:FIRST_FEW_BYTES]
def shortid(hexsha):
if hexsha is None:
return b"0" * 7
else:
return hexsha[:7]
def patch_filename(p, root):
if p is None:
return b"/dev/null"
else:
return root + b"/" + p
def write_object_diff(f, store, old_file, new_file, diff_binary=False):
"""Write the diff for an object.
:param f: File-like object to write to
:param store: Store to retrieve objects from, if necessary
:param old_file: (path, mode, hexsha) tuple
:param new_file: (path, mode, hexsha) tuple
:param diff_binary: Whether to diff files even if they
are considered binary files by is_binary().
    :note: the tuple elements should be None for nonexistent files
"""
(old_path, old_mode, old_id) = old_file
(new_path, new_mode, new_id) = new_file
old_path = patch_filename(old_path, b"a")
new_path = patch_filename(new_path, b"b")
def content(mode, hexsha):
if hexsha is None:
return b''
elif S_ISGITLINK(mode):
return b"Submodule commit " + hexsha + b"\n"
else:
return store[hexsha].data
def lines(content):
if not content:
return []
else:
return content.splitlines(True)
f.writelines(gen_diff_header(
(old_path, new_path), (old_mode, new_mode), (old_id, new_id)))
old_content = content(old_mode, old_id)
new_content = content(new_mode, new_id)
if not diff_binary and (is_binary(old_content) or is_binary(new_content)):
f.write(b"Binary files " + old_path + b" and " + new_path + b" differ\n")
else:
f.writelines(unified_diff(lines(old_content), lines(new_content),
old_path, new_path))
# TODO(user): Support writing unicode, rather than bytes.
def gen_diff_header(paths, modes, shas):
"""Write a blob diff header.
:param paths: Tuple with old and new path
:param modes: Tuple with old and new modes
:param shas: Tuple with old and new shas
"""
(old_path, new_path) = paths
(old_mode, new_mode) = modes
(old_sha, new_sha) = shas
yield b"diff --git " + old_path + b" " + new_path + b"\n"
if old_mode != new_mode:
if new_mode is not None:
if old_mode is not None:
yield ("old mode %o\n" % old_mode).encode('ascii')
yield ("new mode %o\n" % new_mode).encode('ascii')
else:
yield ("deleted mode %o\n" % old_mode).encode('ascii')
yield b"index " + shortid(old_sha) + b".." + shortid(new_sha)
if new_mode is not None:
yield (" %o" % new_mode).encode('ascii')
yield b"\n"
# TODO(user): Support writing unicode, rather than bytes.
def write_blob_diff(f, old_file, new_file):
"""Write blob diff.
:param f: File-like object to write to
    :param old_file: (path, mode, hexsha) tuple (None if nonexistent)
    :param new_file: (path, mode, hexsha) tuple (None if nonexistent)
:note: The use of write_object_diff is recommended over this function.
"""
(old_path, old_mode, old_blob) = old_file
(new_path, new_mode, new_blob) = new_file
old_path = patch_filename(old_path, b"a")
new_path = patch_filename(new_path, b"b")
def lines(blob):
if blob is not None:
return blob.data.splitlines(True)
else:
return []
f.writelines(gen_diff_header(
(old_path, new_path), (old_mode, new_mode),
(getattr(old_blob, "id", None), getattr(new_blob, "id", None))))
old_contents = lines(old_blob)
new_contents = lines(new_blob)
f.writelines(unified_diff(old_contents, new_contents,
old_path, new_path))
# TODO(user): Support writing unicode, rather than bytes.
def write_tree_diff(f, store, old_tree, new_tree, diff_binary=False):
"""Write tree diff.
:param f: File-like object to write to.
:param old_tree: Old tree id
:param new_tree: New tree id
:param diff_binary: Whether to diff files even if they
are considered binary files by is_binary().
"""
changes = store.tree_changes(old_tree, new_tree)
for (oldpath, newpath), (oldmode, newmode), (oldsha, newsha) in changes:
write_object_diff(f, store, (oldpath, oldmode, oldsha),
(newpath, newmode, newsha),
diff_binary=diff_binary)
def git_am_patch_split(f, encoding=None):
"""Parse a git-am-style patch and split it up into bits.
:param f: File-like object to parse
:param encoding: Encoding to use when creating Git objects
:return: Tuple with commit object, diff contents and git version
"""
encoding = encoding or getattr(f, "encoding", "ascii")
contents = f.read()
if type(contents) is bytes and getattr(email.parser, "BytesParser", None):
parser = email.parser.BytesParser()
msg = parser.parsebytes(contents)
else:
parser = email.parser.Parser()
msg = parser.parsestr(contents)
return parse_patch_message(msg, encoding)
def parse_patch_message(msg, encoding=None):
"""Extract a Commit object and patch from an e-mail message.
:param msg: An email message (email.message.Message)
:param encoding: Encoding to use to encode Git commits
:return: Tuple with commit object, diff contents and git version
"""
c = Commit()
c.author = msg["from"].encode(encoding)
c.committer = msg["from"].encode(encoding)
try:
patch_tag_start = msg["subject"].index("[PATCH")
except ValueError:
subject = msg["subject"]
else:
close = msg["subject"].index("] ", patch_tag_start)
subject = msg["subject"][close+2:]
c.message = (subject.replace("\n", "") + "\n").encode(encoding)
first = True
body = msg.get_payload(decode=True)
lines = body.splitlines(True)
line_iter = iter(lines)
for l in line_iter:
if l == b"---\n":
break
if first:
if l.startswith(b"From: "):
c.author = l[len(b"From: "):].rstrip()
else:
c.message += b"\n" + l
first = False
else:
c.message += l
diff = b""
for l in line_iter:
if l == b"-- \n":
break
diff += l
try:
version = next(line_iter).rstrip(b"\n")
except StopIteration:
version = None
return c, diff, version
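# Usage sketch (illustrative): split a "git am" style patch file into its commit
# metadata, the diff body and the producing git/dulwich version string.
#   with open("0001-example.patch", "rb") as f:
#       commit, diff, version = git_am_patch_split(f)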
| apache-2.0 | 7,687,223,049,708,310,000 | 33.508143 | 98 | 0.597036 | false |
CINPLA/expipe | setup.py | 1 | 1354 | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
long_description = open("README.md").read()
with open("requirements.txt", mode='r') as f:
install_requires = f.read().split('\n')
install_requires = [e for e in install_requires if len(e) > 0]
d = {}
exec(open("expipe/version.py").read(), None, d)
version = d['version']
pkg_name = "expipe"
setup(name="expipe",
packages=find_packages(),
version=version,
include_package_data=True,
author="CINPLA",
author_email="",
maintainer="Mikkel Elle Lepperød",
maintainer_email="[email protected]",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/CINPLA/expipe",
platforms=['Linux', "Windows"],
install_requires=install_requires,
description="Experiment-data management platform",
entry_points={
'console_scripts': [
'expipe=expipe.cli:expipe'
]},
classifiers=['Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering'],
)
| gpl-3.0 | 561,457,691,354,488,400 | 31.214286 | 86 | 0.615669 | false |
matthiask/django-mooch | mooch/postfinance.py | 1 | 6553 | import locale
import logging
from hashlib import sha1
from django import http
from django.conf.urls import url
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.mail import mail_managers
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.translation import get_language, to_locale, ugettext_lazy as _
from mooch.base import BaseMoocher, csrf_exempt_m, require_POST_m
from mooch.signals import post_charge
logger = logging.getLogger("mooch.postfinance")
class PostFinanceMoocher(BaseMoocher):
identifier = "postfinance"
title = _("Pay with PostFinance")
def __init__(
self, *, pspid, live, sha1_in, sha1_out, payment_methods=None, **kwargs
):
if any(x is None for x in (pspid, live, sha1_in, sha1_out)):
raise ImproperlyConfigured(
"%s: None is not allowed in (%r, %r, %r, %r)"
% (self.__class__.__name__, pspid, live, sha1_in, sha1_out)
)
self.pspid = pspid
# Which payment options should be shown
# Options: PostFinance Card, PostFinance e-finance, TWINT, PAYPAL
self.payment_methods = (
["PostFinance Card", "PostFinance e-finance"]
if payment_methods is None
else payment_methods
)
self.live = live
self.sha1_in = sha1_in
self.sha1_out = sha1_out
super().__init__(**kwargs)
def get_urls(self):
return [
url(
r"^postfinance_success/$", self.success_view, name="postfinance_success"
),
url(
r"^postfinance_postsale/$",
self.postsale_view,
name="postfinance_postsale",
),
]
def payment_form(self, request, payment):
postfinance = {
# Add a random suffix, because PostFinance does not like
# processing the same order ID over and over.
"orderID": "%s-%s" % (payment.id.hex, get_random_string(4)),
"amount": str(payment.amount_cents),
"currency": "CHF",
"PSPID": self.pspid,
"language": locale.normalize(to_locale(get_language())).split(".")[0],
"EMAIL": payment.email,
}
postfinance["SHASign"] = sha1(
(
"".join(
(
postfinance["orderID"],
postfinance["amount"],
postfinance["currency"],
postfinance["PSPID"],
self.sha1_in,
)
)
).encode("utf-8")
).hexdigest()
return render_to_string(
"mooch/postfinance_payment_form.html",
{
"moocher": self,
"payment": payment,
"postfinance": postfinance,
"mode": "prod" if self.live else "test",
"payment_methods": self.payment_methods,
"success_url": request.build_absolute_uri(
reverse("%s:postfinance_success" % self.app_name)
),
"failure_url": request.build_absolute_uri(str(self.failure_url)),
},
request=request,
)
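    # Signature sketch (illustrative values): with orderID='ab12ef-XY9Z', amount='1500'
    # (CHF 15.00 expressed in cents), currency='CHF', PSPID='demo' and sha1_in='secret',
    # the SHASign sent to PostFinance is sha1('ab12ef-XY9Z1500CHFdemosecret').hexdigest().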
def _process_query(self, data, request):
try:
parameters_repr = repr(data).encode("utf-8")
logger.info("IPN: Processing request data %s" % parameters_repr)
try:
orderID = data["orderID"]
currency = data["currency"]
amount = data["amount"]
PM = data["PM"]
ACCEPTANCE = data["ACCEPTANCE"]
STATUS = data["STATUS"]
CARDNO = data["CARDNO"]
PAYID = data["PAYID"]
NCERROR = data["NCERROR"]
BRAND = data["BRAND"]
SHASIGN = data["SHASIGN"]
except KeyError:
logger.error("IPN: Missing data in %s" % parameters_repr)
raise ValidationError("Missing data")
sha1_source = "".join(
(
orderID,
currency,
amount,
PM,
ACCEPTANCE,
STATUS,
CARDNO,
PAYID,
NCERROR,
BRAND,
self.sha1_out,
)
)
sha1_out = sha1(sha1_source.encode("utf-8")).hexdigest()
if sha1_out.lower() != SHASIGN.lower():
logger.error("IPN: Invalid hash in %s" % parameters_repr)
raise ValidationError("Hash did not validate")
try:
instance = self.model.objects.get(pk=orderID.split("-")[0])
except self.model.DoesNotExist:
logger.error("IPN: Instance %s does not exist" % orderID)
raise ValidationError("Instance %s does not exist" % orderID)
if STATUS in ("5", "9"):
instance.charged_at = timezone.now()
instance.payment_service_provider = self.identifier
instance.transaction = parameters_repr
instance.save()
post_charge.send(sender=self, payment=instance, request=request)
except Exception as e:
logger.error("IPN: Processing failure %s" % e)
raise
def success_view(self, request):
try:
self._process_query(request.GET.copy(), request)
except ValidationError as exc:
mail_managers(
"Validation error in PostFinance success view",
"\n".join(
[request.build_absolute_uri(), ""] + [m for m in exc.messages]
),
)
for m in exc.messages:
messages.error(request, m)
return redirect(self.failure_url)
else:
return redirect(self.success_url)
@csrf_exempt_m
@require_POST_m
def postsale_view(self, request):
try:
self._process_query(request.POST.copy(), request)
except ValidationError as exc:
return http.HttpResponseForbidden(exc.message)
return http.HttpResponse("OK")
| mit | 4,776,739,833,738,807,000 | 33.489474 | 88 | 0.521288 | false |
arnaudsj/pybrain | pybrain/datasets/reinforcement.py | 6 | 2283 | __author__ = 'Thomas Rueckstiess, [email protected]'
from sequential import SequentialDataSet
from dataset import DataSet
from scipy import zeros
class ReinforcementDataSet(SequentialDataSet):
def __init__(self, statedim, actiondim):
""" initialize the reinforcement dataset, add the 3 fields state, action and
reward, and create an index marker. This class is basically a wrapper function
that renames the fields of SupervisedDataSet into the more common reinforcement
learning names. Instead of 'episodes' though, we deal with 'sequences' here. """
DataSet.__init__(self)
# add 3 fields: input, target, importance
self.addField('state', statedim)
self.addField('action', actiondim)
self.addField('reward', 1)
# link these 3 fields
self.linkFields(['state', 'action', 'reward'])
# reset the index marker
self.index = 0
# add field that stores the beginning of a new episode
self.addField('sequence_index', 1)
self.append('sequence_index', 0)
self.currentSeq = 0
self.statedim = statedim
self.actiondim = actiondim
# the input and target dimensions (for compatibility)
self.indim = self.statedim
self.outdim = self.actiondim
def addSample(self, state, action, reward):
""" adds a new sample consisting of state, action, reward.
:key state: the current state of the world
:key action: the executed action by the agent
:key reward: the reward received for action in state """
self.appendLinked(state, action, reward)
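    # Usage sketch (illustrative): one transition is stored per call, one episode per sequence.
    #   ds = ReinforcementDataSet(statedim=4, actiondim=1)
    #   ds.addSample(observation, action, reward)
    #   ds.newSequence()   # start the next episode (inherited from SequentialDataSet)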
def getSumOverSequences(self, field):
sums = zeros((self.getNumSequences(), self.getDimension(field)))
for n in range(self.getNumSequences()):
sums[n, :] = sum(self._getSequenceField(n, field), 0)
return sums
def __reduce__(self):
# FIXME: This does actually not feel right: We have to use the DataSet
# method here, although we inherit from sequential dataset.
_, _, state, _, _ = DataSet.__reduce__(self)
creator = self.__class__
args = self.statedim, self.actiondim
return creator, args, state, iter([]), iter({})
| bsd-3-clause | 1,552,774,143,539,416,300 | 39.767857 | 92 | 0.638633 | false |
saandrews/pulsar | pulsar-client-cpp/python/functions/serde.py | 2 | 2664 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""serde.py: SerDe defines the interface for serialization/deserialization.
# Everytime a message is read from pulsar topic, the serde is invoked to
# serialize the bytes into an object before invoking the process method.
# Anytime a python object needs to be written back to pulsar, it is
# serialized into bytes before writing.
"""
from abc import abstractmethod
import pickle
class SerDe(object):
"""Interface for Serialization/Deserialization"""
@abstractmethod
def serialize(self, input):
"""Serialize input message into bytes"""
pass
@abstractmethod
def deserialize(self, input_bytes):
"""Serialize input_bytes into an object"""
pass
class PickleSerDe(SerDe):
"""Pickle based serializer"""
def serialize(self, input):
return pickle.dumps(input)
def deserialize(self, input_bytes):
return pickle.loads(input_bytes)
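# Usage sketch (illustrative): round-trip a payload through a SerDe implementation.
#   serde = PickleSerDe()
#   payload = serde.serialize({'count': 3})
#   assert serde.deserialize(payload) == {'count': 3}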
class IdentitySerDe(SerDe):
"""Pickle based serializer"""
def serialize(self, input):
return input
def deserialize(self, input_bytes):
return input_bytes | apache-2.0 | 6,516,800,897,333,343,000 | 34.065789 | 75 | 0.752252 | false |
repotvsupertuga/tvsupertuga.repository | script.module.liveresolver/lib/liveresolver/resolvers/zerocast.py | 10 | 2171 | # -*- coding: utf-8 -*-
'''
Liveresolver Add-on
Copyright (C) 2016 natko1412
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,base64, urllib,json
from liveresolver.modules import client, constants, jsunpack  # jsunpack is needed by resolve(); assumed to live in liveresolver.modules
from liveresolver.modules.log_utils import log
def resolve(url):
try:
try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except: referer = 'http://zerocast.tv/channels'
if 'chan=' in url:
result = client.request(url, referer=referer)
url = re.findall('src=[\'"](.+?)[\'"]>', result)[-1]
page = url
r = re.findall('.+?a=([0-9]+)', url)[0]
url = 'http://zerocast.tv/embed.php?a=%s&id=&width=640&height=480&autostart=true&strech=exactfit' % r
result = client.request(url, referer=referer)
unpacked = ''
packed = result.split('\n')
for i in packed:
try:
unpacked += jsunpack.unpack(i)
except:
pass
result += unpacked
js = re.findall('getJSON\([\"\'](http://zerocast.tv/file[^\"\']+)',result)[0]
token = json.loads(client.request(js))['token']
r = re.findall('curl\s*=\s*[\'"](.+?)[\'"]', result)
r = r[0].decode('base64', 'strict')
if '.m3u8' in r or 'rtmp' in r:
url = r
return url + ' swfUrl=http://p.jwpcdn.com/6/12/jwplayer.flash.swf flashver=' + constants.flash_ver() + ' token=' + token + ' timeout=15 live=true swfVfy=1 pageUrl=' + page
except:
return
| gpl-2.0 | -9,051,616,642,844,487,000 | 35.183333 | 183 | 0.609397 | false |
page-io/Cactus | cactus/tests/integration/s3/__init__.py | 9 | 2048 | #coding:utf-8
import os
from cactus.deployment.s3.engine import S3DeploymentEngine
from cactus.utils.helpers import checksum
from cactus.tests.integration import IntegrationTestCase, DebugHTTPSConnectionFactory, BaseTestHTTPConnection, \
TestHTTPResponse
class DummyAWSCredentialsManager(object):
def __init__(self, site):
self.site = site
def get_credentials(self):
return "123", "abc"
def save_credentials(self):
pass
class S3TestHTTPConnection(BaseTestHTTPConnection):
def handle_request(self, request):
if request.method == "GET":
if request.path == "/":
if request.params == {}:
return self.list_buckets()
if "location" in request.params:
return self.location()
if request.method == "PUT":
if request.path == "/":
return TestHTTPResponse(200)
return self.put_object(request)
raise Exception("Unsupported request {0} {1}".format(request.method, request.url))
def _serve_data(self, name):
with open(os.path.join("cactus/tests/integration/s3/data", name)) as f:
return TestHTTPResponse(200, body=f.read())
def list_buckets(self):
return self._serve_data("buckets.xml")
def location(self):
return self._serve_data("location.xml")
def put_object(self, req):
return TestHTTPResponse(200, headers={"ETag":'"{0}"'.format(checksum(req.body))})
class S3IntegrationTestCase(IntegrationTestCase):
def get_deployment_engine_class(self):
# Create a connection factory
self.connection_factory = DebugHTTPSConnectionFactory(S3TestHTTPConnection)
class TestS3DeploymentEngine(S3DeploymentEngine):
_s3_https_connection_factory = (self.connection_factory, ())
CredentialsManagerClass = DummyAWSCredentialsManager
return TestS3DeploymentEngine
def get_credentials_manager_class(self):
return DummyAWSCredentialsManager
| bsd-3-clause | -4,714,865,855,429,717,000 | 30.507692 | 112 | 0.65918 | false |
ESS-LLP/erpnext-healthcare | erpnext/manufacturing/report/production_analytics/production_analytics.py | 12 | 4568 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, scrub
from frappe.utils import getdate
from erpnext.stock.report.stock_analytics.stock_analytics import (get_period_date_ranges, get_period)
def execute(filters=None):
columns = get_columns(filters)
data, chart = get_data(filters, columns)
return columns, data, None , chart
def get_columns(filters):
columns =[
{
"label": _("Status"),
"fieldname": "Status",
"fieldtype": "Data",
"width": 140
}]
ranges = get_period_date_ranges(filters)
for dummy, end_date in ranges:
period = get_period(end_date, filters)
columns.append({
"label": _(period),
"fieldname": scrub(period),
"fieldtype": "Float",
"width": 120
})
return columns
def get_periodic_data(filters, entry):
periodic_data = {
"All Work Orders": {},
"Not Started": {},
"Overdue": {},
"Pending": {},
"Completed": {}
}
ranges = get_period_date_ranges(filters)
for from_date, end_date in ranges:
period = get_period(end_date, filters)
for d in entry:
if getdate(d.creation) <= getdate(from_date) or getdate(d.creation) <= getdate(end_date) :
periodic_data = update_periodic_data(periodic_data, "All Work Orders", period)
if d.status == 'Completed':
if getdate(d.actual_end_date) < getdate(from_date) or getdate(d.modified) < getdate(from_date):
periodic_data = update_periodic_data(periodic_data, "Completed", period)
elif getdate(d.actual_start_date) < getdate(from_date) :
periodic_data = update_periodic_data(periodic_data, "Pending", period)
elif getdate(d.planned_start_date) < getdate(from_date) :
periodic_data = update_periodic_data(periodic_data, "Overdue", period)
else:
periodic_data = update_periodic_data(periodic_data, "Not Started", period)
elif d.status == 'In Process':
if getdate(d.actual_start_date) < getdate(from_date) :
periodic_data = update_periodic_data(periodic_data, "Pending", period)
elif getdate(d.planned_start_date) < getdate(from_date) :
periodic_data = update_periodic_data(periodic_data, "Overdue", period)
else:
periodic_data = update_periodic_data(periodic_data, "Not Started", period)
elif d.status == 'Not Started':
if getdate(d.planned_start_date) < getdate(from_date) :
periodic_data = update_periodic_data(periodic_data, "Overdue", period)
else:
periodic_data = update_periodic_data(periodic_data, "Not Started", period)
return periodic_data
def update_periodic_data(periodic_data, status, period):
if periodic_data.get(status).get(period):
periodic_data[status][period] += 1
else:
periodic_data[status][period] = 1
return periodic_data
def get_data(filters, columns):
data = []
entry = frappe.get_all("Work Order",
fields=["creation", "modified", "actual_start_date", "actual_end_date", "planned_start_date", "planned_end_date", "status"],
filters={"docstatus": 1, "company": filters["company"] })
periodic_data = get_periodic_data(filters,entry)
labels = ["All Work Orders", "Not Started", "Overdue", "Pending", "Completed"]
chart_data = get_chart_data(periodic_data,columns)
ranges = get_period_date_ranges(filters)
for label in labels:
work = {}
work["Status"] = label
for dummy,end_date in ranges:
period = get_period(end_date, filters)
if periodic_data.get(label).get(period):
work[scrub(period)] = periodic_data.get(label).get(period)
else:
work[scrub(period)] = 0.0
data.append(work)
return data, chart_data
def get_chart_data(periodic_data, columns):
labels = [d.get("label") for d in columns[1:]]
all_data, not_start, overdue, pending, completed = [], [], [] , [], []
datasets = []
for d in labels:
all_data.append(periodic_data.get("All Work Orders").get(d))
not_start.append(periodic_data.get("Not Started").get(d))
overdue.append(periodic_data.get("Overdue").get(d))
pending.append(periodic_data.get("Pending").get(d))
completed.append(periodic_data.get("Completed").get(d))
datasets.append({'name':'All Work Orders', 'values': all_data})
datasets.append({'name':'Not Started', 'values': not_start})
datasets.append({'name':'Overdue', 'values': overdue})
datasets.append({'name':'Pending', 'values': pending})
datasets.append({'name':'Completed', 'values': completed})
chart = {
"data": {
'labels': labels,
'datasets': datasets
}
}
chart["type"] = "line"
return chart
| gpl-3.0 | -3,396,043,446,235,539,500 | 29.251656 | 126 | 0.677539 | false |
guillaumebel/nibbles-clutter | gnome-sudoku/src/lib/colors.py | 1 | 2044 | # -*- coding: utf-8 -*-
def rgb_to_hsv (r,g,b,maxval=255):
if type(r)==int: r = r/float(maxval)
if type(g)==int: g = g/float(maxval)
if type(b)==int: b = b/float(maxval)
# Taken from
# http://www.easyrgb.com/math.php?MATH=M20#text20
var_min = min(r,g,b)
var_max = max(r,g,b)
delta = var_max - var_min
v = var_max
if delta == 0:
# we're grey
h = 0
s = 0
else:
s = delta/var_max
delta_r = ( ( (var_max - r) / 6) + (delta/2) ) / delta
delta_g = ( ( (var_max - g) / 6) + (delta/2) ) / delta
delta_b = ( ( (var_max - b) / 6) + (delta/2) ) / delta
if (r==var_max):
h = delta_b - delta_g
elif g==var_max:
h = (1.0/3)+delta_r-delta_b
elif b==var_max:
h = (2.0/3)+delta_g-delta_r
if (h < 0): h+=1
if (h > 1): h -= 1
return h,s,v
def hsv_to_rgb (h,s,v):
if s==0:
return v,v,v
else:
h = h*6
if h == 6: h = 0
i = int(h)
c1 = v*(1 - s)
c2 = v*(1 - s * ( h-i ) )
c3 = v *(1 - s * (1 - (h - i) ) )
if i==0: r=v;g=c3;b=c1
elif i==1: r=c2; g=v; b=c1
elif i==2: r=c1; g=v; b=c3
elif i==3: r=c1; g=c2; b=v
elif i==4: r=c3; g=c1; b=v
else: r=v; g=c1; b=c2
return r,g,b
def rotate_hue (h,s,v, rotate_by=.25):
h += rotate_by
if h > 1.0: h = h-1.0
return h,s,v
def rotate_hue_rgb (r,g,b, rotate_by=0.25, maxval=255):
h,s,v = rgb_to_hsv(r,g,b,maxval=maxval)
h,s,v = rotate_hue (h,s,v,rotate_by=rotate_by)
return hsv_to_rgb(h,s,v)
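# Worked example (illustrative): rotating pure red by a quarter turn lands in yellow-green,
# returned as floats in the 0..1 range:
#   rotate_hue_rgb(255, 0, 0, rotate_by=0.25)  ->  (0.5, 1.0, 0.0)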
def color_hex_to_float (hstr):
hstr = hstr.strip('#')
if len(hstr)==6:
r = hstr[:2]
g = hstr[2:4]
b = hstr[4:]
maxval = 255
elif len(hstr)==3:
r,g,b = hstr
maxval = 15
else:
raise ValueError('%s is not a 6 or 3 digit color string'%hstr)
r,g,b = int(r,16),int(g,16),int(b,16)
return r/float(maxval),g/float(maxval),b/float(maxval)
| gpl-2.0 | -3,950,238,025,224,076,000 | 27.388889 | 70 | 0.457436 | false |
carlosb1/examples-python | architecture/chatserver.py | 1 | 3299 | import socket
import select
import signal
import sys
from communication import send, receive
class ChatServer(object):
def sighandler(self,signum,frame):
print('Shutting down server...')
for o in self.outputs:
o.close()
self.server.close()
def __init__(self, port=3490, backlog=5):
self.clients = 0
self.clientmap = {}
self.outputs = []
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
self.server.bind(('',port))
print('Listening to port',port,'...')
self.server.listen(backlog)
signal.signal(signal.SIGINT, self.sighandler)
def get_name(self, client):
info = self.clientmap[client]
host, name = info[0][0], info[1]
return '@'.join((name,host))
def serve(self):
inputs = [self.server,sys.stdin]
self.outputs = []
running = 1
while running:
try:
inputready, outputready, exceptready = select.select(inputs, self.outputs, [])
except select.error as e:
break
except socket.error as e:
break
for s in inputready:
if s == self.server:
client, address = self.server.accept()
print('chatserver: got connection %d from %s' % (client.fileno(), address))
cname = receive(client).split('NAME: ')[1]
                    self.clients += 1
send(client, 'CLIENT: '+str(address[0]))
inputs.append(client)
self.clientmap[client] = (address,cname)
                    msg = '\n(Connected: New client ' + str(self.clients) + ' from ' + self.get_name(client) + ')'
for o in self.outputs:
send(o,msg)
self.outputs.append(client)
elif s == sys.stdin:
junk = sys.stdin.readline()
running = 0
else:
try:
data = receive(s)
if data:
msg = '\n#['+self.get_name(s)+']>>'+data
for o in self.outputs:
if o!=s:
send(o,msg)
else:
print('chatserver: %d hung up' % s.fileno())
self.clients -=1
s.close()
inputs.remove(s)
self.outputs.remove(s)
msg = '\n(Hung up: Client from %s)' %self.get_name(s)
for o in self.outputs:
send(o,msg)
except socket.error as e:
inputs.remove(s)
self.outputs.remove(s)
self.server.close()
if __name__ == '__main__':
ChatServer().serve()
| gpl-2.0 | 6,259,956,968,485,038,000 | 35.252747 | 116 | 0.42528 | false |
pcdummy/socketrpc | socketrpc/__init__.py | 1 | 6060 | # -*- coding: utf-8 -*-
# vim: set et sts=4 sw=4 encoding=utf-8:
#
###############################################################################
#
# This file is part of socketrpc, it is a compat library for shared data
# between twisted_srpc and gevent_srpc
#
# Copyright (C) 2011 Rene Jochum <[email protected]>
#
###############################################################################
__version__ = '0.0.2'
import xmlrpclib
import struct
struct_error = struct.error
# -32768 .. -32000 is reserved for RPC errors
# @see: http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php
# Ranges of errors
PARSE_ERROR = xmlrpclib.PARSE_ERROR
SERVER_ERROR = xmlrpclib.SERVER_ERROR
APPLICATION_ERROR = xmlrpclib.APPLICATION_ERROR
#SYSTEM_ERROR = xmlrpclib.SYSTEM_ERROR
TRANSPORT_ERROR = xmlrpclib.TRANSPORT_ERROR
# Specific errors
NOT_WELLFORMED_ERROR = xmlrpclib.NOT_WELLFORMED_ERROR
UNSUPPORTED_ENCODING = xmlrpclib.UNSUPPORTED_ENCODING
#INVALID_ENCODING_CHAR = xmlrpclib.INVALID_ENCODING_CHAR
#INVALID_SRPC = xmlrpclib.INVALID_XMLRPC
METHOD_NOT_FOUND = xmlrpclib.METHOD_NOT_FOUND
#INVALID_METHOD_PARAMS = xmlrpclib.INVALID_METHOD_PARAMS
#INTERNAL_ERROR = xmlrpclib.INTERNAL_ERROR
STATUS_OK = 0
Fault = xmlrpclib.Fault
SUPPORTED_TRANSACTIONS = set(('call', 'reply'))
STRUCT_INT = struct.Struct("!I")
def set_serializer2(predefined=None, encode=None, decode=None, gls=None):
""" Sets the serializer for the gls globals.
set a serializer by:
set_serializer2(<serializer>, gls=globals())
or your own implementation:
ser_serializer(encode=<your encoder>, decode=<your decoder>, gls=globals())
Currently it supports 3 predefined serializers:
<bson> - As fast as "cPickle/2" and secure.
<jsonlib> - Same as "bson" but utilizes a higher network load.
<pickle2> - The fastest but insecure, great to transfer objects internally.
Own serializer notes:
    Please make sure to translate the serializer's exceptions into Fault exceptions!
"""
if gls is None:
gls = globals()
if encode and decode:
gls['encode'] = encode
gls['decode'] = decode
elif predefined is not None:
if predefined == 'bson':
import bson
def encode(data):
"""
Encodes data returns a BSON object or
a Fault
"""
try:
return bson.BSON.encode(data)
except bson.errors.InvalidBSON, e:
return Fault(NOT_WELLFORMED_ERROR, 'Invalid BSON Data: %s' % e)
except bson.errors.InvalidDocument, e:
return Fault(NOT_WELLFORMED_ERROR, 'Invalid BSON Data: %s' % e)
except bson.errors.InvalidStringData, e:
return Fault(UNSUPPORTED_ENCODING, 'Non UTF-8 BSON Data: %s' % e)
def decode(data):
"""
A proxy method for BSON.decode
TODO: This will block if a lot data has been received!
"""
try:
return bson.BSON(data).decode()
except bson.errors.InvalidBSON:
return Fault(NOT_WELLFORMED_ERROR, 'Invalid BSON Data')
except bson.errors.InvalidDocument:
return Fault(NOT_WELLFORMED_ERROR, 'Invalid BSON Data')
except bson.errors.InvalidStringData:
return Fault(UNSUPPORTED_ENCODING, 'Non UTF-8 BSON Data')
set_serializer2(encode=encode, decode=decode, gls=gls)
elif predefined == 'jsonlib':
import jsonlib
def encode(data):
"""
Encodes data returns a BSON object or
a Fault
"""
try:
return jsonlib.dumps(data)
except Exception, e:
msg = 'Invalid JSON Data, got: %s:%s' % (e.__class__.__name__, e)
return Fault(NOT_WELLFORMED_ERROR, msg)
def decode(data):
"""
A proxy method for BSON.decode
TODO: This will block if a lot data has been received!
"""
try:
return jsonlib.loads(data)
except Exception, e:
msg = 'Invalid JSON Data, got: %s:%s' % (e.__class__.__name__, e)
return Fault(NOT_WELLFORMED_ERROR, msg)
set_serializer2(encode=encode, decode=decode, gls=gls)
elif predefined == 'pickle2':
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
def encode(data):
"""
Encodes data returns a BSON object or
a Fault
"""
try:
return pickle.dumps(data, 2)
except pickle.PicklingError, e:
msg = 'Invalid pickle Data, got: %s:%s' % (e.__class__.__name__, e)
return Fault(NOT_WELLFORMED_ERROR, msg)
except EOFError, e:
msg = 'Invalid pickle Data, got: %s:%s' % (e.__class__.__name__, e)
return Fault(NOT_WELLFORMED_ERROR, msg)
def decode(data):
"""
A proxy method for BSON.decode
TODO: This will block if a lot data has been received!
"""
try:
return pickle.loads(data)
except pickle.UnpicklingError, e:
msg = 'Invalid pickle Data, got: %s:%s' % (e.__class__.__name__, e)
return Fault(NOT_WELLFORMED_ERROR, msg)
except EOFError, e:
msg = 'Invalid pickle Data, got: %s:%s' % (e.__class__.__name__, e)
return Fault(NOT_WELLFORMED_ERROR, msg)
set_serializer2(encode=encode, decode=decode, gls=gls)
| bsd-3-clause | 8,474,475,311,637,327,000 | 36.407407 | 87 | 0.533993 | false |
mohierf/mod-webui | module/regenerator.py | 1 | 66203 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
# Frederic Mohier, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This file is copied and updated from the Shinken Regenerator.
The regenerator is used to build standard objects from the broks raised by the
Broker. This version is made to re-build Shinken objects from the broks raised
by an Alignak broker.
Some small modifications introduced by Alignak are managed in this class.
"""
import os
import time
import uuid
import traceback
# Import all objects we will need
from shinken.objects.host import Host, Hosts
from shinken.objects.hostgroup import Hostgroup, Hostgroups
from shinken.objects.service import Service, Services
from shinken.objects.servicegroup import Servicegroup, Servicegroups
from shinken.objects.contact import Contact, Contacts
from shinken.objects.contactgroup import Contactgroup, Contactgroups
from shinken.objects.notificationway import NotificationWay, NotificationWays
from shinken.objects.timeperiod import Timeperiod, Timeperiods
from shinken.daterange import Timerange, Daterange
from shinken.objects.command import Command, Commands
from shinken.commandcall import CommandCall
from shinken.objects.config import Config
from shinken.objects.schedulerlink import SchedulerLink, SchedulerLinks
from shinken.objects.reactionnerlink import ReactionnerLink, ReactionnerLinks
from shinken.objects.pollerlink import PollerLink, PollerLinks
from shinken.objects.brokerlink import BrokerLink, BrokerLinks
from shinken.objects.receiverlink import ReceiverLink, ReceiverLinks
ALIGNAK = False
if os.environ.get('ALIGNAK_SHINKEN_UI', None):
if os.environ.get('ALIGNAK_SHINKEN_UI') not in ['0']:
ALIGNAK = True
if ALIGNAK:
from alignak.message import Message
else:
from shinken.message import Message
from shinken.log import logger
# Class for a Regenerator. It will get broks, and "regenerate" real objects
# from them :)
class Regenerator(object):
def __init__(self):
# Our Real datas
self.configs = {}
self.hosts = Hosts([])
self.services = Services([])
self.notificationways = NotificationWays([])
self.contacts = Contacts([])
self.hostgroups = Hostgroups([])
self.servicegroups = Servicegroups([])
self.contactgroups = Contactgroups([])
self.timeperiods = Timeperiods([])
self.commands = Commands([])
# WebUI - Manage notification ways
self.notificationways = NotificationWays([])
self.schedulers = SchedulerLinks([])
self.pollers = PollerLinks([])
self.reactionners = ReactionnerLinks([])
self.brokers = BrokerLinks([])
self.receivers = ReceiverLinks([])
# From now we only look for realms names
self.realms = set()
self.tags = {}
self.services_tags = {}
# And in progress one
self.inp_hosts = {}
self.inp_services = {}
self.inp_hostgroups = {}
self.inp_servicegroups = {}
self.inp_contactgroups = {}
# Do not ask for full data resent too much
self.last_need_data_send = time.time()
# Flag to say if our data came from the scheduler or not
# (so if we skip *initial* broks)
self.in_scheduler_mode = False
# The Queue where to launch message, will be fill from the broker
self.from_q = None
def load_external_queue(self, from_q):
"""Load an external queue for sending messages"""
self.from_q = from_q
def load_from_scheduler(self, sched):
"""If we are called from a scheduler it self, we load the data from it
Note that this is only when the WebUI is declared as a module of a scheduler.
Never seen such a configuration!
"""
# Ok, we are in a scheduler, so we will skip some useless steps
self.in_scheduler_mode = True
logger.warning("Using the WebUI as a module of a scheduler "
"is not recommended because not enough tested! "
"You should declare the WebUI as a module in your master broker.")
# Go with the data creation/load
c = sched.conf
# Simulate a drop conf
b = sched.get_program_status_brok()
b.prepare()
self.manage_program_status_brok(b)
# Now we will lie and directly map our objects :)
logger.debug("Regenerator::load_from_scheduler")
self.hosts = c.hosts
self.services = c.services
self.notificationways = c.notificationways
self.contacts = c.contacts
self.hostgroups = c.hostgroups
self.servicegroups = c.servicegroups
self.contactgroups = c.contactgroups
self.timeperiods = c.timeperiods
self.commands = c.commands
# WebUI - Manage notification ways
self.notificationways = c.notificationways
# We also load the realms
for h in self.hosts:
# WebUI - Manage realms if declared (use realm_name, or realm or default 'All')
self.realms.add(getattr(h, 'realm_name', getattr(h, 'realm', 'All')))
# WebUI - be aware that the realm may be a string or an object
# This will be managed later.
def want_brok(self, brok):
"""If we are in a scheduler mode, some broks are dangerous,
so we will skip them
Note that this is only when the WebUI is declared as a module of a scheduler.
Never seen such a configuration!
"""
if self.in_scheduler_mode:
return brok.type not in ['program_status',
'initial_host_status', 'initial_hostgroup_status',
'initial_service_status', 'initial_servicegroup_status',
'initial_contact_status', 'initial_contactgroup_status',
'initial_timeperiod_status', 'initial_command_status']
# Not in don't want? so want! :)
return True
def manage_brok(self, brok):
""" Look for a manager function for a brok, and call it """
manage = getattr(self, 'manage_' + brok.type + '_brok', None)
# WebUI - do not make a log because Shinken creates a brok per log!
if not manage:
return
# WebUI - Shinken uses id as a brok identifier whereas Alignak uses uuid
# the idea is to make every regenerated object have both identifiers. It
# will make it easier to migrate from Shinken to Alignak objects.
# This is because some broks contain objects and not only dictionaries!
# Shinken uses id as a brok identifier
if getattr(brok, 'id', None):
brok.uuid = brok.id
else:
# whereas Alignak uses uuid!
if getattr(brok, 'uuid', None):
brok.id = brok.uuid
# Same identifier logic for the brok contained data identifier
if brok.data.get('id', None):
brok.data['uuid'] = brok.data['id']
else:
if brok.data.get('uuid', None):
brok.data['id'] = brok.data['uuid']
# No id for the data contained in the brok, force set on identifier.
if brok.data.get('id', None) is None:
brok.data['uuid'] = str(uuid.uuid4())
brok.data['id'] = brok.data['uuid']
logger.debug("Got a brok: %s", brok.type)
try:
# Catch all the broks management exceptions to avoid breaking the module
manage(brok)
except Exception as exp:
logger.error("Exception on brok management: %s", str(exp))
logger.error("Traceback: %s", traceback.format_exc())
logger.error("Brok '%s': %s", brok.type, brok.data)
# pylint: disable=no-self-use
def update_element(self, element, data):
for prop in data:
setattr(element, prop, data[prop])
def _update_realm(self, data):
"""Set and return the realm the daemon is attached to
If no realm_name attribute exist, then use the realm attribute and set as default value All if it is empty
"""
if 'realm_name' not in data:
data['realm_name'] = data.get('realm', None) or 'All'
# Update realms list
self.realms.add(data['realm_name'])
def _update_events(self, element):
"""Update downtimes and comments for an element
"""
# WebUI - manage the different downtimes and comments structures
# We need to rebuild Downtime and Comment relationship with their parent element
if isinstance(element.downtimes, dict):
element.downtimes = element.downtimes.values()
for downtime in element.downtimes:
downtime.ref = element
if getattr(downtime, 'uuid', None) is not None:
downtime.id = downtime.uuid
if isinstance(element.comments, dict):
element.comments = element.comments.values()
for comment in element.comments:
comment.ref = element
if getattr(comment, 'uuid', None) is not None:
comment.id = comment.uuid
comment.persistent = True
# Now we get all data about an instance, link all this stuff :)
def all_done_linking(self, inst_id):
# In a scheduler we are already "linked" so we can skip this
if self.in_scheduler_mode:
logger.debug("Regenerator: We skip the all_done_linking phase because we are in a scheduler")
return
start = time.time()
logger.info("Linking objects together for %s, starting...", inst_id)
# check if the instance is really defined, so got ALL the
# init phase
if inst_id not in self.configs.keys():
logger.warning("Warning: the instance %d is not fully given, bailout", inst_id)
return
# Try to load the in progress list and make them available for
# finding
try:
inp_hosts = self.inp_hosts[inst_id]
inp_hostgroups = self.inp_hostgroups[inst_id]
inp_contactgroups = self.inp_contactgroups[inst_id]
inp_services = self.inp_services[inst_id]
inp_servicegroups = self.inp_servicegroups[inst_id]
except Exception as exp:
logger.error("Warning all done: %s", str(exp))
return
# WebUI - the linkify order in this function is important because of
# the relations that may exist between the objects. The order changed
# because it was more logical to linkify timeperiods and contacts
# stuff before hosts and services
# WebUI - linkiify timeperiods
for tp in self.timeperiods:
new_exclude = []
for ex in tp.exclude:
exname = ex.timeperiod_name
t = self.timeperiods.find_by_name(exname)
if t:
new_exclude.append(t)
else:
logger.warning("Unknown TP %s for TP: %s", exname, tp)
tp.exclude = new_exclude
# WebUI - linkify contacts groups with their contacts
for cg in inp_contactgroups:
logger.debug("Contacts group: %s", cg.get_name())
new_members = []
for (i, cname) in cg.members:
c = self.contacts.find_by_name(cname)
if c:
new_members.append(c)
else:
logger.warning("Unknown contact %s for contactgroup: %s", cname, cg)
cg.members = new_members
# Merge contactgroups with real ones
for group in inp_contactgroups:
logger.debug("Update existing contacts group: %s", group.get_name())
            # If the contactgroup already exists, just add the new contacts into it
cg = self.contactgroups.find_by_name(group.get_name())
if cg:
logger.debug("- update members: %s / %s", group.members, group.contactgroup_members)
# Update contacts and contacts groups members
cg.members = group.members
cg.contactgroup_members = group.contactgroup_members
# Copy group identifiers because they will have changed after a restart
cg.id = group.id
cg.uuid = group.uuid
else:
logger.debug("- add a group")
self.contactgroups.add_item(group)
        # Link the contacts groups with their contactgroup members
for group in self.contactgroups:
# Link with the other groups
new_groups = []
for cgname in group.contactgroup_members:
for cg in self.contactgroups:
if cgname == cg.get_name() or cgname == cg.uuid:
new_groups.append(cg)
logger.debug("Found contactgroup %s", cg.get_name())
break
else:
logger.warning("No contactgroup %s for contactgroup: %s", cgname, group.get_name())
group.contactgroup_members = new_groups
for group in self.contactgroups:
logger.debug("- members: %s / %s", group.members, group.contactgroup_members)
# Linkify hosts groups with their hosts
for hg in inp_hostgroups:
logger.debug("Hosts group: %s", hg.get_name())
new_members = []
for (i, hname) in hg.members:
h = inp_hosts.find_by_name(hname)
if h:
new_members.append(h)
else:
logger.warning("Unknown host %s for hostgroup: %s", hname, hg.get_name())
hg.members = new_members
logger.debug("- group members: %s", hg.members)
# Merge hosts groups with real ones
for group in inp_hostgroups:
logger.debug("Update existing hosts group: %s", group.get_name())
            # If the hostgroup already exists, just add the new members and groups into it
hg = self.hostgroups.find_by_name(group.get_name())
if hg:
logger.debug("- update members: %s / %s", group.members, group.hostgroup_members)
# Update hosts and hosts groups members
hg.members = group.members
hg.hostgroup_members = group.hostgroup_members
# Copy group identifiers because they will have changed after a restart
hg.id = group.id
hg.uuid = group.uuid
else:
logger.debug("- add a group")
self.hostgroups.add_item(group)
        # Link the hosts groups with their hostgroup members
for group in self.hostgroups:
# Link with the other groups
new_groups = []
for hgname in group.hostgroup_members:
for hg in self.hostgroups:
if hgname == hg.get_name() or hgname == hg.uuid:
new_groups.append(hg)
logger.debug("Found hostgroup %s", hg.get_name())
break
else:
logger.warning("No hostgroup %s for hostgroup: %s", hgname, group.get_name())
group.hostgroup_members = new_groups
for group in self.hostgroups:
logger.debug("- members: %s / %s", group.members, group.hostgroup_members)
# Now link hosts with their hosts groups, commands and timeperiods
for h in inp_hosts:
if h.hostgroups:
hgs = h.hostgroups
if not isinstance(hgs, list):
hgs = h.hostgroups.split(',')
new_groups = []
logger.debug("Searching hostgroup for the host %s, hostgroups: %s", h.get_name(), hgs)
for hgname in hgs:
for group in self.hostgroups:
if hgname == group.get_name() or hgname == group.uuid:
new_groups.append(group)
logger.debug("Found hostgroup %s", group.get_name())
break
else:
logger.warning("No hostgroup %s for host: %s", hgname, h.get_name())
h.hostgroups = new_groups
logger.debug("Linked %s hostgroups %s", h.get_name(), h.hostgroups)
# Now link Command() objects
self.linkify_a_command(h, 'check_command')
self.linkify_a_command(h, 'event_handler')
# Now link timeperiods
self.linkify_a_timeperiod_by_name(h, 'notification_period')
self.linkify_a_timeperiod_by_name(h, 'check_period')
# WebUI - todo, check if some other periods are necessary
self.linkify_a_timeperiod_by_name(h, 'maintenance_period')
# And link contacts too
self.linkify_contacts(h, 'contacts')
logger.debug("Host %s has contacts: %s", h.get_name(), h.contacts)
# Linkify tags
for t in h.tags:
if t not in self.tags:
self.tags[t] = 0
self.tags[t] += 1
# We can really declare this host OK now
old_h = self.hosts.find_by_name(h.get_name())
if old_h is not None:
self.hosts.remove_item(old_h)
self.hosts.add_item(h)
# Linkify services groups with their services
for sg in inp_servicegroups:
logger.debug("Services group: %s", sg.get_name())
new_members = []
for (i, sname) in sg.members:
if i not in inp_services:
logger.warning("Unknown service %s for services group: %s", sname, sg)
else:
new_members.append(inp_services[i])
sg.members = new_members
logger.debug("- group members: %s", sg.members)
# Merge services groups with real ones
for group in inp_servicegroups:
logger.debug("Update existing services group: %s", group.get_name())
            # If the servicegroup already exists, just add the new services into it
sg = self.servicegroups.find_by_name(group.get_name())
if sg:
logger.debug("- update members: %s / %s", group.members, group.servicegroup_members)
# Update services and services groups members
sg.members = group.members
sg.servicegroup_members = group.servicegroup_members
# Copy group identifiers because they will have changed after a restart
sg.id = group.id
sg.uuid = group.uuid
else:
logger.debug("- add a group")
self.servicegroups.add_item(group)
        # Link the services groups with their servicegroup members
for group in self.servicegroups:
# Link with the other groups
new_groups = []
for sgname in group.servicegroup_members:
for sg in self.servicegroups:
if sgname == sg.get_name() or sgname == sg.uuid:
new_groups.append(sg)
logger.debug("Found servicegroup %s", sg.get_name())
break
else:
logger.warning("No servicegroup %s for servicegroup: %s", sgname, group.get_name())
group.servicegroup_members = new_groups
for group in self.servicegroups:
logger.debug("- members: %s / %s", group.members, group.servicegroup_members)
# Now link services with hosts, servicesgroups, commands and timeperiods
for s in inp_services:
if s.servicegroups:
sgs = s.servicegroups
if not isinstance(sgs, list):
sgs = s.servicegroups.split(',')
new_groups = []
logger.debug("Searching servicegroup for the service %s, servicegroups: %s", s.get_full_name(), sgs)
for sgname in sgs:
for group in self.servicegroups:
if sgname == group.get_name() or sgname == group.uuid:
new_groups.append(group)
logger.debug("Found servicegroup %s", group.get_name())
break
else:
logger.warning("No servicegroup %s for service: %s", sgname, s.get_full_name())
s.servicegroups = new_groups
logger.debug("Linked %s servicegroups %s", s.get_full_name(), s.servicegroups)
# Now link with host
hname = s.host_name
s.host = self.hosts.find_by_name(hname)
if s.host:
old_s = s.host.find_service_by_name(s.service_description)
if old_s is not None:
s.host.services.remove(old_s)
s.host.services.append(s)
else:
logger.warning("No host %s for service: %s", hname, s)
# Now link Command() objects
self.linkify_a_command(s, 'check_command')
self.linkify_a_command(s, 'event_handler')
# Now link timeperiods
self.linkify_a_timeperiod_by_name(s, 'notification_period')
self.linkify_a_timeperiod_by_name(s, 'check_period')
self.linkify_a_timeperiod_by_name(s, 'maintenance_period')
# And link contacts too
self.linkify_contacts(s, 'contacts')
logger.debug("Service %s has contacts: %s", s.get_full_name(), s.contacts)
# Linkify services tags
for t in s.tags:
if t not in self.services_tags:
self.services_tags[t] = 0
self.services_tags[t] += 1
# We can really declare this service OK now
self.services.add_item(s, index=True)
# Add realm of the hosts
for h in inp_hosts:
# WebUI - Manage realms if declared (Alignak)
if getattr(h, 'realm_name', None):
self.realms.add(h.realm_name)
else:
# WebUI - Manage realms if declared (Shinken)
if getattr(h, 'realm', None):
self.realms.add(h.realm)
# Now we can link all impacts/source problem list
# but only for the new ones here of course
for h in inp_hosts:
self.linkify_dict_srv_and_hosts(h, 'impacts')
self.linkify_dict_srv_and_hosts(h, 'source_problems')
# todo: refactor this part for Alignak - to be tested.
# self.linkify_host_and_hosts(h, 'parent_dependencies')
# self.linkify_host_and_hosts(h, 'child_dependencies')
self.linkify_host_and_hosts(h, 'parents')
self.linkify_host_and_hosts(h, 'childs')
self.linkify_dict_srv_and_hosts(h, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(h, 'child_dependencies')
# Now services too
for s in inp_services:
self.linkify_dict_srv_and_hosts(s, 'impacts')
self.linkify_dict_srv_and_hosts(s, 'source_problems')
# todo: refactor this part for Alignak - to be tested.
# self.linkify_service_and_services(s, 'parent_dependencies')
# self.linkify_service_and_services(s, 'child_dependencies')
self.linkify_dict_srv_and_hosts(s, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(s, 'child_dependencies')
# clean old objects
del self.inp_hosts[inst_id]
del self.inp_hostgroups[inst_id]
del self.inp_contactgroups[inst_id]
del self.inp_services[inst_id]
del self.inp_servicegroups[inst_id]
for item_type in ['realm', 'timeperiod', 'command',
'contact', 'host', 'service',
'contactgroup', 'hostgroup', 'servicegroup']:
logger.info("Got %d %ss", len(getattr(self, "%ss" % item_type, [])), item_type)
for item in getattr(self, "%ss" % item_type):
logger.debug("- %s", item)
logger.info("Linking objects together, end. Duration: %s", time.time() - start)
def linkify_a_command(self, o, prop):
"""We look for o.prop (CommandCall) and we link the inner
Command() object with our real ones"""
logger.debug("Linkify a command: %s", prop)
cc = getattr(o, prop, None)
if not cc:
setattr(o, prop, None)
return
# WebUI - the command must has different representation
# (a simple name, an object or a simple identifier)
cmdname = cc
if isinstance(cc, CommandCall):
cmdname = cc.command
cc.command = self.commands.find_by_name(cmdname)
logger.debug("- %s = %s", prop, cc.command.get_name() if cc.command else 'None')
def linkify_commands(self, o, prop):
"""We look at o.prop and for each command we relink it"""
logger.debug("Linkify commands: %s", prop)
v = getattr(o, prop, None)
if not v:
# If do not have a command list, put a void list instead
setattr(o, prop, [])
return
for cc in v:
# WebUI - the command must has different representation
# (a simple name, an object or a simple identifier)
cmdname = cc
if hasattr(cc, 'command'):
cmdname = cc.command
if hasattr(cmdname, 'uuid') and cmdname.uuid in self.commands:
cc.command = self.commands[cmdname.uuid]
else:
cc.command = self.commands.find_by_name(cmdname)
logger.debug("- %s = %s", prop, cc.command.get_name() if cc.command else 'None')
def linkify_a_timeperiod(self, o, prop):
"""We look at the timeperiod() object of o.property
and we replace it with our true one"""
t = getattr(o, prop, None)
if not t:
setattr(o, prop, None)
return
if isinstance(t, Timeperiod):
logger.debug("- already linkified to an object")
return
logger.debug("Linkify a timeperiod: %s, found: %s", prop, type(t))
logger.debug("Linkify a timeperiod: %s, found: %s", prop, t)
for tp in self.timeperiods:
if t == tp.get_name() or t == tp.uuid:
setattr(o, prop, tp)
break
else:
logger.warning("Timeperiod not linkified: %s / %s !", type(t), t)
def linkify_a_timeperiod_by_name(self, o, prop):
"""same than before, but the value is a string here"""
tpname = getattr(o, prop, None)
if not tpname:
setattr(o, prop, None)
return
tp = self.timeperiods.find_by_name(tpname)
setattr(o, prop, tp)
def linkify_contacts(self, o, prop):
"""We look at o.prop and for each contacts in it,
we replace it with true object in self.contacts"""
v = getattr(o, prop, None)
if not v:
return
new_v = []
for cname in v:
c = self.contacts.find_by_name(cname)
if c:
new_v.append(c)
else:
                # WebUI - search the contact by id because we did not find it by name
for contact in self.contacts:
if cname == contact.uuid:
new_v.append(contact)
break
setattr(o, prop, new_v)
def linkify_dict_srv_and_hosts(self, o, prop):
"""We got a service/host dict, we want to get back to a flat list"""
v = getattr(o, prop, None)
if not v:
setattr(o, prop, [])
return
logger.debug("Linkify Dict Srv/Host for %s - %s = %s", o.get_name(), prop, v)
new_v = []
if 'hosts' not in v or 'services' not in v:
            # WebUI - Alignak does not use the same structure as Shinken
for id in v:
for host in self.hosts:
if id == host.id:
new_v.append(host)
break
else:
for service in self.services:
if id == service.id:
new_v.append(service)
break
else:
# WebUI - plain old Shinken structure
for name in v['services']:
elts = name.split('/')
hname = elts[0]
sdesc = elts[1]
s = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if s:
new_v.append(s)
for hname in v['hosts']:
h = self.hosts.find_by_name(hname)
if h:
new_v.append(h)
setattr(o, prop, new_v)
def linkify_host_and_hosts(self, o, prop):
v = getattr(o, prop)
if not v:
setattr(o, prop, [])
return
logger.debug("Linkify host>hosts for %s - %s = %s", o.get_name(), prop, v)
new_v = []
for hname in v:
h = self.hosts.find_by_name(hname)
if h:
new_v.append(h)
else:
                # WebUI - we did not find it by name, let's try with an identifier
for host in self.hosts:
if hname == host.uuid:
new_v.append(host)
break
setattr(o, prop, new_v)
def linkify_service_and_services(self, o, prop):
"""TODO confirm this function is useful !"""
v = getattr(o, prop)
if not v:
setattr(o, prop, [])
return
logger.debug("Linkify service>services for %s - %s = %s", o.get_name(), prop, v)
new_v = []
for sdesc in v:
s = self.services.find_by_name(sdesc)
if s:
new_v.append(s)
else:
for service in self.services:
if sdesc == service.uuid:
new_v.append(service)
break
setattr(o, prop, new_v)
###############
# Brok management part
###############
def before_after_hook(self, brok, obj):
"""
This can be used by derived classes to compare the data in the brok
with the object which will be updated by these data. For example,
it is possible to find out in this method whether the state of a
host or service has changed.
"""
pass
#######
# INITIAL PART
#######
def manage_program_status_brok(self, b):
"""A scheduler provides its initial status
Shinken brok contains:
data = {"is_running": 1,
"instance_id": self.instance_id,
"instance_name": self.instance_name,
"last_alive": now,
"interval_length": self.conf.interval_length,
"program_start": self.program_start,
"pid": os.getpid(),
"daemon_mode": 1,
"last_command_check": now,
"last_log_rotation": now,
"notifications_enabled": self.conf.enable_notifications,
"active_service_checks_enabled": self.conf.execute_service_checks,
"passive_service_checks_enabled": self.conf.accept_passive_service_checks,
"active_host_checks_enabled": self.conf.execute_host_checks,
"passive_host_checks_enabled": self.conf.accept_passive_host_checks,
"event_handlers_enabled": self.conf.enable_event_handlers,
"flap_detection_enabled": self.conf.enable_flap_detection,
"failure_prediction_enabled": 0,
"process_performance_data": self.conf.process_performance_data,
"obsess_over_hosts": self.conf.obsess_over_hosts,
"obsess_over_services": self.conf.obsess_over_services,
"modified_host_attributes": 0,
"modified_service_attributes": 0,
"global_host_event_handler": self.conf.global_host_event_handler,
'global_service_event_handler': self.conf.global_service_event_handler,
'check_external_commands': self.conf.check_external_commands,
'check_service_freshness': self.conf.check_service_freshness,
'check_host_freshness': self.conf.check_host_freshness,
'command_file': self.conf.command_file
}
        Note that some parameter values are hard-coded and useless ... and some configuration
        parameters are missing!
        The Alignak brok contains much more information:
_config: all the more interesting configuration parameters
are pushed in the program status brok sent by each scheduler. At minimum, the UI will receive
all the framework configuration parameters.
_running: all the running scheduler information: checks count, results, live synthesis
        _macros: the configured Alignak macros and their values
"""
data = b.data
c_id = data['instance_id']
c_name = data.get('instance_name', c_id)
logger.info("Got a configuration from %s", c_name)
logger.debug("Data: %s", data)
now = time.time()
if c_id in self.configs:
            # WebUI - it may happen that the same scheduler sends its initial status brok several times.
# Let's manage this and only consider one brok per minute!
# We already have a configuration for this scheduler instance
if now - self.configs[c_id]['_timestamp'] < 60:
logger.info("Got near initial program status for %s. Ignoring this information.", c_name)
return
        # Clean all the in-progress things for this scheduler instance
self.inp_hosts[c_id] = Hosts([])
self.inp_services[c_id] = Services([])
self.inp_hostgroups[c_id] = Hostgroups([])
self.inp_servicegroups[c_id] = Servicegroups([])
self.inp_contactgroups[c_id] = Contactgroups([])
# And we save the data in the configurations
data['_timestamp'] = now
self.configs[c_id] = data
# We should clean all previously added hosts and services
logger.debug("Cleaning hosts/service of %s", c_id)
to_del_h = [h for h in self.hosts if h.instance_id == c_id]
to_del_srv = [s for s in self.services if s.instance_id == c_id]
if to_del_h:
# Clean hosts from hosts and hostgroups
logger.info("Cleaning %d hosts", len(to_del_h))
for h in to_del_h:
self.hosts.remove_item(h)
# Exclude from all hostgroups members the hosts of this scheduler instance
for hg in self.hostgroups:
logger.debug("Cleaning hostgroup %s: %d members", hg.get_name(), len(hg.members))
try:
# hg.members = [h for h in hg.members if h.instance_id != c_id]
                    members = hg.members
                    hg.members = []
                    for h in members:
if h.instance_id != c_id:
hg.members.append(h)
else:
logger.debug("- removing host: %s", h)
except Exception as exp:
logger.error("Exception when cleaning hostgroup: %s", str(exp))
logger.debug("hostgroup members count after cleaning: %d members", len(hg.members))
if to_del_srv:
# Clean services from services and servicegroups
logger.debug("Cleaning %d services", len(to_del_srv))
for s in to_del_srv:
self.services.remove_item(s)
# Exclude from all servicegroups members the services of this scheduler instance
for sg in self.servicegroups:
logger.debug("Cleaning servicegroup %s: %d members", sg.get_name(), len(sg.members))
try:
# sg.members = [s for s in sg.members if s.instance_id != c_id]
                    members = sg.members
                    sg.members = []
                    for s in members:
if s.instance_id != c_id:
sg.members.append(s)
else:
logger.debug("- removing service: %s", s)
except Exception as exp:
logger.error("Exception when cleaning servicegroup: %s", str(exp))
logger.debug("- members count after cleaning: %d members", len(sg.members))
def manage_initial_host_status_brok(self, b):
"""Got a new host"""
data = b.data
hname = data['host_name']
inst_id = data['instance_id']
# Try to get the in progress Hosts
try:
inp_hosts = self.inp_hosts[inst_id]
except Exception as exp:
logger.error("[Regenerator] initial_host_status:: Not good! %s", str(exp))
return
logger.debug("Creating a host: %s - %s from scheduler %s", data['id'], hname, inst_id)
logger.debug("Creating a host: %s ", data)
host = Host({})
self.update_element(host, data)
# Update downtimes/comments
self._update_events(host)
        # Ok, put it in the in-progress hosts
inp_hosts[host.id] = host
def manage_initial_hostgroup_status_brok(self, b):
"""Got a new hosts group"""
data = b.data
hgname = data['hostgroup_name']
inst_id = data['instance_id']
# Try to get the in progress Hostgroups
try:
inp_hostgroups = self.inp_hostgroups[inst_id]
except Exception as exp:
logger.error("[Regenerator] initial_hostgroup_status:: Not good! %s", str(exp))
return
logger.debug("Creating a hostgroup: %s from scheduler %s", hgname, inst_id)
logger.debug("Creating a hostgroup: %s ", data)
# With void members
hg = Hostgroup([])
# populate data
self.update_element(hg, data)
# We will link hosts into hostgroups later
# so now only save it
inp_hostgroups[hg.id] = hg
members = getattr(hg, 'members', [])
if not isinstance(members, list):
members = members.split(',')
hg.members = members
logger.debug("- hostgroup host members: %s", hg.members)
        # It looks like Shinken does not provide this sub-groups information!
sub_groups = getattr(hg, 'hostgroup_members', [])
if not isinstance(sub_groups, list):
sub_groups = sub_groups.split(',')
sub_groups = [] if (sub_groups and not sub_groups[0]) else [g.strip() for g in sub_groups]
hg.hostgroup_members = sub_groups
logger.debug("- hostgroup group members: %s", hg.hostgroup_members)
def manage_initial_service_status_brok(self, b):
"""Got a new service"""
data = b.data
hname = data['host_name']
sdesc = data['service_description']
inst_id = data['instance_id']
        # Try to get the in progress Services
try:
inp_services = self.inp_services[inst_id]
except Exception as exp:
logger.error("[Regenerator] host_check_result Not good! %s", str(exp))
return
logger.debug("Creating a service: %s - %s/%s from scheduler%s", data['id'], hname, sdesc, inst_id)
logger.debug("Creating a service: %s ", data)
if isinstance(data['display_name'], list):
data['display_name'] = data['service_description']
service = Service({})
self.update_element(service, data)
# Update downtimes/comments
self._update_events(service)
        # Ok, put it in the in-progress services
inp_services[service.id] = service
def manage_initial_servicegroup_status_brok(self, b):
"""Got a new services group"""
data = b.data
sgname = data['servicegroup_name']
inst_id = data['instance_id']
        # Try to get the in progress Servicegroups
try:
inp_servicegroups = self.inp_servicegroups[inst_id]
except Exception as exp:
logger.error("[Regenerator] manage_initial_servicegroup_status_brok:: Not good! %s", str(exp))
return
logger.debug("Creating a servicegroup: %s from scheduler%s", sgname, inst_id)
logger.debug("Creating a servicegroup: %s ", data)
# With void members
sg = Servicegroup([])
# populate data
self.update_element(sg, data)
        # We will link services into servicegroups later
        # so now only save it
inp_servicegroups[sg.id] = sg
members = getattr(sg, 'members', [])
if not isinstance(members, list):
members = members.split(',')
sg.members = members
logger.debug("- servicegroup service members: %s", sg.members)
        # It looks like Shinken does not provide this sub-groups information!
sub_groups = getattr(sg, 'servicegroup_members', [])
if not isinstance(sub_groups, list):
sub_groups = sub_groups.split(',')
sub_groups = [] if (sub_groups and not sub_groups[0]) else [g.strip() for g in sub_groups]
sg.servicegroup_members = sub_groups
logger.debug("- servicegroup group members: %s", sg.servicegroup_members)
def manage_initial_contact_status_brok(self, b):
"""
For Contacts, it's a global value, so 2 cases:
We already got it from another scheduler instance -> we update it
We don't -> we create it
In both cases we need to relink it
"""
data = b.data
cname = data['contact_name']
inst_id = data['instance_id']
logger.debug("Creating a contact: %s from scheduler %s", cname, inst_id)
logger.debug("Creating a contact: %s", data)
c = self.contacts.find_by_name(cname)
if c:
self.update_element(c, data)
else:
c = Contact({})
self.update_element(c, data)
self.contacts.add_item(c)
# Delete some useless contact values
        # WebUI - todo, perhaps we should not nullify these values!
del c.host_notification_commands
del c.service_notification_commands
del c.host_notification_period
del c.service_notification_period
# Now manage notification ways too
        # Same as for contacts: we create or update
nws = c.notificationways
if nws and not isinstance(nws, list):
logger.error("[WebUI] Contact %s, bad formed notification ways, ignoring!", c.get_name())
return
if nws and not isinstance(nws[0], NotificationWay):
# Alignak sends notification ways as dictionaries
new_notifways = []
for nw_uuid in nws:
nw = None
for nw in self.notificationways:
if nw_uuid == nw.get_name() or nw_uuid == nw.uuid:
break
else:
logger.warning("[WebUI] Contact %s has an unknown NW: %s", c.get_name(), nws)
continue
logger.debug("[WebUI] Contact %s, found the NW: %s", c.get_name(), nw.__dict__)
# Linking the notification way with commands
self.linkify_commands(nw, 'host_notification_commands')
self.linkify_commands(nw, 'service_notification_commands')
# Now link timeperiods
self.linkify_a_timeperiod(nw, 'host_notification_period')
self.linkify_a_timeperiod(nw, 'service_notification_period')
new_notifways.append(nw)
c.notificationways = new_notifways
else:
# Shinken old way...
new_notifways = []
for cnw in nws:
nwname = cnw.get_name()
logger.debug("- notification way: %s", nwname)
nw = self.notificationways.find_by_name(nwname)
if nw:
# Update it...
for prop in NotificationWay.properties:
if hasattr(cnw, prop):
setattr(nw, prop, getattr(cnw, prop))
else:
self.notificationways.add_item(cnw)
nw = self.notificationways.find_by_name(nwname)
# Linking the notification way with commands
self.linkify_commands(nw, 'host_notification_commands')
self.linkify_commands(nw, 'service_notification_commands')
# Now link timeperiods
self.linkify_a_timeperiod(nw, 'host_notification_period')
self.linkify_a_timeperiod(nw, 'service_notification_period')
new_notifways.append(nw)
c.notificationways = new_notifways
def manage_initial_contactgroup_status_brok(self, b):
"""Got a new contacts group"""
data = b.data
cgname = data['contactgroup_name']
inst_id = data['instance_id']
# Try to get the in progress Contactgroups
try:
inp_contactgroups = self.inp_contactgroups[inst_id]
except Exception as exp:
logger.error("[Regenerator] manage_initial_contactgroup_status_brok Not good! %s", str(exp))
return
logger.debug("Creating a contactgroup: %s from scheduler%s", cgname, inst_id)
logger.debug("Creating a contactgroup: %s", data)
# With void members
cg = Contactgroup([])
# populate data
self.update_element(cg, data)
# We will link contacts into contactgroups later
# so now only save it
inp_contactgroups[cg.id] = cg
members = getattr(cg, 'members', [])
if not isinstance(members, list):
members = members.split(',')
cg.members = members
logger.debug("- contactgroup contact members: %s", cg.members)
sub_groups = getattr(cg, 'contactgroup_members', [])
if not isinstance(sub_groups, list):
sub_groups = sub_groups.split(',')
sub_groups = [] if (sub_groups and not sub_groups[0]) else [g.strip() for g in sub_groups]
cg.contactgroup_members = sub_groups
logger.debug("- contactgroup group members: %s", cg.contactgroup_members)
def manage_initial_timeperiod_status_brok(self, b):
"""
        For Timeperiods there are 2 cases: either we already have it or not.
        If we have it: just update it.
        If not: create it and declare it in our main timeperiods.
"""
data = b.data
tpname = data['timeperiod_name']
inst_id = data['instance_id']
logger.debug("Creating a timeperiod: %s from scheduler %s", tpname, inst_id)
logger.debug("Creating a timeperiod: %s ", data)
tp = self.timeperiods.find_by_name(tpname)
if tp:
self.update_element(tp, data)
else:
tp = Timeperiod({})
self.update_element(tp, data)
self.timeperiods.add_item(tp)
        # Alignak does not keep the Timerange objects and serializes them as dicts...
# so we must restore Timeranges from the dictionary
logger.debug("Timeperiod: %s", tp)
# WebUI - try to manage time periods correctly!
# Alignak :
# - date range: <class 'alignak.daterange.MonthWeekDayDaterange'>
# - time range: <type 'dict'>
# Shinken :
# - date range: <class 'shinken.daterange.MonthWeekDayDaterange'>
# - time range: <class 'shinken.daterange.Timerange'>
# Transform some inner items
new_drs = []
for dr in tp.dateranges:
new_dr = dr
# new_dr = Daterange(dr.syear, dr.smon, dr.smday, dr.swday, dr.swday_offset,
# dr.eyear, dr.emon, dr.emday, dr.ewday, dr.ewday_offset,
# dr.skip_interval, dr.other)
logger.debug("- date range: %s (%s)", type(dr), dr.__dict__)
# logger.warning("- date range: %s (%s)", type(new_dr), new_dr.__dict__)
new_trs = []
for tr in dr.timeranges:
# Time range may be a dictionary or an object
logger.debug(" time range: %s - %s", type(tr), tr)
try:
# Dictionary for Alignak
entry = "%02d:%02d-%02d:%02d" % (tr['hstart'], tr['mstart'], tr['hend'], tr['mend'])
except TypeError:
# Object for Shinken
entry = "%02d:%02d-%02d:%02d" % (tr.hstart, tr.mstart, tr.hend, tr.mend)
logger.debug(" time range: %s", entry)
new_trs.append(Timerange(entry))
new_dr.timeranges = new_trs
logger.debug("- date range: %s", dr.__dict__)
new_drs.append(new_dr)
tp.dateranges = new_drs
def manage_initial_command_status_brok(self, b):
"""
        For commands there are 2 cases: either we already have it or not.
        If we have it: just update it.
        If not: create it and declare it in our main commands.
"""
data = b.data
cname = data['command_name']
inst_id = data['instance_id']
logger.debug("Creating a command: %s from scheduler %s", cname, inst_id)
logger.debug("Creating a command: %s ", data)
c = self.commands.find_by_name(cname)
if c:
self.update_element(c, data)
else:
c = Command({})
self.update_element(c, data)
self.commands.add_item(c)
def manage_initial_notificationway_status_brok(self, b):
"""
        For notification ways there are 2 cases: either we already have it or not.
        If we have it: just update it.
        If not: create it and declare it in our main notification ways.
"""
data = b.data
nw_name = data['notificationway_name']
inst_id = data['instance_id']
logger.debug("Creating a notification way: %s from scheduler %s", nw_name, inst_id)
logger.debug("Creating a notification way: %s ", data)
nw = self.notificationways.find_by_name(nw_name)
if nw:
logger.debug("- updating a notification way: %s from scheduler %s", nw_name, inst_id)
self.update_element(nw, data)
else:
nw = NotificationWay({})
self.update_element(nw, data)
self.notificationways.add_item(nw)
# Linking the notification way with commands
self.linkify_commands(nw, 'host_notification_commands')
self.linkify_commands(nw, 'service_notification_commands')
# Now link timeperiods
self.linkify_a_timeperiod(nw, 'host_notification_period')
self.linkify_a_timeperiod(nw, 'service_notification_period')
logger.debug("Created: %s ", nw.get_name())
def manage_initial_scheduler_status_brok(self, b):
"""Got a scheduler status"""
data = b.data
sched = SchedulerLink({})
self._update_realm(data)
self.update_element(sched, data)
self.schedulers[data['scheduler_name']] = sched
def manage_initial_poller_status_brok(self, b):
"""Got a poller status"""
data = b.data
poller = PollerLink({})
self._update_realm(data)
self.update_element(poller, data)
self.pollers[data['poller_name']] = poller
def manage_initial_reactionner_status_brok(self, b):
"""Got a reactionner status"""
data = b.data
reac = ReactionnerLink({})
self._update_realm(data)
self.update_element(reac, data)
self.reactionners[data['reactionner_name']] = reac
def manage_initial_broker_status_brok(self, b):
"""Got a broker status"""
data = b.data
broker = BrokerLink({})
self._update_realm(data)
self.update_element(broker, data)
self.brokers[data['broker_name']] = broker
def manage_initial_receiver_status_brok(self, b):
"""Got a receiver status"""
data = b.data
receiver = ReceiverLink({})
self._update_realm(data)
self.update_element(receiver, data)
self.receivers[data['receiver_name']] = receiver
def manage_initial_broks_done_brok(self, b):
"""This brok is here when the WHOLE initial phase is done.
It is the last brok sent by the scheduler.
So we got all data, we can link all together :)"""
inst_id = b.data['instance_id']
self.all_done_linking(inst_id)
#################
# Status Update part
#################
def manage_update_program_status_brok(self, b):
"""Each scheduler sends us a "I'm alive" brok.
        If we never heard about this one, we have a problem and we ask it for its initial data :)
"""
data = b.data
c_id = data['instance_id']
c_name = data.get('instance_name', c_id)
logger.debug("Got a scheduler update status from %s", c_name)
logger.debug("Data: %s", data)
# If we got an update about an unknown instance, cry and ask for a full version!
# Checked that Alignak will also provide information if it gets such a message...
if c_id not in self.configs.keys():
            # Do not ask for data too quickly, very dangerous
            # one request per minute at most
if time.time() - self.last_need_data_send > 60 and self.from_q is not None:
logger.debug("I ask the broker for instance id data: %s", c_id)
if ALIGNAK:
msg = Message(_type='NeedData', data={'full_instance_id': c_id}, source='WebUI')
else:
msg = Message(id=0, type='NeedData', data={'full_instance_id': c_id}, source='WebUI')
self.from_q.put(msg)
self.last_need_data_send = time.time()
return
# Tag with the update time and store the configuration
data['_timestamp'] = time.time()
self.configs[c_id].update(data)
def manage_update_host_status_brok(self, b):
"""Got an host update
Something changed in the host configuration"""
data = b.data
hname = data['host_name']
host = self.hosts.find_by_name(hname)
if not host:
return
# There are some properties that should not change and are already linked
# so just remove them
clean_prop = ['uuid', 'check_command', 'hostgroups',
'contacts', 'notification_period', 'contact_groups',
'check_period', 'event_handler',
'maintenance_period', 'realm', 'customs', 'escalations']
        # some are only used when a topology change happened
toplogy_change = b.data['topology_change']
if not toplogy_change:
# No childs property in Alignak hosts
if ALIGNAK:
clean_prop.extend(['parents', 'child_dependencies', 'parent_dependencies'])
else:
clean_prop.extend(['childs', 'parents', 'child_dependencies', 'parent_dependencies'])
for prop in clean_prop:
del data[prop]
logger.debug("Updated host: %s", hname)
self.before_after_hook(b, host)
self.update_element(host, data)
# We can have some change in our impacts and source problems.
self.linkify_dict_srv_and_hosts(host, 'impacts')
self.linkify_dict_srv_and_hosts(host, 'source_problems')
# If the topology change, update it
if toplogy_change:
logger.debug("Topology change for %s %s", host.get_name(), host.parent_dependencies)
self.linkify_host_and_hosts(host, 'parents')
self.linkify_host_and_hosts(host, 'childs')
self.linkify_dict_srv_and_hosts(host, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(host, 'child_dependencies')
# Update downtimes/comments
self._update_events(host)
def manage_update_service_status_brok(self, b):
"""Got a service update
Something changed in the service configuration"""
# There are some properties that should not change and are already linked
# so just remove them
clean_prop = ['uuid', 'check_command', 'servicegroups',
'contacts', 'notification_period', 'contact_groups',
'check_period', 'event_handler',
'maintenance_period', 'customs', 'escalations']
        # some are only used when a topology change happened
toplogy_change = b.data['topology_change']
if not toplogy_change:
clean_prop.extend(['child_dependencies', 'parent_dependencies'])
data = b.data
for prop in clean_prop:
del data[prop]
hname = data['host_name']
sdesc = data['service_description']
service = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if not service:
return
logger.debug("Updated service: %s/%s", hname, sdesc)
self.before_after_hook(b, service)
self.update_element(service, data)
# We can have some change in our impacts and source problems.
self.linkify_dict_srv_and_hosts(service, 'impacts')
self.linkify_dict_srv_and_hosts(service, 'source_problems')
# If the topology change, update it
if toplogy_change:
self.linkify_dict_srv_and_hosts(service, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(service, 'child_dependencies')
# Update downtimes/comments
self._update_events(service)
def _update_satellite_status(self, sat_list, sat_name, data):
"""Update a satellite status"""
logger.debug("Update satellite '%s' status: %s", sat_name, data)
try:
# Get the satellite object
s = sat_list[sat_name]
# Update its realm
self._update_realm(data)
# Update its properties
self.update_element(s, data)
except KeyError:
# Not yet known
pass
except Exception as exp:
logger.warning("Failed updating %s satellite status: %s", sat_name, exp)
def manage_update_broker_status_brok(self, b):
"""Got a broker status update"""
self._update_satellite_status(self.brokers, b.data['broker_name'], b.data)
def manage_update_receiver_status_brok(self, b):
"""Got a receiver status update"""
self._update_satellite_status(self.receivers, b.data['receiver_name'], b.data)
def manage_update_reactionner_status_brok(self, b):
"""Got a reactionner status update"""
self._update_satellite_status(self.reactionners, b.data['reactionner_name'], b.data)
def manage_update_poller_status_brok(self, b):
"""Got a poller status update"""
self._update_satellite_status(self.pollers, b.data['poller_name'], b.data)
def manage_update_scheduler_status_brok(self, b):
"""Got a scheduler status update"""
self._update_satellite_status(self.schedulers, b.data['scheduler_name'], b.data)
#################
# Check result and schedule part
#################
def manage_host_check_result_brok(self, b):
"""This brok contains the result of an host check"""
data = b.data
hname = data['host_name']
h = self.hosts.find_by_name(hname)
if not h:
logger.warning("Got a check result brok for an unknown host: %s", hname)
return
logger.debug("Host check result: %s - %s (%s)", hname, h.state, h.state_type)
self.before_after_hook(b, h)
# Remove identifiers if they exist in the data - it happens that the
# identifier is changing on a configuration reload!
if 'id' in data:
data.pop('id')
if 'uuid' in data:
data.pop('uuid')
self.update_element(h, data)
def manage_host_next_schedule_brok(self, b):
"""This brok should arrive within a second after the host_check_result_brok.
It contains information about the next scheduled host check"""
self.manage_host_check_result_brok(b)
def manage_service_check_result_brok(self, b):
"""A service check have just arrived, we UPDATE data info with this"""
data = b.data
hname = data['host_name']
sdesc = data['service_description']
s = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if not s:
logger.warning("Got a check result brok for an unknown service: %s/%s", hname, sdesc)
return
logger.debug("Service check result: %s/%s - %s (%s)", hname, sdesc, s.state, s.state_type)
self.before_after_hook(b, s)
# Remove identifiers if they exist in the data - it happens that the
# identifier is changing on a configuration reload!
if 'id' in data:
data.pop('id')
if 'uuid' in data:
data.pop('uuid')
self.update_element(s, data)
def manage_service_next_schedule_brok(self, b):
"""This brok should arrive within a second after the service_check_result_brok.
It contains information about the next scheduled service check"""
self.manage_service_check_result_brok(b)
#################
# Acknowledge / downtime part
# ---
# Alignak raises broks for acknowledges and downtimes
#################
def manage_acknowledge_raise_brok(self, b):
"""An acknowledge has been set on an item"""
data = b.data
hname = data.get('host_name', data.get('host', None))
if hname:
h = self.hosts.find_by_name(hname)
if not h:
logger.warning("Got a acknowledge raise brok for an unknown host: %s", hname)
return
sdesc = data.get('service_description', data.get('service', None))
if sdesc:
s = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if not s:
logger.warning("Got a acknowledge raise brok for an unknown service: %s/%s", hname, sdesc)
return
logger.info("Acknowledge set: %s/%s - %s", hname, sdesc, s.state)
else:
logger.info("Acknowledge set: %s - %s", hname, h.state)
def manage_acknowledge_expire_brok(self, b):
"""An acknowledge has been set on an item"""
data = b.data
hname = data.get('host_name', data.get('host', None))
if hname:
h = self.hosts.find_by_name(hname)
if not h:
logger.warning("Got a acknowledge raise brok for an unknown host: %s", hname)
return
sdesc = data.get('service_description', data.get('service', None))
if sdesc:
s = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if not s:
logger.warning("Got a acknowledge raise brok for an unknown service: %s/%s", hname, sdesc)
return
logger.info("Acknowledge expired: %s/%s - %s", hname, sdesc, s.state)
else:
logger.info("Acknowledge expired: %s - %s", hname, h.state)
def manage_downtime_raise_brok(self, b):
"""A downtime has been set on an item"""
data = b.data
hname = data.get('host_name', data.get('host', None))
if hname:
h = self.hosts.find_by_name(hname)
if not h:
logger.warning("Got a downtime raise brok for an unknown host: %s", hname)
return
sdesc = data.get('service_description', data.get('service', None))
if sdesc:
s = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if not s:
logger.warning("Got a downtime raise brok for an unknown service: %s/%s", hname, sdesc)
return
logger.info("Downtime set: %s/%s - %s", hname, sdesc, s.state)
else:
logger.info("Downtime set: %s - %s", hname, h.state)
def manage_downtime_expire_brok(self, b):
"""A downtime has been set on an item"""
data = b.data
hname = data.get('host_name', data.get('host', None))
if hname:
h = self.hosts.find_by_name(hname)
if not h:
logger.warning("Got a downtime end brok for an unknown host: %s", hname)
return
sdesc = data.get('service_description', data.get('service', None))
if sdesc:
s = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if not s:
logger.warning("Got a downtime end brok for an unknown service: %s/%s", hname, sdesc)
return
logger.info("Downtime end: %s/%s - %s", hname, sdesc, s.state)
else:
logger.info("Downtime end: %s - %s", hname, h.state)
| agpl-3.0 | -8,760,738,743,202,586,000 | 40.068859 | 116 | 0.570473 | false |
johnynek/netmodeler | pytools/gdd.py | 2 | 1337 | #!/usr/bin/python
#This is a python version of the graph_degree_dist.cpp code:
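# Reads a network from standard input (cin) and prints degree distribution
# statistics (clustering, transitivity, entropy, edge mutual information, ...).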
from pynetmod import *
net = Network(cin)
print "#loaded network"
ns = IntStats()
if False:
for meth in net.getNIMembers():
ns.collectN(net, meth)
print "%s (Node):" % meth;
print "\tave: %f" % ns.getAverage();
print "\tm2: %f" % ns.getMoment(2.0);
print "\tmax: %f" % ns.getMax();
print "\tmin: %f" %ns.getMin();
print "\tH: %f" % ns.getEntropy();
ns.collectByEdge(net, meth)
(h1, h2, h3) = ns.getEdgeEntropy()
print "\t#H(e_i): %f\n\t#H(e_ij): %f" % (h1, h3)
print "\t#EdgeMI: %f" % ns.getEdgeMutualInfo()
ns.collectN(net,"getDegree")
ns.collectByEdge(net, "getDegree")
print "#assortativity: %f" % (ns.getEdgeCorrelation())
print "#cluster coeff: %f" % (net.getClusterCoefficient())
print "#transitivity: %f" % (net.getTransitivity())
print "#nodes: %i" % (net.getNodeSize())
print "#edges: %i" % (net.getEdgeSize())
print "#<k>: %f" % (ns.getAverage())
print "#<k^2>: %f" % (ns.getMoment(2.0))
print "#H(degs): %f" % (ns.getEntropy())
(h1, h2, h3) = ns.getEdgeEntropy();
print "#H(e_i): %f\n#H(e_ij): %f" % (h1, h3)
print "#EdgeMI: %f" % ns.getEdgeMutualInfo()
#Print out the degree distribution:
print "#printing out degree distribution"
for (deg, count) in ns.getDist().iteritems():
print "%i %i" % (deg, count)
| gpl-2.0 | 5,159,223,848,668,642,000 | 27.446809 | 60 | 0.626028 | false |
ebar0n/django | tests/backends/tests.py | 12 | 31274 | """Tests related to django.db.backends that haven't been organized."""
import datetime
import threading
import unittest
import warnings
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper
from django.db.models.sql.constants import CURSOR
from django.test import (
TestCase, TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import (
Article, Object, ObjectReference, Person, Post, RawData, Reporter,
ReporterProxy, SchoolClass, Square,
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ,
)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
Test the custom ``django_date_trunc method``, in particular against
fields which clash with strings passed to it (e.g. 'year') (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
years = SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
Test the custom ``django_date_extract method``, in particular against fields
which clash with strings passed to it (e.g. 'day') (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
classes = SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
with connection.cursor() as cursor:
connection.ops.last_executed_query(cursor, '', ())
def test_debug_sql(self):
list(Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""last_executed_query() returns a string."""
data = RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, str)
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
with connection.cursor() as cursor:
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
with self.assertRaises(Exception):
cursor.executemany(query, [(1, 2, 3)])
with self.assertRaises(Exception):
cursor.executemany(query, [(1,)])
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check it is. Refs #8901.
"""
available_apps = ['backends']
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""
An m2m save of a model with a long name and a long m2m field name
doesn't error (#8901).
"""
obj = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""
Sequence resetting as part of a flush with model with long name and
long pk name doesn't error (#8901).
"""
# A full flush is expensive to the full test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
sql_list = connection.ops.sql_flush(no_style(), tables, sequences)
with connection.cursor() as cursor:
for statement in sql_list:
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [Post])
with connection.cursor() as cursor:
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
with connection.cursor() as cursor:
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
with connection.cursor() as cursor:
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
opts = Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
with connection.cursor() as cursor:
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as unicode objects #6254
qn = connection.ops.quote_name
Person(first_name="John", last_name="Doe").save()
Person(first_name="Jane", last_name="Doe").save()
Person(first_name="Mary", last_name="Agnelline").save()
Person(first_name="Peter", last_name="Parker").save()
Person(first_name="Clark", last_name="Kent").save()
opts2 = Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
with connection.cursor() as cursor:
cursor.execute(
'SELECT %s, %s FROM %s ORDER BY %s' % (
qn(f3.column),
qn(f4.column),
connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column),
)
)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Creating an existing table returns a DatabaseError """
query = 'CREATE TABLE %s (id INTEGER);' % Article._meta.db_table
with connection.cursor() as cursor:
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
# Both InterfaceError and ProgrammingError seem to be used when
# accessing closed cursor (psycopg2 has InterfaceError, rest seem
# to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
# psycopg2 offers us a way to check that by closed attribute.
# So, run only on psycopg2 for that reason.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
is_usable() doesn't crash when the database disconnects (#21553).
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
with connection.cursor() as cursor:
reset_queries()
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
self.assertCountEqual(connection.queries[0], ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
The backend doesn't store an unlimited number of queries (#12581).
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connection = connection.copy()
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(1, len(w))
self.assertEqual(
str(w[0].message),
"Limit for query logging exceeded, only the last 3 queries will be returned."
)
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
def test_timezone_none_use_tz_false(self):
connection.ensure_connection()
with self.settings(TIME_ZONE=None, USE_TZ=False):
connection.init_connection_state()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# verify if its type is django.database.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy Refs #17519
a2 = Article(
headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30,
)
with self.assertRaises(IntegrityError):
a2.save()
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy Refs #17519
# Create another article
r_proxy = ReporterProxy.objects.get(pk=self.r.pk)
Article.objects.create(
headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy,
)
# Retrieve the second article from the DB
a2 = Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
with self.assertRaises(IntegrityError):
a2.save()
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, should be able to write bad data
without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using context manager), should be
able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ['backends']
def test_default_connection_thread_local(self):
"""
The default connection (i.e. django.db.connection) is different for
each thread (#17258).
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
# Each created connection got different inner connection.
self.assertEqual(len({conn.connection for conn in connections_dict.values()}), 3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
The connections are different for each thread (#17258).
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
A connection can be passed from one thread to the other (#17258).
"""
Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
A connection that is not explicitly shareable cannot be closed by
another thread (#17258).
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
Zero as id for AutoField should raise exception in MySQL, because MySQL
does not allow zero for autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = Object.objects.create()
ref = ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(Object.objects.filter(id=12345).exists())
ref = ObjectReference.objects.create(obj_id=12345)
ref_new = ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = Object.objects.create()
obj.related_objects.create()
self.assertEqual(Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = Object._meta.get_field("related_objects").remote_field.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
| bsd-3-clause | 8,204,491,491,207,911,000 | 38.93742 | 112 | 0.620127 | false |
ImaginaryLandscape/django-cumulus | cumulus/management/commands/syncfiles.py | 1 | 11355 | import datetime
import fnmatch
import optparse
import os
import re
from django.conf import settings
from django.core.management.base import CommandError, NoArgsCommand
from cumulus.authentication import Auth
from cumulus.settings import CUMULUS
from cumulus.storage import get_headers, get_content_type, get_gzipped_contents
class Command(NoArgsCommand):
help = "Synchronizes project static *or* media files to cloud files."
option_list = NoArgsCommand.option_list + (
optparse.make_option("-i", "--include", action="append", default=[],
dest="includes", metavar="PATTERN",
help="Include file or directories matching this glob-style "
"pattern. Use multiple times to include more."),
optparse.make_option("-e", "--exclude", action="append", default=[],
dest="excludes", metavar="PATTERN",
help="Exclude files or directories matching this glob-style "
"pattern. Use multiple times to exclude more."),
optparse.make_option("-w", "--wipe",
action="store_true", dest="wipe", default=False,
help="Wipes out entire contents of container first."),
optparse.make_option("-t", "--test-run",
action="store_true", dest="test_run", default=False,
help="Performs a test run of the sync."),
optparse.make_option("-q", "--quiet",
action="store_true", dest="test_run", default=False,
help="Do not display any output."),
optparse.make_option("-c", "--container",
dest="container", help="Override STATIC_CONTAINER."),
optparse.make_option("-s", "--static",
action="store_true", dest="syncstatic", default=False,
help="Sync static files located at settings.STATIC_ROOT path."),
optparse.make_option("-m", "--media",
action="store_true", dest="syncmedia", default=False,
help="Sync media files located at settings.MEDIA_ROOT path."),
)
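    # Illustrative invocations (assuming the standard manage.py entry point; these
    # examples are not part of the original module):
    #   python manage.py syncfiles --static               # push STATIC_ROOT to the static container
    #   python manage.py syncfiles --media --wipe         # wipe the container, then push MEDIA_ROOT
    #   python manage.py syncfiles --static -t -e "*.map" # test run, excluding files matching *.map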
def set_options(self, options):
"""
Sets instance variables based on an options dict
"""
# COMMAND LINE OPTIONS
self.wipe = options.get("wipe")
self.test_run = options.get("test_run")
        self.quiet = options.get("quiet")
self.container_name = options.get("container")
self.verbosity = int(options.get("verbosity"))
self.syncmedia = options.get("syncmedia")
self.syncstatic = options.get("syncstatic")
if self.test_run:
self.verbosity = 2
cli_includes = options.get("includes")
cli_excludes = options.get("excludes")
# CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY
if self.syncmedia and self.syncstatic:
raise CommandError("options --media and --static are mutually exclusive")
if not self.container_name:
if self.syncmedia:
self.container_name = CUMULUS["CONTAINER"]
elif self.syncstatic:
self.container_name = CUMULUS["STATIC_CONTAINER"]
else:
raise CommandError("must select one of the required options, either --media or --static")
settings_includes = CUMULUS["INCLUDE_LIST"]
settings_excludes = CUMULUS["EXCLUDE_LIST"]
# PATH SETTINGS
if self.syncmedia:
self.file_root = os.path.abspath(settings.MEDIA_ROOT)
self.file_url = settings.MEDIA_URL
elif self.syncstatic:
self.file_root = os.path.abspath(settings.STATIC_ROOT)
self.file_url = settings.STATIC_URL
if not self.file_root.endswith("/"):
self.file_root = self.file_root + "/"
if self.file_url.startswith("/"):
self.file_url = self.file_url[1:]
# SYNCSTATIC VARS
# combine includes and excludes from the cli and django settings file
self.includes = list(set(cli_includes + settings_includes))
self.excludes = list(set(cli_excludes + settings_excludes))
# transform glob patterns to regular expressions
self.local_filenames = []
self.create_count = 0
self.upload_count = 0
self.update_count = 0
self.skip_count = 0
self.delete_count = 0
def handle_noargs(self, *args, **options):
# setup
self.set_options(options)
self._connection = Auth()._get_connection()
self.container = self._connection.get_container(self.container_name)
# wipe first
if self.wipe:
self.wipe_container()
# match local files
abspaths = self.match_local(self.file_root, self.includes, self.excludes)
relpaths = []
for path in abspaths:
filename = path.split(self.file_root)[1]
if filename.startswith("/"):
filename = filename[1:]
relpaths.append(filename)
if not relpaths:
settings_root_prefix = "MEDIA" if self.syncmedia else "STATIC"
raise CommandError("The {0}_ROOT directory is empty "
"or all files have been ignored.".format(settings_root_prefix))
for path in abspaths:
if not os.path.isfile(path):
raise CommandError("Unsupported filetype: {0}.".format(path))
# match cloud objects
cloud_objs = self.match_cloud(self.includes, self.excludes)
remote_objects = {
obj.name: datetime.datetime.strptime(
obj.last_modified,
"%Y-%m-%dT%H:%M:%S.%f") for obj in self.container.get_objects()
}
# sync
self.upload_files(abspaths, relpaths, remote_objects)
self.delete_extra_files(relpaths, cloud_objs)
if not self.quiet or self.verbosity > 1:
self.print_tally()
def match_cloud(self, includes, excludes):
"""
Returns the cloud objects that match the include and exclude patterns.
"""
cloud_objs = [cloud_obj.name for cloud_obj in self.container.get_objects()]
includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
excludes = [o for o in cloud_objs if re.match(excludes_pattern, o)]
includes = [o for o in cloud_objs if re.match(includes_pattern, o)]
return [o for o in includes if o not in excludes]
def match_local(self, prefix, includes, excludes):
"""
Filters os.walk() with include and exclude patterns.
See: http://stackoverflow.com/a/5141829/93559
"""
includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
matches = []
for root, dirs, files in os.walk(prefix, topdown=True):
# exclude dirs
dirs[:] = [os.path.join(root, d) for d in dirs]
dirs[:] = [d for d in dirs if not re.match(excludes_pattern,
d.split(root)[1])]
# exclude/include files
files = [os.path.join(root, f) for f in files]
            files = [f for f in files
                     if not re.match(excludes_pattern, f)]
            files = [f for f in files
                     if re.match(includes_pattern, f.split(prefix)[1])]
for fname in files:
matches.append(fname)
return matches
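    # A minimal sketch of the glob-to-regex technique shared by match_cloud/match_local
    # (illustrative values only, not part of the original module):
    #
    #   includes_pattern = r"|".join(fnmatch.translate(x) for x in ["*.css", "img/*"])
    #   re.match(includes_pattern, "style.css")    # -> match object
    #   re.match(includes_pattern, "script.js")    # -> None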
def upload_files(self, abspaths, relpaths, remote_objects):
"""
        Determines files to be uploaded and calls ``upload_file`` on each.
"""
for relpath in relpaths:
abspath = [p for p in abspaths if p.endswith(relpath)][0]
cloud_datetime = remote_objects[relpath] if relpath in remote_objects else None
local_datetime = datetime.datetime.utcfromtimestamp(os.stat(abspath).st_mtime)
if cloud_datetime and local_datetime < cloud_datetime:
self.skip_count += 1
if not self.quiet:
print("Skipped {0}: not modified.".format(relpath))
continue
if relpath in remote_objects:
self.update_count += 1
else:
self.create_count += 1
self.upload_file(abspath, relpath)
def upload_file(self, abspath, cloud_filename):
"""
Uploads a file to the container.
"""
if not self.test_run:
content = open(abspath, "rb")
content_type = get_content_type(cloud_filename, content)
headers = get_headers(cloud_filename, content_type)
if headers.get("Content-Encoding") == "gzip":
content = get_gzipped_contents(content)
size = content.size
else:
size = os.stat(abspath).st_size
self.container.create(
obj_name=cloud_filename,
data=content,
content_type=content_type,
content_length=size,
content_encoding=headers.get("Content-Encoding", None),
headers=headers,
ttl=CUMULUS["FILE_TTL"],
etag=None,
)
self.upload_count += 1
if not self.quiet or self.verbosity > 1:
print("Uploaded: {0}".format(cloud_filename))
def delete_extra_files(self, relpaths, cloud_objs):
"""
Deletes any objects from the container that do not exist locally.
"""
for cloud_obj in cloud_objs:
if cloud_obj not in relpaths:
if not self.test_run:
self.delete_cloud_obj(cloud_obj)
self.delete_count += 1
if not self.quiet or self.verbosity > 1:
print("Deleted: {0}".format(cloud_obj))
def delete_cloud_obj(self, cloud_obj):
"""
Deletes an object from the container.
"""
self._connection.delete_object(
container=self.container_name,
obj=cloud_obj,
)
def wipe_container(self):
"""
Completely wipes out the contents of the container.
"""
if self.test_run:
print("Wipe would delete {0} objects.".format(len(self.container.object_count)))
else:
if not self.quiet or self.verbosity > 1:
print("Deleting {0} objects...".format(len(self.container.object_count)))
self._connection.delete_all_objects()
def print_tally(self):
"""
Prints the final tally to stdout.
"""
self.update_count = self.upload_count - self.create_count
if self.test_run:
print("Test run complete with the following results:")
print("Skipped {0}. Created {1}. Updated {2}. Deleted {3}.".format(
self.skip_count, self.create_count, self.update_count, self.delete_count))
| bsd-3-clause | -4,800,797,637,791,206,000 | 41.52809 | 105 | 0.565214 | false |
admiralobvious/vyper | vyper/errors.py | 1 | 1587 | class ConfigFileNotFoundError(Exception):
"""Denotes failing to find configuration file."""
def __init__(self, message, locations, *args):
self.message = message
self.locations = ", ".join(str(l) for l in locations)
super(ConfigFileNotFoundError, self).__init__(message, locations, *args)
def __str__(self):
return "Config File {0} Not Found in {1}".format(self.message, self.locations)
class RemoteConfigError(Exception):
"""Denotes encountering an error while trying to
pull the configuration from the remote provider.
"""
def __init__(self, message, *args):
self.message = message
super(RemoteConfigError, self).__init__(message, *args)
def __str__(self):
return "Remote Configuration Error {0}".format(self.message)
class UnsupportedConfigError(Exception):
"""Denotes encountering an unsupported configuration file type."""
def __init__(self, message, *args):
self.message = message
super(UnsupportedConfigError, self).__init__(message, *args)
def __str__(self):
return "Unsupported Config Type {0}".format(self.message)
class UnsupportedRemoteProviderError(Exception):
"""Denotes encountering an unsupported remote provider.
Currently only etcd, consul and zookeeper are supported.
"""
def __init__(self, message, *args):
self.message = message
super(UnsupportedRemoteProviderError, self).__init__(message, *args)
def __str__(self):
return "Unsupported Remote Provider Type {0}".format(self.message)
| mit | 6,197,484,613,048,238,000 | 32.765957 | 86 | 0.666037 | false |
theheros/kbengine | kbe/src/lib/python/Lib/unittest/test/test_assertions.py | 51 | 11863 | import datetime
import warnings
import unittest
class Test_Assertions(unittest.TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
    def test_AlmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn("'kot'", e.args[0])
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
class TestLongMessage(unittest.TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
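        # `errors` holds four expected regexes in a fixed order:
        #   [0] longMessage=False, no msg     [1] longMessage=False, msg="oops"
        #   [2] longMessage=True,  no msg     [3] longMessage=True,  msg="oops"
        # getMethod() picks the test instance from the index; withMsg = i % 2.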
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages('assertAlmostEqual', (1, 2),
["^1 != 2 within 7 places$", "^oops$",
"^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
self.assertMessages('assertSequenceEqual', ([], [None]),
["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
['^None not found in \[\]$', "^oops$",
'^None not found in \[\]$',
'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
['^None unexpectedly found in \[None\]$', "^oops$",
'^None unexpectedly found in \[None\]$',
'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
[r"\+ \{'key': 'value'\}$", "^oops$",
"\+ \{'key': 'value'\}$",
"\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
["^Missing: 'key'$", "^oops$",
"^Missing: 'key'$",
"^Missing: 'key' : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo$", "^oops$",
r"\+ foo$",
r"\+ foo : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
| lgpl-3.0 | -7,332,875,693,342,941,000 | 40.479021 | 92 | 0.515047 | false |
SoftwareDefinedBuildings/smap | python/tinyos/message/__init__.py | 6 | 1655 | #
# Copyright (c) 2005
# The President and Fellows of Harvard College.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Geoffrey Mainland <[email protected]>
#
__all__ = ["Message", "MoteIF", "SerialPacket"]
| bsd-2-clause | 2,143,325,264,955,813,000 | 52.387097 | 77 | 0.772205 | false |
LiaoPan/blaze | blaze/__init__.py | 13 | 2475 | from __future__ import absolute_import, division, print_function
try:
import h5py # if we import h5py after tables we segfault
except ImportError:
pass
from pandas import DataFrame
from odo import odo, convert, append, resource, drop
from odo.backends.csv import CSV
from odo.backends.json import JSON, JSONLines
from multipledispatch import halt_ordering, restart_ordering
halt_ordering() # Turn off multipledispatch ordering
from datashape import dshape, discover
from .utils import ignoring
from .expr import (Symbol, TableSymbol, symbol, ndim, shape)
from .expr import (by, count, count_values, distinct, head, join, label, like,
mean, merge, nunique, relabel, selection, sort, summary, var,
transform)
from .expr import (date, datetime, day, hour, microsecond, millisecond, month,
second, time, year)
from .expr.arrays import (tensordot, transpose)
from .expr.functions import *
from .index import create_index
from .interactive import *
from .compute.pmap import set_default_pmap
from .compute.csv import *
from .compute.json import *
from .compute.python import *
from .compute.pandas import *
from .compute.numpy import *
from .compute.core import *
from .compute.core import compute
from .cached import CachedDataset
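# The optional integrations below are wrapped in ignoring(...) blocks, a context
# manager that suppresses the listed exceptions, so a backend whose third-party
# dependency is missing is simply skipped instead of breaking "import blaze".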
with ignoring(ImportError):
from .server import *
with ignoring(ImportError):
from .sql import *
from .compute.sql import *
with ignoring(ImportError):
from .compute.dask import *
with ignoring(ImportError, AttributeError):
from .compute.spark import *
with ignoring(ImportError, TypeError):
from .compute.sparksql import *
with ignoring(ImportError):
from dynd import nd
from .compute.dynd import *
with ignoring(ImportError):
from .compute.h5py import *
with ignoring(ImportError):
from .compute.hdfstore import *
with ignoring(ImportError):
from .compute.pytables import *
with ignoring(ImportError):
from .compute.chunks import *
with ignoring(ImportError):
from .compute.bcolz import *
with ignoring(ImportError):
from .mongo import *
from .compute.mongo import *
with ignoring(ImportError):
from .pytables import *
from .compute.pytables import *
from .expr import concat # Some module re-export toolz.concat and * catches it.
restart_ordering() # Restart multipledispatch ordering and do ordering
inf = float('inf')
nan = float('nan')
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| bsd-3-clause | -7,494,897,213,509,236,000 | 29.9375 | 80 | 0.745859 | false |
tianxiawuzhei/cocos-quick-cpp | publibs/cocos2dx/tools/cocos2d-console/plugins/plugin_package/helper/set_framework_helper.py | 6 | 3934 |
import os
import os.path
import json
import re
import shlex
import cocos
from MultiLanguage import MultiLanguage
class SetFrameworkHelper(object):
def __init__(self, project, package_name, version):
self._package_name = package_name
self._package_path = project["packages_dir"] + os.sep + package_name + "-" + version
self._project = project
def run(self):
package_name = self._package_name
package_path = self._package_path
if not os.path.isdir(package_path):
raise cocos.CCPluginError(MultiLanguage.get_string('PACKAGE_ERROR_PATH_NOT_FOUND_FMT', package_path),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
sln_txt = self.load_sln_win32()
if sln_txt is None:
print MultiLanguage.get_string('PACKAGE_ERROR_READ_SLN')
else:
find_tag = '(Project\(\"\{)(\S*)(\}\"\) = \"' + package_name + '\", \"\S*\", \"\{)(\S*)(\}\"\s*EndProject)'
match = re.search(find_tag, sln_txt, re.DOTALL)
if match is None:
print MultiLanguage.get_string('PACKAGE_ERROR_NOT_FOUND_PROJ', package_name)
else:
proj_id_win = match.group(2)
build_id_win = match.group(4)
self.set_win32(proj_id_win, build_id_win)
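    # The regex in run() above targets Visual Studio solution entries of roughly this
    # shape (GUIDs below are illustrative, not taken from a real project):
    #   Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "MyPackage", "MyPackage.vcxproj", "{1A2B3C4D-0000-0000-0000-000000000000}"
    #   EndProject
    # group(2) becomes proj_id_win and group(4) becomes build_id_win.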
def set_win32(self, proj_id, build_id):
text = self.load_install_json()
if text is None:
print MultiLanguage.get_string('PACKAGE_ERROR_JSON_READ_FAILED')
return
find_tag = '(\{\s*\"command\":\s*\"add_project\",\s*\"name\":\s*\"\S*\",\s*\"project_id\":\s*\")(\S*)(\",\s*\"build_id\":\s*\")(\S*)(\",\s*\"platform\":\s*\"win\"\s*\})'
match = re.search(find_tag, text, re.DOTALL)
if not match is None:
old_id = match.group(2)
text = text.replace(old_id, proj_id)
old_id = match.group(4)
text = text.replace(old_id, build_id)
self.save_install_json(text)
return
index = text.find("[")
if index<0:
print MultiLanguage.get_string('PACKAGE_ERROR_JSON_ERROR')
return
headers = text[0:index+1]
tails = text[index+1:]
skip_str = '\n\t\t'
str_to_add = '\n\t{'
str_to_add += skip_str + '"command": "add_project",'
str_to_add += skip_str + '"name": "' + self._package_name + '",'
str_to_add += skip_str + '"project_id": "' + proj_id + '",'
str_to_add += skip_str + '"build_id": "' + build_id + '",'
str_to_add += skip_str + '"platform": "win"'
str_to_add += '\n\t},'
text = headers + str_to_add + tails
self.save_install_json(text)
def load_install_json(self):
install_json_file = self._package_path + os.sep + "install.json"
if not os.path.isfile(install_json_file):
print MultiLanguage.get_string('PACKAGE_ERROR_NOT_FOUND_JSON')
return
f = open(install_json_file, "rb")
text = f.read()
f.close()
return text
def save_install_json(self, text):
install_json_file = self._package_path + os.sep + "install.json"
f = open(install_json_file, "wb")
f.write(text)
f.close()
def load_sln_win32(self):
if not "proj.win32" in self._project:
print MultiLanguage.get_string('PACKAGE_ERROR_WIN32_NOT_FOUND')
return
workdir = self._project["proj.win32"]
        files = os.listdir(workdir)
        proj_file_path = None
        for filename in files:
if filename[-4:] == ".sln":
proj_file_path = workdir + os.sep + filename
break
if proj_file_path is None:
print MultiLanguage.get_string('PACKAGE_ERROR_NO_SLN_IN_WIN32')
return
f = open(proj_file_path, "rb")
text = f.read()
f.close()
return text
| mit | 7,517,647,855,172,593,000 | 34.441441 | 177 | 0.539654 | false |
simartin/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/acceptance_test.py | 3 | 40210 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import textwrap
import types
import attr
import py
import six
import pytest
from _pytest.compat import importlib_metadata
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import EXIT_USAGEERROR
from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG
def prepend_pythonpath(*dirs):
cur = os.getenv("PYTHONPATH")
if cur:
dirs += (cur,)
return os.pathsep.join(str(p) for p in dirs)
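# For example (derived from the code above): prepend_pythonpath("/a", "/b") returns
# "/a:/b" on POSIX, with any pre-existing PYTHONPATH value appended last.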
class TestGeneralUsage(object):
def test_config_error(self, testdir):
testdir.copy_example("conftest_usageerror/conftest.py")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret == EXIT_USAGEERROR
result.stderr.fnmatch_lines(["*ERROR: hello"])
result.stdout.fnmatch_lines(["*pytest_unconfigure_called"])
def test_root_conftest_syntax_error(self, testdir):
testdir.makepyfile(conftest="raise SyntaxError\n")
result = testdir.runpytest()
result.stderr.fnmatch_lines(["*raise SyntaxError*"])
assert result.ret != 0
def test_early_hook_error_issue38_1(self, testdir):
testdir.makeconftest(
"""
def pytest_sessionstart():
0 / 0
"""
)
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*def pytest_sessionstart():*", "*INTERNALERROR*0 / 0*"]
)
def test_early_hook_configure_error_issue38(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
0 / 0
"""
)
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# here we get it on stderr
result.stderr.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
def test_file_not_found(self, testdir):
result = testdir.runpytest("asd")
assert result.ret != 0
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
def test_file_not_found_unconfigure_issue143(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
print("---configure")
def pytest_unconfigure():
print("---unconfigure")
"""
)
result = testdir.runpytest("-s", "asd")
assert result.ret == 4 # EXIT_USAGEERROR
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
result.stdout.fnmatch_lines(["*---configure", "*---unconfigure"])
def test_config_preparse_plugin_option(self, testdir):
testdir.makepyfile(
pytest_xyz="""
def pytest_addoption(parser):
parser.addoption("--xyz", dest="xyz", action="store")
"""
)
testdir.makepyfile(
test_one="""
def test_option(pytestconfig):
assert pytestconfig.option.xyz == "123"
"""
)
result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("load_cov_early", [True, False])
def test_early_load_setuptools_name(self, testdir, monkeypatch, load_cov_early):
testdir.makepyfile(mytestplugin1_module="")
testdir.makepyfile(mytestplugin2_module="")
testdir.makepyfile(mycov_module="")
testdir.syspathinsert()
loaded = []
@attr.s
class DummyEntryPoint(object):
name = attr.ib()
module = attr.ib()
group = "pytest11"
def load(self):
__import__(self.module)
loaded.append(self.name)
return sys.modules[self.module]
entry_points = [
DummyEntryPoint("myplugin1", "mytestplugin1_module"),
DummyEntryPoint("myplugin2", "mytestplugin2_module"),
DummyEntryPoint("mycov", "mycov_module"),
]
@attr.s
class DummyDist(object):
entry_points = attr.ib()
files = ()
def my_dists():
return (DummyDist(entry_points),)
monkeypatch.setattr(importlib_metadata, "distributions", my_dists)
params = ("-p", "mycov") if load_cov_early else ()
testdir.runpytest_inprocess(*params)
if load_cov_early:
assert loaded == ["mycov", "myplugin1", "myplugin2"]
else:
assert loaded == ["myplugin1", "myplugin2", "mycov"]
def test_assertion_magic(self, testdir):
p = testdir.makepyfile(
"""
def test_this():
x = 0
assert x
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["> assert x", "E assert 0"])
assert result.ret == 1
def test_nested_import_error(self, testdir):
p = testdir.makepyfile(
"""
import import_fails
def test_this():
assert import_fails.a == 1
"""
)
testdir.makepyfile(import_fails="import does_not_work")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
# XXX on jython this fails: "> import import_fails",
"ImportError while importing test module*",
"*No module named *does_not_work*",
]
)
assert result.ret == 2
def test_not_collectable_arguments(self, testdir):
p1 = testdir.makepyfile("")
p2 = testdir.makefile(".pyc", "123")
result = testdir.runpytest(p1, p2)
assert result.ret
result.stderr.fnmatch_lines(["*ERROR: not found:*{}".format(p2.basename)])
@pytest.mark.filterwarnings("default")
def test_better_reporting_on_conftest_load_failure(self, testdir, request):
"""Show a user-friendly traceback on conftest import failures (#486, #3332)"""
testdir.makepyfile("")
testdir.makeconftest(
"""
def foo():
import qwerty
foo()
"""
)
result = testdir.runpytest("--help")
result.stdout.fnmatch_lines(
"""
*--version*
*warning*conftest.py*
"""
)
result = testdir.runpytest()
dirname = request.node.name + "0"
exc_name = (
"ModuleNotFoundError" if sys.version_info >= (3, 6) else "ImportError"
)
result.stderr.fnmatch_lines(
[
"ImportError while loading conftest '*{sep}{dirname}{sep}conftest.py'.".format(
dirname=dirname, sep=os.sep
),
"conftest.py:3: in <module>",
" foo()",
"conftest.py:2: in foo",
" import qwerty",
"E {}: No module named {q}qwerty{q}".format(
exc_name, q="" if six.PY2 else "'"
),
]
)
def test_early_skip(self, testdir):
testdir.mkdir("xyz")
testdir.makeconftest(
"""
import pytest
def pytest_collect_directory():
pytest.skip("early")
"""
)
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines(["*1 skip*"])
def test_issue88_initial_file_multinodes(self, testdir):
testdir.copy_example("issue88_initial_file_multinodes")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines(["*MyFile*test_issue88*", "*Module*test_issue88*"])
def test_issue93_initialnode_importing_capturing(self, testdir):
testdir.makeconftest(
"""
import sys
print("should not be seen")
sys.stderr.write("stder42\\n")
"""
)
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
assert "should not be seen" not in result.stdout.str()
assert "stderr42" not in result.stderr.str()
def test_conftest_printing_shows_if_error(self, testdir):
testdir.makeconftest(
"""
print("should be seen")
assert 0
"""
)
result = testdir.runpytest()
assert result.ret != 0
assert "should be seen" in result.stdout.str()
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
def test_chdir(self, testdir):
testdir.tmpdir.join("py").mksymlinkto(py._pydir)
p = testdir.tmpdir.join("main.py")
p.write(
textwrap.dedent(
"""\
import sys, os
sys.path.insert(0, '')
import py
print(py.__file__)
print(py.__path__)
os.chdir(os.path.dirname(os.getcwd()))
print(py.log)
"""
)
)
result = testdir.runpython(p)
assert not result.ret
def test_issue109_sibling_conftests_not_loaded(self, testdir):
sub1 = testdir.mkdir("sub1")
sub2 = testdir.mkdir("sub2")
sub1.join("conftest.py").write("assert 0")
result = testdir.runpytest(sub2)
assert result.ret == EXIT_NOTESTSCOLLECTED
sub2.ensure("__init__.py")
p = sub2.ensure("test_hello.py")
result = testdir.runpytest(p)
assert result.ret == EXIT_NOTESTSCOLLECTED
result = testdir.runpytest(sub1)
assert result.ret == EXIT_USAGEERROR
def test_directory_skipped(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_ignore_collect():
pytest.skip("intentional")
"""
)
testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_multiple_items_per_collector_byid(self, testdir):
c = testdir.makeconftest(
"""
import pytest
class MyItem(pytest.Item):
def runtest(self):
pass
class MyCollector(pytest.File):
def collect(self):
return [MyItem(name="xyz", parent=self)]
def pytest_collect_file(path, parent):
if path.basename.startswith("conftest"):
return MyCollector(path, parent)
"""
)
result = testdir.runpytest(c.basename + "::" + "xyz")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 pass*"])
def test_skip_on_generated_funcarg_id(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_generate_tests(metafunc):
metafunc.parametrize('x', [3], ids=['hello-123'])
def pytest_runtest_setup(item):
print(item.keywords)
if 'hello-123' in item.keywords:
pytest.skip("hello")
assert 0
"""
)
p = testdir.makepyfile("""def test_func(x): pass""")
res = testdir.runpytest(p, SHOW_PYTEST_WARNINGS_ARG)
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 skipped*"])
def test_direct_addressing_selects(self, testdir):
p = testdir.makepyfile(
"""
def pytest_generate_tests(metafunc):
metafunc.parametrize('i', [1, 2], ids=["1", "2"])
def test_func(i):
pass
"""
)
res = testdir.runpytest(
p.basename + "::" + "test_func[1]", SHOW_PYTEST_WARNINGS_ARG
)
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_direct_addressing_notfound(self, testdir):
p = testdir.makepyfile(
"""
def test_func():
pass
"""
)
res = testdir.runpytest(p.basename + "::" + "test_notfound")
assert res.ret
res.stderr.fnmatch_lines(["*ERROR*not found*"])
def test_docstring_on_hookspec(self):
from _pytest import hookspec
for name, value in vars(hookspec).items():
if name.startswith("pytest_"):
assert value.__doc__, "no docstring for %s" % name
def test_initialization_error_issue49(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
x
"""
)
result = testdir.runpytest()
assert result.ret == 3 # internal error
result.stderr.fnmatch_lines(["INTERNAL*pytest_configure*", "INTERNAL*x*"])
assert "sessionstarttime" not in result.stderr.str()
@pytest.mark.parametrize("lookfor", ["test_fun.py::test_a"])
def test_issue134_report_error_when_collecting_member(self, testdir, lookfor):
testdir.makepyfile(
test_fun="""
def test_a():
pass
def"""
)
result = testdir.runpytest(lookfor)
result.stdout.fnmatch_lines(["*SyntaxError*"])
if "::" in lookfor:
result.stderr.fnmatch_lines(["*ERROR*"])
assert result.ret == 4 # usage error only if item not found
def test_report_all_failed_collections_initargs(self, testdir):
testdir.makeconftest(
"""
from _pytest.main import EXIT_USAGEERROR
def pytest_sessionfinish(exitstatus):
assert exitstatus == EXIT_USAGEERROR
print("pytest_sessionfinish_called")
"""
)
testdir.makepyfile(test_a="def", test_b="def")
result = testdir.runpytest("test_a.py::a", "test_b.py::b")
result.stderr.fnmatch_lines(["*ERROR*test_a.py::a*", "*ERROR*test_b.py::b*"])
result.stdout.fnmatch_lines(["pytest_sessionfinish_called"])
assert result.ret == EXIT_USAGEERROR
@pytest.mark.usefixtures("recwarn")
def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
"""
Ref #383. Python 3.3's namespace package messed with our import hooks
Importing a module that didn't exist, even if the ImportError was
gracefully handled, would make our test crash.
Use recwarn here to silence this warning in Python 2.7:
ImportWarning: Not importing directory '...\not_a_package': missing __init__.py
"""
testdir.mkdir("not_a_package")
p = testdir.makepyfile(
"""
try:
from not_a_package import doesnt_exist
except ImportError:
# We handle the import error gracefully here
pass
def test_whatever():
pass
"""
)
res = testdir.runpytest(p.basename)
assert res.ret == 0
def test_unknown_option(self, testdir):
result = testdir.runpytest("--qwlkej")
result.stderr.fnmatch_lines(
"""
*unrecognized*
"""
)
def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
monkeypatch.setattr("inspect.getsourcelines", None)
p = testdir.makepyfile(
"""
def raise_error(obj):
raise IOError('source code not available')
import inspect
inspect.getsourcelines = raise_error
def test_foo(invalid_fixture):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines(
["*source code not available*", "E*fixture 'invalid_fixture' not found"]
)
def test_plugins_given_as_strings(self, tmpdir, monkeypatch, _sys_snapshot):
"""test that str values passed to main() as `plugins` arg
are interpreted as module names to be imported and registered.
#855.
"""
with pytest.raises(ImportError) as excinfo:
pytest.main([str(tmpdir)], plugins=["invalid.module"])
assert "invalid" in str(excinfo.value)
p = tmpdir.join("test_test_plugins_given_as_strings.py")
p.write("def test_foo(): pass")
mod = types.ModuleType("myplugin")
monkeypatch.setitem(sys.modules, "myplugin", mod)
assert pytest.main(args=[str(tmpdir)], plugins=["myplugin"]) == 0
def test_parametrized_with_bytes_regex(self, testdir):
p = testdir.makepyfile(
"""
import re
import pytest
@pytest.mark.parametrize('r', [re.compile(b'foo')])
def test_stuff(r):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines(["*1 passed*"])
def test_parametrized_with_null_bytes(self, testdir):
"""Test parametrization with values that contain null bytes and unicode characters (#2644, #2957)"""
p = testdir.makepyfile(
u"""
# encoding: UTF-8
import pytest
@pytest.mark.parametrize("data", [b"\\x00", "\\x00", u'ação'])
def test_foo(data):
assert data
"""
)
res = testdir.runpytest(p)
res.assert_outcomes(passed=3)
class TestInvocationVariants(object):
def test_earlyinit(self, testdir):
p = testdir.makepyfile(
"""
import pytest
assert hasattr(pytest, 'mark')
"""
)
result = testdir.runpython(p)
assert result.ret == 0
@pytest.mark.xfail("sys.platform.startswith('java')")
def test_pydoc(self, testdir):
for name in ("py.test", "pytest"):
result = testdir.runpython_c("import {};help({})".format(name, name))
assert result.ret == 0
s = result.stdout.str()
assert "MarkGenerator" in s
def test_import_star_py_dot_test(self, testdir):
p = testdir.makepyfile(
"""
from py.test import *
#collect
#cmdline
#Item
# assert collect.Item is Item
# assert collect.Collector is Collector
main
skip
xfail
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_import_star_pytest(self, testdir):
p = testdir.makepyfile(
"""
from pytest import *
#Item
#File
main
skip
xfail
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_double_pytestcmdline(self, testdir):
p = testdir.makepyfile(
run="""
import pytest
pytest.main()
pytest.main()
"""
)
testdir.makepyfile(
"""
def test_hello():
pass
"""
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(["*1 passed*", "*1 passed*"])
def test_python_minus_m_invocation_ok(self, testdir):
p1 = testdir.makepyfile("def test_hello(): pass")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
def test_python_minus_m_invocation_fail(self, testdir):
p1 = testdir.makepyfile("def test_fail(): 0/0")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 1
def test_python_pytest_package(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_equivalence_pytest_pytest(self):
assert pytest.main == py.test.cmdline.main
def test_invoke_with_invalid_type(self, capsys):
with pytest.raises(
TypeError, match="expected to be a list or tuple of strings, got: '-h'"
):
pytest.main("-h")
def test_invoke_with_path(self, tmpdir, capsys):
retcode = pytest.main(tmpdir)
assert retcode == EXIT_NOTESTSCOLLECTED
out, err = capsys.readouterr()
def test_invoke_plugin_api(self, testdir, capsys):
class MyPlugin(object):
def pytest_addoption(self, parser):
parser.addoption("--myopt")
pytest.main(["-h"], plugins=[MyPlugin()])
out, err = capsys.readouterr()
assert "--myopt" in out
def test_pyargs_importerror(self, testdir, monkeypatch):
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("raise ImportError")
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret != 0
result.stdout.fnmatch_lines(["collected*0*items*/*1*errors"])
def test_cmdline_python_package(self, testdir, monkeypatch):
import warnings
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("def test_hello(): pass")
path.join("test_world.py").write("def test_world(): pass")
result = testdir.runpytest("--pyargs", "tpkg")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
empty_package = testdir.mkpydir("empty_package")
monkeypatch.setenv("PYTHONPATH", str(empty_package), prepend=os.pathsep)
# the path which is not a package raises a warning on pypy;
# no idea why only pypy and not normal python warn about it here
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
result = testdir.runpytest("--pyargs", ".")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
monkeypatch.setenv("PYTHONPATH", str(testdir), prepend=os.pathsep)
result = testdir.runpytest("--pyargs", "tpkg.test_missing", syspathinsert=True)
assert result.ret != 0
result.stderr.fnmatch_lines(["*not*found*test_missing*"])
def test_cmdline_python_namespace_package(self, testdir, monkeypatch):
"""
test --pyargs option with namespace packages (#1567)
Ref: https://packaging.python.org/guides/packaging-namespace-packages/
"""
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
search_path = []
for dirname in "hello", "world":
d = testdir.mkdir(dirname)
search_path.append(d)
ns = d.mkdir("ns_pkg")
ns.join("__init__.py").write(
"__import__('pkg_resources').declare_namespace(__name__)"
)
lib = ns.mkdir(dirname)
lib.ensure("__init__.py")
lib.join("test_{}.py".format(dirname)).write(
"def test_{}(): pass\ndef test_other():pass".format(dirname)
)
# The structure of the test directory is now:
# .
# ├── hello
# │ └── ns_pkg
# │ ├── __init__.py
# │ └── hello
# │ ├── __init__.py
# │ └── test_hello.py
# └── world
# └── ns_pkg
# ├── __init__.py
# └── world
# ├── __init__.py
# └── test_world.py
# NOTE: the different/reversed ordering is intentional here.
monkeypatch.setenv("PYTHONPATH", prepend_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
# mixed module and filenames:
monkeypatch.chdir("world")
result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"test_hello.py::test_hello*PASSED*",
"test_hello.py::test_other*PASSED*",
"ns_pkg/world/test_world.py::test_world*PASSED*",
"ns_pkg/world/test_world.py::test_other*PASSED*",
"*4 passed in*",
]
)
# specify tests within a module
testdir.chdir()
result = testdir.runpytest(
"--pyargs", "-v", "ns_pkg.world.test_world::test_other"
)
assert result.ret == 0
result.stdout.fnmatch_lines(
["*test_world.py::test_other*PASSED*", "*1 passed*"]
)
def test_invoke_test_and_doctestmodules(self, testdir):
p = testdir.makepyfile(
"""
def test():
pass
"""
)
result = testdir.runpytest(str(p) + "::test", "--doctest-modules")
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.skipif(not hasattr(os, "symlink"), reason="requires symlinks")
def test_cmdline_python_package_symlink(self, testdir, monkeypatch):
"""
test --pyargs option with packages with path containing symlink can
have conftest.py in their package (#2985)
"""
# dummy check that we can actually create symlinks: on Windows `os.symlink` is available,
# but normal users require special admin privileges to create symlinks.
if sys.platform == "win32":
try:
os.symlink(
str(testdir.tmpdir.ensure("tmpfile")),
str(testdir.tmpdir.join("tmpfile2")),
)
except OSError as e:
pytest.skip(six.text_type(e.args[0]))
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
dirname = "lib"
d = testdir.mkdir(dirname)
foo = d.mkdir("foo")
foo.ensure("__init__.py")
lib = foo.mkdir("bar")
lib.ensure("__init__.py")
lib.join("test_bar.py").write(
"def test_bar(): pass\ndef test_other(a_fixture):pass"
)
lib.join("conftest.py").write(
"import pytest\[email protected]\ndef a_fixture():pass"
)
d_local = testdir.mkdir("local")
symlink_location = os.path.join(str(d_local), "lib")
if six.PY2:
os.symlink(str(d), symlink_location)
else:
os.symlink(str(d), symlink_location, target_is_directory=True)
# The structure of the test directory is now:
# .
# ├── local
# │ └── lib -> ../lib
# └── lib
# └── foo
# ├── __init__.py
# └── bar
# ├── __init__.py
# ├── conftest.py
# └── test_bar.py
# NOTE: the different/reversed ordering is intentional here.
search_path = ["lib", os.path.join("local", "lib")]
monkeypatch.setenv("PYTHONPATH", prepend_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
# module picked up in symlink-ed directory:
# It picks up local/lib/foo/bar (symlink) via sys.path.
result = testdir.runpytest("--pyargs", "-v", "foo.bar")
testdir.chdir()
assert result.ret == 0
if hasattr(py.path.local, "mksymlinkto"):
result.stdout.fnmatch_lines(
[
"lib/foo/bar/test_bar.py::test_bar PASSED*",
"lib/foo/bar/test_bar.py::test_other PASSED*",
"*2 passed*",
]
)
else:
result.stdout.fnmatch_lines(
[
"*lib/foo/bar/test_bar.py::test_bar PASSED*",
"*lib/foo/bar/test_bar.py::test_other PASSED*",
"*2 passed*",
]
)
def test_cmdline_python_package_not_exists(self, testdir):
result = testdir.runpytest("--pyargs", "tpkgwhatv")
assert result.ret
result.stderr.fnmatch_lines(["ERROR*file*or*package*not*found*"])
@pytest.mark.xfail(reason="decide: feature or bug")
def test_noclass_discovery_if_not_testcase(self, testdir):
testpath = testdir.makepyfile(
"""
import unittest
class TestHello(object):
def test_hello(self):
assert self.attr
class RealTest(unittest.TestCase, TestHello):
attr = 42
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1)
def test_doctest_id(self, testdir):
testdir.makefile(
".txt",
"""
>>> x=3
>>> x
4
""",
)
result = testdir.runpytest("-rf")
lines = result.stdout.str().splitlines()
for line in lines:
if line.startswith(("FAIL ", "FAILED ")):
_fail, _sep, testid = line.partition(" ")
break
result = testdir.runpytest(testid, "-rf")
result.stdout.fnmatch_lines(
["FAILED test_doctest_id.txt::test_doctest_id.txt", "*1 failed*"]
)
def test_core_backward_compatibility(self):
"""Test backward compatibility for get_plugin_manager function. See #787."""
import _pytest.config
assert (
type(_pytest.config.get_plugin_manager())
is _pytest.config.PytestPluginManager
)
def test_has_plugin(self, request):
"""Test hasplugin function of the plugin manager (#932)."""
assert request.config.pluginmanager.hasplugin("python")
class TestDurations(object):
source = """
import time
frag = 0.002
def test_something():
pass
def test_2():
time.sleep(frag*5)
def test_1():
time.sleep(frag)
def test_3():
time.sleep(frag*10)
"""
def test_calls(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
["*durations*", "*call*test_3*", "*call*test_2*"]
)
result.stdout.fnmatch_lines(
["(0.00 durations hidden. Use -vv to show these durations.)"]
)
def test_calls_show_2(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2")
assert result.ret == 0
lines = result.stdout.get_lines_after("*slowest*durations*")
assert "4 passed" in lines[2]
def test_calls_showall(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=0")
assert result.ret == 0
for x in "23":
for y in ("call",): # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
def test_calls_showall_verbose(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=0", "-vv")
assert result.ret == 0
for x in "123":
for y in ("call",): # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
def test_with_deselected(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2", "-k test_2")
assert result.ret == 0
result.stdout.fnmatch_lines(["*durations*", "*call*test_2*"])
def test_with_failing_collection(self, testdir):
testdir.makepyfile(self.source)
testdir.makepyfile(test_collecterror="""xyz""")
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret == 2
result.stdout.fnmatch_lines(["*Interrupted: 1 errors during collection*"])
# Collection errors abort test execution, therefore no duration is
# output
assert "duration" not in result.stdout.str()
def test_with_not(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("-k not 1")
assert result.ret == 0
class TestDurationWithFixture(object):
source = """
import pytest
import time
frag = 0.01
@pytest.fixture
def setup_fixt():
time.sleep(frag)
def test_1(setup_fixt):
time.sleep(frag)
"""
def test_setup_function(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
"""
*durations*
* setup *test_1*
* call *test_1*
"""
)
def test_zipimport_hook(testdir, tmpdir):
"""Test package loader is being used correctly (see #1837)."""
zipapp = pytest.importorskip("zipapp")
testdir.tmpdir.join("app").ensure(dir=1)
testdir.makepyfile(
**{
"app/foo.py": """
import pytest
def main():
pytest.main(['--pyarg', 'foo'])
"""
}
)
target = tmpdir.join("foo.zip")
zipapp.create_archive(str(testdir.tmpdir.join("app")), str(target), main="foo:main")
result = testdir.runpython(target)
assert result.ret == 0
result.stderr.fnmatch_lines(["*not found*foo*"])
assert "INTERNALERROR>" not in result.stdout.str()
def test_import_plugin_unicode_name(testdir):
testdir.makepyfile(myplugin="")
testdir.makepyfile(
"""
def test(): pass
"""
)
testdir.makeconftest(
"""
pytest_plugins = [u'myplugin']
"""
)
r = testdir.runpytest()
assert r.ret == 0
def test_pytest_plugins_as_module(testdir):
"""Do not raise an error if pytest_plugins attribute is a module (#3899)"""
testdir.makepyfile(
**{
"__init__.py": "",
"pytest_plugins.py": "",
"conftest.py": "from . import pytest_plugins",
"test_foo.py": "def test(): pass",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 passed in *"])
def test_deferred_hook_checking(testdir):
"""
Check hooks as late as possible (#1821).
"""
testdir.syspathinsert()
testdir.makepyfile(
**{
"plugin.py": """
class Hooks(object):
def pytest_my_hook(self, config):
pass
def pytest_configure(config):
config.pluginmanager.add_hookspecs(Hooks)
""",
"conftest.py": """
pytest_plugins = ['plugin']
def pytest_my_hook(config):
return 40
""",
"test_foo.py": """
def test(request):
assert request.config.hook.pytest_my_hook(config=request.config) == [40]
""",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_fixture_values_leak(testdir):
"""Ensure that fixture objects are properly destroyed by the garbage collector at the end of their expected
life-times (#2981).
"""
testdir.makepyfile(
"""
import attr
import gc
import pytest
import weakref
@attr.s
class SomeObj(object):
name = attr.ib()
fix_of_test1_ref = None
session_ref = None
@pytest.fixture(scope='session')
def session_fix():
global session_ref
obj = SomeObj(name='session-fixture')
session_ref = weakref.ref(obj)
return obj
@pytest.fixture
def fix(session_fix):
global fix_of_test1_ref
obj = SomeObj(name='local-fixture')
fix_of_test1_ref = weakref.ref(obj)
return obj
def test1(fix):
assert fix_of_test1_ref() is fix
def test2():
gc.collect()
# fixture "fix" created during test1 must have been destroyed by now
assert fix_of_test1_ref() is None
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 2 passed *"])
def test_fixture_order_respects_scope(testdir):
"""Ensure that fixtures are created according to scope order, regression test for #2405
"""
testdir.makepyfile(
"""
import pytest
data = {}
@pytest.fixture(scope='module')
def clean_data():
data.clear()
@pytest.fixture(autouse=True)
def add_data():
data.update(value=True)
@pytest.mark.usefixtures('clean_data')
def test_value():
assert data.get('value')
"""
)
result = testdir.runpytest()
assert result.ret == 0
def test_frame_leak_on_failing_test(testdir):
"""pytest would leak garbage referencing the frames of tests that failed that could never be reclaimed (#2798)
Unfortunately it was not possible to remove the actual circles because most of them
are made of traceback objects which cannot be weakly referenced. Those objects at least
can be eventually claimed by the garbage collector.
"""
testdir.makepyfile(
"""
import gc
import weakref
class Obj:
pass
ref = None
def test1():
obj = Obj()
global ref
ref = weakref.ref(obj)
assert 0
def test2():
gc.collect()
assert ref() is None
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 failed, 1 passed in*"])
def test_fixture_mock_integration(testdir):
"""Test that decorators applied to fixture are left working (#3774)"""
p = testdir.copy_example("acceptance/fixture_mock_integration.py")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_usage_error_code(testdir):
result = testdir.runpytest("-unknown-option-")
assert result.ret == EXIT_USAGEERROR
@pytest.mark.skipif(
sys.version_info[:2] < (3, 5), reason="async def syntax python 3.5+ only"
)
@pytest.mark.filterwarnings("default")
def test_warn_on_async_function(testdir):
testdir.makepyfile(
test_async="""
async def test_1():
pass
async def test_2():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"test_async.py::test_1",
"test_async.py::test_2",
"*Coroutine functions are not natively supported*",
"*2 skipped, 2 warnings in*",
]
)
# ensure our warning message appears only once
assert (
result.stdout.str().count("Coroutine functions are not natively supported") == 1
)
| mpl-2.0 | -4,402,272,539,979,977,700 | 32.0338 | 114 | 0.544971 | false |
amueller/advanced_training | mglearn/plots.py | 1 | 3078 | from .plot_linear_svc_regularization import plot_linear_svc_regularization
from .plot_interactive_tree import plot_tree_progressive, plot_tree_partition
from .plot_animal_tree import plot_animal_tree
from .plot_rbf_svm_parameters import plot_svm
from .plot_knn_regression import plot_knn_regression
from .plot_knn_classification import plot_knn_classification
from .plot_2d_separator import plot_2d_classification, plot_2d_separator
from .plot_nn_graphs import (plot_logistic_regression_graph,
plot_single_hidden_layer_graph,
plot_two_hidden_layer_graph)
from .plot_linear_regression import plot_linear_regression_wave
from .plot_tree_nonmonotonous import plot_tree_not_monotone
from .plot_scaling import plot_scaling
from .plot_pca import plot_pca_illustration, plot_pca_whitening, plot_pca_faces
from .plot_decomposition import plot_decomposition
from .plot_nmf import plot_nmf_illustration, plot_nmf_faces
from .plot_helpers import cm2, cm3
from .plot_agglomerative import plot_agglomerative, plot_agglomerative_algorithm
from .plot_kmeans import plot_kmeans_algorithm, plot_kmeans_boundaries
from .plot_improper_preprocessing import plot_improper_processing, plot_proper_processing
from .plot_cross_validation import (plot_threefold_split, plot_label_kfold,
plot_shuffle_split, plot_cross_validation,
plot_stratified_cross_validation)
from .plot_grid_search import plot_grid_search_overview, plot_cross_val_selection
from .plot_metrics import (plot_confusion_matrix_illustration,
plot_binary_confusion_matrix,
plot_decision_threshold)
__all__ = ['plot_linear_svc_regularization',
"plot_animal_tree", "plot_tree_progressive",
'plot_tree_partition', 'plot_svm',
'plot_knn_regression',
'plot_logistic_regression_graph',
'plot_single_hidden_layer_graph',
'plot_two_hidden_layer_graph',
'plot_2d_classification',
'plot_2d_separator',
'plot_knn_classification',
'plot_linear_regression_wave',
'plot_tree_not_monotone',
'plot_scaling',
'plot_pca_illustration',
'plot_pca_faces',
'plot_pca_whitening',
'plot_decomposition',
'plot_nmf_illustration',
'plot_nmf_faces',
'plot_agglomerative',
'plot_agglomerative_algorithm',
'plot_kmeans_boundaries',
'plot_kmeans_algorithm',
'cm3', 'cm2', 'plot_improper_processing', 'plot_proper_processing',
'plot_label_kfold',
'plot_shuffle_split',
'plot_stratified_cross_validation',
'plot_threefold_split',
'plot_cross_validation',
'plot_grid_search_overview',
'plot_cross_val_selection',
'plot_confusion_matrix_illustration',
'plot_binary_confusion_matrix',
'plot_decision_threshold'
]
| bsd-2-clause | -2,806,230,652,244,266,000 | 47.09375 | 89 | 0.651072 | false |
dcneeme/d4c | mb.py | 1 | 3615 | #mb.py - using pymodbus in serial mode to read and write modbus command. only for linux!
# parameters mba regadd count to read
# parameters mba regadd hexdata to write (1 register only)
# only for archlinux and only for 1 or 2 registers write at this stage
# while this is running, do not keep other processes that use the same serial port running!
# 04.03.2014 switched to minimalmodbus.
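# Invocation sketch for the parameters described above (slave id, addresses and
# values below are made-up examples, not taken from a real installation):
#   python3 mb.py 1 100 4        -> read 4 holding registers starting at register 100 of slave 1
#   python3 mb.py 1 100 4 i      -> the same read, but from input registers (fc 04)
#   python3 mb.py 1 100 00ff     -> write hex value 00ff into register 100 (fc 06)
#   python3 mb.py 1 100 0001e240 -> write an 8-digit hex value into two registers via write_long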
#import time
#import datetime
#import decimal
import sys
import traceback
#import subprocess
#from socket import *
#import select
#import string
import minimalmodbus # adjusted debug variant for python3 on the olinuxino!
cmd=0
argnum=len(sys.argv)
#print(sys.argv,'arg count',argnum) # debug
mba=int(sys.argv[1]) # modbus address
client = minimalmodbus.Instrument('/dev/ttyAPP0', mba)
client.debug = True # outputs the sent and received bytes as hex strings, via base64
regaddr=int(sys.argv[2]) # dec! # regaddrh=format("%04x" % int(sys.argv[1])) # parameter is the address in decimal form, register-1!
if len(sys.argv[3]) == 4: # looks like hex data for 1 register (4 digits)
    regcount=int(sys.argv[3],16) # data, in hex form
    print('writing single register data',regcount) # temporary
cmd=6
elif len(sys.argv[3]) == 8: # 2 registers at once (8 digits)
regcount=(0xffff & int(sys.argv[3],16))
cmd=10
else: # could be a register count?
    if len(sys.argv[3]) <3: # apparently a register count, so a read
        regcount=int(sys.argv[3]) # number of registers to read
if argnum == 5:
if sys.argv[4] == 'i': # input register
                print('reading',regcount,'input registers starting from',regaddr) # temporary
cmd=4
elif sys.argv[4] == 'h': # holding register
                print('reading',regcount,'holding registers starting from',regaddr) # temporary
cmd=3
elif sys.argv[4] == 'c': # coils
                print('reading',regcount,'coils starting from',regaddr) # temporary
cmd=1
else:
print('unknown parameter',sys.argv[4])
else:
            print('reading',regcount,'holding registers starting from',regaddr) # temporary
cmd=3
else:
print('invalid length '+str(len(sys.argv[3]))+' for parameter 3!')
output=''
#try: # while 1:
if cmd == 3: # reading, n consecutive registers
print('mba',mba,'regaddr',regaddr,'regcount',regcount,'cmd',cmd) # debug
    result=client.read_registers(regaddr,regcount) # first arg is the start address, second the register count. fc 03
print(result)
elif cmd == 4: # reading, n consecutive input registers
print('mba',mba,'regaddr',regaddr,'regcount',regcount,'cmd',cmd) # debug
    result=client.read_registers(regaddr,regcount,functioncode=4) # first arg is the start address, second the register count. fc 04
print(result)
elif cmd == 1: # reading, n consecutive coils
    print('mba',mba,'regaddr',regaddr,'regcount',regcount,'cmd',cmd) # debug
    result=[client.read_bit(regaddr+i,functioncode=1) for i in range(regcount)] # sketch only: the original left this branch without a read; assumes minimalmodbus read_bit() with fc 01
    print(result)
elif cmd == 6: # writing, 1 register
    print('mba',mba,'regaddr',regaddr,'data',regcount,'cmd',cmd) # debug
    #client.write_register(address=regaddr, value=regcount, unit=mba) # only one register to write here
    client.write_register(regaddr,regcount) # value dec, fc 06
    print('ok')
elif cmd == 10: # writing, 2 registers
    print('mba',mba,'regaddr',regaddr,'data',regcount,'cmd',cmd) # debug
    #client.write_registers(address=regaddr, values=[hidata,lodata], unit=mba) # only one register to write here
client.write_long(regaddr,regcount)
    print('ok') # not yet implemented
else:
print('failure, unknown function code',cmd)
| gpl-3.0 | -161,333,197,346,817,380 | 38.617978 | 123 | 0.649793 | false |
rgayon/plaso | plaso/parsers/sqlite_plugins/windows_timeline.py | 1 | 9900 | # -*- coding: utf-8 -*-
"""SQLite parser plugin for Windows 10 Timeline database files."""
from __future__ import unicode_literals
import json
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class WindowsTimelineGenericEventData(events.EventData):
"""Windows Timeline database generic event data.
Attributes:
package_identifier (str): the package ID or path to the executable run.
Depending on the program, this either looks like a path
(for example, c:\\python34\\python.exe) or like a package name
(for example Docker.DockerForWindows.Settings).
description (str): this is an optional field, used to describe the action in
the timeline view, and is usually populated with the path of the file
currently open in the program described by package_identifier.
Otherwise None.
application_display_name (str): a more human-friendly version of the
package_identifier, such as 'Docker for Windows' or 'Microsoft Store'.
"""
DATA_TYPE = 'windows:timeline:generic'
def __init__(self):
"""Initialize event data"""
super(WindowsTimelineGenericEventData, self).__init__(
data_type=self.DATA_TYPE)
self.package_identifier = None
self.description = None
self.application_display_name = None
class WindowsTimelineUserEngagedEventData(events.EventData):
"""Windows Timeline database User Engaged event data.
Contains information describing how long a user interacted with an application
for.
Attributes:
package_identifier (str): the package ID or location of the executable
the user interacted with.
reporting_app (str): the name of the application that reported the user's
interaction. This is the name of a monitoring tool, for example
"ShellActivityMonitor".
active_duration_seconds (int): the number of seconds the user spent
interacting with the program.
"""
DATA_TYPE = 'windows:timeline:user_engaged'
def __init__(self):
"""Initialize event data"""
super(WindowsTimelineUserEngagedEventData, self).__init__(
data_type=self.DATA_TYPE)
self.package_identifier = None
self.reporting_app = None
self.active_duration_seconds = None
class WindowsTimelinePlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Windows 10 Timeline database files.
The Windows 10 Timeline database file is typically stored in:
%APPDATA%\\Local\\ConnectedDevicesPlatform\\L.<username>\\ActivitiesCache.db
"""
NAME = 'windows_timeline'
DATA_FORMAT = 'Windows 10 Timeline SQLite database (ActivitiesCache.db) file'
REQUIRED_STRUCTURE = {
'Activity': frozenset([
'StartTime', 'Payload', 'PackageName', 'Id', 'AppId']),
'Activity_PackageId': frozenset([
'ActivityId'])}
QUERIES = [
(('SELECT StartTime, Payload, PackageName FROM Activity '
'INNER JOIN Activity_PackageId ON Activity.Id = '
'Activity_PackageId.ActivityId WHERE instr(Payload, "UserEngaged") > 0'
' AND Platform = "packageid"'), 'ParseUserEngagedRow'),
(('SELECT StartTime, Payload, AppId FROM Activity '
'WHERE instr(Payload, "UserEngaged") = 0'), 'ParseGenericRow')]
SCHEMAS = [{
'Activity': (
'CREATE TABLE [Activity]([Id] GUID PRIMARY KEY NOT NULL, [AppId] '
'TEXT NOT NULL, [PackageIdHash] TEXT, [AppActivityId] TEXT, '
'[ActivityType] INT NOT NULL, [ActivityStatus] INT NOT NULL, '
'[ParentActivityId] GUID, [Tag] TEXT, [Group] TEXT, [MatchId] TEXT, '
'[LastModifiedTime] DATETIME NOT NULL, [ExpirationTime] DATETIME, '
'[Payload] BLOB, [Priority] INT, [IsLocalOnly] INT, '
'[PlatformDeviceId] TEXT, [CreatedInCloud] DATETIME, [StartTime] '
'DATETIME, [EndTime] DATETIME, [LastModifiedOnClient] DATETIME, '
'[GroupAppActivityId] TEXT, [ClipboardPayload] BLOB, [EnterpriseId] '
'TEXT, [OriginalPayload] BLOB, [OriginalLastModifiedOnClient] '
'DATETIME, [ETag] INT NOT NULL)'),
'ActivityAssetCache': (
'CREATE TABLE [ActivityAssetCache]([ResourceId] INTEGER PRIMARY KEY '
'AUTOINCREMENT NOT NULL, [AppId] TEXT NOT NULL, [AssetHash] TEXT '
'NOT NULL, [TimeToLive] DATETIME NOT NULL, [AssetUri] TEXT, '
'[AssetId] TEXT, [AssetKey] TEXT, [Contents] BLOB)'),
'ActivityOperation': (
'CREATE TABLE [ActivityOperation]([OperationOrder] INTEGER PRIMARY '
'KEY ASC NOT NULL, [Id] GUID NOT NULL, [OperationType] INT NOT '
'NULL, [AppId] TEXT NOT NULL, [PackageIdHash] TEXT, [AppActivityId] '
'TEXT, [ActivityType] INT NOT NULL, [ParentActivityId] GUID, [Tag] '
'TEXT, [Group] TEXT, [MatchId] TEXT, [LastModifiedTime] DATETIME '
'NOT NULL, [ExpirationTime] DATETIME, [Payload] BLOB, [Priority] '
'INT, [CreatedTime] DATETIME, [Attachments] TEXT, '
'[PlatformDeviceId] TEXT, [CreatedInCloud] DATETIME, [StartTime] '
'DATETIME NOT NULL, [EndTime] DATETIME, [LastModifiedOnClient] '
'DATETIME NOT NULL, [CorrelationVector] TEXT, [GroupAppActivityId] '
'TEXT, [ClipboardPayload] BLOB, [EnterpriseId] TEXT, '
'[OriginalPayload] BLOB, [OriginalLastModifiedOnClient] DATETIME, '
'[ETag] INT NOT NULL)'),
'Activity_PackageId': (
'CREATE TABLE [Activity_PackageId]([ActivityId] GUID NOT NULL, '
'[Platform] TEXT NOT NULL, [PackageName] TEXT NOT NULL, '
'[ExpirationTime] DATETIME NOT NULL)'),
'AppSettings': (
'CREATE TABLE [AppSettings]([AppId] TEXT PRIMARY KEY NOT NULL, '
'[SettingsPropertyBag] BLOB, [AppTitle] TEXT, [Logo4141] TEXT)'),
'ManualSequence': (
'CREATE TABLE [ManualSequence]([Key] TEXT PRIMARY KEY NOT NULL, '
'[Value] INT NOT NULL)'),
'Metadata': (
'CREATE TABLE [Metadata]([Key] TEXT PRIMARY KEY NOT NULL, [Value] '
'TEXT)')}]
def ParseGenericRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a generic windows timeline row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = WindowsTimelineGenericEventData()
# Payload is JSON serialized as binary data in a BLOB field, with the text
# encoded as UTF-8.
payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))
payload_json_string = payload_json_bytes.decode('utf-8')
# AppId is JSON stored as unicode text.
appid_entries_string = self._GetRowValue(query_hash, row, 'AppId')
payload = json.loads(payload_json_string)
appid_entries = json.loads(appid_entries_string)
# Attempt to populate the package_identifier field by checking each of
# these fields in the AppId JSON.
package_id_locations = [
'packageId', 'x_exe_path', 'windows_win32', 'windows_universal',
'alternateId']
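    # Illustrative AppId JSON shape that the loop below walks (values are made up
    # for documentation purposes, not taken from a real ActivitiesCache.db):
    #   [{"platform": "x_exe_path", "application": "c:\\python34\\python.exe"},
    #    {"platform": "packageId", "application": "Docker.DockerForWindows.Settings"}]
    # The first preferred platform with a non-empty "application" value wins.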
for location in package_id_locations:
for entry in appid_entries:
if entry['platform'] == location and entry['application'] != '':
event_data.package_identifier = entry['application']
break
if event_data.package_identifier is None:
# package_identifier has been populated and we're done.
break
if 'description' in payload:
event_data.description = payload['description']
else:
event_data.description = ''
if 'appDisplayName' in payload and payload['appDisplayName'] != '':
event_data.application_display_name = payload['appDisplayName']
elif 'displayText' in payload and payload['displayText'] != '':
# Fall back to displayText if appDisplayName isn't available
event_data.application_display_name = payload['displayText']
timestamp = self._GetRowValue(query_hash, row, 'StartTime')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseUserEngagedRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a timeline row that describes a user interacting with an app.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = WindowsTimelineUserEngagedEventData()
event_data.package_identifier = self._GetRowValue(
query_hash, row, 'PackageName')
# Payload is JSON serialized as binary data in a BLOB field, with the text
# encoded as UTF-8.
payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))
payload_json_string = payload_json_bytes.decode('utf-8')
payload = json.loads(payload_json_string)
if 'reportingApp' in payload:
event_data.reporting_app = payload['reportingApp']
if 'activeDurationSeconds' in payload:
event_data.active_duration_seconds = int(payload['activeDurationSeconds'])
timestamp = self._GetRowValue(query_hash, row, 'StartTime')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
sqlite.SQLiteParser.RegisterPlugin(WindowsTimelinePlugin)
| apache-2.0 | -387,559,241,829,818,050 | 42.043478 | 80 | 0.679596 | false |
gkoelln/youtube-dl | youtube_dl/extractor/sonyliv.py | 24 | 1529 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import smuggle_url
class SonyLIVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?sonyliv\.com/details/[^/]+/(?P<id>\d+)'
_TESTS = [{
'url': "http://www.sonyliv.com/details/episodes/5024612095001/Ep.-1---Achaari-Cheese-Toast---Bachelor's-Delight",
'info_dict': {
'title': "Ep. 1 - Achaari Cheese Toast - Bachelor's Delight",
'id': 'ref:5024612095001',
'ext': 'mp4',
'upload_date': '20170923',
'description': 'md5:7f28509a148d5be9d0782b4d5106410d',
'uploader_id': '5182475815001',
'timestamp': 1506200547,
},
'params': {
'skip_download': True,
},
'add_ie': ['BrightcoveNew'],
}, {
'url': 'http://www.sonyliv.com/details/full%20movie/4951168986001/Sei-Raat-(Bangla)',
'only_matching': True,
}]
# BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4338955589001/default_default/index.html?videoId=%s'
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5182475815001/default_default/index.html?videoId=ref:%s'
def _real_extract(self, url):
brightcove_id = self._match_id(url)
return self.url_result(
smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, {
'geo_countries': ['IN'],
'referrer': url,
}),
'BrightcoveNew', brightcove_id)
| unlicense | 7,466,026,087,372,639,000 | 37.225 | 121 | 0.584696 | false |
cmajames/py_touhou | pytouhou/games/sample/game.py | 1 | 7672 | # -*- encoding: utf-8 -*-
##
## Copyright (C) 2011 Emmanuel Gil Peyrot <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
from pytouhou.utils.interpolator import Interpolator
from pytouhou.game.game import Game as GameBase
from pytouhou.game.bullettype import BulletType
from pytouhou.game.lasertype import LaserType
from pytouhou.game.itemtype import ItemType
from pytouhou.game.player import Player as PlayerBase
from pytouhou.game.orb import Orb
from pytouhou.game.background import Background
from pytouhou.vm import PythonMainRunner
from . import enemies, shots
class Common:
default_power = [0, 64, 128, 128, 128, 128, 0]
def __init__(self, resource_loader, player_characters, continues, *,
width=384, height=448):
self.width, self.height = width, height
self.etama = resource_loader.get_multi_anm(('etama3.anm', 'etama4.anm'))
self.bullet_types = [BulletType(self.etama[0], 0, 11, 14, 15, 16, hitbox_size=2,
type_id=0),
BulletType(self.etama[0], 1, 12, 17, 18, 19, hitbox_size=3,
type_id=1),
BulletType(self.etama[0], 2, 12, 17, 18, 19, hitbox_size=2,
type_id=2),
BulletType(self.etama[0], 3, 12, 17, 18, 19, hitbox_size=3,
type_id=3),
BulletType(self.etama[0], 4, 12, 17, 18, 19, hitbox_size=2.5,
type_id=4),
BulletType(self.etama[0], 5, 12, 17, 18, 19, hitbox_size=2,
type_id=5),
BulletType(self.etama[0], 6, 13, 20, 20, 20, hitbox_size=8,
launch_anim_offsets=(0, 1, 1, 2, 2, 3, 4, 0),
type_id=6),
BulletType(self.etama[0], 7, 13, 20, 20, 20, hitbox_size=5.5,
launch_anim_offsets=(1, 1, 1, 1),
type_id=7),
BulletType(self.etama[0], 8, 13, 20, 20, 20, hitbox_size=4.5,
launch_anim_offsets=(0, 1, 1, 2, 2, 3, 4, 0),
type_id=8),
BulletType(self.etama[1], 0, 1, 2, 2, 2, hitbox_size=16,
launch_anim_offsets=(0, 1, 2, 3),
type_id=9)]
self.laser_types = [LaserType(self.etama[0], 9),
LaserType(self.etama[0], 10)]
self.item_types = [ItemType(self.etama[0], 0, 7), #Power
ItemType(self.etama[0], 1, 8), #Point
ItemType(self.etama[0], 2, 9), #Big power
ItemType(self.etama[0], 3, 10), #Bomb
ItemType(self.etama[0], 4, 11), #Full power
ItemType(self.etama[0], 5, 12), #1up
ItemType(self.etama[0], 6, 13)] #Star
self.enemy_face = [('face03a.anm', 'face03b.anm'),
('face05a.anm',),
('face06a.anm', 'face06b.anm'),
('face08a.anm', 'face08b.anm'),
('face09a.anm', 'face09b.anm'),
('face09b.anm', 'face10a.anm', 'face10b.anm'),
('face08a.anm', 'face12a.anm', 'face12b.anm', 'face12c.anm')]
sample_characters = shots.characters
self.first_character = player_characters[0] // 2
self.player_anms = {}
self.players = [None] * len(player_characters)
for i, player_character in enumerate(player_characters):
character = player_character // 2
if character not in self.player_anms:
face = resource_loader.get_multi_anm(('face0%da.anm' % character,
'face0%db.anm' % character,
'face0%dc.anm' % character))
anm = resource_loader.get_single_anm('player0%d.anm' % character)
self.player_anms[character] = (anm, face)
self.players[i] = Player(i, self.player_anms[character][0],
sample_characters[player_character],
character, continues)
class Game(GameBase):
def __init__(self, resource_loader, stage, rank, difficulty,
common, prng, hints=None, friendly_fire=True,
nb_bullets_max=640):
self.etama = common.etama #XXX
try:
self.enm_anm = resource_loader.get_multi_anm(('stg%denm.anm' % stage,
'stg%denm2.anm' % stage))
except KeyError:
self.enm_anm = resource_loader.get_anm('stg%denm.anm' % stage)
self.ecl_runners = [PythonMainRunner(getattr(enemies, 'stage%d' % stage), self)]
self.spellcard_effect_anm = resource_loader.get_single_anm('eff0%d.anm' % stage)
self.msg = resource_loader.get_msg('msg%d.dat' % stage)
msg_anm = [common.player_anms[common.first_character][1], #TODO: does it break bomb face of non-first player?
resource_loader.get_multi_anm(common.enemy_face[stage - 1])]
self.msg_anm = [[], []]
for i, anms in enumerate(msg_anm):
for anm in anms:
for sprite in anm.sprites.values():
self.msg_anm[i].append((anm, sprite))
for player in common.players:
player._game = self
if player.power < 0:
player.power = common.default_power[stage - 1]
# Load stage data
self.std = resource_loader.get_stage('stage%d.std' % stage)
background_anm = resource_loader.get_single_anm('stg%dbg.anm' % stage)
self.background = Background(self.std, background_anm)
common.interface.start_stage(self, stage)
GameBase.__init__(self, common.players, stage, rank, difficulty,
common.bullet_types, common.laser_types,
common.item_types, nb_bullets_max, common.width,
common.height, prng, common.interface, hints,
friendly_fire)
class Player(PlayerBase):
def __init__(self, number, anm, shts, character, continues):
self.sht = shts[0]
self.focused_sht = shts[1]
PlayerBase.__init__(self, number, anm, character, continues, power=-1)
self.orbs = [Orb(anm, 128, self),
Orb(anm, 129, self)]
self.orbs[0].offset_x = -24
self.orbs[1].offset_x = 24
def start_focusing(self):
self.focused = True
def stop_focusing(self):
self.focused = False
@property
def objects(self):
return [self] + self.orbs
def update(self, keystate):
PlayerBase.update(self, keystate)
for orb in self.orbs:
orb.update()
| gpl-3.0 | 5,043,808,531,601,340,000 | 43.091954 | 117 | 0.512774 | false |
cscanlin/Super-Simple-VLOOKUP-in-Python | python_vlookup/python_vlookup.py | 1 | 3133 | from builtins import input
from collections import Iterable
import csv
# Yields each row of the csv as a list, one row at a time (generator)
def get_csv_data(csv_file_path):
with open(csv_file_path, 'r') as csvfile:
for row in csv.reader(csvfile):
yield row
# Creates a dictionary with the first list item as the key,
# and the value based on the column index input
def create_column_dict(csv_rows, index, case_sensitive=False):
if case_sensitive:
column_dict = {row[0]: row[index] for row in csv_rows}
else:
column_dict = {row[0].lower(): row[index] for row in csv_rows}
return column_dict
# faster_vlookup allows you to call each function individually.
# You can use the two helper functions above to read the csv rows and build the dictionary only once,
# and then call faster_vlookup, which is much quicker.
# Useful inside loops; see the usage sketch after the vlookup function below.
def faster_vlookup(item, column_dict, debug=None, error_value='#N/A',
case_sensitive=False, csv_file_path=None, col_index_num=None):
if not case_sensitive:
item = lower_case_item(item)
try:
if isinstance(item, Iterable) and not isinstance(item, str):
return [column_dict[str(entry)] for entry in item]
else:
return column_dict[str(item)]
except KeyError as e:
if debug == 'fix' and csv_file_path:
entry = str(e)[1:-1]
debug_lookup(entry, csv_file_path)
return vlookup(item, csv_file_path, col_index_num, debug='fix')
if debug == 'skip':
if isinstance(item, Iterable) and not isinstance(item, str):
return [column_dict[str(entry)] if str(entry) in column_dict else error_value for entry in item]
else:
return error_value
else:
raise
# Returns the looked-up value as a list or string; uses Excel column index numbering (no 0 column).
# If debug is set to 'fix', the user is prompted to enter values for missing entries.
def vlookup(item, csv_file_path, col_index_num, debug=None, error_value='#N/A', case_sensitive=False):
csv_rows = get_csv_data(csv_file_path)
column_dict = create_column_dict(csv_rows, col_index_num-1, case_sensitive)
return faster_vlookup(
item, column_dict, debug, error_value,
case_sensitive, csv_file_path, col_index_num,
)
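# Usage sketch for the one-shot and loop-friendly call patterns (the csv file
# name and lookup keys below are assumptions, not part of this module):
#   vlookup('apple', 'fruit.csv', 3)                 # single lookup; rebuilds the dictionary every call
#   rows = get_csv_data('fruit.csv')
#   lookup = create_column_dict(rows, 3 - 1)         # build the dictionary once...
#   results = [faster_vlookup(key, lookup) for key in ('apple', 'pear')]  # ...then reuse it in a loop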
def debug_lookup(entry, csv_file_path):
print('LOOKUP ENTRY MISSING')
new_entry = [entry]
with open(csv_file_path, 'r') as csvfile:
reader = csv.reader(csvfile)
for column_heading in next(reader)[1:]:
print('Enter {0} for {1}:'.format(column_heading, entry))
column_entry = input()
new_entry.append(column_entry)
with open(csv_file_path, 'a') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(new_entry)
def lower_case_item(item):
if isinstance(item, Iterable) and not isinstance(item, str):
item = [str(entry).lower() for entry in item]
return item
else:
return str(item).lower()
| mit | 7,505,803,180,778,867,000 | 38.74026 | 112 | 0.629429 | false |
vgrem/Office365-REST-Python-Client | office365/sharepoint/files/file.py | 1 | 18032 | from office365.runtime.client_result import ClientResult
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.http.request_options import RequestOptions
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.actions.download_file import DownloadFileQuery
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.directory.user import User
from office365.sharepoint.files.file_version_collection import FileVersionCollection
from office365.sharepoint.listitems.listitem import ListItem
from office365.sharepoint.webparts.limited_webpart_manager import LimitedWebPartManager
from office365.sharepoint.types.resource_path import ResourcePath as SPResPath
class AbstractFile(BaseEntity):
def read(self):
"""Immediately read content of file"""
if not self.is_property_available("ServerRelativeUrl"):
raise ValueError
response = File.open_binary(
self.context, self.properties["ServerRelativeUrl"])
return response.content
def write(self, content):
"""Immediately writes content of file"""
if not self.is_property_available("ServerRelativeUrl"):
raise ValueError
response = File.save_binary(
self.context, self.properties["ServerRelativeUrl"], content)
return response
class File(AbstractFile):
"""Represents a file in a SharePoint Web site that can be a Web Part Page, an item in a document library,
or a file in a folder."""
@staticmethod
def from_url(abs_url):
"""
Retrieves a File from absolute url
:type abs_url: str
"""
from office365.sharepoint.client_context import ClientContext
ctx = ClientContext.from_url(abs_url)
file_relative_url = abs_url.replace(ctx.base_url, "")
file = ctx.web.get_file_by_server_relative_url(file_relative_url)
return file
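    # Usage sketch (the absolute URL below is a placeholder; authentication still
    # has to be configured on the returned file's client context, and the exact
    # auth call depends on the library version):
    #   f = File.from_url("https://contoso.sharepoint.com/sites/team/Shared Documents/report.docx")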
def recycle(self):
"""Moves the file to the Recycle Bin and returns the identifier of the new Recycle Bin item."""
result = ClientResult(self.context)
qry = ServiceOperationQuery(self, "Recycle", None, None, None, result)
self.context.add_query(qry)
return result
def approve(self, comment):
"""Approves the file submitted for content approval with the specified comment.
:type comment: str
"""
qry = ServiceOperationQuery(self,
"approve",
{
"comment": comment
})
self.context.add_query(qry)
return self
def deny(self, comment):
"""Denies approval for a file that was submitted for content approval.
:type comment: str
"""
qry = ServiceOperationQuery(self,
"deny",
{
"comment": comment
})
self.context.add_query(qry)
return self
def copyto(self, new_relative_url, overwrite):
"""Copies the file to the destination URL.
:type new_relative_url: str
:type overwrite: bool
"""
qry = ServiceOperationQuery(self,
"CopyTo",
{
"strNewUrl": new_relative_url,
"boverwrite": overwrite
},
None)
self.context.add_query(qry)
return self
def copyto_using_path(self, decoded_url, overwrite):
"""Copies the file to the destination URL.
:type decoded_url: str
:type overwrite: bool
"""
qry = ServiceOperationQuery(self,
"CopyToUsingPath",
{
"DecodedUrl": decoded_url,
"bOverWrite": overwrite
},
None)
self.context.add_query(qry)
return self
def moveto(self, new_relative_url, flag):
"""Moves the file to the specified destination URL.
:type new_relative_url: str
:type flag: int
"""
qry = ServiceOperationQuery(self,
"moveto",
{
"newurl": new_relative_url,
"flags": flag
},
None)
self.context.add_query(qry)
return self
def publish(self, comment):
"""Submits the file for content approval with the specified comment.
:type comment: str
"""
qry = ServiceOperationQuery(self,
"publish",
{
"comment": comment,
}
)
self.context.add_query(qry)
def unpublish(self, comment):
"""Removes the file from content approval or unpublish a major version.
:type comment: str
"""
qry = ServiceOperationQuery(self,
"unpublish",
{
"comment": comment,
}
)
self.context.add_query(qry)
return self
def checkout(self):
"""Checks out the file from a document library based on the check-out type."""
qry = ServiceOperationQuery(self,
"checkout",
)
self.context.add_query(qry)
return self
def checkin(self, comment, checkin_type):
"""
Checks the file in to a document library based on the check-in type.
:param comment: comment to the new version of the file
:param checkin_type: 0 (minor), or 1 (major) or 2 (overwrite)
For more information on checkin types, please see
https://docs.microsoft.com/en-us/previous-versions/office/sharepoint-csom/ee542953(v%3Doffice.15)
:type checkin_type: int
"""
qry = ServiceOperationQuery(self,
"checkin",
{
"comment": comment,
"checkInType": checkin_type
}
)
self.context.add_query(qry)
return self
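    # Usage sketch for a checkout/checkin cycle (ctx is assumed to be an
    # authenticated ClientContext and the server-relative path is a placeholder):
    #   f = ctx.web.get_file_by_server_relative_url("/sites/team/Shared Documents/report.docx")
    #   f.checkout()
    #   f.checkin("weekly update", checkin_type=1)  # 1 = major version, per the docstring above
    #   ctx.execute_query()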
def undocheckout(self):
"""Reverts an existing checkout for the file."""
qry = ServiceOperationQuery(self,
"undocheckout"
)
self.context.add_query(qry)
return self
def get_limited_webpart_manager(self, scope):
"""Specifies the control set used to access, modify, or add Web Parts associated with this Web Part Page and
view. """
return LimitedWebPartManager(self.context,
ResourcePathServiceOperation(
"getlimitedwebpartmanager",
[scope],
self.resource_path
))
def start_upload(self, upload_id, content):
"""Starts a new chunk upload session and uploads the first fragment.
:param bytes content: File content
:param str upload_id: Upload session id
"""
result = ClientResult(self.context)
qry = ServiceOperationQuery(self,
"startUpload",
{
"uploadID": upload_id
},
content,
None,
result
)
self.context.add_query(qry)
return result
def continue_upload(self, upload_id, file_offset, content):
"""
Continues the chunk upload session with an additional fragment. The current file content is not changed.
:param str upload_id: Upload session id
:param int file_offset: File offset
:param bytes content: File content
"""
result = ClientResult(self.context)
qry = ServiceOperationQuery(self,
"continueUpload",
{
"uploadID": upload_id,
"fileOffset": file_offset,
},
content,
None,
result
)
self.context.add_query(qry)
return result
def finish_upload(self, upload_id, file_offset, content):
"""Uploads the last file fragment and commits the file. The current file content is changed when this method
completes.
:param str upload_id: Upload session id
:param int file_offset: File offset
:param bytes content: File content
"""
qry = ServiceOperationQuery(self,
"finishUpload",
{
"uploadID": upload_id,
"fileOffset": file_offset,
},
content,
None,
self
)
self.context.add_query(qry)
return self
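    # Sketch of the three-step chunked upload protocol built from start_upload,
    # continue_upload and finish_upload above (upload id, chunks and their sizes
    # are assumptions; f and ctx are an existing File and its client context):
    #   upload_id = str(uuid.uuid4())
    #   f.start_upload(upload_id, chunk1)                          # first fragment opens the session
    #   f.continue_upload(upload_id, len(chunk1), chunk2)          # offset = bytes sent so far
    #   f.finish_upload(upload_id, len(chunk1) + len(chunk2), chunk3)
    #   ctx.execute_query()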
@staticmethod
def save_binary(ctx, server_relative_url, content):
"""Uploads a file
:type ctx: ClientContext
:type server_relative_url: str
:type content: str
"""
url = r"{0}web/getFileByServerRelativePath(DecodedUrl='{1}')/\$value".format(
ctx.service_root_url(), server_relative_url)
request = RequestOptions(url)
request.method = HttpMethod.Post
request.set_header('X-HTTP-Method', 'PUT')
request.data = content
response = ctx.execute_request_direct(request)
return response
@staticmethod
def open_binary(ctx, server_relative_url):
"""
Returns the file object located at the specified server-relative URL.
:type ctx: ClientContext
:type server_relative_url: str
:return Response
"""
url = r"{0}web/getFileByServerRelativePath(DecodedUrl='{1}')/\$value".format(ctx.service_root_url(),
server_relative_url)
request = RequestOptions(url)
request.method = HttpMethod.Get
response = ctx.execute_request_direct(request)
return response
def download(self, file_object):
"""Download a file content
:type file_object: typing.IO
"""
def _download_inner():
qry = DownloadFileQuery(self.context.web, self.serverRelativeUrl, file_object)
self.context.add_query(qry)
self.ensure_property("ServerRelativeUrl", _download_inner)
return self
def download_session(self, file_object, chunk_downloaded=None, chunk_size=1024 * 1024):
"""
:type file_object: typing.IO
:type chunk_downloaded: (int)->None or None
:type chunk_size: int
"""
def _download_inner():
request = RequestOptions(
r"{0}web/getFileByServerRelativeUrl('{1}')/\$value".format(self.context.service_root_url(),
self.serverRelativeUrl))
request.stream = True
response = self.context.execute_request_direct(request)
response.raise_for_status()
bytes_read = 0
for chunk in response.iter_content(chunk_size=chunk_size):
bytes_read += len(chunk)
if callable(chunk_downloaded):
chunk_downloaded(bytes_read)
file_object.write(chunk)
self.ensure_property("ServerRelativeUrl", _download_inner)
return self
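    # Usage sketch for a chunked download with progress reporting (local path and
    # callback are assumptions; f and ctx are an existing File and its client context):
    #   with open("/tmp/report.docx", "wb") as local_file:
    #       f.download_session(local_file, chunk_downloaded=lambda read: print(read, "bytes read"))
    #       ctx.execute_query()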
@property
def listItemAllFields(self):
"""Gets a value that specifies the list item fields values for the list item corresponding to the file."""
return self.properties.get('ListItemAllFields',
ListItem(self.context, ResourcePath("listItemAllFields", self.resource_path)))
@property
def versions(self):
"""Gets a value that returns a collection of file version objects that represent the versions of the file."""
return self.properties.get('Versions',
FileVersionCollection(self.context, ResourcePath("versions", self.resource_path)))
@property
def modified_by(self):
"""
Gets a value that returns the user who last modified the file.
:rtype: office365.sharepoint.directory.user.User or None
"""
return self.properties.get("ModifiedBy", User(self.context, ResourcePath("ModifiedBy", self.resource_path)))
@property
def locked_by_user(self):
"""
Gets a value that returns the user that owns the current lock on the file.
:rtype: office365.sharepoint.directory.user.User or None
"""
return self.properties.get("LockedByUser", User(self.context, ResourcePath("LockedByUser", self.resource_path)))
@property
def serverRelativeUrl(self):
"""Gets the relative URL of the file based on the URL for the server.
:rtype: str or None
"""
return self.properties.get("ServerRelativeUrl", None)
@property
def server_relative_path(self):
"""Gets the server-relative Path of the list folder.
:rtype: SPResPath or None
"""
return self.properties.get("ServerRelativePath", SPResPath(None))
@property
def length(self):
"""Gets the file size.
:rtype: int or None
"""
if self.is_property_available('Length'):
return int(self.properties["Length"])
else:
return None
@property
def exists(self):
"""Specifies whether the file exists.
:rtype: bool or None
"""
return self.properties.get("Exists", None)
@property
def name(self):
"""Specifies the file name including the extension.
It MUST NOT be NULL. Its length MUST be equal to or less than 260.
:rtype: str or None
"""
return self.properties.get("Name", None)
@property
def list_id(self):
"""Gets the GUID that identifies the List containing the file.
:rtype: str or None
"""
return self.properties.get("ListId", None)
@property
def site_id(self):
"""Gets the GUID that identifies the site collection containing the file.
:rtype: str or None
"""
return self.properties.get("SiteId", None)
@property
def web_id(self):
"""Gets the GUID for the site containing the file.
:rtype: str or None
"""
return self.properties.get("WebId", None)
@property
def time_created(self):
"""Gets a value that specifies when the file was created.
:rtype: str or None
"""
return self.properties.get("TimeCreated", None)
@property
def time_last_modified(self):
"""Specifies when the file was last modified.
:rtype: str or None
"""
return self.properties.get("TimeLastModified", None)
@property
def minor_version(self):
"""
Gets a value that specifies the minor version of the file.
:rtype: int or None
"""
return self.properties.get("MinorVersion", None)
@property
def major_version(self):
"""
Gets a value that specifies the major version of the file.
:rtype: int or None
"""
return self.properties.get("MajorVersion", None)
@property
def unique_id(self):
"""
Gets a value that specifies the a file unique identifier
:rtype: str or None
"""
return self.properties.get("UniqueId", None)
def set_property(self, name, value, persist_changes=True):
super(File, self).set_property(name, value, persist_changes)
# fallback: create a new resource path
if self._resource_path is None:
if name == "ServerRelativeUrl":
self._resource_path = ResourcePathServiceOperation(
"GetFileByServerRelativeUrl", [value], ResourcePath("Web"))
elif name == "ServerRelativePath":
self._resource_path = ResourcePathServiceOperation("getFolderByServerRelativePath", [value],
ResourcePath("Web"))
elif name == "UniqueId":
self._resource_path = ResourcePathServiceOperation(
"GetFileById", [value], ResourcePath("Web"))
return self
| mit | 5,321,818,484,125,837,000 | 35.95082 | 120 | 0.52673 | false |
cuongnv23/ansible | lib/ansible/modules/network/avi/avi_alertemailconfig.py | 27 | 3729 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_alertemailconfig
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of AlertEmailConfig Avi RESTful Object
description:
- This module is used to configure AlertEmailConfig object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cc_emails:
description:
- Alerts are copied to the comma separated list of email recipients.
description:
description:
- User defined description for the object.
name:
description:
- A user-friendly name of the email notification service.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
to_emails:
description:
- Alerts are sent to the comma separated list of email recipients.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create AlertEmailConfig object
avi_alertemailconfig:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_alertemailconfig
"""
RETURN = '''
obj:
description: AlertEmailConfig (api/alertemailconfig) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
cc_emails=dict(type='str',),
description=dict(type='str',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
to_emails=dict(type='str', required=True),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'alertemailconfig',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | 5,406,416,330,351,382,000 | 29.818182 | 81 | 0.648163 | false |
OpenSPA/dvbapp | lib/python/Plugins/Extensions/DVDBurn/DVDTitle.py | 29 | 6521 | from Components.config import config, ConfigSubsection, ConfigSubList, ConfigInteger, ConfigText, ConfigSelection, getConfigListEntry, ConfigSequence, ConfigYesNo
import TitleCutter
class ConfigFixedText(ConfigText):
def __init__(self, text, visible_width=60):
ConfigText.__init__(self, default = text, fixed_size = True, visible_width = visible_width)
def handleKey(self, key):
pass
class DVDTitle:
def __init__(self, project):
self.properties = ConfigSubsection()
self.properties.menutitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.menusubtitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.aspect = ConfigSelection(choices = [("4:3", _("4:3")), ("16:9", _("16:9"))])
self.properties.widescreen = ConfigSelection(choices = [("nopanscan", "nopanscan"), ("noletterbox", "noletterbox")])
self.properties.autochapter = ConfigInteger(default = 0, limits = (0, 60))
self.properties.audiotracks = ConfigSubList()
self.DVBname = _("Title")
self.DVBdescr = _("Description")
self.DVBchannel = _("Channel")
self.cuesheet = [ ]
self.source = None
self.filesize = 0
self.estimatedDiskspace = 0
self.inputfile = ""
self.cutlist = [ ]
self.chaptermarks = [ ]
self.timeCreate = None
self.VideoType = -1
self.project = project
self.length = 0
def addService(self, service):
from os import path
from enigma import eServiceCenter, iServiceInformation
from ServiceReference import ServiceReference
from time import localtime, time
self.source = service
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(service)
sDescr = info and info.getInfoString(service, iServiceInformation.sDescription) or ""
self.DVBdescr = sDescr
sTimeCreate = info.getInfo(service, iServiceInformation.sTimeCreate)
if sTimeCreate > 1:
self.timeCreate = localtime(sTimeCreate)
serviceref = ServiceReference(info.getInfoString(service, iServiceInformation.sServiceref))
name = info and info.getName(service) or "Title" + sDescr
self.DVBname = name
self.DVBchannel = serviceref.getServiceName()
self.inputfile = service.getPath()
self.filesize = path.getsize(self.inputfile)
self.estimatedDiskspace = self.filesize
self.length = info.getLength(service)
def addFile(self, filename):
from enigma import eServiceReference
ref = eServiceReference(1, 0, filename)
self.addService(ref)
self.project.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, self)
def titleEditDone(self, cutlist):
self.initDVDmenuText(len(self.project.titles))
self.cuesheet = cutlist
self.produceFinalCuesheet()
def initDVDmenuText(self, track):
s = self.project.menutemplate.settings
self.properties.menutitle.setValue(self.formatDVDmenuText(s.titleformat.getValue(), track))
self.properties.menusubtitle.setValue(self.formatDVDmenuText(s.subtitleformat.getValue(), track))
def formatDVDmenuText(self, template, track):
template = template.replace("$i", str(track))
template = template.replace("$t", self.DVBname)
template = template.replace("$d", self.DVBdescr)
template = template.replace("$c", str(len(self.chaptermarks)+1))
template = template.replace("$f", self.inputfile)
template = template.replace("$C", self.DVBchannel)
#if template.find("$A") >= 0:
from TitleProperties import languageChoices
audiolist = [ ]
for audiotrack in self.properties.audiotracks:
active = audiotrack.active.getValue()
if active:
trackstring = audiotrack.format.getValue()
language = audiotrack.language.getValue()
if languageChoices.langdict.has_key(language):
trackstring += ' (' + languageChoices.langdict[language] + ')'
audiolist.append(trackstring)
audiostring = ', '.join(audiolist)
template = template.replace("$A", audiostring)
if template.find("$l") >= 0:
l = self.length
lengthstring = "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
template = template.replace("$l", lengthstring)
if self.timeCreate:
template = template.replace("$Y", str(self.timeCreate[0]))
template = template.replace("$M", str(self.timeCreate[1]))
template = template.replace("$D", str(self.timeCreate[2]))
timestring = "%d:%02d" % (self.timeCreate[3], self.timeCreate[4])
template = template.replace("$T", timestring)
else:
template = template.replace("$Y", "").replace("$M", "").replace("$D", "").replace("$T", "")
return template
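	# Illustrative expansion of the placeholders handled above (recording name,
	# channel and length are made-up values, not from a real recording):
	#   "$i. $t ($C, $l)"  ->  "1. Evening News (Channel One, 0:29:59)"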
def produceFinalCuesheet(self):
CUT_TYPE_IN = 0
CUT_TYPE_OUT = 1
CUT_TYPE_MARK = 2
CUT_TYPE_LAST = 3
accumulated_in = 0
accumulated_at = 0
last_in = 0
self.cutlist = [ ]
self.chaptermarks = [ ]
# our demuxer expects *strictly* IN,OUT lists.
currently_in = not any(type == CUT_TYPE_IN for pts, type in self.cuesheet)
if currently_in:
self.cutlist.append(0) # emulate "in" at first
for (pts, type) in self.cuesheet:
#print "pts=", pts, "type=", type, "accumulated_in=", accumulated_in, "accumulated_at=", accumulated_at, "last_in=", last_in
if type == CUT_TYPE_IN and not currently_in:
self.cutlist.append(pts)
last_in = pts
currently_in = True
if type == CUT_TYPE_OUT and currently_in:
self.cutlist.append(pts)
# accumulate the segment
accumulated_in += pts - last_in
accumulated_at = pts
currently_in = False
if type == CUT_TYPE_MARK and currently_in:
# relocate chaptermark against "in" time. This is not 100% accurate,
# as the in/out points are not.
reloc_pts = pts - last_in + accumulated_in
self.chaptermarks.append(reloc_pts)
if len(self.cutlist) > 1:
part = accumulated_in / (self.length*90000.0)
usedsize = int ( part * self.filesize )
self.estimatedDiskspace = usedsize
self.length = accumulated_in / 90000
def getChapterMarks(self, template="$h:$m:$s.$t"):
timestamps = [ ]
chapters = [ ]
minutes = self.properties.autochapter.getValue()
if len(self.chaptermarks) < 1 and minutes > 0:
chapterpts = 0
while chapterpts < (self.length-60*minutes)*90000:
chapterpts += 90000 * 60 * minutes
chapters.append(chapterpts)
else:
chapters = self.chaptermarks
for p in chapters:
timestring = template.replace("$h", str(p / (90000 * 3600)))
timestring = timestring.replace("$m", ("%02d" % (p % (90000 * 3600) / (90000 * 60))))
timestring = timestring.replace("$s", ("%02d" % (p % (90000 * 60) / 90000)))
timestring = timestring.replace("$t", ("%03d" % ((p % 90000) / 90)))
timestamps.append(timestring)
return timestamps | gpl-2.0 | -1,764,415,652,769,457,700 | 37.364706 | 162 | 0.701273 | false |
tfroehlich82/erpnext | erpnext/accounts/doctype/purchase_invoice/test_purchase_invoice.py | 3 | 24784 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe, erpnext
import frappe.model
from frappe.utils import cint, flt, today, nowdate, add_days
import frappe.defaults
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory, \
test_records as pr_test_records
from erpnext.controllers.accounts_controller import get_payment_terms
from erpnext.exceptions import InvalidCurrency
from erpnext.stock.doctype.stock_entry.test_stock_entry import get_qty_after_transaction
from erpnext.accounts.doctype.account.test_account import get_inventory_account
test_dependencies = ["Item", "Cost Center", "Payment Term", "Payment Terms Template"]
test_ignore = ["Serial No"]
class TestPurchaseInvoice(unittest.TestCase):
def setUp(self):
unlink_payment_on_cancel_of_invoice()
frappe.db.set_value("Buying Settings", None, "allow_multiple_items", 1)
def tearDown(self):
unlink_payment_on_cancel_of_invoice(0)
def test_gl_entries_without_perpetual_inventory(self):
frappe.db.set_value("Company", "_Test Company", "round_off_account", "Round Off - _TC")
wrapper = frappe.copy_doc(test_records[0])
set_perpetual_inventory(0, wrapper.company)
self.assertTrue(not cint(erpnext.is_perpetual_inventory_enabled(wrapper.company)))
wrapper.insert()
wrapper.submit()
wrapper.load_from_db()
dl = wrapper
expected_gl_entries = {
"_Test Payable - _TC": [0, 1512.0],
"_Test Account Cost for Goods Sold - _TC": [1250, 0],
"_Test Account Shipping Charges - _TC": [100, 0],
"_Test Account Excise Duty - _TC": [140, 0],
"_Test Account Education Cess - _TC": [2.8, 0],
"_Test Account S&H Education Cess - _TC": [1.4, 0],
"_Test Account CST - _TC": [29.88, 0],
"_Test Account VAT - _TC": [156.25, 0],
"_Test Account Discount - _TC": [0, 168.03],
"Round Off - _TC": [0, 0.3]
}
gl_entries = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type = 'Purchase Invoice' and voucher_no = %s""", dl.name, as_dict=1)
for d in gl_entries:
self.assertEqual([d.debit, d.credit], expected_gl_entries.get(d.account))
def test_gl_entries_with_perpetual_inventory(self):
pi = frappe.copy_doc(test_records[1])
set_perpetual_inventory(1, pi.company)
self.assertTrue(cint(erpnext.is_perpetual_inventory_enabled(pi.company)), 1)
pi.insert()
pi.submit()
self.check_gle_for_pi(pi.name)
set_perpetual_inventory(0, pi.company)
def test_terms_added_after_save(self):
pi = frappe.copy_doc(test_records[1])
pi.insert()
self.assertTrue(pi.payment_schedule)
self.assertEqual(pi.payment_schedule[0].due_date, pi.due_date)
def test_payment_entry_unlink_against_purchase_invoice(self):
from erpnext.accounts.doctype.payment_entry.test_payment_entry import get_payment_entry
unlink_payment_on_cancel_of_invoice(0)
pi_doc = make_purchase_invoice()
pe = get_payment_entry("Purchase Invoice", pi_doc.name, bank_account="_Test Bank - _TC")
pe.reference_no = "1"
pe.reference_date = nowdate()
pe.paid_from_account_currency = pi_doc.currency
pe.paid_to_account_currency = pi_doc.currency
pe.source_exchange_rate = 1
pe.target_exchange_rate = 1
pe.paid_amount = pi_doc.grand_total
pe.save(ignore_permissions=True)
pe.submit()
pi_doc = frappe.get_doc('Purchase Invoice', pi_doc.name)
self.assertRaises(frappe.LinkExistsError, pi_doc.cancel)
def test_gl_entries_with_perpetual_inventory_against_pr(self):
pr = frappe.copy_doc(pr_test_records[0])
set_perpetual_inventory(1, pr.company)
self.assertTrue(cint(erpnext.is_perpetual_inventory_enabled(pr.company)), 1)
pr.submit()
pi = frappe.copy_doc(test_records[1])
for d in pi.get("items"):
d.purchase_receipt = pr.name
pi.insert()
pi.submit()
self.check_gle_for_pi(pi.name)
set_perpetual_inventory(0, pr.company)
def check_gle_for_pi(self, pi):
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi, as_dict=1)
self.assertTrue(gl_entries)
expected_values = dict((d[0], d) for d in [
["_Test Payable - _TC", 0, 720],
["Stock Received But Not Billed - _TC", 500.0, 0],
["_Test Account Shipping Charges - _TC", 100.0, 0],
["_Test Account VAT - _TC", 120.0, 0],
])
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_values[gle.account][0], gle.account)
self.assertEquals(expected_values[gle.account][1], gle.debit)
self.assertEquals(expected_values[gle.account][2], gle.credit)
def test_purchase_invoice_change_naming_series(self):
pi = frappe.copy_doc(test_records[1])
pi.insert()
pi.naming_series = 'TEST-'
self.assertRaises(frappe.CannotChangeConstantError, pi.save)
pi = frappe.copy_doc(test_records[0])
pi.insert()
pi.naming_series = 'TEST-'
self.assertRaises(frappe.CannotChangeConstantError, pi.save)
def test_gl_entries_with_aia_for_non_stock_items(self):
pi = frappe.copy_doc(test_records[1])
set_perpetual_inventory(1, pi.company)
self.assertTrue(cint(erpnext.is_perpetual_inventory_enabled(pi.company)), 1)
pi.get("items")[0].item_code = "_Test Non Stock Item"
pi.get("items")[0].expense_account = "_Test Account Cost for Goods Sold - _TC"
pi.get("taxes").pop(0)
pi.get("taxes").pop(1)
pi.insert()
pi.submit()
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = sorted([
["_Test Payable - _TC", 0, 620],
["_Test Account Cost for Goods Sold - _TC", 500.0, 0],
["_Test Account VAT - _TC", 120.0, 0],
])
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_values[i][0], gle.account)
self.assertEquals(expected_values[i][1], gle.debit)
self.assertEquals(expected_values[i][2], gle.credit)
set_perpetual_inventory(0, pi.company)
def test_purchase_invoice_calculation(self):
pi = frappe.copy_doc(test_records[0])
pi.insert()
pi.load_from_db()
expected_values = [
["_Test Item Home Desktop 100", 90, 59],
["_Test Item Home Desktop 200", 135, 177]
]
for i, item in enumerate(pi.get("items")):
self.assertEqual(item.item_code, expected_values[i][0])
self.assertEqual(item.item_tax_amount, expected_values[i][1])
self.assertEqual(item.valuation_rate, expected_values[i][2])
self.assertEqual(pi.base_net_total, 1250)
# tax amounts
expected_values = [
["_Test Account Shipping Charges - _TC", 100, 1350],
["_Test Account Customs Duty - _TC", 125, 1350],
["_Test Account Excise Duty - _TC", 140, 1490],
["_Test Account Education Cess - _TC", 2.8, 1492.8],
["_Test Account S&H Education Cess - _TC", 1.4, 1494.2],
["_Test Account CST - _TC", 29.88, 1524.08],
["_Test Account VAT - _TC", 156.25, 1680.33],
["_Test Account Discount - _TC", 168.03, 1512.30],
]
for i, tax in enumerate(pi.get("taxes")):
self.assertEqual(tax.account_head, expected_values[i][0])
self.assertEqual(tax.tax_amount, expected_values[i][1])
self.assertEqual(tax.total, expected_values[i][2])
def test_purchase_invoice_with_subcontracted_item(self):
wrapper = frappe.copy_doc(test_records[0])
wrapper.get("items")[0].item_code = "_Test FG Item"
wrapper.insert()
wrapper.load_from_db()
expected_values = [
["_Test FG Item", 90, 59],
["_Test Item Home Desktop 200", 135, 177]
]
for i, item in enumerate(wrapper.get("items")):
self.assertEqual(item.item_code, expected_values[i][0])
self.assertEqual(item.item_tax_amount, expected_values[i][1])
self.assertEqual(item.valuation_rate, expected_values[i][2])
self.assertEqual(wrapper.base_net_total, 1250)
# tax amounts
expected_values = [
["_Test Account Shipping Charges - _TC", 100, 1350],
["_Test Account Customs Duty - _TC", 125, 1350],
["_Test Account Excise Duty - _TC", 140, 1490],
["_Test Account Education Cess - _TC", 2.8, 1492.8],
["_Test Account S&H Education Cess - _TC", 1.4, 1494.2],
["_Test Account CST - _TC", 29.88, 1524.08],
["_Test Account VAT - _TC", 156.25, 1680.33],
["_Test Account Discount - _TC", 168.03, 1512.30],
]
for i, tax in enumerate(wrapper.get("taxes")):
self.assertEqual(tax.account_head, expected_values[i][0])
self.assertEqual(tax.tax_amount, expected_values[i][1])
self.assertEqual(tax.total, expected_values[i][2])
def test_purchase_invoice_with_advance(self):
from erpnext.accounts.doctype.journal_entry.test_journal_entry \
import test_records as jv_test_records
jv = frappe.copy_doc(jv_test_records[1])
jv.insert()
jv.submit()
pi = frappe.copy_doc(test_records[0])
pi.disable_rounded_total = 1
pi.append("advances", {
"reference_type": "Journal Entry",
"reference_name": jv.name,
"reference_row": jv.get("accounts")[0].name,
"advance_amount": 400,
"allocated_amount": 300,
"remarks": jv.remark
})
pi.insert()
self.assertEqual(pi.outstanding_amount, 1212.30)
pi.disable_rounded_total = 0
pi.get("payment_schedule")[0].payment_amount = 1512.0
pi.save()
self.assertEqual(pi.outstanding_amount, 1212.0)
pi.submit()
pi.load_from_db()
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type='Purchase Invoice'
and reference_name=%s and debit_in_account_currency=300""", pi.name))
pi.cancel()
self.assertFalse(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type='Purchase Invoice' and reference_name=%s""", pi.name))
def test_invoice_with_advance_and_multi_payment_terms(self):
from erpnext.accounts.doctype.journal_entry.test_journal_entry \
import test_records as jv_test_records
jv = frappe.copy_doc(jv_test_records[1])
jv.insert()
jv.submit()
pi = frappe.copy_doc(test_records[0])
pi.disable_rounded_total = 1
pi.append("advances", {
"reference_type": "Journal Entry",
"reference_name": jv.name,
"reference_row": jv.get("accounts")[0].name,
"advance_amount": 400,
"allocated_amount": 300,
"remarks": jv.remark
})
pi.insert()
pi.update({
"payment_schedule": get_payment_terms("_Test Payment Term Template",
pi.posting_date, pi.grand_total)
})
pi.save()
pi.submit()
self.assertEqual(pi.payment_schedule[0].payment_amount, 756.15)
self.assertEqual(pi.payment_schedule[0].due_date, pi.posting_date)
self.assertEqual(pi.payment_schedule[1].payment_amount, 756.15)
self.assertEqual(pi.payment_schedule[1].due_date, add_days(pi.posting_date, 30))
pi.load_from_db()
self.assertTrue(
frappe.db.sql(
"select name from `tabJournal Entry Account` where reference_type='Purchase Invoice' and "
"reference_name=%s and debit_in_account_currency=300", pi.name)
)
self.assertEqual(pi.outstanding_amount, 1212.30)
pi.cancel()
self.assertFalse(
frappe.db.sql(
"select name from `tabJournal Entry Account` where reference_type='Purchase Invoice' and "
"reference_name=%s", pi.name)
)
def test_total_purchase_cost_for_project(self):
existing_purchase_cost = frappe.db.sql("""select sum(base_net_amount)
from `tabPurchase Invoice Item` where project = '_Test Project' and docstatus=1""")
existing_purchase_cost = existing_purchase_cost and existing_purchase_cost[0][0] or 0
pi = make_purchase_invoice(currency="USD", conversion_rate=60, project="_Test Project")
self.assertEqual(frappe.db.get_value("Project", "_Test Project", "total_purchase_cost"),
existing_purchase_cost + 15000)
pi1 = make_purchase_invoice(qty=10, project="_Test Project")
self.assertEqual(frappe.db.get_value("Project", "_Test Project", "total_purchase_cost"),
existing_purchase_cost + 15500)
pi1.cancel()
self.assertEqual(frappe.db.get_value("Project", "_Test Project", "total_purchase_cost"),
existing_purchase_cost + 15000)
pi.cancel()
self.assertEqual(frappe.db.get_value("Project", "_Test Project", "total_purchase_cost"), existing_purchase_cost)
def test_return_purchase_invoice(self):
set_perpetual_inventory()
pi = make_purchase_invoice()
return_pi = make_purchase_invoice(is_return=1, return_against=pi.name, qty=-2)
# check gl entries for return
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type=%s and voucher_no=%s
order by account desc""", ("Purchase Invoice", return_pi.name), as_dict=1)
self.assertTrue(gl_entries)
expected_values = {
"Creditors - _TC": [100.0, 0.0],
"Stock Received But Not Billed - _TC": [0.0, 100.0],
}
for gle in gl_entries:
self.assertEquals(expected_values[gle.account][0], gle.debit)
self.assertEquals(expected_values[gle.account][1], gle.credit)
set_perpetual_inventory(0)
def test_multi_currency_gle(self):
set_perpetual_inventory(0)
pi = make_purchase_invoice(supplier="_Test Supplier USD", credit_to="_Test Payable USD - _TC",
currency="USD", conversion_rate=50)
gl_entries = frappe.db.sql("""select account, account_currency, debit, credit,
debit_in_account_currency, credit_in_account_currency
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = {
"_Test Payable USD - _TC": {
"account_currency": "USD",
"debit": 0,
"debit_in_account_currency": 0,
"credit": 12500,
"credit_in_account_currency": 250
},
"_Test Account Cost for Goods Sold - _TC": {
"account_currency": "INR",
"debit": 12500,
"debit_in_account_currency": 12500,
"credit": 0,
"credit_in_account_currency": 0
}
}
for field in ("account_currency", "debit", "debit_in_account_currency", "credit", "credit_in_account_currency"):
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_values[gle.account][field], gle[field])
# Check for valid currency
pi1 = make_purchase_invoice(supplier="_Test Supplier USD", credit_to="_Test Payable USD - _TC",
do_not_save=True)
self.assertRaises(InvalidCurrency, pi1.save)
# cancel
pi.cancel()
gle = frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Sales Invoice' and voucher_no=%s""", pi.name)
self.assertFalse(gle)
def test_purchase_invoice_update_stock_gl_entry_with_perpetual_inventory(self):
set_perpetual_inventory()
pi = make_purchase_invoice(update_stock=1, posting_date=frappe.utils.nowdate(),
posting_time=frappe.utils.nowtime())
gl_entries = frappe.db.sql("""select account, account_currency, debit, credit,
debit_in_account_currency, credit_in_account_currency
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi.name, as_dict=1)
self.assertTrue(gl_entries)
stock_in_hand_account = get_inventory_account(pi.company, pi.get("items")[0].warehouse)
expected_gl_entries = dict((d[0], d) for d in [
[pi.credit_to, 0.0, 250.0],
[stock_in_hand_account, 250.0, 0.0]
])
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_gl_entries[gle.account][0], gle.account)
self.assertEquals(expected_gl_entries[gle.account][1], gle.debit)
self.assertEquals(expected_gl_entries[gle.account][2], gle.credit)
def test_purchase_invoice_for_is_paid_and_update_stock_gl_entry_with_perpetual_inventory(self):
set_perpetual_inventory()
pi = make_purchase_invoice(update_stock=1, posting_date=frappe.utils.nowdate(),
posting_time=frappe.utils.nowtime(), cash_bank_account="Cash - _TC", is_paid=1)
gl_entries = frappe.db.sql("""select account, account_currency, sum(debit) as debit,
sum(credit) as credit, debit_in_account_currency, credit_in_account_currency
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
group by account, voucher_no order by account asc;""", pi.name, as_dict=1)
stock_in_hand_account = get_inventory_account(pi.company, pi.get("items")[0].warehouse)
self.assertTrue(gl_entries)
expected_gl_entries = dict((d[0], d) for d in [
[pi.credit_to, 250.0, 250.0],
[stock_in_hand_account, 250.0, 0.0],
["Cash - _TC", 0.0, 250.0]
])
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_gl_entries[gle.account][0], gle.account)
self.assertEquals(expected_gl_entries[gle.account][1], gle.debit)
self.assertEquals(expected_gl_entries[gle.account][2], gle.credit)
def test_auto_batch(self):
item_code = frappe.db.get_value('Item',
{'has_batch_no': 1, 'create_new_batch':1}, 'name')
if not item_code:
doc = frappe.get_doc({
'doctype': 'Item',
'is_stock_item': 1,
'item_code': 'test batch item',
'item_group': 'Products',
'has_batch_no': 1,
'create_new_batch': 1
}).insert(ignore_permissions=True)
item_code = doc.name
pi = make_purchase_invoice(update_stock=1, posting_date=frappe.utils.nowdate(),
posting_time=frappe.utils.nowtime(), item_code=item_code)
self.assertTrue(frappe.db.get_value('Batch',
{'item': item_code, 'reference_name': pi.name}))
def test_update_stock_and_purchase_return(self):
actual_qty_0 = get_qty_after_transaction()
pi = make_purchase_invoice(update_stock=1, posting_date=frappe.utils.nowdate(),
posting_time=frappe.utils.nowtime())
actual_qty_1 = get_qty_after_transaction()
self.assertEquals(actual_qty_0 + 5, actual_qty_1)
# return entry
pi1 = make_purchase_invoice(is_return=1, return_against=pi.name, qty=-2, rate=50, update_stock=1)
actual_qty_2 = get_qty_after_transaction()
self.assertEquals(actual_qty_1 - 2, actual_qty_2)
pi1.cancel()
self.assertEquals(actual_qty_1, get_qty_after_transaction())
pi.reload()
pi.cancel()
self.assertEquals(actual_qty_0, get_qty_after_transaction())
def test_subcontracting_via_purchase_invoice(self):
from erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry
make_stock_entry(item_code="_Test Item", target="_Test Warehouse 1 - _TC", qty=100, basic_rate=100)
make_stock_entry(item_code="_Test Item Home Desktop 100", target="_Test Warehouse 1 - _TC",
qty=100, basic_rate=100)
pi = make_purchase_invoice(item_code="_Test FG Item", qty=10, rate=500,
update_stock=1, is_subcontracted="Yes")
self.assertEquals(len(pi.get("supplied_items")), 2)
rm_supp_cost = sum([d.amount for d in pi.get("supplied_items")])
self.assertEquals(pi.get("items")[0].rm_supp_cost, flt(rm_supp_cost, 2))
def test_rejected_serial_no(self):
pi = make_purchase_invoice(item_code="_Test Serialized Item With Series", received_qty=2, qty=1,
rejected_qty=1, rate=500, update_stock=1,
rejected_warehouse = "_Test Rejected Warehouse - _TC")
self.assertEquals(frappe.db.get_value("Serial No", pi.get("items")[0].serial_no, "warehouse"),
pi.get("items")[0].warehouse)
self.assertEquals(frappe.db.get_value("Serial No", pi.get("items")[0].rejected_serial_no,
"warehouse"), pi.get("items")[0].rejected_warehouse)
def test_outstanding_amount_after_advance_jv_cancelation(self):
from erpnext.accounts.doctype.journal_entry.test_journal_entry \
import test_records as jv_test_records
jv = frappe.copy_doc(jv_test_records[1])
jv.accounts[0].is_advance = 'Yes'
jv.insert()
jv.submit()
pi = frappe.copy_doc(test_records[0])
pi.append("advances", {
"reference_type": "Journal Entry",
"reference_name": jv.name,
"reference_row": jv.get("accounts")[0].name,
"advance_amount": 400,
"allocated_amount": 300,
"remarks": jv.remark
})
pi.insert()
pi.submit()
pi.load_from_db()
#check outstanding after advance allocation
self.assertEqual(flt(pi.outstanding_amount), flt(pi.rounded_total - pi.total_advance))
#added to avoid Document has been modified exception
jv = frappe.get_doc("Journal Entry", jv.name)
jv.cancel()
pi.load_from_db()
#check outstanding after advance cancellation
self.assertEqual(flt(pi.outstanding_amount), flt(pi.rounded_total + pi.total_advance))
def test_outstanding_amount_after_advance_payment_entry_cancelation(self):
pe = frappe.get_doc({
"doctype": "Payment Entry",
"payment_type": "Pay",
"party_type": "Supplier",
"party": "_Test Supplier",
"company": "_Test Company",
"paid_from_account_currency": "INR",
"paid_to_account_currency": "INR",
"source_exchange_rate": 1,
"target_exchange_rate": 1,
"reference_no": "1",
"reference_date": nowdate(),
"received_amount": 300,
"paid_amount": 300,
"paid_from": "_Test Cash - _TC",
"paid_to": "_Test Payable - _TC"
})
pe.insert()
pe.submit()
pi = frappe.copy_doc(test_records[0])
pi.is_pos = 0
pi.append("advances", {
"doctype": "Purchase Invoice Advance",
"reference_type": "Payment Entry",
"reference_name": pe.name,
"advance_amount": 300,
"allocated_amount": 300,
"remarks": pe.remarks
})
pi.insert()
pi.submit()
pi.load_from_db()
#check outstanding after advance allocation
self.assertEqual(flt(pi.outstanding_amount), flt(pi.rounded_total - pi.total_advance))
#added to avoid Document has been modified exception
pe = frappe.get_doc("Payment Entry", pe.name)
pe.cancel()
pi.load_from_db()
#check outstanding after advance cancellation
self.assertEqual(flt(pi.outstanding_amount), flt(pi.rounded_total + pi.total_advance))
def test_purchase_invoice_with_shipping_rule(self):
from erpnext.accounts.doctype.shipping_rule.test_shipping_rule \
import create_shipping_rule
shipping_rule = create_shipping_rule(shipping_rule_type = "Buying", shipping_rule_name = "Shipping Rule - Purchase Invoice Test")
pi = frappe.copy_doc(test_records[0])
pi.shipping_rule = shipping_rule.name
pi.insert()
shipping_amount = 0.0
for condition in shipping_rule.get("conditions"):
if not condition.to_value or (flt(condition.from_value) <= pi.net_total <= flt(condition.to_value)):
shipping_amount = condition.shipping_amount
shipping_charge = {
"doctype": "Purchase Taxes and Charges",
"category": "Valuation and Total",
"charge_type": "Actual",
"account_head": shipping_rule.account,
"cost_center": shipping_rule.cost_center,
"tax_amount": shipping_amount,
"description": shipping_rule.name,
"add_deduct_tax": "Add"
}
pi.append("taxes", shipping_charge)
pi.save()
self.assertEquals(pi.net_total, 1250)
self.assertEquals(pi.total_taxes_and_charges, 462.3)
self.assertEquals(pi.grand_total, 1712.3)
def test_make_pi_without_terms(self):
pi = make_purchase_invoice(do_not_save=1)
self.assertFalse(pi.get('payment_schedule'))
pi.insert()
self.assertTrue(pi.get('payment_schedule'))
def test_duplicate_due_date_in_terms(self):
pi = make_purchase_invoice(do_not_save=1)
pi.append('payment_schedule', dict(due_date='2017-01-01', invoice_portion=50.00, payment_amount=50))
pi.append('payment_schedule', dict(due_date='2017-01-01', invoice_portion=50.00, payment_amount=50))
self.assertRaises(frappe.ValidationError, pi.insert)
def unlink_payment_on_cancel_of_invoice(enable=1):
accounts_settings = frappe.get_doc("Accounts Settings")
accounts_settings.unlink_payment_on_cancellation_of_invoice = enable
accounts_settings.save()
def make_purchase_invoice(**args):
pi = frappe.new_doc("Purchase Invoice")
args = frappe._dict(args)
pi.posting_date = args.posting_date or today()
if args.posting_time:
pi.posting_time = args.posting_time
if args.update_stock:
pi.update_stock = 1
if args.is_paid:
pi.is_paid = 1
if args.cash_bank_account:
pi.cash_bank_account=args.cash_bank_account
pi.company = args.company or "_Test Company"
pi.supplier = args.supplier or "_Test Supplier"
pi.currency = args.currency or "INR"
pi.conversion_rate = args.conversion_rate or 1
pi.is_return = args.is_return
pi.return_against = args.return_against
pi.is_subcontracted = args.is_subcontracted or "No"
pi.supplier_warehouse = "_Test Warehouse 1 - _TC"
pi.append("items", {
"item_code": args.item or args.item_code or "_Test Item",
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"qty": args.qty or 5,
"received_qty": args.received_qty or 0,
"rejected_qty": args.rejected_qty or 0,
"rate": args.rate or 50,
"conversion_factor": 1.0,
"serial_no": args.serial_no,
"stock_uom": "_Test UOM",
"cost_center": "_Test Cost Center - _TC",
"project": args.project,
"rejected_warehouse": args.rejected_warehouse or "",
"rejected_serial_no": args.rejected_serial_no or ""
})
if not args.do_not_save:
pi.insert()
if not args.do_not_submit:
pi.submit()
return pi
test_records = frappe.get_test_records('Purchase Invoice') | gpl-3.0 | 4,113,275,564,396,692,500 | 33.615922 | 131 | 0.694117 | false |
karamelchef/kagent-chef | files/default/kagent_utils/kagent_utils/http.py | 1 | 2383 | # -*- coding: utf-8 -*-
# This file is part of Hopsworks
# Copyright (C) 2019, Logical Clocks AB. All rights reserved
# Hopsworks is free software: you can redistribute it and/or modify it under the terms of
# the GNU Affero General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
# Hopsworks is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License along with this program.
# If not, see <https://www.gnu.org/licenses/>.
import logging
import requests
import json
from threading import RLock
from requests import exceptions as requests_exceptions
class Http:
JSON_HEADER = {'User-Agent': 'Agent', 'content-type': 'application/json'}
FORM_HEADER = {'User-Agent': 'Agent', 'content-type': 'application/x-www-form-urlencoded'}
HTTPS_VERIFY = False
def __init__(self, k_config):
self.k_config = k_config
self.logged_in = False
self.session = None
self.LOG = logging.getLogger(__name__)
self.lock = RLock()
def _login(self):
try :
self.lock.acquire()
if not self.logged_in or self.session is None:
try :
self.session = requests.Session()
response = self.session.post(self.k_config.login_url, data={'email': self.k_config.server_username,
'password': self.k_config.server_password},
headers=Http.FORM_HEADER, verify=Http.HTTPS_VERIFY)
response.raise_for_status()
self.logged_in = True
self.LOG.debug("Logged in to Hopsworks!")
except requests_exceptions.RequestException as ex:
self.session = None
self.LOG.error("Could not login to Hopsworks! Error code: %i Reason: %s",
response.status_code, response.reason)
raise ex
finally:
self.lock.release()
| gpl-3.0 | 4,277,755,935,822,757,000 | 43.12963 | 123 | 0.598405 | false |
Intel-Corporation/tensorflow | tensorflow/python/platform/app.py | 32 | 1436 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic entry point script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
from absl.app import run as _run
from tensorflow.python.platform import flags
from tensorflow.python.util.tf_export import tf_export
def _parse_flags_tolerate_undef(argv):
"""Parse args, returning any unknown flags (ABSL defaults to crashing)."""
return flags.FLAGS(_sys.argv if argv is None else argv, known_only=True)
@tf_export(v1=['app.run'])
def run(main=None, argv=None):
"""Runs the program with an optional 'main' function and 'argv' list."""
main = main or _sys.modules['__main__'].main
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
| apache-2.0 | 2,620,317,866,694,249,500 | 34.9 | 80 | 0.701253 | false |
oshepherd/eforge | eforge/bugs/models.py | 1 | 9133 | # -*- coding: utf-8 -*-
# EForge project management system, Copyright © 2010, Element43
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from django.db import models
from eforge.models import Project, Milestone
from eforge.utils.picklefield import PickledObjectField
from eforge.utils.text import textscan
from eforge.update.models import Update, register_update_type
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.conf import settings
import os.path
class Component(models.Model):
project = models.ForeignKey(Project)
name = models.CharField(max_length=32)
def __unicode__(self):
return self.name
IssuePriority = (
(1, 'Security Vulnerability'),
(2, 'Critical'),
(3, 'High'),
(4, 'Medium'),
(5, 'Low'),
)
IssueType = (
(1, 'Bug'),
(2, 'Feature Request'),
(3, 'Patch'),
)
IssueStatus = (
(1, 'New'),
(2, 'Assigned'),
(3, 'Reopened'),
(4, 'In Progress'),
(5, 'Resolved'),
)
IssueResolution = (
(1, 'Fixed'),
(2, 'Invalid'),
(3, 'Won\'t Fix'),
(4, 'Duplicate'),
(5, 'Works for me'),
(6, 'Incomplete'),
)
class Bug(models.Model):
project = models.ForeignKey(Project)
component = models.ForeignKey(Component)
priority = models.SmallIntegerField(choices=IssuePriority, default=4)
issue_type = models.SmallIntegerField(choices=IssueType, default=1)
title = models.CharField(max_length=50)
target = models.ForeignKey(Milestone)
status = models.SmallIntegerField(choices=IssueStatus, default=1)
resolution = models.SmallIntegerField(choices=IssueResolution, default=0, blank=True)
submitter = models.ForeignKey(User, related_name='submitted_bugs')
owner = models.ForeignKey(User, related_name='owned_bugs', blank=True, null=True)
depends = models.ManyToManyField('self', symmetrical=False, related_name='blocks', blank=True)
watchers = models.ManyToManyField(User, related_name='watching_bugs', blank=True)
@property
def issue_type_name(self):
return IssueType[self.issue_type-1][1]
@property
def priority_name(self):
return IssuePriority[self.priority-1][1]
@property
def status_name(self):
return IssueStatus[self.status-1][1]
@property
def resolution_name(self):
return IssueResolution[self.resolution-1][1]
@property
def is_resolved(self):
return self.status == 5
@property
def slug(self):
return '#%s-%d' % (self.project.slug.upper(), self.id)
@property
def submitted(self):
return self.comment_set.all()[0].date
@property
def url(self):
return reverse('bug-show', args=[self.project.slug, self.id])
def __unicode__(self):
return self.slug
def fromSlug(slug):
parts = slug[1:].split('-')
if len(parts) <> 2:
raise ValueError('Bad slug')
else:
return Bug.objects.get(project__slug__iexact=parts[0], id=int(parts[1]))
class Comment(models.Model):
bug = models.ForeignKey(Bug)
submitter = models.ForeignKey(User)
date = models.DateTimeField(auto_now_add=True)
text = models.TextField()
@property
def formatted(self):
return textscan(self.bug.project, self.text)
class Update:
@classmethod
def user(self, comment):
return comment.submitter
@classmethod
def project(self, comment):
return comment.bug.project
@classmethod
def summary(self, comment):
return 'Comment on bug %s' % comment.bug.slug
@classmethod
def description(self, comment):
return 'User %s said:\n\n%s' % (comment.submitter, comment.text)
@classmethod
def url(self, comment):
return '%s#c%d' % (comment.bug.url, comment.pk)
@classmethod
def date(self, comment):
return comment.date
@classmethod
def recipients(self, comment):
return comment.bug.watchers.all()
register_update_type(Comment)
def up_file(this, name):
return 'bugfiles/%d-%d/%s' % (this.bug_id, this.comment_id, name)
class Attachment(models.Model):
bug = models.ForeignKey(Bug)
comment = models.ForeignKey(Comment)
file = models.FileField(upload_to = up_file, verbose_name='Attachment')
@property
def file_name(self):
return os.path.basename(str(self.file))
@property
def url(self):
return reverse('bug-attachment', args=[self.bug.project.slug, self.id])
# Renderers for action fields:
def component_renderer(old, to):
cold = Component.objects.get(pk=old)
cto = Component.objects.get(pk=to)
return 'Changed Component from %s to %s' % (cold, cto)
def target_renderer(old, to):
mold = Milestone.objects.get(pk=old)
mto = Milestone.objects.get(pk=to)
return 'Changed Milestone from %s to %s' % (mold, mto)
def priority_renderer(old, to):
pold = IssuePriority[old-1][1]
pto = IssuePriority[to-1][1]
return 'Changed Priority from %s to %s' % (pold, pto)
def issue_type_renderer(old, to):
told = IssueType[old-1][1]
tto = IssueType[to-1][1]
return 'Changed Issue Type from %s to %s' % (told, tto)
def title_renderer(old, to):
return 'Changed Title from "%s" to "%s"' % (old, to)
def status_renderer(old, to):
sold = IssueStatus[old-1][1]
sto = IssueStatus[to-1][1]
return 'Changed Issue Type from %s to %s' % (sold, sto)
def resolution_renderer(old, to):
rto = IssueResolution[to-1][1]
return 'Set resolution to %s' % rto
def owner_renderer(old, to):
if old and to:
oold = User.objects.get(pk=old)
oto = User.objects.get(pk=to)
return 'Changed Owner from %s to %s' % (oold, oto)
elif old:
oold = User.objects.get(pk=old)
return 'Removed assignment to %s' % oold
else:
oto = User.objects.get(pk=to)
return 'Assigned to %s' % oto
def autojoin(l, format):
if len(l) == 0: return ''
l = list(l)
s = format(l[0])
rem = l[1:]
for o in rem:
s += u', ' + format(o)
return s
def get_dep(id):
return Bug.objects.get(pk=id).slug
def depends_renderer(old, to):
sold = set(old)
sto = set(to)
removed = sold - sto
added = sto - sold
tremoved = autojoin(removed, get_dep)
tadded = autojoin(added, get_dep)
if len(removed) == 0:
return 'Added dependencies on %s' % tadded
elif len(added) == 0:
return 'Removed dependencies on %s' % tremoved
else:
return 'Removed dependencies on %s; added %s' % (tadded, tremoved)
action_renderer = {
'component': component_renderer,
'priority': priority_renderer,
'issue_type': issue_type_renderer,
'title': title_renderer,
'status': status_renderer,
'resolution': resolution_renderer,
'owner': owner_renderer,
'depends': depends_renderer,
'target': target_renderer,
}
class Action(models.Model):
bug = models.ForeignKey(Bug)
comment = models.ForeignKey(Comment)
field = models.TextField(max_length=32)
value = PickledObjectField()
@classmethod
def for_change(self, bug, comment, field, oldv, newv):
changed = False
valstr = str(newv)
if isinstance(oldv, models.Model): oldv = oldv.pk
if isinstance(newv, models.Model): newv = newv.pk
if field == 'depends' or field =='blocks':
oldv = [o.pk for o in oldv.all()]
newv = [o.pk for o in newv]
changed = len(set(oldv) ^ set(newv))
changed = oldv <> newv
else:
changed = oldv <> newv
if changed:
a = Action(bug=bug, comment=comment, field=field, value=(oldv, newv))
a.save()
return a
def __unicode__(self):
try:
name = getattr(Bug, self.field).verbose_name
except AttributeError:
name = self.field.title()
curval = getattr(self.bug, self.field)
val = self.value
if name == 'Issue_type':
name = 'Issue Type'
return action_renderer[self.field](val[0], val[1])
| isc | -5,572,373,331,587,627,000 | 29.039474 | 101 | 0.615309 | false |
stamhe/bitcoin | test/functional/p2p_node_network_limited.py | 8 | 4993 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack
from test_framework.mininode import NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS, P2PInterface, wait_until, mininode_lock, network_thread_start, network_thread_join
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
super(NodeNetworkLimitedTest, self).setup_network()
self.disconnect_all()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
network_thread_start()
node.wait_for_verack()
expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generate(292)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
network_thread_join()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
network_thread_start()
node1.wait_for_verack()
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, 1036)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
# node2 must remain at heigh 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generate(10)
# connect node1 (non pruned) with node0 (pruned) and check if the can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
| mit | -572,976,092,451,093,400 | 40.957983 | 166 | 0.666533 | false |
saurabhjn76/sympy | sympy/simplify/radsimp.py | 46 | 35730 | from __future__ import print_function, division
from collections import defaultdict
from sympy import SYMPY_DEBUG
from sympy.core.evaluate import global_evaluate
from sympy.core.compatibility import iterable, ordered, as_int, default_sort_key
from sympy.core import expand_power_base, sympify, Add, S, Mul, Derivative, Pow, symbols, expand_mul
from sympy.core.numbers import Rational, Float
from sympy.core.exprtools import Factors, gcd_terms
from sympy.core.mul import _keep_coeff, _unevaluated_Mul
from sympy.core.function import _mexpand
from sympy.core.add import _unevaluated_Add
from sympy.functions import exp, sqrt, log
from sympy.polys import gcd
from sympy.simplify.sqrtdenest import sqrtdenest
import mpmath
def collect(expr, syms, func=None, evaluate=None, exact=False, distribute_order_term=True):
"""
Collect additive terms of an expression.
This function collects additive terms of an expression with respect
to a list of expression up to powers with rational exponents. By the
term symbol here are meant arbitrary expressions, which can contain
powers, products, sums etc. In other words symbol is a pattern which
will be searched for in the expression's terms.
The input expression is not expanded by :func:`collect`, so user is
expected to provide an expression is an appropriate form. This makes
:func:`collect` more predictable as there is no magic happening behind the
scenes. However, it is important to note, that powers of products are
converted to products of powers using the :func:`expand_power_base`
function.
There are two possible types of output. First, if ``evaluate`` flag is
set, this function will return an expression with collected terms or
else it will return a dictionary with expressions up to rational powers
as keys and collected coefficients as values.
Examples
========
>>> from sympy import S, collect, expand, factor, Wild
>>> from sympy.abc import a, b, c, x, y, z
This function can collect symbolic coefficients in polynomials or
rational expressions. It will manage to find all integer or rational
powers of collection variable::
>>> collect(a*x**2 + b*x**2 + a*x - b*x + c, x)
c + x**2*(a + b) + x*(a - b)
The same result can be achieved in dictionary form::
>>> d = collect(a*x**2 + b*x**2 + a*x - b*x + c, x, evaluate=False)
>>> d[x**2]
a + b
>>> d[x]
a - b
>>> d[S.One]
c
You can also work with multivariate polynomials. However, remember that
this function is greedy so it will care only about a single symbol at time,
in specification order::
>>> collect(x**2 + y*x**2 + x*y + y + a*y, [x, y])
x**2*(y + 1) + x*y + y*(a + 1)
Also more complicated expressions can be used as patterns::
>>> from sympy import sin, log
>>> collect(a*sin(2*x) + b*sin(2*x), sin(2*x))
(a + b)*sin(2*x)
>>> collect(a*x*log(x) + b*(x*log(x)), x*log(x))
x*(a + b)*log(x)
You can use wildcards in the pattern::
>>> w = Wild('w1')
>>> collect(a*x**y - b*x**y, w**y)
x**y*(a - b)
It is also possible to work with symbolic powers, although it has more
complicated behavior, because in this case power's base and symbolic part
of the exponent are treated as a single symbol::
>>> collect(a*x**c + b*x**c, x)
a*x**c + b*x**c
>>> collect(a*x**c + b*x**c, x**c)
x**c*(a + b)
However if you incorporate rationals to the exponents, then you will get
well known behavior::
>>> collect(a*x**(2*c) + b*x**(2*c), x**c)
x**(2*c)*(a + b)
Note also that all previously stated facts about :func:`collect` function
apply to the exponential function, so you can get::
>>> from sympy import exp
>>> collect(a*exp(2*x) + b*exp(2*x), exp(x))
(a + b)*exp(2*x)
If you are interested only in collecting specific powers of some symbols
then set ``exact`` flag in arguments::
>>> collect(a*x**7 + b*x**7, x, exact=True)
a*x**7 + b*x**7
>>> collect(a*x**7 + b*x**7, x**7, exact=True)
x**7*(a + b)
You can also apply this function to differential equations, where
derivatives of arbitrary order can be collected. Note that if you
collect with respect to a function or a derivative of a function, all
derivatives of that function will also be collected. Use
``exact=True`` to prevent this from happening::
>>> from sympy import Derivative as D, collect, Function
>>> f = Function('f') (x)
>>> collect(a*D(f,x) + b*D(f,x), D(f,x))
(a + b)*Derivative(f(x), x)
>>> collect(a*D(D(f,x),x) + b*D(D(f,x),x), f)
(a + b)*Derivative(f(x), x, x)
>>> collect(a*D(D(f,x),x) + b*D(D(f,x),x), D(f,x), exact=True)
a*Derivative(f(x), x, x) + b*Derivative(f(x), x, x)
>>> collect(a*D(f,x) + b*D(f,x) + a*f + b*f, f)
(a + b)*f(x) + (a + b)*Derivative(f(x), x)
Or you can even match both derivative order and exponent at the same time::
>>> collect(a*D(D(f,x),x)**2 + b*D(D(f,x),x)**2, D(f,x))
(a + b)*Derivative(f(x), x, x)**2
Finally, you can apply a function to each of the collected coefficients.
For example you can factorize symbolic coefficients of polynomial::
>>> f = expand((x + a + 1)**3)
>>> collect(f, x, factor)
x**3 + 3*x**2*(a + 1) + 3*x*(a + 1)**2 + (a + 1)**3
.. note:: Arguments are expected to be in expanded form, so you might have
to call :func:`expand` prior to calling this function.
See Also
========
collect_const, collect_sqrt, rcollect
"""
if evaluate is None:
evaluate = global_evaluate[0]
def make_expression(terms):
product = []
for term, rat, sym, deriv in terms:
if deriv is not None:
var, order = deriv
while order > 0:
term, order = Derivative(term, var), order - 1
if sym is None:
if rat is S.One:
product.append(term)
else:
product.append(Pow(term, rat))
else:
product.append(Pow(term, rat*sym))
return Mul(*product)
def parse_derivative(deriv):
# scan derivatives tower in the input expression and return
# underlying function and maximal differentiation order
expr, sym, order = deriv.expr, deriv.variables[0], 1
for s in deriv.variables[1:]:
if s == sym:
order += 1
else:
raise NotImplementedError(
'Improve MV Derivative support in collect')
while isinstance(expr, Derivative):
s0 = expr.variables[0]
for s in expr.variables:
if s != s0:
raise NotImplementedError(
'Improve MV Derivative support in collect')
if s0 == sym:
expr, order = expr.expr, order + len(expr.variables)
else:
break
return expr, (sym, Rational(order))
def parse_term(expr):
"""Parses expression expr and outputs tuple (sexpr, rat_expo,
sym_expo, deriv)
where:
- sexpr is the base expression
- rat_expo is the rational exponent that sexpr is raised to
- sym_expo is the symbolic exponent that sexpr is raised to
- deriv contains the derivatives the the expression
for example, the output of x would be (x, 1, None, None)
the output of 2**x would be (2, 1, x, None)
"""
rat_expo, sym_expo = S.One, None
sexpr, deriv = expr, None
if expr.is_Pow:
if isinstance(expr.base, Derivative):
sexpr, deriv = parse_derivative(expr.base)
else:
sexpr = expr.base
if expr.exp.is_Number:
rat_expo = expr.exp
else:
coeff, tail = expr.exp.as_coeff_Mul()
if coeff.is_Number:
rat_expo, sym_expo = coeff, tail
else:
sym_expo = expr.exp
elif expr.func is exp:
arg = expr.args[0]
if arg.is_Rational:
sexpr, rat_expo = S.Exp1, arg
elif arg.is_Mul:
coeff, tail = arg.as_coeff_Mul(rational=True)
sexpr, rat_expo = exp(tail), coeff
elif isinstance(expr, Derivative):
sexpr, deriv = parse_derivative(expr)
return sexpr, rat_expo, sym_expo, deriv
def parse_expression(terms, pattern):
"""Parse terms searching for a pattern.
terms is a list of tuples as returned by parse_terms;
pattern is an expression treated as a product of factors
"""
pattern = Mul.make_args(pattern)
if len(terms) < len(pattern):
# pattern is longer than matched product
# so no chance for positive parsing result
return None
else:
pattern = [parse_term(elem) for elem in pattern]
terms = terms[:] # need a copy
elems, common_expo, has_deriv = [], None, False
for elem, e_rat, e_sym, e_ord in pattern:
if elem.is_Number and e_rat == 1 and e_sym is None:
# a constant is a match for everything
continue
for j in range(len(terms)):
if terms[j] is None:
continue
term, t_rat, t_sym, t_ord = terms[j]
# keeping track of whether one of the terms had
# a derivative or not as this will require rebuilding
# the expression later
if t_ord is not None:
has_deriv = True
if (term.match(elem) is not None and
(t_sym == e_sym or t_sym is not None and
e_sym is not None and
t_sym.match(e_sym) is not None)):
if exact is False:
# we don't have to be exact so find common exponent
# for both expression's term and pattern's element
expo = t_rat / e_rat
if common_expo is None:
# first time
common_expo = expo
else:
# common exponent was negotiated before so
# there is no chance for a pattern match unless
# common and current exponents are equal
if common_expo != expo:
common_expo = 1
else:
# we ought to be exact so all fields of
# interest must match in every details
if e_rat != t_rat or e_ord != t_ord:
continue
# found common term so remove it from the expression
# and try to match next element in the pattern
elems.append(terms[j])
terms[j] = None
break
else:
# pattern element not found
return None
return [_f for _f in terms if _f], elems, common_expo, has_deriv
if evaluate:
if expr.is_Mul:
return expr.func(*[
collect(term, syms, func, True, exact, distribute_order_term)
for term in expr.args])
elif expr.is_Pow:
b = collect(
expr.base, syms, func, True, exact, distribute_order_term)
return Pow(b, expr.exp)
if iterable(syms):
syms = [expand_power_base(i, deep=False) for i in syms]
else:
syms = [expand_power_base(syms, deep=False)]
expr = sympify(expr)
order_term = None
if distribute_order_term:
order_term = expr.getO()
if order_term is not None:
if order_term.has(*syms):
order_term = None
else:
expr = expr.removeO()
summa = [expand_power_base(i, deep=False) for i in Add.make_args(expr)]
collected, disliked = defaultdict(list), S.Zero
for product in summa:
terms = [parse_term(i) for i in Mul.make_args(product)]
for symbol in syms:
if SYMPY_DEBUG:
print("DEBUG: parsing of expression %s with symbol %s " % (
str(terms), str(symbol))
)
result = parse_expression(terms, symbol)
if SYMPY_DEBUG:
print("DEBUG: returned %s" % str(result))
if result is not None:
terms, elems, common_expo, has_deriv = result
# when there was derivative in current pattern we
# will need to rebuild its expression from scratch
if not has_deriv:
index = 1
for elem in elems:
e = elem[1]
if elem[2] is not None:
e *= elem[2]
index *= Pow(elem[0], e)
else:
index = make_expression(elems)
terms = expand_power_base(make_expression(terms), deep=False)
index = expand_power_base(index, deep=False)
collected[index].append(terms)
break
else:
# none of the patterns matched
disliked += product
# add terms now for each key
collected = dict([(k, Add(*v)) for k, v in collected.items()])
if disliked is not S.Zero:
collected[S.One] = disliked
if order_term is not None:
for key, val in collected.items():
collected[key] = val + order_term
if func is not None:
collected = dict(
[(key, func(val)) for key, val in collected.items()])
if evaluate:
return Add(*[key*val for key, val in collected.items()])
else:
return collected
def rcollect(expr, *vars):
"""
Recursively collect sums in an expression.
Examples
========
>>> from sympy.simplify import rcollect
>>> from sympy.abc import x, y
>>> expr = (x**2*y + x*y + x + y)/(x + y)
>>> rcollect(expr, y)
(x + y*(x**2 + x + 1))/(x + y)
See Also
========
collect, collect_const, collect_sqrt
"""
if expr.is_Atom or not expr.has(*vars):
return expr
else:
expr = expr.__class__(*[rcollect(arg, *vars) for arg in expr.args])
if expr.is_Add:
return collect(expr, vars)
else:
return expr
def collect_sqrt(expr, evaluate=None):
"""Return expr with terms having common square roots collected together.
If ``evaluate`` is False a count indicating the number of sqrt-containing
terms will be returned and, if non-zero, the terms of the Add will be
returned, else the expression itself will be returned as a single term.
If ``evaluate`` is True, the expression with any collected terms will be
returned.
Note: since I = sqrt(-1), it is collected, too.
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.radsimp import collect_sqrt
>>> from sympy.abc import a, b
>>> r2, r3, r5 = [sqrt(i) for i in [2, 3, 5]]
>>> collect_sqrt(a*r2 + b*r2)
sqrt(2)*(a + b)
>>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r3)
sqrt(2)*(a + b) + sqrt(3)*(a + b)
>>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r5)
sqrt(3)*a + sqrt(5)*b + sqrt(2)*(a + b)
If evaluate is False then the arguments will be sorted and
returned as a list and a count of the number of sqrt-containing
terms will be returned:
>>> collect_sqrt(a*r2 + b*r2 + a*r3 + b*r5, evaluate=False)
((sqrt(3)*a, sqrt(5)*b, sqrt(2)*(a + b)), 3)
>>> collect_sqrt(a*sqrt(2) + b, evaluate=False)
((b, sqrt(2)*a), 1)
>>> collect_sqrt(a + b, evaluate=False)
((a + b,), 0)
See Also
========
collect, collect_const, rcollect
"""
if evaluate is None:
evaluate = global_evaluate[0]
# this step will help to standardize any complex arguments
# of sqrts
coeff, expr = expr.as_content_primitive()
vars = set()
for a in Add.make_args(expr):
for m in a.args_cnc()[0]:
if m.is_number and (
m.is_Pow and m.exp.is_Rational and m.exp.q == 2 or
m is S.ImaginaryUnit):
vars.add(m)
# we only want radicals, so exclude Number handling; in this case
# d will be evaluated
d = collect_const(expr, *vars, Numbers=False)
hit = expr != d
if not evaluate:
nrad = 0
# make the evaluated args canonical
args = list(ordered(Add.make_args(d)))
for i, m in enumerate(args):
c, nc = m.args_cnc()
for ci in c:
# XXX should this be restricted to ci.is_number as above?
if ci.is_Pow and ci.exp.is_Rational and ci.exp.q == 2 or \
ci is S.ImaginaryUnit:
nrad += 1
break
args[i] *= coeff
if not (hit or nrad):
args = [Add(*args)]
return tuple(args), nrad
return coeff*d
def collect_const(expr, *vars, **kwargs):
"""A non-greedy collection of terms with similar number coefficients in
an Add expr. If ``vars`` is given then only those constants will be
targeted. Although any Number can also be targeted, if this is not
desired set ``Numbers=False`` and no Float or Rational will be collected.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import a, s, x, y, z
>>> from sympy.simplify.radsimp import collect_const
>>> collect_const(sqrt(3) + sqrt(3)*(1 + sqrt(2)))
sqrt(3)*(sqrt(2) + 2)
>>> collect_const(sqrt(3)*s + sqrt(7)*s + sqrt(3) + sqrt(7))
(sqrt(3) + sqrt(7))*(s + 1)
>>> s = sqrt(2) + 2
>>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7))
(sqrt(2) + 3)*(sqrt(3) + sqrt(7))
>>> collect_const(sqrt(3)*s + sqrt(3) + sqrt(7)*s + sqrt(7), sqrt(3))
sqrt(7) + sqrt(3)*(sqrt(2) + 3) + sqrt(7)*(sqrt(2) + 2)
The collection is sign-sensitive, giving higher precedence to the
unsigned values:
>>> collect_const(x - y - z)
x - (y + z)
>>> collect_const(-y - z)
-(y + z)
>>> collect_const(2*x - 2*y - 2*z, 2)
2*(x - y - z)
>>> collect_const(2*x - 2*y - 2*z, -2)
2*x - 2*(y + z)
See Also
========
collect, collect_sqrt, rcollect
"""
if not expr.is_Add:
return expr
recurse = False
Numbers = kwargs.get('Numbers', True)
if not vars:
recurse = True
vars = set()
for a in expr.args:
for m in Mul.make_args(a):
if m.is_number:
vars.add(m)
else:
vars = sympify(vars)
if not Numbers:
vars = [v for v in vars if not v.is_Number]
vars = list(ordered(vars))
for v in vars:
terms = defaultdict(list)
Fv = Factors(v)
for m in Add.make_args(expr):
f = Factors(m)
q, r = f.div(Fv)
if r.is_one:
# only accept this as a true factor if
# it didn't change an exponent from an Integer
# to a non-Integer, e.g. 2/sqrt(2) -> sqrt(2)
# -- we aren't looking for this sort of change
fwas = f.factors.copy()
fnow = q.factors
if not any(k in fwas and fwas[k].is_Integer and not
fnow[k].is_Integer for k in fnow):
terms[v].append(q.as_expr())
continue
terms[S.One].append(m)
args = []
hit = False
uneval = False
for k in ordered(terms):
v = terms[k]
if k is S.One:
args.extend(v)
continue
if len(v) > 1:
v = Add(*v)
hit = True
if recurse and v != expr:
vars.append(v)
else:
v = v[0]
# be careful not to let uneval become True unless
# it must be because it's going to be more expensive
# to rebuild the expression as an unevaluated one
if Numbers and k.is_Number and v.is_Add:
args.append(_keep_coeff(k, v, sign=True))
uneval = True
else:
args.append(k*v)
if hit:
if uneval:
expr = _unevaluated_Add(*args)
else:
expr = Add(*args)
if not expr.is_Add:
break
return expr
def radsimp(expr, symbolic=True, max_terms=4):
"""
Rationalize the denominator by removing square roots.
Note: the expression returned from radsimp must be used with caution
since if the denominator contains symbols, it will be possible to make
substitutions that violate the assumptions of the simplification process:
that for a denominator matching a + b*sqrt(c), a != +/-b*sqrt(c). (If
there are no symbols, this assumptions is made valid by collecting terms
of sqrt(c) so the match variable ``a`` does not contain ``sqrt(c)``.) If
you do not want the simplification to occur for symbolic denominators, set
``symbolic`` to False.
If there are more than ``max_terms`` radical terms then the expression is
returned unchanged.
Examples
========
>>> from sympy import radsimp, sqrt, Symbol, denom, pprint, I
>>> from sympy import factor_terms, fraction, signsimp
>>> from sympy.simplify.radsimp import collect_sqrt
>>> from sympy.abc import a, b, c
>>> radsimp(1/(I + 1))
(1 - I)/2
>>> radsimp(1/(2 + sqrt(2)))
(-sqrt(2) + 2)/2
>>> x,y = map(Symbol, 'xy')
>>> e = ((2 + 2*sqrt(2))*x + (2 + sqrt(8))*y)/(2 + sqrt(2))
>>> radsimp(e)
sqrt(2)*(x + y)
No simplification beyond removal of the gcd is done. One might
want to polish the result a little, however, by collecting
square root terms:
>>> r2 = sqrt(2)
>>> r5 = sqrt(5)
>>> ans = radsimp(1/(y*r2 + x*r2 + a*r5 + b*r5)); pprint(ans)
___ ___ ___ ___
\/ 5 *a + \/ 5 *b - \/ 2 *x - \/ 2 *y
------------------------------------------
2 2 2 2
5*a + 10*a*b + 5*b - 2*x - 4*x*y - 2*y
>>> n, d = fraction(ans)
>>> pprint(factor_terms(signsimp(collect_sqrt(n))/d, radical=True))
___ ___
\/ 5 *(a + b) - \/ 2 *(x + y)
------------------------------------------
2 2 2 2
5*a + 10*a*b + 5*b - 2*x - 4*x*y - 2*y
If radicals in the denominator cannot be removed or there is no denominator,
the original expression will be returned.
>>> radsimp(sqrt(2)*x + sqrt(2))
sqrt(2)*x + sqrt(2)
Results with symbols will not always be valid for all substitutions:
>>> eq = 1/(a + b*sqrt(c))
>>> eq.subs(a, b*sqrt(c))
1/(2*b*sqrt(c))
>>> radsimp(eq).subs(a, b*sqrt(c))
nan
If symbolic=False, symbolic denominators will not be transformed (but
numeric denominators will still be processed):
>>> radsimp(eq, symbolic=False)
1/(a + b*sqrt(c))
"""
from sympy.simplify.simplify import signsimp
syms = symbols("a:d A:D")
def _num(rterms):
# return the multiplier that will simplify the expression described
# by rterms [(sqrt arg, coeff), ... ]
a, b, c, d, A, B, C, D = syms
if len(rterms) == 2:
reps = dict(list(zip([A, a, B, b], [j for i in rterms for j in i])))
return (
sqrt(A)*a - sqrt(B)*b).xreplace(reps)
if len(rterms) == 3:
reps = dict(list(zip([A, a, B, b, C, c], [j for i in rterms for j in i])))
return (
(sqrt(A)*a + sqrt(B)*b - sqrt(C)*c)*(2*sqrt(A)*sqrt(B)*a*b - A*a**2 -
B*b**2 + C*c**2)).xreplace(reps)
elif len(rterms) == 4:
reps = dict(list(zip([A, a, B, b, C, c, D, d], [j for i in rterms for j in i])))
return ((sqrt(A)*a + sqrt(B)*b - sqrt(C)*c - sqrt(D)*d)*(2*sqrt(A)*sqrt(B)*a*b
- A*a**2 - B*b**2 - 2*sqrt(C)*sqrt(D)*c*d + C*c**2 +
D*d**2)*(-8*sqrt(A)*sqrt(B)*sqrt(C)*sqrt(D)*a*b*c*d + A**2*a**4 -
2*A*B*a**2*b**2 - 2*A*C*a**2*c**2 - 2*A*D*a**2*d**2 + B**2*b**4 -
2*B*C*b**2*c**2 - 2*B*D*b**2*d**2 + C**2*c**4 - 2*C*D*c**2*d**2 +
D**2*d**4)).xreplace(reps)
elif len(rterms) == 1:
return sqrt(rterms[0][0])
else:
raise NotImplementedError
def ispow2(d, log2=False):
if not d.is_Pow:
return False
e = d.exp
if e.is_Rational and e.q == 2 or symbolic and fraction(e)[1] == 2:
return True
if log2:
q = 1
if e.is_Rational:
q = e.q
elif symbolic:
d = fraction(e)[1]
if d.is_Integer:
q = d
if q != 1 and log(q, 2).is_Integer:
return True
return False
def handle(expr):
# Handle first reduces to the case
# expr = 1/d, where d is an add, or d is base**p/2.
# We do this by recursively calling handle on each piece.
from sympy.simplify.simplify import nsimplify
n, d = fraction(expr)
if expr.is_Atom or (d.is_Atom and n.is_Atom):
return expr
elif not n.is_Atom:
n = n.func(*[handle(a) for a in n.args])
return _unevaluated_Mul(n, handle(1/d))
elif n is not S.One:
return _unevaluated_Mul(n, handle(1/d))
elif d.is_Mul:
return _unevaluated_Mul(*[handle(1/d) for d in d.args])
# By this step, expr is 1/d, and d is not a mul.
if not symbolic and d.free_symbols:
return expr
if ispow2(d):
d2 = sqrtdenest(sqrt(d.base))**fraction(d.exp)[0]
if d2 != d:
return handle(1/d2)
elif d.is_Pow and (d.exp.is_integer or d.base.is_positive):
# (1/d**i) = (1/d)**i
return handle(1/d.base)**d.exp
if not (d.is_Add or ispow2(d)):
return 1/d.func(*[handle(a) for a in d.args])
# handle 1/d treating d as an Add (though it may not be)
keep = True # keep changes that are made
# flatten it and collect radicals after checking for special
# conditions
d = _mexpand(d)
# did it change?
if d.is_Atom:
return 1/d
# is it a number that might be handled easily?
if d.is_number:
_d = nsimplify(d)
if _d.is_Number and _d.equals(d):
return 1/_d
while True:
# collect similar terms
collected = defaultdict(list)
for m in Add.make_args(d): # d might have become non-Add
p2 = []
other = []
for i in Mul.make_args(m):
if ispow2(i, log2=True):
p2.append(i.base if i.exp is S.Half else i.base**(2*i.exp))
elif i is S.ImaginaryUnit:
p2.append(S.NegativeOne)
else:
other.append(i)
collected[tuple(ordered(p2))].append(Mul(*other))
rterms = list(ordered(list(collected.items())))
rterms = [(Mul(*i), Add(*j)) for i, j in rterms]
nrad = len(rterms) - (1 if rterms[0][0] is S.One else 0)
if nrad < 1:
break
elif nrad > max_terms:
# there may have been invalid operations leading to this point
# so don't keep changes, e.g. this expression is troublesome
# in collecting terms so as not to raise the issue of 2834:
# r = sqrt(sqrt(5) + 5)
# eq = 1/(sqrt(5)*r + 2*sqrt(5)*sqrt(-sqrt(5) + 5) + 5*r)
keep = False
break
if len(rterms) > 4:
# in general, only 4 terms can be removed with repeated squaring
# but other considerations can guide selection of radical terms
# so that radicals are removed
if all([x.is_Integer and (y**2).is_Rational for x, y in rterms]):
nd, d = rad_rationalize(S.One, Add._from_args(
[sqrt(x)*y for x, y in rterms]))
n *= nd
else:
# is there anything else that might be attempted?
keep = False
break
from sympy.simplify.powsimp import powsimp, powdenest
num = powsimp(_num(rterms))
n *= num
d *= num
d = powdenest(_mexpand(d), force=symbolic)
if d.is_Atom:
break
if not keep:
return expr
return _unevaluated_Mul(n, 1/d)
coeff, expr = expr.as_coeff_Add()
expr = expr.normal()
old = fraction(expr)
n, d = fraction(handle(expr))
if old != (n, d):
if not d.is_Atom:
was = (n, d)
n = signsimp(n, evaluate=False)
d = signsimp(d, evaluate=False)
u = Factors(_unevaluated_Mul(n, 1/d))
u = _unevaluated_Mul(*[k**v for k, v in u.factors.items()])
n, d = fraction(u)
if old == (n, d):
n, d = was
n = expand_mul(n)
if d.is_Number or d.is_Add:
n2, d2 = fraction(gcd_terms(_unevaluated_Mul(n, 1/d)))
if d2.is_Number or (d2.count_ops() <= d.count_ops()):
n, d = [signsimp(i) for i in (n2, d2)]
if n.is_Mul and n.args[0].is_Number:
n = n.func(*n.args)
return coeff + _unevaluated_Mul(n, 1/d)
def rad_rationalize(num, den):
"""
Rationalize num/den by removing square roots in the denominator;
num and den are sum of terms whose squares are rationals
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.radsimp import rad_rationalize
>>> rad_rationalize(sqrt(3), 1 + sqrt(2)/3)
(-sqrt(3) + sqrt(6)/3, -7/9)
"""
if not den.is_Add:
return num, den
g, a, b = split_surds(den)
a = a*sqrt(g)
num = _mexpand((a - b)*num)
den = _mexpand(a**2 - b**2)
return rad_rationalize(num, den)
def fraction(expr, exact=False):
"""Returns a pair with expression's numerator and denominator.
If the given expression is not a fraction then this function
will return the tuple (expr, 1).
This function will not make any attempt to simplify nested
fractions or to do any term rewriting at all.
If only one of the numerator/denominator pair is needed then
use numer(expr) or denom(expr) functions respectively.
>>> from sympy import fraction, Rational, Symbol
>>> from sympy.abc import x, y
>>> fraction(x/y)
(x, y)
>>> fraction(x)
(x, 1)
>>> fraction(1/y**2)
(1, y**2)
>>> fraction(x*y/2)
(x*y, 2)
>>> fraction(Rational(1, 2))
(1, 2)
This function will also work fine with assumptions:
>>> k = Symbol('k', negative=True)
>>> fraction(x * y**k)
(x, y**(-k))
If we know nothing about sign of some exponent and 'exact'
flag is unset, then structure this exponent's structure will
be analyzed and pretty fraction will be returned:
>>> from sympy import exp
>>> fraction(2*x**(-y))
(2, x**y)
>>> fraction(exp(-x))
(1, exp(x))
>>> fraction(exp(-x), exact=True)
(exp(-x), 1)
"""
expr = sympify(expr)
numer, denom = [], []
for term in Mul.make_args(expr):
if term.is_commutative and (term.is_Pow or term.func is exp):
b, ex = term.as_base_exp()
if ex.is_negative:
if ex is S.NegativeOne:
denom.append(b)
else:
denom.append(Pow(b, -ex))
elif ex.is_positive:
numer.append(term)
elif not exact and ex.is_Mul:
n, d = term.as_numer_denom()
numer.append(n)
denom.append(d)
else:
numer.append(term)
elif term.is_Rational:
n, d = term.as_numer_denom()
numer.append(n)
denom.append(d)
else:
numer.append(term)
return Mul(*numer), Mul(*denom)
def numer(expr):
return fraction(expr)[0]
def denom(expr):
return fraction(expr)[1]
def fraction_expand(expr, **hints):
return expr.expand(frac=True, **hints)
def numer_expand(expr, **hints):
a, b = fraction(expr)
return a.expand(numer=True, **hints) / b
def denom_expand(expr, **hints):
a, b = fraction(expr)
return a / b.expand(denom=True, **hints)
expand_numer = numer_expand
expand_denom = denom_expand
expand_fraction = fraction_expand
def split_surds(expr):
"""
split an expression with terms whose squares are rationals
into a sum of terms whose surds squared have gcd equal to g
and a sum of terms with surds squared prime with g
Examples
========
>>> from sympy import sqrt
>>> from sympy.simplify.radsimp import split_surds
>>> split_surds(3*sqrt(3) + sqrt(5)/7 + sqrt(6) + sqrt(10) + sqrt(15))
(3, sqrt(2) + sqrt(5) + 3, sqrt(5)/7 + sqrt(10))
"""
args = sorted(expr.args, key=default_sort_key)
coeff_muls = [x.as_coeff_Mul() for x in args]
surds = [x[1]**2 for x in coeff_muls if x[1].is_Pow]
surds.sort(key=default_sort_key)
g, b1, b2 = _split_gcd(*surds)
g2 = g
if not b2 and len(b1) >= 2:
b1n = [x/g for x in b1]
b1n = [x for x in b1n if x != 1]
# only a common factor has been factored; split again
g1, b1n, b2 = _split_gcd(*b1n)
g2 = g*g1
a1v, a2v = [], []
for c, s in coeff_muls:
if s.is_Pow and s.exp == S.Half:
s1 = s.base
if s1 in b1:
a1v.append(c*sqrt(s1/g2))
else:
a2v.append(c*s)
else:
a2v.append(c*s)
a = Add(*a1v)
b = Add(*a2v)
return g2, a, b
def _split_gcd(*a):
"""
split the list of integers ``a`` into a list of integers, ``a1`` having
``g = gcd(a1)``, and a list ``a2`` whose elements are not divisible by
``g``. Returns ``g, a1, a2``
Examples
========
>>> from sympy.simplify.radsimp import _split_gcd
>>> _split_gcd(55, 35, 22, 14, 77, 10)
(5, [55, 35, 10], [22, 14, 77])
"""
g = a[0]
b1 = [g]
b2 = []
for x in a[1:]:
g1 = gcd(g, x)
if g1 == 1:
b2.append(x)
else:
g = g1
b1.append(x)
return g, b1, b2
| bsd-3-clause | 8,404,757,451,436,556,000 | 32.330224 | 100 | 0.521215 | false |
marivipelaez/bicingbot | tests/test_groups.py | 1 | 16480 | # -*- coding: utf-8 -*-
u"""
Copyright 2016 Marivi Pelaez Alonso.
This file is part of BicingBot.
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import mock
from bicingbot.groups import GROUPS_CACHE, MAX_NUMBER_GROUPS, MAX_NUMBER_STATIONS
from bicingbot.groups import GROUP_STATUS_NEWGROUP_NAME, GROUP_STATUS_NEWGROUP_STATIONS
from bicingbot.groups import newgroup_command, del_group_status, is_valid_group_name, groups_command
from bicingbot.groups import remove_group_command, remove_group, remove_group_cancel
from bicingbot.internationalization import STRINGS
from tests.utils import CallbackQuery
chat_id = '333'
@mock.patch('bicingbot.commands.get_bot')
@mock.patch('bicingbot.commands.Bicing')
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_newgroup_command(get_bot, DatabaseConnection, Bicing, commands_get_bot):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = []
Bicing.return_value = mock.MagicMock()
commands_get_bot.return_value = mock.MagicMock()
del_group_status(chat_id)
newgroup_command(chat_id, 'newgroup')
# Check bot calls and temporal cache
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_name'])
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_NAME
newgroup_command(chat_id, 'casa')
# Check bot calls and temporal cache
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_stations'])
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_STATIONS
assert GROUPS_CACHE[chat_id]['name'] == 'casa'
newgroup_command(chat_id, '1')
newgroup_command(chat_id, '2')
# Check bot calls and temporal cache
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_stations'])
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_STATIONS
assert GROUPS_CACHE[chat_id]['name'] == 'casa'
assert GROUPS_CACHE[chat_id]['stations'] == [1, 2]
newgroup_command(chat_id, 'end')
# Check bot and database calls
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_created'].format('casa'))
DatabaseConnection().delete_group.assert_called_with(chat_id=chat_id, name='casa')
DatabaseConnection().create_group.assert_called_with(chat_id=chat_id, name='casa', stations=[1, 2])
commands_get_bot().send_message.assert_called_once()
assert chat_id not in GROUPS_CACHE
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_newgroup_command_cancel(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
del_group_status(chat_id)
newgroup_command(chat_id, 'newgroup')
newgroup_command(chat_id, 'end')
# Check bot and database calls
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_not_created'])
DatabaseConnection().create_group.assert_not_called()
assert chat_id not in GROUPS_CACHE
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_newgroup_command_number_groups_limit(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = ['casa{}'.format(i) for i in range(MAX_NUMBER_GROUPS)]
del_group_status(chat_id)
newgroup_command(chat_id, 'newgroup')
expected_text = STRINGS['es']['newgroup_number_groups_limit'].format(MAX_NUMBER_GROUPS)
get_bot().send_message.assert_called_with(chat_id=chat_id, text=expected_text)
assert chat_id not in GROUPS_CACHE
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_newgroup_command_bad_formatted_name(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = []
del_group_status(chat_id)
newgroup_command(chat_id, 'newgroup')
# Check name is number
newgroup_command(chat_id, '1')
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_name_format_error'])
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_NAME
# Check name starts with /
newgroup_command(chat_id, '/casa')
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_name_format_error'])
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_NAME
# Check name is a command
newgroup_command(chat_id, 'settings')
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_name_format_error'])
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_NAME
@mock.patch('bicingbot.commands.get_bot')
@mock.patch('bicingbot.commands.Bicing')
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_newgroup_command_existing_name_overwrite(get_bot, DatabaseConnection, Bicing, commands_get_bot):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = ['casa']
DatabaseConnection().get_group.return_value = {'chat_id': 333, 'name': 'casa', 'stations': [1, 2, 3]}
Bicing.return_value = mock.MagicMock()
commands_get_bot.return_value = mock.MagicMock()
del_group_status(chat_id)
newgroup_command(chat_id, 'newgroup')
# Check warning message is sent
newgroup_command(chat_id, 'casa')
message = STRINGS['es']['newgroup_name_already_existing'].format(STRINGS['es']['newgroup_stations'].lower())
get_bot().send_message.assert_called_with(chat_id=chat_id, text=message)
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_STATIONS
newgroup_command(chat_id, '1')
newgroup_command(chat_id, '2')
newgroup_command(chat_id, 'end')
# Check bot and database calls
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_created'].format('casa'))
DatabaseConnection().delete_group.assert_called_with(chat_id=chat_id, name='casa')
DatabaseConnection().create_group.assert_called_with(chat_id=chat_id, name='casa', stations=[1, 2])
commands_get_bot().send_message.assert_called_once()
assert chat_id not in GROUPS_CACHE
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_newgroup_command_existing_name_cancel(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = ['casa']
DatabaseConnection().get_group.return_value = {'chat_id': 333, 'name': 'casa', 'stations': [1, 2, 3]}
del_group_status(chat_id)
newgroup_command(chat_id, 'newgroup')
# Check warning message is sent
newgroup_command(chat_id, 'casa')
message = STRINGS['es']['newgroup_name_already_existing'].format(STRINGS['es']['newgroup_stations'].lower())
get_bot().send_message.assert_called_with(chat_id=chat_id, text=message)
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_STATIONS
newgroup_command(chat_id, 'end')
# Check bot and database calls
get_bot().send_message.assert_called_with(chat_id=chat_id,
text=STRINGS['es']['newgroup_not_overwrite'].format('casa'))
DatabaseConnection().create_group.assert_not_called()
assert chat_id not in GROUPS_CACHE
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_newgroup_command_no_stations(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = []
del_group_status(chat_id)
newgroup_command(chat_id, 'newgroup')
newgroup_command(chat_id, 'casa')
newgroup_command(chat_id, 'end')
# Check bot and database calls
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_not_created'])
DatabaseConnection().create_group.assert_not_called()
assert chat_id not in GROUPS_CACHE
@mock.patch('bicingbot.commands.get_bot')
@mock.patch('bicingbot.commands.Bicing')
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_newgroup_command_wrong_station(get_bot, DatabaseConnection, Bicing, commands_get_bot):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = []
Bicing.return_value = mock.MagicMock()
commands_get_bot.return_value = mock.MagicMock()
del_group_status(chat_id)
newgroup_command(chat_id, 'newgroup')
newgroup_command(chat_id, 'casa')
newgroup_command(chat_id, '1')
newgroup_command(chat_id, 'not a number')
# Check bot calls and temporal cache
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_unknown_command'])
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_STATIONS
assert GROUPS_CACHE[chat_id]['name'] == 'casa'
assert GROUPS_CACHE[chat_id]['stations'] == [1]
newgroup_command(chat_id, 'end')
# Check bot and database calls
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_created'].format('casa'))
DatabaseConnection().delete_group.assert_called_with(chat_id=chat_id, name='casa')
DatabaseConnection().create_group.assert_called_with(chat_id=chat_id, name='casa', stations=[1])
commands_get_bot().send_message.assert_called_once()
assert chat_id not in GROUPS_CACHE
@mock.patch('bicingbot.commands.get_bot')
@mock.patch('bicingbot.commands.Bicing')
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_newgroup_command_number_stations_limit(get_bot, DatabaseConnection, Bicing, commands_get_bot):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = []
Bicing.return_value = mock.MagicMock()
commands_get_bot.return_value = mock.MagicMock()
del_group_status(chat_id)
newgroup_command(chat_id, 'newgroup')
newgroup_command(chat_id, 'casa')
for i in range(MAX_NUMBER_STATIONS):
newgroup_command(chat_id, str(i))
newgroup_command(chat_id, '1000')
# Check bot calls and temporal cache
expected_text = STRINGS['es']['newgroup_number_stations_limit'].format(MAX_NUMBER_STATIONS)
get_bot().send_message.assert_called_with(chat_id=chat_id, text=expected_text)
assert GROUPS_CACHE[chat_id]['status'] == GROUP_STATUS_NEWGROUP_STATIONS
assert GROUPS_CACHE[chat_id]['name'] == 'casa'
assert GROUPS_CACHE[chat_id]['stations'] == [i for i in range(MAX_NUMBER_STATIONS)]
newgroup_command(chat_id, 'end')
# Check bot and database calls
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['newgroup_created'].format('casa'))
DatabaseConnection().delete_group.assert_called_with(chat_id=chat_id, name='casa')
DatabaseConnection().create_group.assert_called_with(chat_id=chat_id, name='casa',
stations=[i for i in range(MAX_NUMBER_STATIONS)])
commands_get_bot().send_message.assert_called_once()
assert chat_id not in GROUPS_CACHE
def test_is_valid_group_name():
assert is_valid_group_name('casa')
assert is_valid_group_name('casacasacasacasacasa')
assert is_valid_group_name('casa_casa')
assert is_valid_group_name('casa-casa')
assert is_valid_group_name('casa10')
assert not is_valid_group_name('1')
assert not is_valid_group_name('/14')
assert not is_valid_group_name('casa/casa')
assert not is_valid_group_name('casa casa')
assert not is_valid_group_name('casacasacasacasacasac')
assert not is_valid_group_name('settings')
assert not is_valid_group_name('fin')
assert not is_valid_group_name('casa\casa')
assert not is_valid_group_name('casa*')
assert not is_valid_group_name('casa.casa')
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_groups_command(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = ['group1', 'group2']
groups_command(chat_id, '/grupos')
get_bot().send_message.assert_called_with(chat_id=chat_id, text='/group1, /group2')
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_groups_command_empty(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = []
groups_command(chat_id, '/grupos')
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['groups_empty'])
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_remove_group_command(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = ['group1', 'group2']
remove_group_command(chat_id, 'removegroup')
# Check bot calls
get_bot().send_message.assert_called_once()
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_remove_group(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = ['group1', 'group2']
DatabaseConnection().get_group.return_value = {'stations': [1, 2]}
callback_query = CallbackQuery(1, message_id=111)
remove_group(chat_id, 'group2', callback_query)
# Check bot calls
get_bot().answer_callback_query.assert_called_once()
get_bot().edit_message_text.assert_called_with(chat_id=chat_id,
text=STRINGS['es']['removegroup_removed'].format('group2', '1, 2'),
message_id=111)
DatabaseConnection().delete_group.assert_called_with(chat_id=chat_id, name='group2')
@mock.patch('bicingbot.groups.DatabaseConnection')
@mock.patch('bicingbot.groups.get_bot')
def test_remove_group_not_found(get_bot, DatabaseConnection):
get_bot.return_value = mock.MagicMock()
DatabaseConnection.return_value = mock.MagicMock()
DatabaseConnection().get_groups_names.return_value = ['group1', 'group2']
callback_query = CallbackQuery(1, message_id=111)
remove_group(chat_id, 'notfound', callback_query)
# Check bot calls
get_bot().answer_callback_query.assert_not_called()
get_bot().send_message.assert_called_with(chat_id=chat_id, text=STRINGS['es']['removegroup_not_found'])
DatabaseConnection().delete_group.assert_not_called()
@mock.patch('bicingbot.groups.get_bot')
def test_remove_group_cancel(get_bot):
get_bot.return_value = mock.MagicMock()
callback_query = CallbackQuery(1, message_id=111)
remove_group_cancel(chat_id, callback_query)
# Check bot calls
get_bot().answer_callback_query.assert_called_once()
get_bot().edit_message_text.assert_called_with(chat_id=chat_id, text=STRINGS['es']['removegroup_canceled'],
message_id=111)
| apache-2.0 | 2,873,989,933,453,724,700 | 42.141361 | 118 | 0.709709 | false |
rbbratta/virt-test | qemu/tests/check_unhalt_vcpu.py | 2 | 1186 | import logging, time
from autotest.client.shared import error
from autotest.client import utils
def run_check_unhalt_vcpu(test, params, env):
"""
    Check for an unhalted vcpu in the guest.
    1) Use qemu-img to create an image which cannot boot.
    2) Start the vm with the image created in step 1.
    3) Use ps to get the qemu-kvm process %cpu; if greater than 90%, report failure.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
pid = vm.get_pid()
if not pid:
raise error.TestError("Could not get QEMU's PID")
sleep_time = params.get("sleep_time", 60)
time.sleep(sleep_time)
cpu_get_usage_cmd = params["cpu_get_usage_cmd"]
cpu_get_usage_cmd = cpu_get_usage_cmd % pid
cpu_usage = utils.system_output(cpu_get_usage_cmd)
try:
cpu_usage = float(cpu_usage)
except ValueError, detail:
raise error.TestError("Could not get correct cpu usage value with cmd"
" '%s', detail: '%s'" % (cpu_get_usage_cmd, detail))
logging.info("Guest's reported CPU usage: %s", cpu_usage)
if cpu_usage >= 90:
        raise error.TestFail("Guest has an unhalted vcpu.")
    logging.info("Guest vcpu works normally")
| gpl-2.0 | 9,104,607,708,743,783,000 | 31.944444 | 78 | 0.639966 | false |
levkar/odoo-addons | adhoc_base_setup/__openerp__.py | 2 | 1495 | # -*- coding: utf-8 -*-
{
'name': 'ADHOC Modules Configuration',
'version': '1.0',
'category': 'ADHOC Modules',
'summary': 'extra, addons, modules',
'description': """
ADHOC Modules Configuration
===============================================================================
Here you can configure the whole business suite based on your requirements. You'll be provided with different configuration options in the Settings where, by only selecting some booleans, you will be able to install several modules and apply access rights in just one go.
Repositories required:
---------------------
* https://github.com/ingadhoc/odoo-addons
* https://github.com/ingadhoc/odoo-web
* https://github.com/akretion/odoo-usability
* https://github.com/OCA/account-invoicing
* https://github.com/OCA/knowledge
* https://github.com/OCA/server-tools
* https://github.com/OCA/account-financial-reporting
* https://github.com/OCA/account-financial-tools
* https://github.com/OCA/reporting-engine
* https://github.com/OCA/purchase-workflow
* https://github.com/aeroo/aeroo_reports
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'base_setup'
],
'data': [
'res_config_view.xml',
],
'demo': [
'demo/company_demo.xml',
],
'test': [
],
'installable': True,
'auto_install': True,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,399,476,708,183,574,000 | 32.977273 | 265 | 0.626087 | false |
hryamzik/ansible | lib/ansible/modules/cloud/misc/helm.py | 25 | 5009 | #!/usr/bin/python
# (c) 2016, Flavio Percoco <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: helm
short_description: Manages Kubernetes packages with the Helm package manager
version_added: "2.4"
author: "Flavio Percoco (flaper87)"
description:
    - Install, upgrade, delete and list packages with the Helm package manager
requirements:
- "pyhelm"
- "grpcio"
options:
host:
description:
- Tiller's server host
default: "localhost"
port:
description:
- Tiller's server port
default: 44134
namespace:
description:
- Kubernetes namespace where the chart should be installed
default: "default"
name:
description:
- Release name to manage
state:
description:
- Whether to install C(present), remove C(absent), or purge C(purged) a package.
choices: ['absent', 'purged', 'present']
default: "present"
chart:
description: |
A map describing the chart to install. For example:
chart:
name: memcached
version: 0.4.0
source:
type: repo
location: https://kubernetes-charts.storage.googleapis.com
default: {}
values:
description:
- A map of value options for the chart.
default: {}
disable_hooks:
description:
- Whether to disable hooks during the uninstall process
type: bool
default: 'no'
'''
RETURN = ''' # '''
EXAMPLES = '''
- name: Install helm chart
helm:
host: localhost
chart:
name: memcached
version: 0.4.0
source:
type: repo
location: https://kubernetes-charts.storage.googleapis.com
state: present
name: my-memcached
namespace: default
- name: Uninstall helm chart
helm:
host: localhost
state: absent
name: my-memcached
'''
try:
import grpc
from pyhelm import tiller
from pyhelm import chartbuilder
HAS_PYHELM = True
except ImportError as exc:
HAS_PYHELM = False
from ansible.module_utils.basic import AnsibleModule
def install(module, tserver):
changed = False
params = module.params
name = params['name']
values = params['values']
chart = module.params['chart']
namespace = module.params['namespace']
chartb = chartbuilder.ChartBuilder(chart)
r_matches = (x for x in tserver.list_releases()
if x.name == name and x.namespace == namespace)
installed_release = next(r_matches, None)
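    # If a release with this name already exists, upgrade it only when the
    # chart version differs; when no such release exists, install it fresh.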
if installed_release:
if installed_release.chart.metadata.version != chart['version']:
tserver.update_release(chartb.get_helm_chart(), False,
namespace, name=name, values=values)
changed = True
else:
tserver.install_release(chartb.get_helm_chart(), namespace,
dry_run=False, name=name,
values=values)
changed = True
return dict(changed=changed)
def delete(module, tserver, purge=False):
changed = False
params = module.params
if not module.params['name']:
module.fail_json(msg='Missing required field name')
name = module.params['name']
disable_hooks = params['disable_hooks']
try:
tserver.uninstall_release(name, disable_hooks, purge)
changed = True
except grpc._channel._Rendezvous as exc:
if 'not found' not in str(exc):
raise exc
return dict(changed=changed)
def main():
"""The main function."""
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', default='localhost'),
port=dict(type='int', default=44134),
name=dict(type='str', default=''),
chart=dict(type='dict'),
state=dict(
choices=['absent', 'purged', 'present'],
default='present'
),
# Install options
values=dict(type='dict'),
namespace=dict(type='str', default='default'),
# Uninstall options
disable_hooks=dict(type='bool', default=False),
),
supports_check_mode=True)
if not HAS_PYHELM:
module.fail_json(msg="Could not import the pyhelm python module. "
"Please install `pyhelm` package.")
host = module.params['host']
port = module.params['port']
state = module.params['state']
tserver = tiller.Tiller(host, port)
if state == 'present':
rst = install(module, tserver)
if state in 'absent':
rst = delete(module, tserver)
if state in 'purged':
rst = delete(module, tserver, True)
module.exit_json(**rst)
if __name__ == '__main__':
main()
| gpl-3.0 | -710,268,326,820,829,000 | 25.363158 | 92 | 0.603913 | false |
demonchild2112/travis-test | grr/server/grr_response_server/threadpool_test.py | 2 | 13125 | #!/usr/bin/env python
"""Tests for the ThreadPool class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import threading
import time
from absl import app
from future.builtins import range
import queue
from grr_response_core.lib import utils
from grr_response_server import threadpool
from grr.test_lib import stats_test_lib
from grr.test_lib import test_lib
class ThreadPoolTest(stats_test_lib.StatsTestMixin, test_lib.GRRBaseTest):
"""Tests for the ThreadPool class."""
NUMBER_OF_THREADS = 1
MAXIMUM_THREADS = 20
NUMBER_OF_TASKS = 1500
sleep_time = 0.1
def setUp(self):
super(ThreadPoolTest, self).setUp()
self.base_thread_count = threading.active_count()
prefix = "pool-%s" % self._testMethodName
self.test_pool = threadpool.ThreadPool.Factory(
prefix, self.NUMBER_OF_THREADS, max_threads=self.MAXIMUM_THREADS)
self.test_pool.Start()
self.addCleanup(self.test_pool.Stop)
def WaitUntil(self, condition_cb, timeout=5):
"""Wait a fixed time until the condition is true."""
for _ in range(int(timeout / self.sleep_time)):
res = condition_cb()
if res:
return res
time.sleep(self.sleep_time)
raise RuntimeError("Timeout exceeded. Condition not true")
def Count(self, thread_name):
worker_threads = [
thread for thread in threading.enumerate() if thread_name in thread.name
]
return len(worker_threads)
def testThreadCreation(self):
"""Ensure the thread pool started the minimum number of threads."""
self.assertEqual(
self.Count("pool-testThreadCreation"), self.NUMBER_OF_THREADS)
def testStopping(self):
"""Tests if all worker threads terminate if the thread pool is stopped."""
self.assertEqual(self.Count("pool-testStopping"), self.NUMBER_OF_THREADS)
self.test_pool.Stop()
self.assertEqual(self.Count("pool-testStopping"), 0)
self.test_pool.Start()
self.assertEqual(self.Count("pool-testStopping"), self.NUMBER_OF_THREADS)
self.test_pool.Stop()
self.assertEqual(self.Count("pool-testStopping"), 0)
# This test leaves the test pool in stopped state. tearDown will try to
# Stop() it again but this should work and just log a warning.
def testRunTasks(self):
"""Test for running jobs on the thread pool.
This runs 1500 tasks on the ThreadPool and waits for them to
complete.
"""
# Tests if calling Join on an empty ThreadPool works.
self.test_pool.Join()
self.lock = threading.Lock()
def Insert(list_obj, element):
with self.lock:
list_obj.append(element)
test_list = []
for i in range(self.NUMBER_OF_TASKS):
self.test_pool.AddTask(Insert, (test_list, i))
self.test_pool.Join()
test_list.sort()
self.assertEqual(list(range(self.NUMBER_OF_TASKS)), test_list)
def testAdditionalWorkersAreAllocatedWhenSingleTaskTakesLong(self):
wait_event_1, wait_event_2 = threading.Event(), threading.Event()
signal_event_1, signal_event_2 = threading.Event(), threading.Event()
try:
sample = []
def RunFn(signal_event, wait_event, num):
sample.append(num)
signal_event.set()
wait_event.wait()
self.test_pool.AddTask(
RunFn, (wait_event_1, signal_event_1, 0),
blocking=False,
inline=False)
wait_event_1.wait(10)
self.assertEqual(sample, [0])
# Now task 1 is running, schedule task 2 and make sure it runs and
# completes.
self.test_pool.AddTask(
RunFn, (wait_event_2, signal_event_2, 1),
blocking=False,
inline=False)
wait_event_2.wait(10)
self.assertEqual(sample, [0, 1])
finally:
signal_event_1.set()
signal_event_2.set()
def testAddingTaskToNonStartedThreadPoolRaises(self):
pool = threadpool.ThreadPool.Factory("t", 10)
with self.assertRaises(threadpool.ThreadPoolNotStartedError):
pool.AddTask(lambda: None, ())
def testRunRaisingTask(self):
"""Tests the behavior of the pool if a task throws an exception."""
self.lock = threading.Lock()
def IRaise(some_obj):
"""This method just raises an exception."""
with self.lock:
# This simulates an error by calling a non-existent function.
some_obj.process()
self.exception_args = []
def MockException(*args):
self.exception_args = args
with self.assertStatsCounterDelta(
2, threadpool.THREADPOOL_TASK_EXCEPTIONS, fields=[self.test_pool.name]):
with utils.Stubber(logging, "exception", MockException):
self.test_pool.AddTask(IRaise, (None,), "Raising")
self.test_pool.AddTask(IRaise, (None,), "Raising")
self.test_pool.Join()
# Check that an exception is raised.
self.assertIn("exception in worker thread", self.exception_args[0])
self.assertEqual(self.exception_args[1], "Raising")
def testFailToCreateThread(self):
"""Test that we handle thread creation problems ok."""
# The pool starts off with the minimum number of threads.
self.assertLen(self.test_pool, self.NUMBER_OF_THREADS)
done_event = threading.Event()
def Block(done):
done.wait()
def RaisingStart(_):
raise threading.ThreadError()
# Now simulate failure of creating threads.
with utils.Stubber(threadpool._WorkerThread, "start", RaisingStart):
# Fill all the existing threads and wait for them to become busy.
self.test_pool.AddTask(Block, (done_event,))
      self.WaitUntil(
          lambda: self.test_pool.busy_threads == self.NUMBER_OF_THREADS)
# Now fill the queue completely..
for _ in range(self.MAXIMUM_THREADS):
self.test_pool.AddTask(Block, (done_event,))
# Trying to push this task will overflow the queue, and would normally
# cause a new thread to start. We use non blocking mode to receive the
# exception.
self.assertRaises(
threadpool.Full,
self.test_pool.AddTask,
Block, (done_event,),
blocking=False,
inline=False)
# Release the blocking tasks.
done_event.set()
self.test_pool.Join()
def testBlockingTasks(self):
# The pool starts off with the minimum number of threads.
self.assertLen(self.test_pool, self.NUMBER_OF_THREADS)
done_event = threading.Event()
self.lock = threading.Lock()
res = []
def Block(done):
done.wait()
def Insert(list_obj, element):
with self.lock:
list_obj.append(element)
# Schedule the maximum number of threads of blocking tasks and the same of
# insert tasks. The threads are now all blocked, and the inserts are
# waiting in the queue.
for _ in range(self.MAXIMUM_THREADS):
self.test_pool.AddTask(Block, (done_event,), "Blocking")
# Wait until the threadpool picks up tasks.
self.WaitUntil(lambda: self.test_pool.busy_threads == self.MAXIMUM_THREADS)
# Now there's maximum number of threads active and the queue is empty.
self.assertEqual(self.test_pool.pending_tasks, 0)
# Now we push these tasks on the queue, but they're not going to be
# processed, since all threads are busy.
for i in range(self.MAXIMUM_THREADS):
self.test_pool.AddTask(
Insert, (res, i), "Insert", blocking=True, inline=False)
# There should be 20 workers created and they should consume all the
# blocking tasks.
self.WaitUntil(lambda: self.test_pool.busy_threads == self.MAXIMUM_THREADS)
# No Insert tasks are running yet.
self.assertEqual(res, [])
# There are 20 tasks waiting on the queue.
self.assertEqual(self.test_pool.pending_tasks, self.MAXIMUM_THREADS)
# Inserting more tasks than the queue can hold should lead to processing
# the tasks inline. This effectively causes these tasks to skip over the
# tasks which are waiting in the queue.
for i in range(10, 20):
self.test_pool.AddTask(Insert, (res, i), "Insert", inline=True)
res.sort()
self.assertEqual(res, list(range(10, 20)))
# This should release all the busy tasks. It will also cause the workers
# to process all the Insert tasks in the queue.
done_event.set()
self.test_pool.Join()
# Now the rest of the tasks should have been processed as well.
self.assertEqual(sorted(res[10:]), list(range(20)))
def testThreadsReaped(self):
"""Check that threads are reaped when too old."""
self.now = 0
with utils.MultiStubber((time, "time", lambda: self.now),
(threading, "_time", lambda: self.now),
(queue, "_time", lambda: self.now),
(self.test_pool, "CPUUsage", lambda: 0)):
done_event = threading.Event()
res = []
def Block(done, count):
done.wait()
res.append(count)
for i in range(2 * self.MAXIMUM_THREADS):
self.test_pool.AddTask(Block, (done_event, i), "Blocking", inline=False)
self.assertLen(self.test_pool, self.MAXIMUM_THREADS)
# Release the threads. All threads are now idle.
done_event.set()
# Fast forward the time
self.now = 1000
# Threads will now kill themselves off and the threadpool will be reduced
# to the minimum number of threads..
self.WaitUntil(lambda: len(self.test_pool) == self.NUMBER_OF_THREADS)
# Ensure we have the minimum number of threads left now.
self.assertLen(self.test_pool, self.NUMBER_OF_THREADS)
def testExportedFunctions(self):
"""Tests if the outstanding tasks variable is exported correctly."""
signal_event, wait_event = threading.Event(), threading.Event()
def RunFn():
signal_event.set()
wait_event.wait()
pool_name = "test_pool3"
pool = threadpool.ThreadPool.Factory(pool_name, 10)
pool.Start()
try:
# First 10 tasks should be scheduled immediately, as we have max_threads
# set to 10.
for _ in range(10):
signal_event.clear()
pool.AddTask(RunFn, ())
signal_event.wait(10)
# Next 5 tasks should sit in the queue.
for _ in range(5):
with self.assertStatsCounterDelta(
1, threadpool.THREADPOOL_OUTSTANDING_TASKS, fields=[pool_name]):
pool.AddTask(RunFn, ())
finally:
wait_event.set()
pool.Stop()
def testDuplicateNameError(self):
"""Tests that creating two pools with the same name fails."""
prefix = self.test_pool.name
self.assertRaises(threadpool.DuplicateThreadpoolError,
threadpool.ThreadPool, prefix, 10)
def testDuplicateName(self):
"""Tests that we can get the same pool again through the factory."""
prefix = "duplicate_name"
pool = threadpool.ThreadPool.Factory(prefix, 10)
try:
self.assertEqual(pool.started, False)
pool.Start()
self.assertEqual(pool.started, True)
# This should return the same pool as before.
pool2 = threadpool.ThreadPool.Factory(prefix, 10)
self.assertEqual(pool2.started, True)
finally:
pool.Stop()
def testAnonymousThreadpool(self):
"""Tests that we can't starts anonymous threadpools."""
prefix = None
with self.assertRaises(ValueError):
threadpool.ThreadPool.Factory(prefix, 10)
class DummyConverter(threadpool.BatchConverter):
def __init__(self, **kwargs):
self.sleep_time = kwargs.pop("sleep_time")
super(DummyConverter, self).__init__(**kwargs)
self.batches = []
self.threads = []
self.results = []
def ConvertBatch(self, batch):
time.sleep(self.sleep_time)
self.batches.append(batch)
self.threads.append(threading.current_thread().ident)
self.results.extend([s + "*" for s in batch])
class BatchConverterTest(test_lib.GRRBaseTest):
"""BatchConverter tests."""
def testMultiThreadedConverter(self):
converter = DummyConverter(
threadpool_size=10,
batch_size=2,
sleep_time=0.1,
threadpool_prefix="multi_threaded")
test_data = [str(i) for i in range(10)]
converter.Convert(test_data)
self.assertLen(set(converter.threads), 5)
self.assertLen(converter.batches, 5)
for batch in converter.batches:
self.assertLen(batch, 2)
self.assertLen(converter.results, 10)
for i, r in enumerate(sorted(converter.results)):
self.assertEqual(r, str(i) + "*")
def testSingleThreadedConverter(self):
converter = DummyConverter(
threadpool_size=0,
batch_size=2,
sleep_time=0,
threadpool_prefix="single_threaded")
test_data = [str(i) for i in range(10)]
converter.Convert(test_data)
self.assertLen(set(converter.threads), 1)
self.assertLen(converter.batches, 5)
for batch in converter.batches:
self.assertLen(batch, 2)
self.assertLen(converter.results, 10)
for i, r in enumerate(sorted(converter.results)):
self.assertEqual(r, str(i) + "*")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 7,651,877,454,231,066,000 | 30.25 | 80 | 0.6608 | false |
qsantos/crpyt | digests/sha1.py | 1 | 1741 | # crpyt: toy cryptographic python library
# Copyright (C) 2014 Quentin SANTOS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# END LICENCE
from digest import Digest
# Reference: RFC 3174, FIPS PUB 180-4
class SHA1(Digest):
def __init__(self):
super(SHA1,self).__init__(64, 4, 'big')
self.H = [0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0]
def block(self, X):
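        # SHA-1 compression function: expand the 16-word block X to 80 words,
        # run 80 rounds of the round function over the working state
        # (A, B, C, D, E) using F (Ch), G (parity) and H (Maj) with the four
        # stage constants, then add the result back into self.H.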
global A, B, C, D, E
(A,B,C,D,E) = self.H
maskint = 0xffffffff
def ROT(x,n): return ((x << n) & maskint) | (x >> (32-n))
def F(X,Y,Z): return (X & Y) | ((X^maskint) & Z)
def G(X,Y,Z): return X ^ Y ^ Z
def H(X,Y,Z): return (X & Y) | (X & Z) | (Y & Z)
W = X + [0] * 64
for t in range(16,80):
W[t] = ROT(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1)
def OP(f,t,i):
global A, B, C, D, E
T = (ROT(A,5) + f(B,C,D) + E + W[t] + i) & maskint
(A,B,C,D,E) = (T, A, ROT(B,30), C, D)
for t in range( 0, 20): OP(F, t, 0x5a827999);
for t in range(20, 40): OP(G, t, 0x6ed9eba1);
for t in range(40, 60): OP(H, t, 0x8f1bbcdc);
for t in range(60, 80): OP(G, t, 0xca62c1d6);
self.H = [(oi + ni) & maskint for (oi,ni) in zip(self.H, [A,B,C,D,E])]
| gpl-3.0 | -936,268,773,884,999,400 | 33.137255 | 72 | 0.628949 | false |
ezequielpereira/Time-Line | timelinelib/wxgui/frames/helpbrowserframe/helpbrowserframe.py | 2 | 8163 | # Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import os.path
import webbrowser
import wx.html
from timelinelib.config.paths import HELP_RESOURCES_DIR
from timelinelib.config.paths import ICONS_DIR
from timelinelib.wxgui.utils import display_error_message
class HelpBrowserFrame(wx.Frame):
HOME_ID = 10
BACKWARD_ID = 20
FORWARD_ID = 30
def __init__(self, parent):
wx.Frame.__init__(self, parent, title=_("Help"),
size=(600, 650), style=wx.DEFAULT_FRAME_STYLE)
self.history = []
self.current_pos = -1
self._create_help_system()
self._create_gui()
self._update_buttons()
def _create_help_system(self):
try:
import markdown
except ImportError:
self.help_system = None
else:
import timelinelib.help.system as help
import timelinelib.help.pages as help_pages
self.help_system = help.HelpSystem(
"contents", HELP_RESOURCES_DIR + "/", "page:")
help_pages.install(self.help_system)
def show_page(self, id, type="page", change_history=True):
"""
        Show the page identified by (type, id), where the pair is one of:
* (page, page_id)
* (search, search_string)
"""
if self.help_system is None:
display_error_message(
_("Could not find markdown Python package. It is needed by the help system."),
self.GetParent())
return
if change_history:
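            # Only record a new history entry when the requested page differs
            # from the one currently shown; forward history is truncated first.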
same_page_as_last = False
if self.current_pos != -1:
_, current_id = self.history[self.current_pos]
if id == current_id:
same_page_as_last = True
if same_page_as_last is False:
self.history = self.history[:self.current_pos + 1]
self.history.append((type, id))
self.current_pos += 1
self._update_buttons()
if type == "page":
self.html_window.SetPage(self._generate_page(id))
elif type == "search":
self.html_window.SetPage(self.help_system.get_search_results_page(id))
self.Show()
self.Raise()
def _create_gui(self):
self.Bind(wx.EVT_CLOSE, self._window_on_close)
self.toolbar = self.CreateToolBar()
size = (24, 24)
if 'wxMSW' in wx.PlatformInfo:
home_bmp = wx.Bitmap(os.path.join(ICONS_DIR, "home.png"))
back_bmp = wx.Bitmap(os.path.join(ICONS_DIR, "backward.png"))
forward_bmp = wx.Bitmap(os.path.join(ICONS_DIR, "forward.png"))
else:
home_bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR,
size)
back_bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_BACK, wx.ART_TOOLBAR,
size)
forward_bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_FORWARD,
wx.ART_TOOLBAR, size)
self.toolbar.SetToolBitmapSize(size)
# Home
home_str = _("Go to home page")
self.toolbar.AddLabelTool(HelpBrowserFrame.HOME_ID, home_str,
home_bmp, shortHelp=home_str)
self.Bind(wx.EVT_TOOL, self._toolbar_on_click, id=HelpBrowserFrame.HOME_ID)
# Separator
self.toolbar.AddSeparator()
# Backward
backward_str = _("Go back one page")
self.toolbar.AddLabelTool(HelpBrowserFrame.BACKWARD_ID, backward_str,
back_bmp, shortHelp=backward_str)
self.Bind(wx.EVT_TOOL, self._toolbar_on_click,
id=HelpBrowserFrame.BACKWARD_ID)
# Forward
forward_str = _("Go forward one page")
self.toolbar.AddLabelTool(HelpBrowserFrame.FORWARD_ID, forward_str,
forward_bmp, shortHelp=forward_str)
self.Bind(wx.EVT_TOOL, self._toolbar_on_click,
id=HelpBrowserFrame.FORWARD_ID)
# Separator
self.toolbar.AddSeparator()
# Search
self.search = wx.SearchCtrl(self.toolbar, size=(150, -1),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_TEXT_ENTER, self._search_on_text_enter, self.search)
self.toolbar.AddControl(self.search)
self.toolbar.Realize()
# Html window
self.html_window = wx.html.HtmlWindow(self)
self.Bind(wx.html.EVT_HTML_LINK_CLICKED,
self._html_window_on_link_clicked, self.html_window)
self.html_window.Connect(wx.ID_ANY, wx.ID_ANY, wx.EVT_KEY_DOWN.typeId,
self._window_on_key_down)
def _window_on_close(self, e):
self.Show(False)
def _window_on_key_down(self, evt):
"""
Event handler used when a keyboard key has been pressed.
The following keys are handled:
Key Action
-------- ------------------------------------
Backspace Go to previous page
"""
keycode = evt.GetKeyCode()
if keycode == wx.WXK_BACK:
self._go_back()
evt.Skip()
def _toolbar_on_click(self, e):
if e.GetId() == HelpBrowserFrame.HOME_ID:
self._go_home()
elif e.GetId() == HelpBrowserFrame.BACKWARD_ID:
self._go_back()
elif e.GetId() == HelpBrowserFrame.FORWARD_ID:
self._go_forward()
def _search_on_text_enter(self, e):
self._search(self.search.GetValue())
def _html_window_on_link_clicked(self, e):
url = e.GetLinkInfo().GetHref()
if url.startswith("page:"):
self.show_page(url[5:])
else:
webbrowser.open(url)
def _go_home(self):
self.show_page(self.help_system.home_page)
def _go_back(self):
if self.current_pos > 0:
self.current_pos -= 1
current_type, current_id = self.history[self.current_pos]
self.show_page(current_id, type=current_type, change_history=False)
def _go_forward(self):
if self.current_pos < len(self.history) - 1:
self.current_pos += 1
current_type, current_id = self.history[self.current_pos]
self.show_page(current_id, type=current_type, change_history=False)
def _search(self, string):
self.show_page(string, type="search")
def _update_buttons(self):
history_len = len(self.history)
enable_backward = history_len > 1 and self.current_pos > 0
enable_forward = history_len > 1 and self.current_pos < history_len - 1
self.toolbar.EnableTool(HelpBrowserFrame.BACKWARD_ID, enable_backward)
self.toolbar.EnableTool(HelpBrowserFrame.FORWARD_ID, enable_forward)
def _generate_page(self, page_id):
page = self.help_system.get_page(page_id)
if page is None:
error_page_html = "<h1>%s</h1><p>%s</p>" % (
_("Page not found"),
_("Could not find page '%s'.") % page_id)
return self._wrap_in_html(error_page_html)
else:
return self._wrap_in_html(page.render_to_html())
def _wrap_in_html(self, content):
HTML_SKELETON = """
<html>
<head>
</head>
<body>
%s
</body>
</html>
"""
return HTML_SKELETON % content
| gpl-3.0 | -174,582,379,653,521,440 | 36.791667 | 94 | 0.569521 | false |
nylar/fora | threads/tests/test_views.py | 1 | 2074 | from django.core.urlresolvers import reverse
from django.test import RequestFactory
from fora.tests.base import BaseTestCase
from forums.models import Forum
from threads.models import Thread
from threads.views import NewThreadView, ShowThreadView
class NewThreadViewTestCase(BaseTestCase):
def setUp(self):
self.factory = RequestFactory()
super(NewThreadViewTestCase, self).setUp()
def test_get_new_view(self):
request = self.factory.get('/')
request.user = self.user
response = NewThreadView.as_view()(request)
response.render()
self.assertEqual(response.status_code, 200)
self.assertIn(
'<label for="id_subject">Subject:</label>',
response.content
)
self.assertIn(
'<label for="id_forum">Forum:</label>',
response.content
)
def test_post_form(self):
forum = Forum.objects.create(
name='My Forum', description='Hello World')
request = self.factory.post(reverse('threads:new'), data={
'subject': 'My thread',
'forum': forum.pk
})
request.user = self.user
response = NewThreadView.as_view()(request)
thread = Thread.objects.get(pk=1)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse(
'threads:show', kwargs={'slug': thread.slug}))
class ShowThreadViewTestCase(BaseTestCase):
def setUp(self):
self.factory = RequestFactory()
super(ShowThreadViewTestCase, self).setUp()
def test_show_view(self):
forum = Forum.objects.create(
name='Forum',
description='A forum',
active=True,
)
thread = Thread.objects.create(subject='Thread', forum=forum)
request = self.factory.get('/')
response = ShowThreadView.as_view()(request, slug=thread.slug)
response.render()
self.assertEqual(response.status_code, 200)
self.assertIn(thread.subject, response.content)
| cc0-1.0 | 2,857,833,093,021,781,500 | 29.955224 | 70 | 0.624879 | false |
matthewalbani/scipy | scipy/sparse/linalg/dsolve/linsolve.py | 8 | 12675 | from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy import asarray, empty, ravel, nonzero
from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix,
SparseEfficiencyWarning, csc_matrix)
from . import _superlu
noScikit = False
try:
import scikits.umfpack as umfpack
except ImportError:
noScikit = True
useUmfpack = not noScikit
__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
'MatrixRankWarning']
class MatrixRankWarning(UserWarning):
pass
def use_solver(**kwargs):
"""
Select default sparse direct solver to be used.
Parameters
----------
useUmfpack : bool, optional
Use UMFPACK over SuperLU. Has effect only if scikits.umfpack is
installed. Default: True
Notes
-----
The default sparse solver is umfpack when available
(scikits.umfpack is installed). This can be changed by passing
useUmfpack = False, which then causes the always present SuperLU
based solver to be used.
Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
to gain some speed.
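    Examples
    --------
    A minimal sketch; this simply switches the default to the SuperLU-based
    solver:
    >>> import scipy.sparse.linalg as sla
    >>> sla.use_solver(useUmfpack=False)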
"""
if 'useUmfpack' in kwargs:
globals()['useUmfpack'] = kwargs['useUmfpack']
#TODO: pass other options to scikit
def _get_umf_family(A):
"""Get umfpack family string given the sparse matrix dtype."""
family = {'di': 'di', 'Di': 'zi', 'dl': 'dl', 'Dl': 'zl'}
dt = A.dtype.char + A.indices.dtype.char
return family[dt]
def spsolve(A, b, permc_spec=None, use_umfpack=True):
"""Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse matrix
The matrix or vector representing the right hand side of the equation.
If a vector, b.shape must be (n,) or (n, 1).
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
use_umfpack : bool, optional
if True (default) then use umfpack for the solution. This is
only referenced if b is a vector and ``scikit-umfpack`` is installed.
Returns
-------
x : ndarray or sparse matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[1]
If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])
Notes
-----
For solving the matrix expression AX = B, this solver assumes the resulting
matrix X is sparse, as is often the case for very sparse inputs. If the
resulting X is dense, the construction of this sparse result will be
relatively expensive. In that case, consider converting A to a dense
matrix and using scipy.linalg.solve or its variants.
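    Examples
    --------
    A small illustrative system (the values are chosen arbitrarily):
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import spsolve
    >>> A = csc_matrix([[3., 2., 0.], [1., -1., 0.], [0., 5., 1.]])
    >>> b = np.array([2., 4., -1.])
    >>> x = spsolve(A, b)
    >>> np.allclose(A.dot(x), b)
    True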
"""
if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
A = csc_matrix(A)
warn('spsolve requires A be CSC or CSR matrix format',
SparseEfficiencyWarning)
# b is a vector only if b have shape (n,) or (n, 1)
b_is_sparse = isspmatrix(b)
if not b_is_sparse:
b = asarray(b)
b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
# validate input shapes
M, N = A.shape
if (M != N):
raise ValueError("matrix must be square (has shape %s)" % ((M, N),))
if M != b.shape[0]:
raise ValueError("matrix - rhs dimension mismatch (%s - %s)"
% (A.shape, b.shape[0]))
use_umfpack = use_umfpack and useUmfpack
if b_is_vector and use_umfpack:
if b_is_sparse:
b_vec = b.toarray()
else:
b_vec = b
b_vec = asarray(b_vec, dtype=A.dtype).ravel()
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
umf = umfpack.UmfpackContext(_get_umf_family(A))
x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
autoTranspose=True)
else:
if b_is_vector and b_is_sparse:
b = b.toarray()
b_is_sparse = False
if not b_is_sparse:
if isspmatrix_csc(A):
flag = 1 # CSC format
else:
flag = 0 # CSR format
options = dict(ColPerm=permc_spec)
x, info = _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr,
b, flag, options=options)
if info != 0:
warn("Matrix is exactly singular", MatrixRankWarning)
x.fill(np.nan)
if b_is_vector:
x = x.ravel()
else:
# b is sparse
Afactsolve = factorized(A)
if not isspmatrix_csc(b):
warn('spsolve is more efficient when sparse b '
'is in the CSC matrix format', SparseEfficiencyWarning)
b = csc_matrix(b)
# Create a sparse output matrix by repeatedly applying
# the sparse factorization to solve columns of b.
data_segs = []
row_segs = []
col_segs = []
for j in range(b.shape[1]):
bj = b[:, j].A.ravel()
xj = Afactsolve(bj)
w = np.flatnonzero(xj)
segment_length = w.shape[0]
row_segs.append(w)
col_segs.append(np.ones(segment_length, dtype=int)*j)
data_segs.append(np.asarray(xj[w], dtype=A.dtype))
sparse_data = np.concatenate(data_segs)
sparse_row = np.concatenate(row_segs)
sparse_col = np.concatenate(col_segs)
x = A.__class__((sparse_data, (sparse_row, sparse_col)),
shape=b.shape, dtype=A.dtype)
return x
def splu(A, permc_spec=None, diag_pivot_thresh=None,
drop_tol=None, relax=None, panel_size=None, options=dict()):
"""
Compute the LU decomposition of a sparse, square matrix.
Parameters
----------
A : sparse matrix
Sparse matrix to factorize. Should be in CSR or CSC format.
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
diag_pivot_thresh : float, optional
Threshold used for a diagonal entry to be an acceptable pivot.
See SuperLU user's guide for details [1]_
drop_tol : float, optional
(deprecated) No effect.
relax : int, optional
Expert option for customizing the degree of relaxing supernodes.
See SuperLU user's guide for details [1]_
panel_size : int, optional
Expert option for customizing the panel size.
See SuperLU user's guide for details [1]_
options : dict, optional
Dictionary containing additional expert options to SuperLU.
See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
for more details. For example, you can specify
``options=dict(Equil=False, IterRefine='SINGLE'))``
to turn equilibration off and perform a single iterative refinement.
Returns
-------
invA : scipy.sparse.linalg.SuperLU
Object, which has a ``solve`` method.
See also
--------
spilu : incomplete LU decomposition
Notes
-----
This function uses the SuperLU library.
References
----------
.. [1] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
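    Examples
    --------
    A minimal sketch of factorizing once and reusing the factors:
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import splu
    >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]])
    >>> lu = splu(A)
    >>> x = lu.solve(np.array([1., 2., 3.]))
    >>> np.allclose(A.dot(x), np.array([1., 2., 3.]))
    True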
"""
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
_options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
ilu=False, options=_options)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
"""
Compute an incomplete LU decomposition for a sparse, square matrix.
The resulting object is an approximation to the inverse of `A`.
Parameters
----------
A : (N, N) array_like
Sparse matrix to factorize
drop_tol : float, optional
Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
(default: 1e-4)
fill_factor : float, optional
Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
drop_rule : str, optional
Comma-separated string of drop rules to use.
Available rules: ``basic``, ``prows``, ``column``, ``area``,
``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
See SuperLU documentation for details.
Remaining other options
Same as for `splu`
Returns
-------
invA_approx : scipy.sparse.linalg.SuperLU
Object, which has a ``solve`` method.
See also
--------
splu : complete LU decomposition
Notes
-----
    To get a better approximation to the inverse, you may need to
increase `fill_factor` AND decrease `drop_tol`.
This function uses the SuperLU library.
"""
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
_options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
ILU_FillFactor=fill_factor,
DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
ilu=True, options=_options)
def factorized(A):
"""
Return a function for solving a sparse linear system, with A pre-factorized.
Parameters
----------
A : (N, N) array_like
Input.
Returns
-------
solve : callable
To solve the linear system of equations given in `A`, the `solve`
callable should be passed an ndarray of shape (N,).
Examples
--------
>>> from scipy.sparse.linalg import factorized
>>> A = np.array([[ 3. , 2. , -1. ],
... [ 2. , -2. , 4. ],
... [-1. , 0.5, -1. ]])
>>> solve = factorized(A) # Makes LU decomposition.
>>> rhs1 = np.array([1, -2, 0])
>>> solve(rhs1) # Uses the LU factors.
array([ 1., -2., -2.])
"""
if useUmfpack:
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
umf = umfpack.UmfpackContext(_get_umf_family(A))
# Make LU decomposition.
umf.numeric(A)
def solve(b):
return umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
return solve
else:
return splu(A).solve
| bsd-3-clause | 2,024,622,048,438,784,300 | 32.355263 | 80 | 0.591479 | false |
esteinig/netviewP | program/win/0.7.1/netview.py | 1 | 37628 | #!/usr/bin/env python
# NetView P v.0.7.1 - Windows
# Dependencies: PLINK
# Eike Steinig
# Zenger Lab, JCU
# https://github.com/esteinig/netview
import os
import time
import json
import shutil
import argparse
import subprocess
import numpy as np
import multiprocessing as mp
import scipy.sparse.csgraph as csg
import scipy.spatial.distance as sd
from sklearn.neighbors import NearestNeighbors
def main():
commands = CommandLine()
dat = Data()
dat.project = commands.arg_dict['project']
dat.prefix = commands.arg_dict['prefix']
dat.ploidy = commands.arg_dict['ploidy']
dat.missing = commands.arg_dict['missing']
if commands.arg_dict['visual']:
print('\nGenerated node attribute files only.\n')
dat.readData(commands.arg_dict['attribute_file'], f='attributes', sep=',')
dat.writeData(f='attributes')
makeProject(commands.arg_dict['project'] + '_attributes', commands.arg_dict['prefix'])
exit(1)
print()
print(get_time() + "\t" + "---------------------------------")
print(get_time() + "\t" + " NETVIEW P v.0.7.1 ")
print(get_time() + "\t" + "---------------------------------")
print(get_time() + "\t" + "File =", commands.arg_dict['data_file'].upper())
if commands.arg_dict['plink']:
dat.filetype = 'plink'
dat.readData(commands.arg_dict['data_file'], f='plink', sep=commands.arg_dict['sep'])
elif commands.arg_dict['snps']:
dat.filetype = 'snps'
dat.readData(commands.arg_dict['data_file'], f='snp_mat', sep=commands.arg_dict['sep'])
elif commands.arg_dict['nexus']:
dat.filetype = 'nexus'
dat.readData(commands.arg_dict['data_file'], f='nexus', sep=commands.arg_dict['sep'])
elif commands.arg_dict['raxml']:
dat.filetype = 'raxml'
dat.readData(commands.arg_dict['data_file'], f='raxml', sep=commands.arg_dict['sep'])
else:
dat.filetype = 'dist'
dat.readData(commands.arg_dict['data_file'], f='matrix', sep=commands.arg_dict['sep'])
dat.readData(commands.arg_dict['attribute_file'], f='attributes', sep=',')
for stored in dat.meta_data.values():
if len(stored) != dat.n:
print('\nError. N in Data != N in Attribute File.')
exit(1)
if dat.ploidy == 'diploid':
nsnp = dat.nSNP//2
else:
nsnp = dat.nSNP
print(get_time() + "\t" + "N =", str(dat.n).upper())
print(get_time() + "\t" + "SNPs =", str(nsnp).upper())
print(get_time() + "\t" + "Ploidy =", dat.ploidy.upper())
print(get_time() + "\t" + "---------------------------------")
print(get_time() + "\t" + "Quality Control =", str(commands.arg_dict['qc']).upper())
pipeline = Analysis(dat)
if commands.arg_dict['qc'] and pipeline.data.filetype != 'dist':
qc_params = {'--mind': commands.arg_dict['mind'],
'--geno': commands.arg_dict['geno'],
'--maf': commands.arg_dict['maf'],
'--hwe': commands.arg_dict['hwe']}
pipeline.runPLINK(qc_parameters=qc_params, quality=True)
pipeline.updateNodeAttributes(commands.arg_dict['attribute_file'])
if commands.arg_dict['mat'] and pipeline.data.filetype != 'dist':
pipeline.getDistance(distance=commands.arg_dict['distance'])
pipeline.data.writeData(file=commands.arg_dict['prefix'] + '_mat.dist', f='matrix')
makeProject(commands.arg_dict['project'] + '_dist', commands.arg_dict['prefix'])
print(get_time() + "\t" + "---------------------------------\n")
exit(1)
elif commands.arg_dict['mat'] and pipeline.data.filetype == 'dist':
print('\nError. Input is already a Distance Matrix.\n')
exit(1)
if not commands.arg_dict['off']:
if pipeline.data.filetype != 'dist':
pipeline.getDistance(distance=commands.arg_dict['distance'])
pipeline.runNetView(tree=commands.arg_dict['tree'], start=commands.arg_dict['start'],
stop=commands.arg_dict['stop'], step=commands.arg_dict['step'],
algorithm=commands.arg_dict['algorithm'], edges=commands.arg_dict['edges'],
html=commands.arg_dict['web'])
pipeline.data.writeData(f='attributes')
makeProject(commands.arg_dict['project'], commands.arg_dict['prefix'])
print(get_time() + "\t" + "---------------------------------\n")
def makeProject(project, prefix):
cwd = os.getcwd()
project_path = os.path.realpath(os.path.join(os.getcwd(), project))
plink_path = os.path.realpath(os.path.join(project_path, 'plink'))
network_path = os.path.realpath(os.path.join(project_path, 'networks'))
other_path = os.path.realpath(os.path.join(project_path, 'other'))
node_path = os.path.realpath(os.path.join(project_path, 'nodes'))
d3_path = os.path.realpath(os.path.join(project_path, 'd3'))
if os.path.exists(project_path):
shutil.rmtree(project_path)
architecture = [project_path, plink_path, network_path, other_path, node_path, d3_path]
for directory in architecture:
try:
os.makedirs(directory)
except OSError:
if not os.path.isdir(directory):
raise
for name in os.listdir(cwd):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
if name.endswith('.edges'):
shutil.move(pathname, network_path)
elif name.endswith('.dist'):
shutil.move(pathname, other_path)
elif name.endswith('.nat'):
shutil.move(pathname, node_path)
elif name.startswith(prefix + '_plink'):
shutil.move(pathname, plink_path)
elif name.endswith('_qc.csv'):
shutil.move(pathname, other_path)
elif name.endswith('.json') or name.endswith('.html'):
shutil.move(pathname, d3_path)
elif name.startswith(prefix + '_plink_in'):
os.remove(pathname)
#### Functions for Multiprocessing ####
def netview(matrix, k, mst, algorithm, tree):
nbrs = NearestNeighbors(n_neighbors=k+1, algorithm=algorithm).fit(matrix)
adj_knn = nbrs.kneighbors_graph(matrix).toarray()
np.fill_diagonal(adj_knn, 0)
adj_mknn = (adj_knn == adj_knn.T) * adj_knn
if tree:
adj = mst + adj_mknn
else:
adj = adj_mknn
adjacency = np.tril(adj)
if tree:
mst_edges = np.argwhere(adjacency < 1)
else:
mst_edges = np.array([])
adjacency[adjacency > 0] = 1.
edges = np.argwhere(adjacency != 0)
weights = matrix[edges[:, 0], edges[:, 1]]
return [k, edges, weights, adjacency, mst_edges]
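# Illustrative sketch only (not called by the pipeline): how netview() above builds
# the mutual k-nearest-neighbour graph and overlays it on the minimum spanning tree.
# The 3 x 3 distance matrix is hypothetical and, as in netview(), its rows are treated
# as feature vectors; only modules already imported at the top (numpy,
# scipy.sparse.csgraph, scikit-learn) are assumed.
def _mutual_knn_sketch(k=1):
    dist = np.array([[0.0, 1.0, 4.0],
                     [1.0, 0.0, 2.0],
                     [4.0, 2.0, 0.0]])
    nbrs = NearestNeighbors(n_neighbors=k+1, algorithm='brute').fit(dist)
    adj_knn = nbrs.kneighbors_graph(dist).toarray()
    np.fill_diagonal(adj_knn, 0)                    # drop self-edges
    adj_mknn = (adj_knn == adj_knn.T) * adj_knn     # keep edges found in both directions
    mst = csg.minimum_spanning_tree(dist).toarray()
    return adj_mknn + mst + mst.T                   # mutual kNN graph united with the MST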
def netview_callback(k):
print(get_time() + "\t" + ' k=' + str(k[0]))
def get_time():
return time.strftime("[%H:%M:%S]")
#### Command Line Module ####
class CommandLine:
def __init__(self):
self.parser = argparse.ArgumentParser(description='NetView P v0.7.1', add_help=True)
self.setParser()
self.args = self.parser.parse_args()
self.arg_dict = vars(self.args)
def setParser(self):
data_type = self.parser.add_mutually_exclusive_group(required=True)
# Required Options
self.parser.add_argument('-f', dest='data_file', required=True, type=str,
help="Name of Data File")
data_type.add_argument('-p', dest='plink', action='store_true',
help="PLINK format (.ped/.map)")
data_type.add_argument('-s', dest='snps', action='store_true',
help="SNP matrix (N x SNPs)")
data_type.add_argument('-m', dest='dist', action='store_true',
help="Distance matrix (N x N)")
data_type.add_argument('-n', dest='nexus', action='store_true',
help="Nexus format from SPANDx")
data_type.add_argument('-r', dest='raxml', action='store_true',
help="RAxML format from SPANDx")
self.parser.add_argument('-a', dest='attribute_file', default='', type=str, required=True,
help="Node attribute file (.csv)")
# MAIN Options
self.parser.add_argument('--quality', dest='qc', action='store_true', default=False,
help="Quality control in PLINK (OFF)")
self.parser.add_argument('--distance', dest='distance', default='asd', type=str,
help="Distance measure for SNPs: hamming, asd, correlation... (asd)")
self.parser.add_argument('--algorithm', dest='algorithm', default='brute', type=str,
help="Algorithm for NN: auto, ball_tree, kd_tree, brute (brute)")
self.parser.add_argument('--mst-off', dest='tree', action='store_false', default=True,
help="Disable minimum spanning tree (OFF)")
self.parser.add_argument('--ploidy', dest='ploidy', default='diploid', type=str,
                                 help="Set ploidy: haploid, diploid (diploid)")
self.parser.add_argument('--missing', dest='missing', default='0', type=str,
help="Set missing character (0)")
self.parser.add_argument('--prefix', dest='prefix', default='project', type=str,
help="Set prefix (project)")
self.parser.add_argument('--project', dest='project', default=time.strftime("%d-%m-%Y_%H-%M-%S"), type=str,
help="Output project name (timestamp)")
self.parser.add_argument('--sep', dest='sep', default='\t', type=str,
help="Delimiter for data file (\\t).")
self.parser.add_argument('--html', dest='web', action='store_true', default=True,
help="Generate D3/JSON graphs (ON)")
self.parser.add_argument('--edges', dest='edges', action='store_false', default=True,
help="Generate graphs as edge files (ON)")
# PARAMETER Options
self.parser.add_argument('--mind', dest='mind', default=0.1, type=float,
help="Filter samples > missing rate (0.1)")
self.parser.add_argument('--geno', dest='geno', default=0.1, type=float,
help="Filter SNPs > missing rate (0.1)")
self.parser.add_argument('--maf', dest='maf', default=0.01, type=float,
help="Filter SNPs < minor allele frequency (0.01)")
self.parser.add_argument('--hwe', dest='hwe', default=0.001, type=float,
help="Filter SNPs failing HWE test at P < (0.001)")
self.parser.add_argument('--start', dest='start', default=10, type=int,
help="Start at k = (10)")
self.parser.add_argument('--stop', dest='stop', default=40, type=int,
help="Stop at k = (40)")
self.parser.add_argument('--step', dest='step', default=10, type=int,
help="Step by k = (10)")
# PIPELINE Options
self.parser.add_argument('--visual', dest='visual', action='store_true', default=False,
help="Node attributes ONLY (OFF)")
self.parser.add_argument('--off', dest='off', action='store_true', default=False,
help="Switch off NetView and run only QC (OFF).")
self.parser.add_argument('--matrix', dest='mat', action='store_true', default=False,
help="Generate distance matrix ONLY (OFF).")
#### Data Module ####
class Data:
### DATA ATTRIBUTES ###
def __init__(self):
self.project = "project"
self.prefix = "prefix"
self.ploidy = 'diploid'
self.missing = "0"
self.n = 0
self.nSNP = 0
self.ids = [] # IDs
self.alleles = []
self.snps = np.array([]) # Array (N x SNPs)
self.biodata = [] # List/Alignment of BioPython SeqRecords
self.meta_data = {}
self.snp_data = {}
self.matrices = {}
self.networks = {}
self.matrix = np.array([]) # Current Matrix
self.netview_runs = 0
self.filetype = ''
### DATA READER ###
def readData(self, file, f, sep="\t", header=False, add_col=0):
def _read_nexus(file, sep=sep):
snp_position = []
snps = []
matrix = False
for line in file:
content = line.strip().split(sep)
if matrix:
if ";" in line:
break
snp_position.append(content[0])
snps.append(content[1:])
else:
if "dimensions" in line:
self.n = int(content[1].split("=")[1])
self.nSNP = int(content[2].split("=")[1][:-1])
elif "taxlabels" in line:
self.ids = content[1:]
elif "matrix" in line:
matrix = True
self.snps = np.array([list(i) for i in zip(*snps)]) # ordered by N
self.snp_data['snp_id'] = [''.join(p.split("_")[:-1]) for p in snp_position]
self.snp_data['snp_position'] = [p.split("_")[-1] for p in snp_position]
self.filetype = 'nexus'
def _read_raxml(file, sep=sep):
header = []
ids = []
snps = []
for line in file:
content = line.strip().split(sep)
if header:
ids.append(content[0])
snps.append(content[1])
else:
header = content
self.n = int(header[0])
self.nSNP = int(header[1])
            snps = [[letter for letter in snp] for snp in snps]
self.ids = ids
self.snps = np.array(snps)
self.filetype = 'raxml'
def _read_plink(file, filename, sep=sep):
map_name = filename.split(".")[0] + ".map"
map_file = open(map_name)
ids = []
meta = []
snps = []
for line in file:
content = line.strip().split(sep)
ids.append(content[1])
snps.append(content[6:])
meta.append(content[:6])
self.ids = ids
self.snps = np.array(snps)
self.nSNP = len(self.snps[0])
self.n = len(self.ids)
self.meta_data["pop"] = [i[0] for i in meta]
self.meta_data["dam"] = [i[2] for i in meta]
self.meta_data["sire"] = [i[3] for i in meta]
self.meta_data["sex"] = [i[4] for i in meta]
self.meta_data["phenotype"] = [i[5] for i in meta]
map_content = [line.strip().split() for line in map_file]
map_content = list(zip(*map_content))
self.snp_data['snp_chromosome'] = list(map_content[0])
self.snp_data['snp_id'] = list(map_content[1])
self.snp_data['snp_genetic_distance'] = list(map_content[2])
self.snp_data['snp_position'] = list(map_content[3])
map_file.close()
self.filetype = 'plink'
def _read_matrix(file, header=header, add_col=add_col, sep=sep):
content = [line.strip().split(sep)[add_col:] for line in file]
if header:
content = content[1:]
matrix = np.array([list(map(float, ind)) for ind in content])
self.matrix = matrix
self.n = len(matrix[0])
self.matrices['input'] = matrix
return matrix
def _read_snp_mat(file, sep):
matrix = np.array([line.strip().split(sep) for line in file])
self.snps = matrix
self.n = len(matrix[:, 1])
self.nSNP = len(matrix[1, :])
if self.ploidy == 'diploid':
self.snp_data['snp_id'] = [str(i) for i in range(self.nSNP//2)]
else:
self.snp_data['snp_id'] = [str(i) for i in range(self.nSNP)]
def _read_attributes(file):
content = [line.strip().split(',') for line in file]
head = content[0]
content = list(zip(*content[1:]))
for i in range(len(head)):
self.meta_data[head[i]] = content[i]
self.ids = list(content[0])
## Main Read ##
infile = open(file)
f = f.lower()
if f == "nexus":
_read_nexus(infile, sep)
elif f =="raxml":
_read_raxml(infile, sep)
elif f == "plink":
_read_plink(infile, file, sep)
elif f == "matrix":
matrix = _read_matrix(infile, header, add_col, sep)
elif f == 'snp_mat':
_read_snp_mat(infile, sep)
elif f == 'attributes':
_read_attributes(infile)
else:
print("File format not supported.")
raise IOError
infile.close()
if f != 'attributes':
alleles = np.unique(self.snps).tolist()
if self.missing in alleles:
alleles.remove(self.missing)
self.alleles = alleles
if f == 'matrix':
return matrix
### DATA WRITER ###
def writeData(self, f, file='data.out', sep="\t"):
def _write_raxml(outfile, sep):
outfile.write(str(self.n) + sep + str(self.nSNP) + "\n")
for i in range(self.n):
outfile.write(self.ids[i] + sep + ''.join(self.snps[i]) + "\n")
def _write_nexus(outfile, sep):
taxlabels = " ".join(self.ids)
header = '#nexus\nbegin data;\ndimensions ntax=' + str(self.n) + ' nchar=' + str(self.nSNP) + \
';\nformat symbols="AGCT" gap=. datatype=nucleotide;\ntaxlabels ' + taxlabels + ';\nmatrix\n'
tail = ";\nend;"
snps = list(zip(*self.snps))
outfile.write(header)
for i in range(self.nSNP):
if 'snp_chromosome' in self.snp_data.keys():
outfile.write(self.snp_data['snp_chromosome'][i] + "_")
else:
outfile.write(sep)
if 'snp_id' in self.snp_data.keys():
outfile.write(self.snp_data['snp_id'][i] + sep)
else:
outfile.write("SNP" + str(i) + sep)
outfile.write(sep.join(snps[i]) + "\n")
outfile.write(tail)
def _write_plink(outfile, filename, sep):
mapname = filename.split('.')[0] + ".map"
for i in range(self.n):
if 'pop' in self.meta_data.keys():
outfile.write(self.meta_data['pop'][i] + sep)
else:
outfile.write("NA" + sep)
if self.ids:
outfile.write(self.ids[i] + sep)
else:
outfile.write("N" + str(i+1) + sep)
if 'dam' in self.meta_data.keys():
outfile.write(self.meta_data['dam'][i] + sep)
else:
outfile.write("0" + sep)
if 'sire' in self.meta_data.keys():
outfile.write(self.meta_data['sire'][i] + sep)
else:
outfile.write("0" + sep)
if 'sex' in self.meta_data.keys():
outfile.write(self.meta_data['sex'][i] + sep)
else:
outfile.write("0" + sep)
if 'phenotype' in self.meta_data.keys():
outfile.write(self.meta_data['phenotype'][i] + sep)
else:
outfile.write("0" + sep)
outfile.write(sep.join(self.snps[i]) + "\n")
map_file = open(mapname, "w")
if 'snp_id' in self.snp_data:
for i in range(len(self.snp_data['snp_id'])):
if 'snp_chromosome' in self.snp_data.keys():
map_file.write(self.snp_data['snp_chromosome'][i] + sep)
else:
map_file.write("0" + sep)
if 'snp_id' in self.snp_data.keys():
map_file.write(self.snp_data['snp_id'][i] + sep)
else:
map_file.write("SNP" + str(i+1) + sep)
if 'snp_genetic_distance' in self.snp_data.keys():
map_file.write(self.snp_data['snp_genetic_distance'][i] + sep)
else:
map_file.write("0" + sep)
if 'snp_position' in self.snp_data.keys():
map_file.write(self.snp_data['snp_position'][i] + sep + "\n")
else:
map_file.write("0" + sep + "\n")
map_file.close()
def _write_metadata(outfile, sep):
outfile.write("#" + sep + "n=" + str(self.n) + sep + "nSNP=" +
str(self.nSNP) + sep + "(" + self.ploidy + ")\n")
ordered_keys = sorted([key for key in self.meta_data.keys()])
outfile.write("Isolate")
for key in ordered_keys:
outfile.write(sep + key)
outfile.write("\n")
for i in range(self.n):
if self.ids:
outfile.write(self.ids[i])
else:
                    outfile.write("N" + str(i+1))
for key in ordered_keys:
outfile.write(sep + self.meta_data[key][i])
outfile.write("\n")
def _write_snpdata(outfile, sep):
outfile.write("#" + sep + "n=" + str(self.n) + sep + "nSNP=" +
str(self.nSNP) + sep + "(" + self.ploidy + ")\n")
snp_data = dict(self.snp_data)
ordered_keys = sorted([key for key in snp_data.keys()])
outfile.write("SNP" + sep)
for key in ordered_keys:
outfile.write(sep + key)
outfile.write("\n")
for i in range(self.nSNP):
outfile.write("SNP_" + str(i))
for key in ordered_keys:
outfile.write(sep + snp_data[key][i])
outfile.write("\n")
def _write_attributes():
for key, value in self.meta_data.items():
outname = self.prefix + '_' + key + '.nat'
out = open(outname, 'w')
out.write('ID\t' + self.prefix + '_' + key + '\n')
for i in range(len(value)):
out.write(self.ids[i] + '\t' + value[i] + '\n')
out.close()
def _write_graph_json():
col_dict = {'dimgray': '#696969', 'olive': '#808000', 'burlywood': '#deb887', 'darkgreen': '#006400',
'navy': '#000080', 'white': '#ffffff', 'violet': '#ee82ee', 'darkblue': '#00008b',
'steelblue': '#4682b4', 'deepskyblue': '#00bfff', 'tan': '#d2b48c', 'rebeccapurple': '#663399',
'honeydew': '#f0fff0', 'slategray': '#708090', 'powderblue': '#b0e0e6', 'palevioletred': '#db7093',
'chocolate': '#d2691e', 'coral': '#ff7f50', 'azure': '#f0ffff', 'peru': '#cd853f',
'springgreen': '#00ff7f', 'darkorange': '#ff8c00', 'mediumvioletred': '#c71585',
'mediumaquamarine': '#66cdaa', 'darkmagenta': '#8b008b', 'mediumslateblue': '#7b68ee',
'mediumseagreen': '#3cb371', 'crimson': '#dc143c', 'gainsboro': '#dcdcdc', 'darkgray': '#a9a9a9',
'plum': '#dda0dd', 'forestgreen': '#228b22', 'seagreen': '#2e8b57', 'teal': '#008080',
'gold': '#ffd700', 'dodgerblue': '#1e90ff', 'lightpink': '#ffb6c1', 'papayawhip': '#ffefd5',
'orchid': '#da70d6', 'black': '#000000', 'cornflowerblue': '#6495ed', 'lightyellow': '#ffffe0',
'goldenrod': '#daa520', 'purple': '#800080', 'khaki': '#f0e68c', 'aquamarine': '#7fffd4',
'lightskyblue': '#87cefa', 'fuchsia': '#ff00ff', 'mediumblue': '#0000cd', 'sandybrown': '#f4a460',
'moccasin': '#ffe4b5', 'darkslategray': '#2f4f4f', 'cornsilk': '#fff8dc', 'lightcyan': '#e0ffff',
'darkolivegreen': '#556b2f', 'silver': '#c0c0c0', 'lightgoldenrodyellow': '#fafad2',
'navajowhite': '#ffdead', 'turquoise': '#40e0d0', 'rosybrown': '#bc8f8f', 'antiquewhite': '#faebd7',
'thistle': '#d8bfd8', 'lightcoral': '#f08080', 'floralwhite': '#fffaf0', 'indianred': '#cd5c5c',
'ghostwhite': '#f8f8ff', 'blue': '#0000ff', 'snow': '#fffafa', 'orangered': '#ff4500',
'darkred': '#8b0000', 'greenyellow': '#adff2f', 'ivory': '#fffff0', 'mediumorchid': '#ba55d3',
'lawngreen': '#7cfc00', 'lightsalmon': '#ffa07a', 'lightgray': '#d3d3d3',
'lightslategray': '#778899', 'mediumpurple': '#9370db', 'darkcyan': '#008b8b', 'tomato': '#ff6347',
'lightsteelblue': '#b0c4de', 'darkseagreen': '#8fbc8f', 'aqua': '#00ffff', 'olivedrab': '#6b8e23',
'darkgoldenrod': '#b8860b', 'darkorchid': '#9932cc', 'seashell': '#fff5ee', 'skyblue': '#87ceeb',
'blanchedalmond': '#ffebcd', 'beige': '#f5f5dc', 'darkturquoise': '#00ced1', 'slateblue': '#6a5acd',
'red': '#ff0000', 'lavender': '#e6e6fa', 'hotpink': '#ff69b4', 'yellowgreen': '#9acd32',
'cyan': '#00ffff', 'firebrick': '#b22222', 'lemonchiffon': '#fffacd', 'darksalmon': '#e9967a',
'sienna': '#a0522d', 'mediumturquoise': '#48d1cc', 'salmon': '#fa8072', 'green': '#008000',
'lightgreen': '#90ee90', 'deeppink': '#ff1493', 'palegoldenrod': '#eee8aa', 'orange': '#ffa500',
'wheat': '#f5deb3', 'lime': '#00ff00', 'lavenderblush': '#fff0f5', 'brown': '#a52a2a',
'blueviolet': '#8a2be2', 'magenta': '#ff00ff', 'lightseagreen': '#20b2aa', 'mistyrose': '#ffe4e1',
'saddlebrown': '#8b4513', 'midnightblue': '#191970', 'mediumspringgreen': '#00fa9a',
'cadetblue': '#5f9ea0', 'paleturquoise': '#afeeee', 'palegreen': '#98fb98', 'pink': '#ffc0cb',
'darkkhaki': '#bdb76b', 'oldlace': '#fdf5e6', 'whitesmoke': '#f5f5f5', 'royalblue': '#4169e1',
'gray': '#808080', 'lightblue': '#add8e6', 'maroon': '#800000', 'peachpuff': '#ffdab9',
'darkslateblue': '#483d8b', 'linen': '#faf0e6', 'limegreen': '#32cd32',
'mintcream': '#f5fffa', 'chartreuse': '#7fff00', 'yellow': '#ffff00', 'indigo': '#4b0082',
'bisque': '#ffe4c4', 'aliceblue': '#f0f8ff', 'darkviolet': '#9400d3'}
            if not self.networks:
print('No networks to write to JSON.')
templ_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
templ_file = open(os.path.join(templ_path, 'fd_network.html'))
templ_str = templ_file.read()
templ_file.close()
for network, properties in self.networks.items():
json_name = self.prefix + '_' + network + '.json'
html_name = self.prefix + '_' + network + '.html'
edges = properties[1]
weights = properties[2]
node_array = []
for i in range(len(self.ids)):
if 'lat' in self.meta_data.keys() and 'lon' in self.meta_data.keys():
node_array.append({'name': self.ids[i], 'group': self.meta_data['pop'][i], 'color':
col_dict[self.meta_data['col'][i]], 'lon': self.meta_data['lon'][i],
'lat': self.meta_data['lat'][i]})
else:
node_array.append({'name': self.ids[i], 'group': self.meta_data['pop'][i], 'color':
col_dict[self.meta_data['col'][i]]})
edge_array = []
for i in range(len(edges)):
if 'lat' in self.meta_data.keys() and 'lon' in self.meta_data.keys():
edge_array.append({'source': int(edges[i, 0]), 'target': int(edges[i, 1]), 'value':
float(weights[i]), 'slon': self.meta_data['lon'][edges[i, 0]], 'slat':
self.meta_data['lat'][edges[i, 0]], 'tlon': self.meta_data['lon'][edges[i, 1]],
'tlat': self.meta_data['lat'][edges[i, 1]]})
else:
edge_array.append({'source': int(edges[i, 0]), 'target': int(edges[i, 1]), 'value':
float(weights[i])})
json_file = open(json_name, 'w')
json_file.write(json.dumps({'nodes': node_array, 'links': edge_array, }, sort_keys=True, indent=2))
json_file.close()
if self.ploidy == 'diploid':
nsnps = self.nSNP//2
else:
nsnps = self.nSNP
html_file = open(html_name, 'w')
html = templ_str.replace('template.json', '"' + json_name + '"')
html = html.replace('temp_n', str(self.n))
html = html.replace('temp_snp', str(nsnps))
html = html.replace('temp_k', str(properties[0]))
html = html.replace('temp_project', str(self.project))
html_file.write(html)
html_file.close()
def _write_graph_edges():
            if not self.networks:
                print(get_time() + '\t' + 'Warning: No networks to write to edge files.')
for network, properties in self.networks.items():
filename = network + '.edges'
edges = properties[1].tolist()
weights = properties[2].tolist()
mst_edges = properties[4].tolist()
out = open(filename, 'w')
out.write('Source\tTarget\tDistance\tMST\n')
for i in range(len(edges)):
out.write(str(self.ids[edges[i][0]]) + "\t" + str(self.ids[edges[i][1]]) +
"\t" + str(weights[i]))
if len(mst_edges) > 0:
if edges[i] in mst_edges:
out.write('\t' + 'red\n')
else:
out.write('\t' + 'grey\n')
else:
out.write("\n")
if len(mst_edges) == 0:
singletons = np.setdiff1d(np.arange(self.n), properties[1].flatten()).tolist()
if singletons:
for node in singletons:
out.write(str(self.ids[node]) + '\n')
out.close()
## Main Write ##
if f == 'attributes':
_write_attributes()
else:
filename = file
outfile = open(filename, "w")
f = f.lower()
if f == "nexus":
_write_nexus(outfile, sep)
elif f =="raxml":
_write_raxml(outfile, sep)
elif f == "plink":
_write_plink(outfile, file, sep)
elif f == "matrix":
np.savetxt(filename, self.matrix, fmt='%.9f', delimiter=sep)
elif f == "meta":
_write_metadata(outfile, sep)
elif f == "snp":
_write_snpdata(outfile, sep)
elif f == "json":
_write_graph_json()
elif f == 'edges':
_write_graph_edges()
else:
raise IOError("File format not supported.")
outfile.close()
def __str__(self):
return ('-----------\nNumber of Individuals: %i\nNumber of SNPs: %i\nPloidy: %s\n-----------\n') % \
(self.n, self.nSNP, self.ploidy)
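# Illustrative sketch of the comma-separated node attribute table consumed by
# Data.readData(f='attributes'): the first column holds sample IDs and every header
# name becomes a key in meta_data. The column names and values are hypothetical;
# only the standard library is used.
def _attribute_table_sketch():
    import io
    text = "ID,pop,col\nN1,PopA,red\nN2,PopB,blue\n"
    rows = [line.strip().split(',') for line in io.StringIO(text)]
    head, columns = rows[0], list(zip(*rows[1:]))
    meta_data = {head[i]: columns[i] for i in range(len(head))}
    ids = list(columns[0])                          # ['N1', 'N2']
    return ids, meta_data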
#### Analysis Module ####
class Analysis:
def __init__(self, data):
self.data = data
def getDistance(self, target='snps', distance='hamming'):
print(get_time() + "\t" + 'Distance = ' + distance.upper())
if self.data.filetype == 'dist':
target = 'matrix'
if target == 'matrix':
matrix = np.array(self.data.matrix)
else:
# Convert alleles to numbers (e.g. A -> 1, B -> 2) for use in scipy.spatial.distance.pdist()
allele_codes = {}
for i in range(len(self.data.alleles)):
allele_codes[self.data.alleles[i]] = int(i+1)
            allele_codes[self.data.missing] = 0  # missing must not share a code with the alleles (1..i)
snps = self.data.snps
for a, code in allele_codes.items():
snps[snps == a] = code
matrix = snps
if distance == 'asd':
self.runPLINK(asd=True)
self.data.readData(file=self.data.prefix + '_plink.mdist', f='matrix', sep=' ')
else:
matrix = sd.squareform(sd.pdist(matrix, distance))
self.data.matrix = matrix
self.data.matrices[distance] = self.data.matrix
return matrix
def runPLINK(self, qc_parameters={}, commandstring='', asd=False, quality=False):
if self.data.ploidy == 'haploid':
raise AttributeError('Haploid genotypes not supported for PLINK.')
if commandstring:
subprocess.call(commandstring)
else:
self.data.writeData(file=self.data.prefix + '_plink_in.ped', f='plink')
if quality and qc_parameters:
command = ['plink', '--noweb', '--file', self.data.prefix + '_plink_in']
for key, value in qc_parameters.items():
command.append(key)
command.append(str(value))
command.append('--recode')
command.append('--out')
command.append(self.data.prefix + '_plink_qc')
subprocess.call(command, stdout=subprocess.DEVNULL)
if os.path.exists(self.data.prefix + '_plink_qc.ped'):
self.data.readData(file=self.data.prefix + '_plink_qc.ped', f='plink', sep=' ')
if asd:
subprocess.call(['plink', '--noweb', '--file', self.data.prefix + '_plink_in', '--cluster', '--distance-matrix',
'--out', self.data.prefix + '_plink'], stdout=subprocess.DEVNULL)
def updateNodeAttributes(self, attribute_file):
if os.path.isfile(self.data.prefix + '_plink_qc.irem'):
infile = open(self.data.prefix + '_plink_qc.irem')
to_remove = [line.strip().split()[1] for line in infile]
infile.close()
infile = open(attribute_file)
outname = attribute_file.split('.')[0] + '_qc.csv'
outfile = open(outname, 'w')
for line in infile:
content = line.strip().split(',')
if content[0] not in to_remove:
outfile.write(line)
infile.close()
outfile.close()
self.data.readData(file=outname, f='attributes', sep=',')
def runNetView(self, tree=True, start=10, stop=40, step=10, algorithm='auto', edges=False, html=True):
print(get_time() + "\t" + "Minimum Spanning Tree = " + str(tree).upper())
print(get_time() + "\t" + "Nearest Neighbour = " + algorithm.upper())
print(get_time() + "\t" + "k = " + str(start) + " - " + str(stop) + ' (by ' + str(step) + ')')
print(get_time() + "\t" + "---------------------------------")
self.data.netview_runs += 1
matrix = self.data.matrix
if tree:
mst = csg.minimum_spanning_tree(matrix)
mst = mst.toarray()
#self.data.networks[self.data.prefix + 'mst_' + str(self.data.netview_runs)] = mst
mst = mst + mst.T
else:
mst = None
pool = mp.Pool()
networks = [pool.apply_async(netview, args=(matrix, k, mst, algorithm, tree,), callback=netview_callback)
for k in range(start, stop+1, step)]
pool.close()
pool.join()
for item in networks:
result = item.get()
self.data.networks['netview_k' + str(result[0]) + '_' + str(self.data.netview_runs)] = result
print(get_time() + "\t" + "---------------------------------")
if html:
print(get_time() + "\t" + "Out = JSON")
self.data.writeData(f='json')
if edges:
self.data.writeData(f='edges')
print(get_time() + "\t" + "Out = Edges")
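# Illustrative sketch only (not used by Analysis): the allele recoding and pairwise
# Hamming distance that getDistance() applies to a SNP matrix when PLINK's
# allele-sharing distance is not requested. The genotype calls are hypothetical;
# numpy and scipy.spatial.distance are already imported above as np and sd.
def _snp_distance_sketch():
    snps = np.array([['A', 'G', 'G'],
                     ['A', 'A', 'G'],
                     ['G', 'G', 'G']], dtype=object)
    allele_codes = {'A': 1, 'G': 2}                 # e.g. A -> 1, G -> 2
    for allele, code in allele_codes.items():
        snps[snps == allele] = code
    return sd.squareform(sd.pdist(snps.astype(float), 'hamming'))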
if __name__ == '__main__':
main()
| gpl-2.0 | 5,925,608,658,848,145,000 | 41.613817 | 128 | 0.49208 | false |
BorgERP/borg-erp-6of3 | addons/purchase/wizard/purchase_line_invoice.py | 1 | 5929 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from tools.translate import _
class purchase_line_invoice(osv.osv_memory):
""" To create invoice for purchase order line"""
_name = 'purchase.order.line_invoice'
_description = 'Purchase Order Line Make Invoice'
def makeInvoices(self, cr, uid, ids, context=None):
"""
To get Purchase Order line and create Invoice
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
        @return : view of the created supplier invoices
"""
if context is None:
context={}
record_ids = context.get('active_ids',[])
if record_ids:
res = False
invoices = {}
invoice_obj = self.pool.get('account.invoice')
purchase_obj = self.pool.get('purchase.order')
purchase_line_obj = self.pool.get('purchase.order.line')
property_obj=self.pool.get('ir.property')
account_fiscal_obj=self.pool.get('account.fiscal.position')
invoice_line_obj = self.pool.get('account.invoice.line')
account_jrnl_obj = self.pool.get('account.journal')
def multiple_order_invoice_notes(orders):
notes = ""
for order in orders:
notes += "%s \n" % order.notes
return notes
def make_invoice_by_partner(partner, orders, lines_ids):
"""
create a new invoice for one supplier
@param partner : The object partner
@param orders : The set of orders to add in the invoice
@param lines : The list of line's id
"""
name = orders and orders[0].name or ''
journal_id = account_jrnl_obj.search(cr, uid, [('type', '=', 'purchase')], context=None)
journal_id = journal_id and journal_id[0] or False
a = partner.property_account_payable.id
if partner and partner.property_payment_term.id:
pay_term = partner.property_payment_term.id
else:
pay_term = False
inv = {
'name': name,
'origin': name,
'type': 'in_invoice',
'journal_id':journal_id,
'reference' : partner.ref,
'account_id': a,
'partner_id': partner.id,
'address_invoice_id': orders[0].partner_address_id.id,
'address_contact_id': orders[0].partner_address_id.id,
'invoice_line': [(6,0,lines_ids)],
'currency_id' : orders[0].pricelist_id.currency_id.id,
'comment': multiple_order_invoice_notes(orders),
'payment_term': pay_term,
'fiscal_position': partner.property_account_position.id
}
inv_id = invoice_obj.create(cr, uid, inv)
for order in orders:
order.write({'invoice_ids': [(4, inv_id)]})
return inv_id
for line in purchase_line_obj.browse(cr, uid, record_ids, context=context):
if (not line.invoiced) and (line.state not in ('draft', 'cancel')):
if not line.partner_id.id in invoices:
invoices[line.partner_id.id] = []
acc_id = purchase_obj._choose_account_from_po_line(cr, uid, line, context=context)
inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, line, context=context)
inv_line_data.update({'origin': line.order_id.name})
inv_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)
purchase_line_obj.write(cr, uid, [line.id], {'invoiced': True, 'invoice_lines': [(4, inv_id)]})
invoices[line.partner_id.id].append((line,inv_id))
res = []
for result in invoices.values():
il = map(lambda x: x[1], result)
orders = list(set(map(lambda x : x[0].order_id, result)))
res.append(make_invoice_by_partner(orders[0].partner_id, orders, il))
return {
'domain': "[('id','in', ["+','.join(map(str,res))+"])]",
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'view_id': False,
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window'
}
purchase_line_invoice()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,235,681,912,279,776,000 | 42.595588 | 115 | 0.524709 | false |
w1ll1am23/home-assistant | tests/components/hassio/test_ingress.py | 3 | 9117 | """The tests for the hassio component."""
from aiohttp.hdrs import X_FORWARDED_FOR, X_FORWARDED_HOST, X_FORWARDED_PROTO
import pytest
@pytest.mark.parametrize(
"build_type",
[
("a3_vl", "test/beer/ping?index=1"),
("core", "index.html"),
("local", "panel/config"),
("jk_921", "editor.php?idx=3&ping=5"),
("fsadjf10312", ""),
],
)
async def test_ingress_request_get(hassio_client, build_type, aioclient_mock):
"""Test no auth needed for ."""
aioclient_mock.get(
f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
text="test",
)
resp = await hassio_client.get(
f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
headers={"X-Test-Header": "beer"},
)
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "test"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[-1][3]["X-Hassio-Key"] == "123456"
assert (
aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
== f"/api/hassio_ingress/{build_type[0]}"
)
assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
"build_type",
[
("a3_vl", "test/beer/ping?index=1"),
("core", "index.html"),
("local", "panel/config"),
("jk_921", "editor.php?idx=3&ping=5"),
("fsadjf10312", ""),
],
)
async def test_ingress_request_post(hassio_client, build_type, aioclient_mock):
"""Test no auth needed for ."""
aioclient_mock.post(
f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
text="test",
)
resp = await hassio_client.post(
f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
headers={"X-Test-Header": "beer"},
)
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "test"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[-1][3]["X-Hassio-Key"] == "123456"
assert (
aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
== f"/api/hassio_ingress/{build_type[0]}"
)
assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
"build_type",
[
("a3_vl", "test/beer/ping?index=1"),
("core", "index.html"),
("local", "panel/config"),
("jk_921", "editor.php?idx=3&ping=5"),
("fsadjf10312", ""),
],
)
async def test_ingress_request_put(hassio_client, build_type, aioclient_mock):
"""Test no auth needed for ."""
aioclient_mock.put(
f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
text="test",
)
resp = await hassio_client.put(
f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
headers={"X-Test-Header": "beer"},
)
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "test"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[-1][3]["X-Hassio-Key"] == "123456"
assert (
aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
== f"/api/hassio_ingress/{build_type[0]}"
)
assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
"build_type",
[
("a3_vl", "test/beer/ping?index=1"),
("core", "index.html"),
("local", "panel/config"),
("jk_921", "editor.php?idx=3&ping=5"),
("fsadjf10312", ""),
],
)
async def test_ingress_request_delete(hassio_client, build_type, aioclient_mock):
"""Test no auth needed for ."""
aioclient_mock.delete(
f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
text="test",
)
resp = await hassio_client.delete(
f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
headers={"X-Test-Header": "beer"},
)
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "test"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[-1][3]["X-Hassio-Key"] == "123456"
assert (
aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
== f"/api/hassio_ingress/{build_type[0]}"
)
assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
"build_type",
[
("a3_vl", "test/beer/ping?index=1"),
("core", "index.html"),
("local", "panel/config"),
("jk_921", "editor.php?idx=3&ping=5"),
("fsadjf10312", ""),
],
)
async def test_ingress_request_patch(hassio_client, build_type, aioclient_mock):
"""Test no auth needed for ."""
aioclient_mock.patch(
f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
text="test",
)
resp = await hassio_client.patch(
f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
headers={"X-Test-Header": "beer"},
)
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "test"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[-1][3]["X-Hassio-Key"] == "123456"
assert (
aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
== f"/api/hassio_ingress/{build_type[0]}"
)
assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
"build_type",
[
("a3_vl", "test/beer/ping?index=1"),
("core", "index.html"),
("local", "panel/config"),
("jk_921", "editor.php?idx=3&ping=5"),
("fsadjf10312", ""),
],
)
async def test_ingress_request_options(hassio_client, build_type, aioclient_mock):
"""Test no auth needed for ."""
aioclient_mock.options(
f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}",
text="test",
)
resp = await hassio_client.options(
f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
headers={"X-Test-Header": "beer"},
)
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "test"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[-1][3]["X-Hassio-Key"] == "123456"
assert (
aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
== f"/api/hassio_ingress/{build_type[0]}"
)
assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
@pytest.mark.parametrize(
"build_type",
[
("a3_vl", "test/beer/ws"),
("core", "ws.php"),
("local", "panel/config/stream"),
("jk_921", "hulk"),
("demo", "ws/connection?id=9&token=SJAKWS283"),
],
)
async def test_ingress_websocket(hassio_client, build_type, aioclient_mock):
"""Test no auth needed for ."""
aioclient_mock.get(f"http://127.0.0.1/ingress/{build_type[0]}/{build_type[1]}")
    # Ignore error because we cannot set up a full IO infrastructure here
await hassio_client.ws_connect(
f"/api/hassio_ingress/{build_type[0]}/{build_type[1]}",
headers={"X-Test-Header": "beer"},
)
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[-1][3]["X-Hassio-Key"] == "123456"
assert (
aioclient_mock.mock_calls[-1][3]["X-Ingress-Path"]
== f"/api/hassio_ingress/{build_type[0]}"
)
assert aioclient_mock.mock_calls[-1][3]["X-Test-Header"] == "beer"
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_FOR]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_HOST]
assert aioclient_mock.mock_calls[-1][3][X_FORWARDED_PROTO]
| apache-2.0 | 6,364,350,009,245,811,000 | 31.913357 | 83 | 0.596578 | false |
soldag/home-assistant | tests/components/twilio/test_init.py | 13 | 1278 | """Test the init file of Twilio."""
from homeassistant import data_entry_flow
from homeassistant.components import twilio
from homeassistant.core import callback
from tests.async_mock import patch
async def test_config_flow_registers_webhook(hass, aiohttp_client):
"""Test setting up Twilio and sending webhook."""
with patch("homeassistant.util.get_local_ip", return_value="example.com"):
result = await hass.config_entries.flow.async_init(
"twilio", context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
webhook_id = result["result"].data["webhook_id"]
twilio_events = []
@callback
def handle_event(event):
"""Handle Twilio event."""
twilio_events.append(event)
hass.bus.async_listen(twilio.RECEIVED_DATA, handle_event)
client = await aiohttp_client(hass.http.app)
await client.post(f"/api/webhook/{webhook_id}", data={"hello": "twilio"})
assert len(twilio_events) == 1
assert twilio_events[0].data["webhook_id"] == webhook_id
assert twilio_events[0].data["hello"] == "twilio"
| apache-2.0 | -5,765,775,747,421,542,000 | 35.514286 | 82 | 0.683099 | false |
timjr/tomograph | tests/bench.py | 1 | 1208 | #!/usr/bin/env python
# Copyright (c) 2012 Yahoo! Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 Unless required by
# applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
import tomograph
import cProfile
import sys
import time
@tomograph.traced('test server', 'server response', port=80)
def server(latency):
time.sleep(latency)
@tomograph.traced('test client', 'client request')
def client(client_overhead, server_latency):
time.sleep(client_overhead)
server(server_latency)
def clientloop():
for i in xrange(10000):
client(0, 0)
if __name__ == '__main__':
if len(sys.argv) > 1:
tomograph.config.set_backends(sys.argv[1:])
#cProfile.run('clientloop()', 'tomo-bench')
clientloop()
| apache-2.0 | -218,663,055,776,483,400 | 29.974359 | 70 | 0.721854 | false |
xpansa/sale-workflow | sale_pricelist_discount/model/pricelist.py | 16 | 1350 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 credativ ltd (<http://www.credativ.co.uk>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models
import openerp.addons.decimal_precision as dp
class ProductPricelistItem(models.Model):
_inherit = 'product.pricelist.item'
discount = fields.Float(
'Discount (%)',
digits_compute=dp.get_precision('Discount'),
help="Default discount applied on a sale order line.",
default=0.0,
)
| agpl-3.0 | -2,206,992,317,077,799,700 | 38.705882 | 78 | 0.62 | false |
chb/indivo_server | indivo/migrations/0013_auto__add_field_documentschema_internal_p.py | 4 | 52098 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DocumentSchema.internal_p'
db.add_column('indivo_documentschema', 'internal_p', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'DocumentSchema.internal_p'
db.delete_column('indivo_documentschema', 'internal_p')
models = {
'indivo.accesstoken': {
'Meta': {'object_name': 'AccessToken', '_ormbases': ['indivo.Principal']},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']", 'null': 'True'}),
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']", 'null': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.PHAShare']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'indivo.account': {
'Meta': {'object_name': 'Account', '_ormbases': ['indivo.Principal']},
'account': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'failed_login_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'last_failed_login_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'last_login_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'last_state_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'primary_secret': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'secondary_secret': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'uninitialized'", 'max_length': '50'}),
'total_login_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'indivo.accountauthsystem': {
'Meta': {'unique_together': "(('auth_system', 'account'), ('auth_system', 'username'))", 'object_name': 'AccountAuthSystem'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_systems'", 'to': "orm['indivo.Account']"}),
'auth_parameters': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'}),
'auth_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.AuthSystem']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accountauthsystem_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'user_attributes': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'indivo.accountfullshare': {
'Meta': {'unique_together': "(('record', 'with_account'),)", 'object_name': 'AccountFullShare'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accountfullshare_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fullshares'", 'to': "orm['indivo.Record']"}),
'role_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'with_account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fullshares_to'", 'to': "orm['indivo.Account']"})
},
'indivo.allergy': {
'Meta': {'object_name': 'Allergy', '_ormbases': ['indivo.Fact']},
'allergen_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'allergen_name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_name_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_name_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_type_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_type_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'allergen_type_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'date_diagnosed': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'diagnosed_by': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'reaction': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'specifics': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'indivo.audit': {
'Meta': {'object_name': 'Audit'},
'carenet_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'document_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'effective_principal_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'pha_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'proxied_by_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'record_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'req_domain': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'req_headers': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'req_ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True'}),
'req_method': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'req_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'request_successful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'resp_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'resp_headers': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'view_func': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'indivo.authsystem': {
'Meta': {'object_name': 'AuthSystem'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'authsystem_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'internal_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'indivo.carenet': {
'Meta': {'unique_together': "(('name', 'record'),)", 'object_name': 'Carenet'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenet_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']"})
},
'indivo.carenetaccount': {
'Meta': {'unique_together': "(('carenet', 'account'),)", 'object_name': 'CarenetAccount'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']"}),
'can_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenetaccount_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
'indivo.carenetautoshare': {
'Meta': {'unique_together': "(('carenet', 'record', 'type'),)", 'object_name': 'CarenetAutoshare'},
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenetautoshare_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.DocumentSchema']", 'null': 'True'})
},
'indivo.carenetdocument': {
'Meta': {'unique_together': "(('carenet', 'document'),)", 'object_name': 'CarenetDocument'},
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenetdocument_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Document']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'share_p': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'indivo.carenetpha': {
'Meta': {'unique_together': "(('carenet', 'pha'),)", 'object_name': 'CarenetPHA'},
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'carenetpha_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'pha': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.PHA']"})
},
'indivo.document': {
'Meta': {'unique_together': "(('record', 'external_id'),)", 'object_name': 'Document'},
'content': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'content_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'document_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'digest': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'nevershare': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'original': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'document_thread'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'pha': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pha_document'", 'null': 'True', 'to': "orm['indivo.PHA']"}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'null': 'True', 'to': "orm['indivo.Record']"}),
'replaced_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'document_replaced'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'replaces': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Document']", 'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['indivo.StatusName']"}),
'suppressed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'suppressed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Principal']", 'null': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.DocumentSchema']", 'null': 'True'})
},
'indivo.documentprocessing': {
'Meta': {'object_name': 'DocumentProcessing'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentprocessing_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'processed_doc'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
},
'indivo.documentrels': {
'Meta': {'object_name': 'DocumentRels'},
'document_0': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rels_as_doc_0'", 'to': "orm['indivo.Document']"}),
'document_1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rels_as_doc_1'", 'to': "orm['indivo.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.DocumentSchema']"})
},
'indivo.documentschema': {
'Meta': {'object_name': 'DocumentSchema'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentschema_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'internal_p': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'stylesheet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stylesheet'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'indivo.documentstatushistory': {
'Meta': {'object_name': 'DocumentStatusHistory'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentstatushistory_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'document': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'effective_principal': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'proxied_by_principal': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {}),
'record': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.StatusName']"})
},
'indivo.equipment': {
'Meta': {'object_name': 'Equipment', '_ormbases': ['indivo.Fact']},
'date_started': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_stopped': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'})
},
'indivo.fact': {
'Meta': {'object_name': 'Fact'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'allergy'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'allergy'", 'null': 'True', 'to': "orm['indivo.Record']"})
},
'indivo.immunization': {
'Meta': {'object_name': 'Immunization', '_ormbases': ['indivo.Fact']},
'administered_by': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'adverse_event': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'anatomic_surface': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'anatomic_surface_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'anatomic_surface_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'anatomic_surface_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'date_administered': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'vaccine_expiration': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'vaccine_lot': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'vaccine_manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'vaccine_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'vaccine_type_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'vaccine_type_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'vaccine_type_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'})
},
'indivo.lab': {
'Meta': {'object_name': 'Lab', '_ormbases': ['indivo.Fact']},
'date_measured': ('django.db.models.fields.DateTimeField', [], {}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'first_lab_test_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'first_lab_test_value': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'first_panel_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'lab_address': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'lab_comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'lab_name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'lab_type': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'non_critical_range_maximum': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'non_critical_range_minimum': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'normal_range_maximum': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'normal_range_minimum': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'})
},
'indivo.machineapp': {
'Meta': {'object_name': 'MachineApp'},
'app_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'consumer_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'indivo.measurement': {
'Meta': {'object_name': 'Measurement', '_ormbases': ['indivo.Fact']},
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'indivo.medication': {
'Meta': {'object_name': 'Medication', '_ormbases': ['indivo.Fact']},
'brand_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'brand_name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'brand_name_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'brand_name_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'date_started': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_stopped': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'dispense_as_written': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'dose_textvalue': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'dose_unit': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'dose_unit_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'dose_unit_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'dose_unit_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'dose_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'frequency_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'frequency_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'frequency_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'name_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'name_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'prescribed_by_institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'prescribed_by_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'prescribed_on': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'prescribed_stop_on': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'prescription_duration': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'prescription_instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'prescription_refill_info': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'route': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'route_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'route_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'route_value': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'strength_textvalue': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'strength_unit': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'strength_unit_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'strength_unit_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'strength_unit_value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'strength_value': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
},
'indivo.message': {
'Meta': {'unique_together': "(('account', 'external_identifier', 'sender'),)", 'object_name': 'Message'},
'about_record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']", 'null': 'True'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']"}),
'archived_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'body': ('django.db.models.fields.TextField', [], {}),
'body_type': ('django.db.models.fields.CharField', [], {'default': "'plaintext'", 'max_length': '100'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'external_identifier': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'num_attachments': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'received_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_as_recipient'", 'to': "orm['indivo.Principal']"}),
'response_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_responses'", 'null': 'True', 'to': "orm['indivo.Message']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_as_sender'", 'to': "orm['indivo.Principal']"}),
'severity': ('django.db.models.fields.CharField', [], {'default': "'low'", 'max_length': '100'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'indivo.messageattachment': {
'Meta': {'unique_together': "(('message', 'attachment_num'),)", 'object_name': 'MessageAttachment'},
'attachment_num': ('django.db.models.fields.IntegerField', [], {}),
'content': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messageattachment_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Message']"}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'saved_to_document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Document']", 'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'indivo.nonce': {
'Meta': {'unique_together': "(('nonce', 'oauth_type'),)", 'object_name': 'Nonce'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nonce': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'oauth_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'indivo.notification': {
'Meta': {'object_name': 'Notification'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']"}),
'app_url': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Document']", 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']", 'null': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications_sent_by'", 'to': "orm['indivo.Principal']"})
},
'indivo.nouser': {
'Meta': {'object_name': 'NoUser', '_ormbases': ['indivo.Principal']},
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'})
},
'indivo.pha': {
'Meta': {'object_name': 'PHA'},
'autonomous_reason': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'callback_url': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'consumer_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'}),
'frameable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_ui': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_autonomous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True'}),
'privacy_tou': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.DocumentSchema']", 'null': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'start_url_template': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'indivo.phashare': {
'Meta': {'unique_together': "(('record', 'with_pha'),)", 'object_name': 'PHAShare'},
'authorized_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'authorized_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shares_authorized_by'", 'null': 'True', 'to': "orm['indivo.Account']"}),
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']", 'null': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phashare_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pha_shares'", 'to': "orm['indivo.Record']"}),
'with_pha': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pha_shares_to'", 'to': "orm['indivo.PHA']"})
},
'indivo.principal': {
'Meta': {'object_name': 'Principal'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'principal_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'indivo.problem': {
'Meta': {'object_name': 'Problem', '_ormbases': ['indivo.Fact']},
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_onset': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_resolution': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'diagnosed_by': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}),
'name_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'name_value': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'indivo.procedure': {
'Meta': {'object_name': 'Procedure', '_ormbases': ['indivo.Fact']},
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_performed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'name_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'name_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'provider_institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'provider_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
'indivo.record': {
'Meta': {'object_name': 'Record'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'the_record_for_contact'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'record_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'demographics': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'the_record_for_demographics'", 'null': 'True', 'to': "orm['indivo.Document']"}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'unique': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'records_owned_by'", 'null': 'True', 'to': "orm['indivo.Principal']"})
},
'indivo.recordnotificationroute': {
'Meta': {'unique_together': "(('account', 'record'),)", 'object_name': 'RecordNotificationRoute'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recordnotificationroute_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_routes'", 'to': "orm['indivo.Record']"})
},
'indivo.reqtoken': {
'Meta': {'object_name': 'ReqToken', '_ormbases': ['indivo.Principal']},
'authorized_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'authorized_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']", 'null': 'True'}),
'carenet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Carenet']", 'null': 'True'}),
'oauth_callback': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'pha': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.PHA']"}),
'principal_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Principal']", 'unique': 'True', 'primary_key': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Record']", 'null': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.PHAShare']", 'null': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'verifier': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'indivo.sessionrequesttoken': {
'Meta': {'object_name': 'SessionRequestToken'},
'approved_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessionrequesttoken_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']", 'null': 'True'})
},
'indivo.sessiontoken': {
'Meta': {'object_name': 'SessionToken'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessiontoken_created_by'", 'null': 'True', 'to': "orm['indivo.Principal']"}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['indivo.Account']", 'null': 'True'})
},
'indivo.simpleclinicalnote': {
'Meta': {'object_name': 'SimpleClinicalNote', '_ormbases': ['indivo.Fact']},
'chief_complaint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_of_visit': ('django.db.models.fields.DateTimeField', [], {}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'finalized_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'provider_institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'provider_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'signed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'specialty': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'specialty_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'specialty_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'specialty_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'visit_location': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'visit_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'visit_type_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'visit_type_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'visit_type_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'})
},
'indivo.statusname': {
'Meta': {'object_name': 'StatusName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '24'})
},
'indivo.vitals': {
'Meta': {'object_name': 'Vitals', '_ormbases': ['indivo.Fact']},
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_measured': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'fact_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['indivo.Fact']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'name_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'name_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit_abbrev': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'unit_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'unit_value': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['indivo']
| gpl-3.0 | -2,168,674,084,025,847,300 | 92.701439 | 181 | 0.553918 | false |
albertjan/pypyjs | website/js/pypy.js-0.2.0/lib/modules/test/test_extcall.py | 12 | 8156 | # -*- coding: utf-8 -*-
"""Doctest for method/function calls.
We're going the use these types for extra testing
>>> from UserList import UserList
>>> from UserDict import UserDict
We're defining four helper functions
>>> def e(a,b):
... print a, b
>>> def f(*a, **k):
... print a, test_support.sortdict(k)
>>> def g(x, *y, **z):
... print x, y, test_support.sortdict(z)
>>> def h(j=1, a=2, h=3):
... print j, a, h
Argument list examples
>>> f()
() {}
>>> f(1)
(1,) {}
>>> f(1, 2)
(1, 2) {}
>>> f(1, 2, 3)
(1, 2, 3) {}
>>> f(1, 2, 3, *(4, 5))
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *[4, 5])
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *UserList([4, 5]))
(1, 2, 3, 4, 5) {}
Here we add keyword arguments
>>> f(1, 2, 3, **{'a':4, 'b':5})
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *[4, 5], **{'a':6, 'b':7})
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b': 9})
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
>>> f(1, 2, 3, **UserDict(a=4, b=5))
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *(4, 5), **UserDict(a=6, b=7))
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **UserDict(a=8, b=9))
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
Examples with invalid arguments (TypeErrors). We're also testing the function
names in the exception messages.
Verify clearing of SF bug #733667
>>> e(c=4)
Traceback (most recent call last):
...
TypeError: e() got an unexpected keyword argument 'c'
>>> g()
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*())
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*(), **{})
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(1)
1 () {}
>>> g(1, 2)
1 (2,) {}
>>> g(1, 2, 3)
1 (2, 3) {}
>>> g(1, 2, 3, *(4, 5))
1 (2, 3, 4, 5) {}
>>> class Nothing: pass
...
>>> g(*Nothing()) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...argument after * must be a sequence, not instance
>>> class Nothing:
... def __len__(self): return 5
...
>>> g(*Nothing()) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...argument after * must be a sequence, not instance
>>> class Nothing():
... def __len__(self): return 5
... def __getitem__(self, i):
... if i<3: return i
... else: raise IndexError(i)
...
>>> g(*Nothing())
0 (1, 2) {}
>>> class Nothing:
... def __init__(self): self.c = 0
... def __iter__(self): return self
... def next(self):
... if self.c == 4:
... raise StopIteration
... c = self.c
... self.c += 1
... return c
...
>>> g(*Nothing())
0 (1, 2, 3) {}
Make sure that the function doesn't stomp the dictionary
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> d2 = d.copy()
>>> g(1, d=4, **d)
1 () {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> d == d2
True
What about willful misconduct?
>>> def saboteur(**kw):
... kw['x'] = 'm'
... return kw
>>> d = {}
>>> kw = saboteur(a=1, **d)
>>> d
{}
>>> g(1, 2, 3, **{'x': 4, 'y': 5})
Traceback (most recent call last):
...
TypeError: g() got multiple values for keyword argument 'x'
>>> f(**{1:2}) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...keywords must be strings
>>> h(**{'e': 2})
Traceback (most recent call last):
...
TypeError: h() got an unexpected keyword argument 'e'
>>> h(*h) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...argument after * must be a sequence, not function
>>> dir(*h) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...argument after * must be a sequence, not function
>>> None(*h) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...argument after * must be a sequence, not function
>>> h(**h) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...argument after ** must be a mapping, not function
>>> dir(**h) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...argument after ** must be a mapping, not function
>>> None(**h) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...argument after ** must be a mapping, not function
>>> dir(b=1, **{'b': 1}) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...got multiple values for keyword argument 'b'
Another helper function
>>> def f2(*a, **b):
... return a, b
>>> d = {}
>>> for i in xrange(512):
... key = 'k%d' % i
... d[key] = i
>>> a, b = f2(1, *(2,3), **d)
>>> len(a), len(b), b == d
(3, 512, True)
>>> class Foo:
... def method(self, arg1, arg2):
... return arg1+arg2
>>> x = Foo()
>>> Foo.method(*(x, 1, 2))
3
>>> Foo.method(x, *(1, 2))
3
>>> Foo.method(*(1, 2, 3))
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
>>> Foo.method(1, *[2, 3])
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
A PyCFunction that takes only positional parameters should allow an
empty keyword dictionary to pass without a complaint, but raise a
TypeError if the dictionary is not empty
>>> try:
... silence = id(1, *{})
... True
... except:
... False
True
>>> id(1, **{'foo': 1}) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: id() ... keyword argument...
A corner case of keyword dictionary items being deleted during
the function call setup. See <http://bugs.python.org/issue2016>.
>>> class Name(str):
... def __eq__(self, other):
... try:
... del x[self]
... except KeyError:
... pass
... return str.__eq__(self, other)
... def __hash__(self):
... return str.__hash__(self)
>>> x = {Name("a"):1, Name("b"):2}
>>> def f(a, b):
... print a,b
>>> f(**x)
1 2
An obscure message:
>>> def f(a, b):
... pass
>>> f(b=1)
Traceback (most recent call last):
...
TypeError: f() takes exactly 2 arguments (1 given)
The number of arguments passed in includes keywords:
>>> def f(a):
... pass
>>> f(6, a=4, *(1, 2, 3))
Traceback (most recent call last):
...
TypeError: f() takes exactly 1 argument (5 given)
"""
import unittest
import sys
from test import test_support
class ExtCallTest(unittest.TestCase):
def test_unicode_keywords(self):
def f(a):
return a
self.assertEqual(f(**{u'a': 4}), 4)
self.assertRaises(TypeError, f, **{u'stören': 4})
self.assertRaises(TypeError, f, **{u'someLongString':2})
try:
f(a=4, **{u'a': 4})
except TypeError:
pass
else:
self.fail("duplicate arguments didn't raise")
def test_main():
test_support.run_doctest(sys.modules[__name__], True)
test_support.run_unittest(ExtCallTest)
if __name__ == '__main__':
test_main()
| mit | -3,258,538,110,946,822,000 | 24.725552 | 77 | 0.478112 | false |
pizzathief/scipy | scipy/stats/tests/test_contingency.py | 9 | 5982 | import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_approx_equal, assert_allclose)
from pytest import raises as assert_raises
from scipy.special import xlogy
from scipy.stats.contingency import margins, expected_freq, chi2_contingency
def test_margins():
a = np.array([1])
m = margins(a)
assert_equal(len(m), 1)
m0 = m[0]
assert_array_equal(m0, np.array([1]))
a = np.array([[1]])
m0, m1 = margins(a)
expected0 = np.array([[1]])
expected1 = np.array([[1]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(12).reshape(2, 6)
m0, m1 = margins(a)
expected0 = np.array([[15], [51]])
expected1 = np.array([[6, 8, 10, 12, 14, 16]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(24).reshape(2, 3, 4)
m0, m1, m2 = margins(a)
expected0 = np.array([[[66]], [[210]]])
expected1 = np.array([[[60], [92], [124]]])
expected2 = np.array([[[60, 66, 72, 78]]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
assert_array_equal(m2, expected2)
def test_expected_freq():
assert_array_equal(expected_freq([1]), np.array([1.0]))
observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]])
e = expected_freq(observed)
assert_array_equal(e, np.ones_like(observed))
observed = np.array([[10, 10, 20], [20, 20, 20]])
e = expected_freq(observed)
correct = np.array([[12., 12., 16.], [18., 18., 24.]])
assert_array_almost_equal(e, correct)
def test_chi2_contingency_trivial():
# Some very simple tests for chi2_contingency.
# A trivial case
obs = np.array([[1, 2], [1, 2]])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 1)
assert_array_equal(obs, expected)
# A *really* trivial case: 1-D data.
obs = np.array([1, 2, 3])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 0)
assert_array_equal(obs, expected)
def test_chi2_contingency_R():
# Some test cases that were computed independently, using R.
# Rcode = \
# """
# # Data vector.
# data <- c(
# 12, 34, 23, 4, 47, 11,
# 35, 31, 11, 34, 10, 18,
# 12, 32, 9, 18, 13, 19,
# 12, 12, 14, 9, 33, 25
# )
#
# # Create factor tags:r=rows, c=columns, t=tiers
# r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4")))
# c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3")))
# t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2")))
#
# # 3-way Chi squared test of independence
# s = summary(xtabs(data~r+c+t))
# print(s)
# """
# Routput = \
# """
# Call: xtabs(formula = data ~ r + c + t)
# Number of cases in table: 478
# Number of factors: 3
# Test for independence of all factors:
# Chisq = 102.17, df = 17, p-value = 3.514e-14
# """
obs = np.array(
[[[12, 34, 23],
[35, 31, 11],
[12, 32, 9],
[12, 12, 14]],
[[4, 47, 11],
[34, 10, 18],
[18, 13, 19],
[9, 33, 25]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 102.17, significant=5)
assert_approx_equal(p, 3.514e-14, significant=4)
assert_equal(dof, 17)
# Rcode = \
# """
# # Data vector.
# data <- c(
# #
# 12, 17,
# 11, 16,
# #
# 11, 12,
# 15, 16,
# #
# 23, 15,
# 30, 22,
# #
# 14, 17,
# 15, 16
# )
#
# # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers
# r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2")))
# c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2")))
# d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2")))
# t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2")))
#
# # 4-way Chi squared test of independence
# s = summary(xtabs(data~r+c+d+t))
# print(s)
# """
# Routput = \
# """
# Call: xtabs(formula = data ~ r + c + d + t)
# Number of cases in table: 262
# Number of factors: 4
# Test for independence of all factors:
# Chisq = 8.758, df = 11, p-value = 0.6442
# """
obs = np.array(
[[[[12, 17],
[11, 16]],
[[11, 12],
[15, 16]]],
[[[23, 15],
[30, 22]],
[[14, 17],
[15, 16]]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 8.758, significant=4)
assert_approx_equal(p, 0.6442, significant=4)
assert_equal(dof, 11)
def test_chi2_contingency_g():
c = np.array([[15, 60], [15, 90]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=False)
assert_allclose(g, 2*xlogy(c, c/e).sum())
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=True)
c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]])
assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum())
c = np.array([[10, 12, 10], [12, 10, 10]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood')
assert_allclose(g, 2*xlogy(c, c/e).sum())
def test_chi2_contingency_bad_args():
# Test that "bad" inputs raise a ValueError.
# Negative value in the array of observed frequencies.
obs = np.array([[-1, 10], [1, 2]])
assert_raises(ValueError, chi2_contingency, obs)
# The zeros in this will result in zeros in the array
# of expected frequencies.
obs = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, chi2_contingency, obs)
# A degenerate case: `observed` has size 0.
obs = np.empty((0, 8))
assert_raises(ValueError, chi2_contingency, obs)
| bsd-3-clause | 5,548,173,873,699,641,000 | 29.212121 | 82 | 0.53661 | false |