###
# @license
# Copyright Neekware Inc. All Rights Reserved.
#
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE file at http://neekware.com/license/MIT.html
###
from django.conf import settings
from django.utils import timezone
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.core.files.storage import get_storage_class
from django.core.mail import send_mail
from toolware.utils.generic import get_uuid
from toolware.utils.query import CaseInsensitiveUniqueManager
from slugify import slugify
from . import utils as util
from . import defaults as defs
EnabledLanguages = getattr(settings, 'ENABLED_LANGUAGES', {})
DefaultStorage = get_storage_class(defs.DEFAULT_FILE_STORAGE)
class UserProfileManager(CaseInsensitiveUniqueManager, BaseUserManager):
"""
Custom User Manager Class.
USERNAME_FIELD is the email field.
"""
def _create_user(self, email, password, is_staff, is_superuser,
**extra_fields):
"""
Creates and saves a User with the given email and password.
"""
if email:
if password is None:
# Social users have no passwords, but they can request one later on
# via password reset. We set a random password here so that
# `has_usable_password()` returns True and password resets remain possible.
password = get_uuid(length=20, version=4)
user = self.model(email=self.normalize_email(email),
is_staff=is_staff, is_active=True,
is_superuser=is_superuser,
last_login=timezone.now(),
**extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
else:
# Translators: admin:skip
raise ValueError(_('USER.EMAIL.REQUIRED'))
def create_user(self, email=None, password=None, **extra_fields):
return self._create_user(email, password, False, False, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True, **extra_fields)
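# Illustrative sketch (not part of the original module): how this manager is
# typically exercised once the `Profile` model below is the active user model.
# The email/password values are hypothetical and require a configured Django
# project to run.
#
#   user = Profile.objects.create_user(email='jane@example.com', password=None)
#   assert user.has_usable_password()   # a random password was assigned
#   admin = Profile.objects.create_superuser('root@example.com', 's3cret')
#   assert admin.is_staff and admin.is_superuser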
class Profile(AbstractBaseUser, PermissionsMixin):
"""
A custom user class with email & password as the only required fields
"""
default_storage = DefaultStorage()
email = models.EmailField(
# Translators: admin:skip
_('USER.EMAIL'),
db_index=True,
unique=True,
# Translators: admin:skip
help_text=_('USER.EMAIL.DESC'),
)
is_superuser = models.BooleanField(
# Translators: admin:skip
_('USER.SUPERUSER'),
default=False,
# Translators: admin:skip
help_text=_('USER.SUPERUSER.DESC'),
)
is_staff = models.BooleanField(
# Translators: admin:skip
_('USER.STAFF'),
default=False,
# Translators: admin:skip
help_text=_('USER.STAFF.DESC'),
)
is_active = models.BooleanField(
# Translators: admin:skip
_('USER.ACTIVE'),
default=True,
# Translators: admin:skip
help_text=_('USER.ACTIVE.DESC'),
)
# Django specific fields are above this line
#############################################
created_at = models.DateTimeField(
# Translators: admin:skip
_('USER.CREATED_AT'),
default=timezone.now,
)
updated_at = models.DateTimeField(
# Translators: admin:skip
_('USER.UPDATED_AT'),
auto_now=True,
)
first_name = models.CharField(
# Translators: admin:skip
_('USER.FIRST_NAME'),
max_length=60,
null=True,
blank=False,
# Translators: admin:skip
help_text=_('USER.FIRST_NAME.DESC'),
)
last_name = models.CharField(
# Translators: admin:skip
_('USER.LAST_NAME'),
max_length=255,
null=True,
blank=False,
# Translators: admin:skip
help_text=_('USER.LAST_NAME.DESC'),
)
is_verified = models.BooleanField(
# Translators: admin:skip
_('USER.VERIFIED'),
default=False,
# Translators: admin:skip
help_text=_('USER.VERIFIED.DESC'),
)
photo = models.ImageField(
# Translators: admin:skip
_('USER.PHOTO'),
null=True,
blank=True,
storage=default_storage,
upload_to=util.uploadto_user_photo,
max_length=255,
# Translators: admin:skip
help_text=_('USER.PHOTO.DESC'),
)
status = models.CharField(
# Translators: admin:skip
_('USER.STATUS'),
default=defs.USER_STATUS_DEFAULT,
choices=defs.USER_STATUS_CHOICES,
max_length=60,
# Translators: admin:skip
help_text=_('USER.STATUS.DESC'),
)
language = models.CharField(
# Translators: admin:skip
_('USER.LANGUAGE'),
max_length=40,
default=defs.USER_DEFAULT_LANGUAGE,
# Translators: admin:skip
help_text=_('USER.LANGUAGE.DESC')
)
# ########## Add new fields above this line #############
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
CASE_INSENSITIVE_FIELDS = ['email', 'first_name', 'last_name']
class Meta:
# Translators: admin:skip
verbose_name = _('USER.LABEL')
# Translators: admin:skip
verbose_name_plural = _('USER.LABEL.PLURAL')
permissions = (
# ('add_profile', 'Can add new profile'),
# ('change_profile', 'Can change all data on any profile'),
# ('delete_profile', 'Can delete any non-superuser profile'),
# ('view_profile', 'Can view public data on any profile'),
('read_profile', 'Can read all data on any profile'),
('update_profile', 'Can update public data on any profile'),
('switch_profile', 'Can switch to any non-superuser profile'),
)
def __str__(self):
return '{} [{}]'.format(self.email, self.id)
def get_username(self):
"""
Return the email address, which serves as the unique user id instead of a username
"""
return self.email
def get_absolute_url(self):
"""
Return public URL for user
"""
return "/m/{}/{}".format(self.id, slugify(self.get_full_name()))
def get_short_name(self):
"""
Returns first name
"""
return self.first_name
def get_full_name(self):
"""
Returns full name
"""
return '{} {}'.format(self.first_name, self.last_name)
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this user
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
# ########## Add new methods below this line #############
@property
def avatar(self):
if self.photo and self.photo.url:
return self.photo.url
return None
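# Hedged usage note (an assumption, not taken from the original source): a
# custom user model like this is normally activated in settings, e.g.
#
#   # settings.py
#   AUTH_USER_MODEL = 'profiles.Profile'   # app label here is hypothetical
#
# after which django.contrib.auth.get_user_model() returns this class and
# USERNAME_FIELD ('email') is used for authentication.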
#!/usr/bin/python
# Subscribes to the Glider Singleton Publishing Service Socket.
# When a new set is published, it outputs a new NetCDF to a given
# output directory.
#
# By: Michael Lindemuth
# University of South Florida
# College of Marine Science
# Ocean Technology Group
import daemon
import zmq
import argparse
import sys
import shutil
import os
import json
import logging
logger = logging.getLogger('gsps_netcdf_sub')
from glider_netcdf_writer import (
open_glider_netcdf
)
from netCDF4 import default_fillvals as NC_FILL_VALUES
from threading import Thread
from gsps_netcdf_subscriber.generators import (
generate_global_attributes,
generate_filename,
generate_set_key
)
import lockfile
import numpy as np
from glider_utils.yo import find_yo_extrema
from glider_utils.yo.filters import (
filter_profile_depth,
filter_profile_time,
filter_profile_distance,
filter_profile_number_of_points
)
from glider_utils.gps import interpolate_gps
from glider_utils.ctd.salinity import calculate_practical_salinity
from glider_utils.ctd.density import calculate_density
class GliderDataset(object):
"""Represents a complete glider dataset
"""
def __init__(self, handler_dataset):
self.glider = handler_dataset['glider']
self.segment = handler_dataset['segment']
self.headers = handler_dataset['headers']
self.__parse_lines(handler_dataset['lines'])
self.__interpolate_glider_gps()
self.__calculate_salinity_and_density()
self.__calculate_position_uv()
def __interpolate_glider_gps(self):
if 'm_gps_lat-lat' in self.data_by_type:
dataset = np.column_stack((
self.times,
self.data_by_type['m_gps_lat-lat'],
self.data_by_type['m_gps_lon-lon']
))
gps = interpolate_gps(dataset)
self.data_by_type['lat-lat'] = gps[:, 1]
self.data_by_type['lon-lon'] = gps[:, 2]
def __calculate_salinity_and_density(self):
if 'sci_water_cond-s/m' in self.data_by_type:
dataset = np.column_stack((
self.times,
self.data_by_type['sci_water_cond-s/m'],
self.data_by_type['sci_water_temp-degc'],
self.data_by_type['sci_water_pressure-bar']
))
salinity_dataset = calculate_practical_salinity(dataset)
density_dataset = calculate_density(
salinity_dataset,
self.data_by_type['lat-lat'],
self.data_by_type['lon-lon']
)
density_dataset[np.isnan(density_dataset[:, 7]), 7] = (
NC_FILL_VALUES['f8']
)
density_dataset[np.isnan(density_dataset[:, 9]), 9] = (
NC_FILL_VALUES['f8']
)
self.data_by_type['salinity-psu'] = density_dataset[:, 7]
self.data_by_type['density-kg/m^3'] = density_dataset[:, 9]
def __parse_lines(self, lines):
self.time_uv = NC_FILL_VALUES['f8']
self.times = []
self.data_by_type = {}
for header in self.headers:
self.data_by_type[header] = []
for line in lines:
self.times.append(line['timestamp'])
for key in self.data_by_type.keys():
if key in line:
datum = line[key]
if key == 'm_water_vx-m/s':
self.time_uv = line['timestamp']
else:
datum = NC_FILL_VALUES['f8']
self.data_by_type[key].append(datum)
def calculate_profiles(self):
profiles = []
if 'm_depth-m' in self.data_by_type:
dataset = np.column_stack((
self.times,
self.data_by_type['m_depth-m']
))
profiles = find_yo_extrema(dataset)
profiles = filter_profile_depth(profiles)
profiles = filter_profile_time(profiles)
profiles = filter_profile_distance(profiles)
profiles = filter_profile_number_of_points(profiles)
return profiles[:, 2]
def __calculate_position_uv(self):
dataset = np.column_stack((
self.times,
self.data_by_type['lat-lat'],
self.data_by_type['lon-lon']
))
# index of the sample whose timestamp is closest to time_uv
i = np.abs(dataset[:, 0] - self.time_uv).argmin()
self.data_by_type['lat_uv-lat'] = [dataset[i, 1]]
self.data_by_type['lon_uv-lon'] = [dataset[i, 2]]
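# Minimal sketch of the `handler_dataset` dict this class expects, inferred
# from handle_set_start()/handle_set_data() below; the glider name and the
# numeric values are made up.
#
#   handler_dataset = {
#       'glider': 'usf-bass',
#       'segment': 42,
#       'headers': ['m_depth-m', 'm_gps_lat-lat', 'm_gps_lon-lon'],
#       'lines': [
#           {'timestamp': 1404924465.0, 'm_depth-m': 3.2,
#            'm_gps_lat-lat': 27.1, 'm_gps_lon-lon': -82.6},
#       ],
#   }
#   dataset = GliderDataset(handler_dataset)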
def write_netcdf(configs, sets, set_key):
dataset = GliderDataset(sets[set_key])
# No longer need the dataset stored by handlers
del sets[set_key]
global_attributes = (
generate_global_attributes(configs, dataset)
)
filename = generate_filename(configs, dataset)
tmp_path = '/tmp/' + filename
with open_glider_netcdf(tmp_path, 'w') as glider_nc:
glider_nc.set_global_attributes(global_attributes)
glider_nc.set_platform(
configs[dataset.glider]['deployment']['platform']
)
glider_nc.set_trajectory_id(1)
glider_nc.set_segment_id(dataset.segment)
glider_nc.set_datatypes(configs['datatypes'])
glider_nc.set_instruments(configs[dataset.glider]['instruments'])
glider_nc.set_times(dataset.times)
# Insert time_uv parameters
glider_nc.set_time_uv(dataset.time_uv)
glider_nc.set_profile_ids(dataset.calculate_profiles())
for datatype, data in dataset.data_by_type.items():
glider_nc.insert_data(datatype, data)
deployment_path = (
configs['output_directory'] + '/'
+ configs[dataset.glider]['deployment']['directory']
)
if not os.path.exists(deployment_path):
os.mkdir(deployment_path)
file_path = deployment_path + '/' + filename
shutil.move(tmp_path, file_path)
logger.info("Datafile written to %s" % file_path)
def handle_set_start(configs, sets, message):
"""Handles the set start message from the GSPS publisher
Initializes the new dataset store in memory
"""
set_key = generate_set_key(message)
sets[set_key] = {
'glider': message['glider'],
'segment': message['segment'],
'headers': [],
'lines': []
}
for header in message['headers']:
key = header['name'] + '-' + header['units']
sets[set_key]['headers'].append(key)
logger.info(
"Dataset start for %s @ %s"
% (message['glider'], message['start'])
)
def handle_set_data(configs, sets, message):
"""Handles all new data coming in for a GSPS dataset
All datasets must already have been initialized by a set_start message.
Appends new data lines to the set lines variable.
"""
set_key = generate_set_key(message)
if set_key in sets:
sets[set_key]['lines'].append(message['data'])
else:
logger.error(
"Unknown dataset passed for key glider %s dataset @ %s"
% (message['glider'], message['start'])
)
def handle_set_end(configs, sets, message):
"""Handles the set_end message coming from GSPS
Checks for empty dataset. If not empty, it hands
off dataset to thread. Thread writes NetCDF data to
new file in output directory.
"""
set_key = generate_set_key(message)
if set_key in sets:
if len(sets[set_key]['lines']) == 0:
logger.info(
"Empty set: for glider %s dataset @ %s"
% (message['glider'], message['start'])
)
return # No data in set, do nothing
thread = Thread(
target=write_netcdf,
args=(configs, sets, set_key)
)
thread.start()
logger.info(
"Dataset end for %s @ %s. Processing..."
% (message['glider'], message['start'])
)
message_handlers = {
'set_start': handle_set_start,
'set_data': handle_set_data,
'set_end': handle_set_end
}
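# Rough sketch of the GSPS JSON messages these handlers expect, pieced
# together from the handlers above; any field not referenced in the code is
# an assumption about the wire format.
#
#   {"message_type": "set_start", "glider": "usf-bass", "segment": 42,
#    "start": 1404924465.0,
#    "headers": [{"name": "m_depth", "units": "m"}, ...]}
#   {"message_type": "set_data", "glider": "usf-bass", "segment": 42,
#    "start": 1404924465.0, "data": {"timestamp": 1404924466.0, ...}}
#   {"message_type": "set_end", "glider": "usf-bass", "segment": 42,
#    "start": 1404924465.0}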
def load_configs(configs_directory):
configs = {}
for filename in os.listdir(configs_directory):
# Skip hidden directories
if filename[0] == '.':
continue
ext_sep = filename.find('.')
if ext_sep != -1:
key = filename[:ext_sep]
else:
key = filename
full_path = configs_directory + '/' + filename
# Glider configurations are in directories.
# Load configs recursively
if os.path.isdir(full_path):
configs[key] = load_configs(full_path)
# Load configuration from file
else:
with open(full_path, 'r') as f:
contents = f.read()
conf = {}
try:
conf = json.loads(contents)
except Exception as e:
logger.error('Error processing %s: %s' % (filename, e))
configs[key] = conf
return configs
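# Illustrative layout of the configuration directory load_configs() walks
# (paths and file names are assumptions for illustration only):
#
#   /etc/gsps_netcdf_sub/
#       datatypes.json            -> configs['datatypes']
#       usf-bass/                 -> configs['usf-bass'] (loaded recursively)
#           deployment.json
#           instruments.json
#
# write_netcdf() above additionally expects keys such as
# configs[glider]['deployment']['platform'] and ['deployment']['directory'].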
def run_subscriber(configs):
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect(configs['zmq_url'])
socket.setsockopt(zmq.SUBSCRIBE, '')
sets = {}
while True:
try:
message = socket.recv_json()
if message['message_type'] in message_handlers:
message_type = message['message_type']
message_handlers[message_type](configs, sets, message)
except Exception as e:
logger.error("Subscriber exited: %s" % (e))
break
def main():
parser = argparse.ArgumentParser(
description="Subscribes to the Glider Singleton Publishing Service "
"Socket. When a new set is published, it outputs a new "
"NetCDF to a given output directory."
)
parser.add_argument(
"--zmq_url",
default="tcp://localhost:8008",
help="ZMQ url for the GSPS publisher. Default: tcp://localhost:8008"
)
parser.add_argument(
"--configs",
default="/etc/gsps_netcdf_sub",
help="Folder to look for NetCDF global and glider "
"JSON configuration files. Default: /etc/gsps_netcdf_sub"
)
parser.add_argument(
"--daemonize",
type=bool,
help="To daemonize or not to daemonize. Default: false",
default=False
)
parser.add_argument(
"--log_file",
help="Path of log file. Default: ./gsps_netcdf_sub.log",
default="./gsps_netcdf_sub.log"
)
parser.add_argument(
"--pid_file",
help="Path of PID file for daemon. Default: ./gsps_netcdf_sub.pid",
default="./gsps_netcdf_sub.pid"
)
parser.add_argument(
"output_directory",
help="Where to place the newly generated netCDF file.",
default=False
)
args = parser.parse_args()
# Setup logger
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s "
"- %(levelname)s - %(message)s")
if args.daemonize:
log_handler = logging.FileHandler(args.log_file)
else:
log_handler = logging.StreamHandler(sys.stdout)
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
configs_directory = args.configs
if configs_directory[-1] == '/':
configs_directory = configs_directory[:-1]
configs = load_configs(configs_directory)
output_directory = args.output_directory
if args.output_directory[-1] == '/':
output_directory = args.output_directory[:-1]
configs['output_directory'] = output_directory
configs['zmq_url'] = args.zmq_url
if args.daemonize:
logger.info('Starting')
daemon_context = daemon.DaemonContext(
pidfile=lockfile.FileLock(args.pid_file),
files_preserve=[log_handler.stream.fileno()],
)
with daemon_context:
run_subscriber(configs)
else:
run_subscriber(configs)
logger.info('Stopped')
if __name__ == '__main__':
sys.exit(main())
# Copyright 2011-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is loosely based on the discovery component in NOX.
"""
This module discovers the connectivity between OpenFlow switches by sending
out LLDP packets. To be notified of this information, listen to LinkEvents
on core.openflow_discovery.
It's possible that some of this should be abstracted out into a generic
Discovery module, or a Discovery superclass.
"""
from pox.lib.revent import *
from pox.lib.recoco import Timer
from pox.lib.util import dpid_to_str, str_to_bool
from pox.core import core
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
import struct
import time
from collections import namedtuple
from random import shuffle
log = core.getLogger()
class LLDPSender (object):
"""
Sends out discovery packets
"""
SendItem = namedtuple("LLDPSenderItem", ('dpid','port_num','packet'))
#NOTE: This class keeps the packets to send in a flat list, which makes
# adding/removing them on switch join/leave or (especially) port
# status changes relatively expensive. Could easily be improved.
def __init__ (self, send_cycle_time, ttl = 120):
"""
Initialize an LLDP packet sender
send_cycle_time is the time (in seconds) that this sender will take to
send every discovery packet. Thus, it should be the link timeout
interval at most.
ttl is the time (in seconds) for which a receiving LLDP agent should
consider the rest of the data to be valid. We don't use this, but
other LLDP agents might. Can't be 0 (this means revoke).
"""
# Packets remaining to be sent in this cycle
self._this_cycle = []
# Packets we've already sent in this cycle
self._next_cycle = []
self._timer = None
self._ttl = ttl
self._send_cycle_time = send_cycle_time
core.listen_to_dependencies(self)
def _handle_openflow_PortStatus (self, event):
"""
Track changes to switch ports
"""
if event.added:
self.add_port(event.dpid, event.port, event.ofp.desc.hw_addr)
elif event.deleted:
self.del_port(event.dpid, event.port)
def _handle_openflow_ConnectionUp (self, event):
self.del_switch(event.dpid, set_timer = False)
ports = [(p.port_no, p.hw_addr) for p in event.ofp.ports]
for port_num, port_addr in ports:
self.add_port(event.dpid, port_num, port_addr, set_timer = False)
self._set_timer()
def _handle_openflow_ConnectionDown (self, event):
self.del_switch(event.dpid)
def del_switch (self, dpid, set_timer = True):
self._this_cycle = [p for p in self._this_cycle if p.dpid != dpid]
self._next_cycle = [p for p in self._next_cycle if p.dpid != dpid]
if set_timer: self._set_timer()
def del_port (self, dpid, port_num, set_timer = True):
if port_num > of.OFPP_MAX: return
self._this_cycle = [p for p in self._this_cycle
if p.dpid != dpid or p.port_num != port_num]
self._next_cycle = [p for p in self._next_cycle
if p.dpid != dpid or p.port_num != port_num]
if set_timer: self._set_timer()
def add_port (self, dpid, port_num, port_addr, set_timer = True):
if port_num > of.OFPP_MAX: return
self.del_port(dpid, port_num, set_timer = False)
self._next_cycle.append(LLDPSender.SendItem(dpid, port_num,
self.create_discovery_packet(dpid, port_num, port_addr)))
if set_timer: self._set_timer()
def _set_timer (self):
if self._timer: self._timer.cancel()
self._timer = None
num_packets = len(self._this_cycle) + len(self._next_cycle)
if num_packets != 0:
self._timer = Timer(self._send_cycle_time / float(num_packets),
self._timer_handler, recurring=True)
def _timer_handler (self):
"""
Called by a timer to actually send packets.
Picks the first packet off this cycle's list, sends it, and then puts
it on the next-cycle list. When this cycle's list is empty, starts
the next cycle.
"""
if len(self._this_cycle) == 0:
self._this_cycle = self._next_cycle
self._next_cycle = []
shuffle(self._this_cycle)
item = self._this_cycle.pop(0)
self._next_cycle.append(item)
core.openflow.sendToDPID(item.dpid, item.packet)
def create_discovery_packet (self, dpid, port_num, port_addr):
"""
Build discovery packet
"""
chassis_id = pkt.chassis_id(subtype=pkt.chassis_id.SUB_LOCAL)
chassis_id.id = bytes('dpid:' + hex(long(dpid))[2:-1])
# Maybe this should be a MAC. But a MAC of what? Local port, maybe?
port_id = pkt.port_id(subtype=pkt.port_id.SUB_PORT, id=str(port_num))
ttl = pkt.ttl(ttl = self._ttl)
sysdesc = pkt.system_description()
sysdesc.payload = bytes('dpid:' + hex(long(dpid))[2:-1])
discovery_packet = pkt.lldp()
discovery_packet.tlvs.append(chassis_id)
discovery_packet.tlvs.append(port_id)
discovery_packet.tlvs.append(ttl)
discovery_packet.tlvs.append(sysdesc)
discovery_packet.tlvs.append(pkt.end_tlv())
eth = pkt.ethernet(type=pkt.ethernet.LLDP_TYPE)
eth.src = port_addr
eth.dst = pkt.ETHERNET.NDP_MULTICAST
eth.payload = discovery_packet
po = of.ofp_packet_out(action = of.ofp_action_output(port=port_num))
po.data = eth.pack()
return po.pack()
class LinkEvent (Event):
"""
Link up/down event
"""
def __init__ (self, add, link):
Event.__init__(self)
self.link = link
self.added = add
self.removed = not add
def port_for_dpid (self, dpid):
if self.link.dpid1 == dpid:
return self.link.port1
if self.link.dpid2 == dpid:
return self.link.port2
return None
class Link (namedtuple("LinkBase",("dpid1","port1","dpid2","port2"))):
@property
def uni (self):
"""
Returns a "unidirectional" version of this link
The unidirectional versions of symmetric keys will be equal
"""
pairs = list(self.end)
pairs.sort()
return Link(pairs[0][0],pairs[0][1],pairs[1][0],pairs[1][1])
@property
def end (self):
return ((self[0],self[1]),(self[2],self[3]))
def __str__ (self):
return "%s.%s -> %s.%s" % (dpid_to_str(self[0]),self[1],
dpid_to_str(self[2]),self[3])
def __repr__ (self):
return "Link(dpid1=%s,port1=%s, dpid2=%s,port2=%s)" % (self.dpid1,
self.port1, self.dpid2, self.port2)
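# Example of the symmetry guarantee provided by the `uni` property above
# (the dpid/port values are arbitrary):
#
#   Link(1, 1, 2, 2).uni == Link(2, 2, 1, 1).uni   # both normalize identically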
class Discovery (EventMixin):
"""
Component that attempts to discover network topology.
Sends out specially-crafted LLDP packets, and monitors their arrival.
"""
_flow_priority = 65000 # Priority of LLDP-catching flow (if any)
_link_timeout = 10 # How long until we consider a link dead
_timeout_check_period = 5 # How often to check for timeouts
_eventMixin_events = set([
LinkEvent,
])
_core_name = "openflow_discovery" # we want to be core.openflow_discovery
Link = Link
def __init__ (self, install_flow = True, explicit_drop = True,
link_timeout = None, eat_early_packets = False):
self._eat_early_packets = eat_early_packets
self._explicit_drop = explicit_drop
self._install_flow = install_flow
if link_timeout: self._link_timeout = link_timeout
self.adjacency = {} # From Link to time.time() stamp
self._sender = LLDPSender(self.send_cycle_time)
# Listen with a high priority (mostly so we get PacketIns early)
core.listen_to_dependencies(self,
listen_args={'openflow':{'priority':0xffffffff}})
Timer(self._timeout_check_period, self._expire_links, recurring=True)
@property
def send_cycle_time (self):
return self._link_timeout / 2.0
def install_flow (self, con_or_dpid, priority = None):
if priority is None:
priority = self._flow_priority
if isinstance(con_or_dpid, (int,long)):
con = core.openflow.connections.get(con_or_dpid)
if con is None:
log.warn("Can't install flow for %s", dpid_to_str(con_or_dpid))
return False
else:
con = con_or_dpid
match = of.ofp_match(dl_type = pkt.ethernet.LLDP_TYPE,
dl_dst = pkt.ETHERNET.NDP_MULTICAST)
msg = of.ofp_flow_mod()
msg.priority = priority
msg.match = match
msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
con.send(msg)
return True
def _handle_openflow_ConnectionUp (self, event):
if self._install_flow:
# Make sure we get appropriate traffic
log.debug("Installing flow for %s", dpid_to_str(event.dpid))
self.install_flow(event.connection)
def _handle_openflow_ConnectionDown (self, event):
# Delete all links on this switch
self._delete_links([link for link in self.adjacency
if link.dpid1 == event.dpid
or link.dpid2 == event.dpid])
def _expire_links (self):
"""
Remove apparently dead links
"""
now = time.time()
expired = [link for link,timestamp in self.adjacency.iteritems()
if timestamp + self._link_timeout < now]
if expired:
for link in expired:
log.info('link timeout: %s', link)
self._delete_links(expired)
def _handle_openflow_PacketIn (self, event):
"""
Receive and process LLDP packets
"""
packet = event.parsed
if (packet.effective_ethertype != pkt.ethernet.LLDP_TYPE
or packet.dst != pkt.ETHERNET.NDP_MULTICAST):
if not self._eat_early_packets: return
if not event.connection.connect_time: return
enable_time = time.time() - self.send_cycle_time - 1
if event.connection.connect_time > enable_time:
return EventHalt
return
if self._explicit_drop:
if event.ofp.buffer_id is not None:
log.debug("Dropping LLDP packet %i", event.ofp.buffer_id)
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
event.connection.send(msg)
lldph = packet.find(pkt.lldp)
if lldph is None or not lldph.parsed:
log.error("LLDP packet could not be parsed")
return EventHalt
if len(lldph.tlvs) < 3:
log.error("LLDP packet without required three TLVs")
return EventHalt
if lldph.tlvs[0].tlv_type != pkt.lldp.CHASSIS_ID_TLV:
log.error("LLDP packet TLV 1 not CHASSIS_ID")
return EventHalt
if lldph.tlvs[1].tlv_type != pkt.lldp.PORT_ID_TLV:
log.error("LLDP packet TLV 2 not PORT_ID")
return EventHalt
if lldph.tlvs[2].tlv_type != pkt.lldp.TTL_TLV:
log.error("LLDP packet TLV 3 not TTL")
return EventHalt
def lookInSysDesc ():
r = None
for t in lldph.tlvs[3:]:
if t.tlv_type == pkt.lldp.SYSTEM_DESC_TLV:
# This is our favored way...
for line in t.payload.split('\n'):
if line.startswith('dpid:'):
try:
return int(line[5:], 16)
except:
pass
if len(t.payload) == 8:
# Maybe it's a FlowVisor LLDP...
# Do these still exist?
try:
return struct.unpack("!Q", t.payload)[0]
except:
pass
return None
originatorDPID = lookInSysDesc()
if originatorDPID == None:
# We'll look in the CHASSIS ID
if lldph.tlvs[0].subtype == pkt.chassis_id.SUB_LOCAL:
if lldph.tlvs[0].id.startswith('dpid:'):
# This is how NOX does it at the time of writing
try:
originatorDPID = int(lldph.tlvs[0].id[5:], 16)
except:
pass
if originatorDPID == None:
if lldph.tlvs[0].subtype == pkt.chassis_id.SUB_MAC:
# Last ditch effort -- we'll hope the DPID was small enough
# to fit into an ethernet address
if len(lldph.tlvs[0].id) == 6:
try:
s = lldph.tlvs[0].id
originatorDPID = struct.unpack("!Q",'\x00\x00' + s)[0]
except:
pass
if originatorDPID == None:
log.warning("Couldn't find a DPID in the LLDP packet")
return EventHalt
if originatorDPID not in core.openflow.connections:
log.info('Received LLDP packet from unknown switch')
return EventHalt
# Get port number from port TLV
if lldph.tlvs[1].subtype != pkt.port_id.SUB_PORT:
log.warning("Thought we found a DPID, but packet didn't have a port")
return EventHalt
originatorPort = None
if lldph.tlvs[1].id.isdigit():
# We expect it to be a decimal value
originatorPort = int(lldph.tlvs[1].id)
elif len(lldph.tlvs[1].id) == 2:
# Maybe it's a 16 bit port number...
try:
originatorPort = struct.unpack("!H", lldph.tlvs[1].id)[0]
except:
pass
if originatorPort is None:
log.warning("Thought we found a DPID, but port number didn't " +
"make sense")
return EventHalt
if (event.dpid, event.port) == (originatorDPID, originatorPort):
log.warning("Port received its own LLDP packet; ignoring")
return EventHalt
link = Discovery.Link(originatorDPID, originatorPort, event.dpid,
event.port)
if link not in self.adjacency:
self.adjacency[link] = time.time()
log.info('link detected: %s', link)
self.raiseEventNoErrors(LinkEvent, True, link)
else:
# Just update timestamp
self.adjacency[link] = time.time()
return EventHalt # Probably nobody else needs this event
def _delete_links (self, links):
for link in links:
self.raiseEventNoErrors(LinkEvent, False, link)
for link in links:
self.adjacency.pop(link, None)
def is_edge_port (self, dpid, port):
"""
Return True if given port does not connect to another switch
"""
for link in self.adjacency:
if link.dpid1 == dpid and link.port1 == port:
return False
if link.dpid2 == dpid and link.port2 == port:
return False
return True
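# Sketch of a consumer (an assumption, not part of this module): as the module
# docstring suggests, another POX component can react to topology changes by
# listening on core.openflow_discovery once this component is registered.
#
#   def _handle_LinkEvent (event):
#     if event.added:
#       log.info("link up: %s", event.link)
#     elif event.removed:
#       log.info("link down: %s", event.link)
#
#   core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent)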
def launch (no_flow = False, explicit_drop = True, link_timeout = None,
eat_early_packets = False):
explicit_drop = str_to_bool(explicit_drop)
eat_early_packets = str_to_bool(eat_early_packets)
install_flow = not str_to_bool(no_flow)
if link_timeout: link_timeout = int(link_timeout)
core.registerNew(Discovery, explicit_drop=explicit_drop,
install_flow=install_flow, link_timeout=link_timeout,
eat_early_packets=eat_early_packets)
from __future__ import division, absolute_import, print_function
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
# Subclasses seemingly do not call this.
NumpyDocString.__init__(self, docstring, config=config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
# NOTE: the early return below intentionally disables signature rendering;
# the branch after it is unreachable and is kept only for reference.
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self):
out = []
if self['Returns']:
out += self._str_field_list('Returns')
out += ['']
for param, param_type, desc in self['Returns']:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
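# Illustrative usage (not part of the original module): render a NumPy-style
# docstring to reST. The package path and `numpy.mean` target are assumptions.
#
#   from numpydoc.docscrape_sphinx import get_doc_object
#   import numpy as np
#   print(get_doc_object(np.mean, config={'use_plots': False}))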
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Glance Registry's client.
These tests are temporary and will be removed once
the registry's driver tests are added.
"""
import copy
import datetime
import os
import uuid
from mock import patch
from oslo_utils import timeutils
from glance.common import config
from glance.common import exception
from glance import context
from glance.db.sqlalchemy import api as db_api
from glance import i18n
from glance.registry.api import v2 as rserver
import glance.registry.client.v2.api as rapi
from glance.registry.client.v2.api import client as rclient
from glance.tests.unit import base
from glance.tests import utils as test_utils
_ = i18n._
_gen_uuid = lambda: str(uuid.uuid4())
UUID1 = str(uuid.uuid4())
UUID2 = str(uuid.uuid4())
# NOTE(bcwaldon): needed to init config_dir cli opt
config.parse_args(args=[])
class TestRegistryV2Client(base.IsolatedUnitTest,
test_utils.RegistryAPIMixIn):
"""Test proper actions made against a registry service.
Test for both valid and invalid requests.
"""
# Registry server to use
# in the stub.
registry = rserver
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2Client, self).setUp()
db_api.get_engine()
self.context = context.RequestContext(is_admin=True)
uuid1_time = timeutils.utcnow()
uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
self.FIXTURES = [
self.get_extra_fixture(
id=UUID1, name='fake image #1', is_public=False,
disk_format='ami', container_format='ami', size=13,
virtual_size=26, properties={'type': 'kernel'},
location="swift://user:passwd@acct/container/obj.tar.0",
created_at=uuid1_time),
self.get_extra_fixture(id=UUID2, name='fake image #2',
properties={}, size=19, virtual_size=38,
location="file:///tmp/glance-tests/2",
created_at=uuid2_time)]
self.destroy_fixtures()
self.create_fixtures()
self.client = rclient.RegistryClient("0.0.0.0")
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2Client, self).tearDown()
self.destroy_fixtures()
def test_image_get_index(self):
"""Test correct set of public image returned"""
images = self.client.image_get_all()
self.assertEqual(2, len(images))
def test_create_image_with_null_min_disk_min_ram(self):
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf', min_disk=None,
min_ram=None)
db_api.image_create(self.context, extra_fixture)
image = self.client.image_get(image_id=UUID3)
self.assertEqual(0, image["min_ram"])
self.assertEqual(0, image["min_disk"])
def test_get_index_sort_name_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by name in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['name'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID3, UUID1, UUID2, UUID4),
unjsonify=False)
def test_get_index_sort_status_desc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by status in descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
status='queued')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['status'],
sort_dir=['desc'])
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_get_index_sort_disk_format_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by disk_format in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='vdi')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['disk_format'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID1, UUID3, UUID4, UUID2),
unjsonify=False)
def test_get_index_sort_container_format_desc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by container_format in descending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='iso',
container_format='bare')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['container_format'],
sort_dir=['desc'])
self.assertEqualImages(images, (UUID2, UUID4, UUID3, UUID1),
unjsonify=False)
def test_get_index_sort_size_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted by size in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami',
size=100, virtual_size=200)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='asdf',
disk_format='iso',
container_format='bare',
size=2, virtual_size=4)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['size'], sort_dir=['asc'])
self.assertEqualImages(images, (UUID4, UUID1, UUID2, UUID3),
unjsonify=False)
def test_get_index_sort_created_at_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted by created_at in ascending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['created_at'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID1, UUID2, UUID4, UUID3),
unjsonify=False)
def test_get_index_sort_updated_at_desc(self):
"""Tests that the registry API returns list of public images.
Must be sorted by updated_at in descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=None,
updated_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=None,
updated_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['updated_at'],
sort_dir=['desc'])
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_get_image_details_sort_multiple_keys(self):
"""
Tests that a detailed call returns list of
public images sorted by name-size and
size-name in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
size=19)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name=u'xyz',
size=20)
db_api.image_create(self.context, extra_fixture)
UUID5 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID5, name=u'asdf',
size=20)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['name', 'size'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID3, UUID5, UUID1, UUID2, UUID4),
unjsonify=False)
images = self.client.image_get_all(sort_key=['size', 'name'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID1, UUID3, UUID2, UUID5, UUID4),
unjsonify=False)
def test_get_image_details_sort_multiple_dirs(self):
"""
Tests that a detailed call returns list of
public images sorted by name-size and
size-name in ascending and descending orders.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
size=19)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
size=20)
db_api.image_create(self.context, extra_fixture)
UUID5 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID5, name='asdf',
size=20)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['name', 'size'],
sort_dir=['asc', 'desc'])
self.assertEqualImages(images, (UUID5, UUID3, UUID1, UUID2, UUID4),
unjsonify=False)
images = self.client.image_get_all(sort_key=['name', 'size'],
sort_dir=['desc', 'asc'])
self.assertEqualImages(images, (UUID4, UUID2, UUID1, UUID3, UUID5),
unjsonify=False)
images = self.client.image_get_all(sort_key=['size', 'name'],
sort_dir=['asc', 'desc'])
self.assertEqualImages(images, (UUID1, UUID2, UUID3, UUID4, UUID5),
unjsonify=False)
images = self.client.image_get_all(sort_key=['size', 'name'],
sort_dir=['desc', 'asc'])
self.assertEqualImages(images, (UUID5, UUID4, UUID3, UUID2, UUID1),
unjsonify=False)
def test_image_get_index_marker(self):
"""Test correct set of images returned with marker param."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID3)
self.assertEqualImages(images, (UUID4, UUID2, UUID1), unjsonify=False)
def test_image_get_index_limit(self):
"""Test correct number of images returned with limit param."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=2)
self.assertEqual(2, len(images))
def test_image_get_index_marker_limit(self):
"""Test correct set of images returned with marker/limit params."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID4, limit=1)
self.assertEqualImages(images, (UUID2,), unjsonify=False)
def test_image_get_index_limit_None(self):
"""Test correct set of images returned with limit param == None."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=None)
self.assertEqual(4, len(images))
def test_image_get_index_by_name(self):
"""Test correct set of public, name-filtered image returned.
This is just a sanity check, we test the details call more in-depth.
"""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(filters={'name': 'new name! #123'})
self.assertEqual(1, len(images))
for image in images:
self.assertEqual('new name! #123', image['name'])
def test_image_get_is_public_v2(self):
"""Tests that a detailed call can be filtered by a property"""
extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving',
properties={'is_public': 'avalue'})
context = copy.copy(self.context)
db_api.image_create(context, extra_fixture)
filters = {'is_public': 'avalue'}
images = self.client.image_get_all(filters=filters)
self.assertEqual(1, len(images))
for image in images:
self.assertEqual('avalue', image['properties'][0]['value'])
def test_image_get(self):
"""Tests that the detailed info about an image returned"""
fixture = self.get_fixture(id=UUID1, name='fake image #1',
is_public=False, size=13, virtual_size=26,
disk_format='ami', container_format='ami')
data = self.client.image_get(image_id=UUID1)
for k, v in fixture.items():
el = data[k]
self.assertEqual(v, data[k],
"Failed v != data[k] where v = %(v)s and "
"k = %(k)s and data[k] = %(el)s" %
dict(v=v, k=k, el=el))
def test_image_get_non_existing(self):
"""Tests that NotFound is raised when getting a non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_get,
image_id=_gen_uuid())
def test_image_create_basic(self):
"""Tests that we can add image metadata and returns the new id"""
fixture = self.get_fixture()
new_image = self.client.image_create(values=fixture)
# Test all other attributes set
data = self.client.image_get(image_id=new_image['id'])
for k, v in fixture.items():
self.assertEqual(v, data[k])
# Test status was updated properly
self.assertIn('status', data)
self.assertEqual('active', data['status'])
def test_image_create_with_properties(self):
"""Tests that we can add image metadata with properties"""
fixture = self.get_fixture(location="file:///tmp/glance-tests/2",
properties={'distro': 'Ubuntu 10.04 LTS'})
new_image = self.client.image_create(values=fixture)
self.assertIn('properties', new_image)
self.assertEqual(new_image['properties'][0]['value'],
fixture['properties']['distro'])
del fixture['location']
del fixture['properties']
for k, v in fixture.items():
self.assertEqual(v, new_image[k])
# Test status was updated properly
self.assertIn('status', new_image.keys())
self.assertEqual('active', new_image['status'])
def test_image_create_already_exists(self):
"""Tests proper exception is raised if image with ID already exists"""
fixture = self.get_fixture(id=UUID2,
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Duplicate,
self.client.image_create,
values=fixture)
def test_image_create_with_bad_status(self):
"""Tests proper exception is raised if a bad status is set"""
fixture = self.get_fixture(status='bad status',
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Invalid,
self.client.image_create,
values=fixture)
def test_image_update(self):
"""Tests that the registry API updates the image"""
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': 'saving'}
self.assertTrue(self.client.image_update(image_id=UUID2,
values=fixture))
# Test all other attributes set
data = self.client.image_get(image_id=UUID2)
for k, v in fixture.items():
self.assertEqual(v, data[k])
def test_image_update_conflict(self):
"""Tests that the registry API updates the image"""
next_state = 'saving'
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': next_state}
image = self.client.image_get(image_id=UUID2)
current = image['status']
self.assertEqual('active', current)
# image is in 'active' state so this should cause a failure.
from_state = 'saving'
self.assertRaises(exception.Conflict, self.client.image_update,
image_id=UUID2, values=fixture,
from_state=from_state)
try:
self.client.image_update(image_id=UUID2, values=fixture,
from_state=from_state)
except exception.Conflict as exc:
msg = (_('cannot transition from %(current)s to '
'%(next)s in update (wanted '
'from_state=%(from)s)') %
{'current': current, 'next': next_state,
'from': from_state})
self.assertEqual(str(exc), msg)
def test_image_update_with_invalid_min_disk(self):
"""Tests that the registry API updates the image"""
next_state = 'saving'
fixture = {'name': 'fake image',
'disk_format': 'vmdk',
'min_disk': str(2 ** 31 + 1),
'status': next_state}
image = self.client.image_get(image_id=UUID2)
current = image['status']
self.assertEqual('active', current)
# image is in 'active' state so this should cause a failure.
from_state = 'saving'
self.assertRaises(exception.Invalid, self.client.image_update,
image_id=UUID2, values=fixture,
from_state=from_state)
def test_image_update_with_invalid_min_ram(self):
"""Tests that the registry API updates the image"""
next_state = 'saving'
fixture = {'name': 'fake image',
'disk_format': 'vmdk',
'min_ram': str(2 ** 31 + 1),
'status': next_state}
image = self.client.image_get(image_id=UUID2)
current = image['status']
self.assertEqual('active', current)
# image is in 'active' state so this should cause a failure.
from_state = 'saving'
self.assertRaises(exception.Invalid, self.client.image_update,
image_id=UUID2, values=fixture,
from_state=from_state)
def _test_image_update_not_existing(self):
"""Tests non existing image update doesn't work"""
fixture = self.get_fixture(status='bad status')
self.assertRaises(exception.NotFound,
self.client.image_update,
image_id=_gen_uuid(),
values=fixture)
def test_image_destroy(self):
"""Tests that image metadata is deleted properly"""
# Grab the original number of images
orig_num_images = len(self.client.image_get_all())
# Delete image #2
image = self.FIXTURES[1]
deleted_image = self.client.image_destroy(image_id=image['id'])
self.assertTrue(deleted_image)
self.assertEqual(image['id'], deleted_image['id'])
self.assertTrue(deleted_image['deleted'])
self.assertTrue(deleted_image['deleted_at'])
# Verify one less image
filters = {'deleted': False}
new_num_images = len(self.client.image_get_all(filters=filters))
self.assertEqual(new_num_images, orig_num_images - 1)
def test_image_destroy_not_existing(self):
"""Tests cannot delete non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_destroy,
image_id=_gen_uuid())
def test_image_get_members(self):
"""Tests getting image members"""
memb_list = self.client.image_member_find(image_id=UUID2)
num_members = len(memb_list)
self.assertEqual(0, num_members)
def test_image_get_members_not_existing(self):
"""Tests getting non-existent image members"""
self.assertRaises(exception.NotFound,
self.client.image_get_members,
image_id=_gen_uuid())
def test_image_member_find(self):
"""Tests getting member images"""
memb_list = self.client.image_member_find(member='pattieblack')
num_members = len(memb_list)
self.assertEqual(0, num_members)
def test_add_update_members(self):
"""Tests updating image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.assertTrue(member)
values['member'] = 'pattieblack2'
self.assertTrue(self.client.image_member_update(memb_id=member['id'],
values=values))
def test_add_delete_member(self):
"""Tests deleting image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.client.image_member_delete(memb_id=member['id'])
memb_list = self.client.image_member_find(member='pattieblack')
self.assertEqual(0, len(memb_list))
class TestRegistryV2ClientApi(base.IsolatedUnitTest):
"""Test proper actions made against a registry service.
Test for both valid and invalid requests.
"""
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2ClientApi, self).setUp()
reload(rapi)
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2ClientApi, self).tearDown()
def test_configure_registry_client_not_using_use_user_token(self):
self.config(use_user_token=False)
with patch.object(rapi,
'configure_registry_admin_creds') as mock_rapi:
rapi.configure_registry_client()
mock_rapi.assert_called_once_with()
def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'):
return {
'user': 'user',
'password': 'password',
'username': 'user',
'tenant': 'tenant',
'auth_url': auth_url,
'strategy': strategy,
'region': 'region'
}
def test_configure_registry_admin_creds(self):
expected = self._get_fake_config_creds(auth_url=None,
strategy='configured_strategy')
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_strategy=expected['strategy'])
self.config(auth_region=expected['region'])
self.stubs.Set(os, 'getenv', lambda x: None)
self.assertIsNone(rapi._CLIENT_CREDS)
rapi.configure_registry_admin_creds()
self.assertEqual(expected, rapi._CLIENT_CREDS)
def test_configure_registry_admin_creds_with_auth_url(self):
expected = self._get_fake_config_creds()
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_url=expected['auth_url'])
self.config(auth_strategy='test_strategy')
self.config(auth_region=expected['region'])
self.assertIsNone(rapi._CLIENT_CREDS)
rapi.configure_registry_admin_creds()
self.assertEqual(expected, rapi._CLIENT_CREDS)
|
|
# file eulcommon/binfile/outlookexpress.py
#
# Copyright 2012 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Map binary email folder index and content files for Outlook Express
4.5 for Macintosh to Python objects.
What documentation is available suggests that Outlook Express stored
email in either .mbx or .dbx format, but in Outlook Express 4.5 for
Macintosh, each mail folder consists of a directory with an ``Index``
file and an optional ``Mail`` file (no Mail file is present when a
mail folder is empty).
'''
import email
from eulcommon import binfile
import logging
import os
logger = logging.getLogger(__name__)
class MacIndex(binfile.BinaryStructure):
'''A :class:`~eulcommon.binfile.BinaryStructure` for the Index
file of an Outlook Express 4.5 for Mac email folder.'''
MAGIC_NUMBER = 'FMIn' # data file is FMDF
'''Magic Number for Outlook Express 4.5 Mac Index file'''
_magic_num = binfile.ByteField(0, 4)
# first four bytes should match magic number
header_length = 28 # 28 bytes at beginning of header
'''length of the binary header at the beginning of the Index file'''
total_messages = binfile.IntegerField(13, 16)
'''number of email messages in this folder'''
# seems to be number of messages in the folder (or close, anyway)
def sanity_check(self):
if self._magic_num != self.MAGIC_NUMBER:
logger.debug('Index file sanity check failed')
return self._magic_num == self.MAGIC_NUMBER
@property
def messages(self):
'''A generator yielding the :class:`MacIndexMessage`
structures in this index file.'''
# The file contains the fixed-size file header followed by
# fixed-size message structures, followed by minimal message
# information (subject, from, to). Start after the file
# header and then simply return the message structures in
# sequence until we have returned the number of messages in
# this folder, ignoring the minimal message information at the
# end of the file.
offset = self.header_length # initial offset
# how much of the data in this file we expect to use, based on
# the number of messages in this folder and the index message block size
maxlen = self.header_length + self.total_messages * MacIndexMessage.LENGTH
while offset < maxlen:
yield MacIndexMessage(mm=self.mmap, offset=offset)
offset += MacIndexMessage.LENGTH
class MacIndexMessage(binfile.BinaryStructure):
'''Information about a single email message within the
:class:`MacIndex`.'''
LENGTH = 52
'''size of a single message information block'''
offset = binfile.IntegerField(13, 16)
'''the offset of the raw email data in the folder data file'''
size = binfile.IntegerField(17, 20)
'''the size of the raw email data in the folder data file'''
class MacMail(binfile.BinaryStructure):
'''A :class:`~eulcommon.binfile.BinaryStructure` for the Mail file
of an Outlook Express 4.5 for Mac email folder. The Mail file
includes the actual contents of any email files in the folder,
which must be accessed based on the message offset and size from
the Index file.
'''
MAGIC_NUMBER = 'FMDF' # data file (?)
'''Magic Number for a mail content file within an Outlook Express
4.5 for Macintosh folder'''
_magic_num = binfile.ByteField(0, 4) # should match magic number
def sanity_check(self):
if self._magic_num != self.MAGIC_NUMBER:
logger.debug('Mail file sanity check failed')
return self._magic_num == self.MAGIC_NUMBER
def get_message(self, offset, size):
'''Get an individual :class:`MacMailMessage` within a Mail
data file, based on size and offset information from the
corresponding :class:`MacIndexMessage`.
:param offset: offset within the Mail file where the desired
message begins, i.e. :attr:`MacMailMessage.offset`
:param size: size of the message,
i.e. :attr:`MacMailMessage.size`
'''
return MacMailMessage(size=size, mm=self.mmap, offset=offset)
class MacMailMessage(binfile.BinaryStructure):
'''A single email message within the Mail data file, as indexed by
a :class:`MacIndexMessage`. Consists of a variable length header
or message summary followed by the content of the email (also
variable length).
The size of a single :class:`MacMailMessage` is stored in the
:class:`MacIndexMessage` but not (as far as we have determined) in
    the Mail data file, so an individual message must be initialized with
    a size parameter so that the correct content can be returned.
:param size: size of this message (as determined by
:attr:`MacIndexMessage.size`); **required** to return
:attr:`data` correctly.
'''
header_type = binfile.ByteField(0, 4)
'''Each mail message begins with a header, starting with either
``MSum`` (message summary, perhaps) or ``MDel`` for deleted
messages.'''
MESSAGE = 'MSum'
'Header string indicating a normal message'
DELETED_MESSAGE = 'MDel'
'Header string indicating a deleted message'
content_offset = binfile.IntegerField(5, 8)
'''offset within this message block where the message summary
header ends and message content begins'''
def __init__(self, size, *args, **kwargs):
self.size = size
super(MacMailMessage, self).__init__(*args, **kwargs)
@property
def deleted(self):
'boolean flag indicating if this is a deleted message'
return self.header_type == self.DELETED_MESSAGE
@property
def data(self):
'''email content for this message'''
# return data after any initial offset, plus content offset to
# skip header, up to the size of this message
return self.mmap[self.content_offset + self._offset: self._offset + self.size]
def as_email(self):
'''Return message data as a :class:`email.message.Message`
object.'''
return email.message_from_string(self.data)
class MacFolder(object):
'''Wrapper object for an Outlook Express 4.5 for Mac folder, with
a :class:`MacIndex` and an optional :class:`MacMail`.
:param folder_path: path to the Outlook Express 4.5 folder
directory, which must contain at least an ``Index`` file (and
probably a ``Mail`` file, for non-empty folders)
'''
index = None
data = None
def __init__(self, folder_path):
index_filename = os.path.join(folder_path, 'Index')
data_filename = os.path.join(folder_path, 'Mail')
if os.path.exists(index_filename):
self.index = MacIndex(index_filename)
else:
raise RuntimeError('Outlook Express Folder Index does not exist at "%s"' % \
index_filename)
# data file will not be present for empty folders
if os.path.exists(data_filename):
self.data = MacMail(data_filename)
@property
def count(self):
'Number of email messages in this folder'
return self.index.total_messages
skipped_chunks = None
'''Number of data chunks skipped between raw messages, based on
offset and size. (Only set after iterating through messages.)'''
@property
def raw_messages(self):
'''A generator yielding a :class:`MacMailMessage` binary
object for each message in this folder, based on message index
information in :class:`MacIndex` and content in
:class:`MacMail`.'''
if self.data:
# offset for first message, at end of Mail data file header
last_offset = 24
self.skipped_chunks = 0
for msginfo in self.index.messages:
msg = self.data.get_message(msginfo.offset, msginfo.size)
                # Index file seems to reference messages in order by
# offset; check for data skipped between messages.
if msginfo.offset > last_offset:
logger.debug('Skipped %d bytes between %s (%s) and %s (%s)',
msginfo.offset - last_offset,
last_offset, hex(last_offset),
msginfo.offset, hex(msginfo.offset))
self.skipped_chunks += 1
last_offset = msginfo.offset + msginfo.size
yield msg
@property
def messages(self):
'''A generator yielding an :class:`email.message.Message` for
each message in this folder, based on message index
information in :class:`MacIndex` and content in
:class:`MacMail`. Does **not** include deleted messages.'''
return self._messages()
@property
def all_messages(self):
'''Same as :attr:`messages` except deleted messages are included.'''
return self._messages(skip_deleted=False)
def _messages(self, skip_deleted=True):
# common logic for messages / all_messages
for raw_msg in self.raw_messages:
if skip_deleted and raw_msg.deleted:
continue
yield raw_msg.as_email()
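# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of reading a folder with MacFolder; the folder path
# is supplied on the command line and is purely hypothetical.
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        folder = MacFolder(sys.argv[1])  # directory containing Index (and usually Mail)
        print('%d message(s) in folder' % folder.count)
        for message in folder.messages:  # deleted messages are skipped
            print(message.get('Subject', '(no subject)'))
    else:
        print('usage: python outlookexpress.py <folder-directory>')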
|
|
##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os
import gc
import glob
import sys
import time
from IECore import *
class TestImageDisplayDriver(unittest.TestCase):
def testConstruction( self ):
idd = ImageDisplayDriver( Box2i( V2i(0,0), V2i(100,100) ), Box2i( V2i(10,10), V2i(40,40) ), [ 'r','g','b' ], CompoundData() )
self.assertEqual( idd.scanLineOrderOnly(), False )
self.assertEqual( idd.displayWindow(), Box2i( V2i(0,0), V2i(100,100) ) )
self.assertEqual( idd.dataWindow(), Box2i( V2i(10,10), V2i(40,40) ) )
self.assertEqual( idd.channelNames(), [ 'r', 'g', 'b' ] )
def __prepareBuf( self, buf, width, offset, red, green, blue ):
for i in xrange( 0, width ):
buf[3*i] = blue[i+offset]
buf[3*i+1] = green[i+offset]
buf[3*i+2] = red[i+offset]
def testComplete( self ):
img = Reader.create( "test/IECore/data/tiff/bluegreen_noise.400x300.tif" )()
idd = ImageDisplayDriver( img.displayWindow, img.dataWindow, list( img.channelNames() ), CompoundData() )
self.assertEqual( img.keys(), [ 'B', 'G', 'R' ] )
red = img['R'].data
green = img['G'].data
blue = img['B'].data
width = img.dataWindow.max.x - img.dataWindow.min.x + 1
buf = FloatVectorData( width * 3 )
for i in xrange( 0, img.dataWindow.max.y - img.dataWindow.min.y + 1 ):
self.__prepareBuf( buf, width, i*width, red, green, blue )
idd.imageData( Box2i( V2i( img.dataWindow.min.x, i + img.dataWindow.min.y ), V2i( img.dataWindow.max.x, i + img.dataWindow.min.y) ), buf )
idd.imageClose()
self.assertEqual( idd.image(), img )
def testFactory( self ):
idd = DisplayDriver.create( "ImageDisplayDriver", Box2i( V2i(0,0), V2i(100,100) ), Box2i( V2i(10,10), V2i(40,40) ), [ 'r', 'g', 'b' ], CompoundData() )
self.failUnless( isinstance( idd, ImageDisplayDriver ) )
self.assertEqual( idd.scanLineOrderOnly(), False )
self.assertEqual( idd.displayWindow(), Box2i( V2i(0,0), V2i(100,100) ) )
self.assertEqual( idd.dataWindow(), Box2i( V2i(10,10), V2i(40,40) ) )
self.assertEqual( idd.channelNames(), [ 'r', 'g', 'b' ] )
# test if all symbols are gone after the tests.
creator = None
idd = None
gc.collect()
RefCounted.collectGarbage()
self.assertEqual( RefCounted.numWrappedInstances(), 0 )
def testImagePool( self ) :
img = Reader.create( "test/IECore/data/tiff/bluegreen_noise.400x300.tif" )()
idd = DisplayDriver.create(
"ImageDisplayDriver",
img.displayWindow,
img.dataWindow,
list( img.channelNames() ),
{
"handle" : StringData( "myHandle" )
}
)
red = img['R'].data
green = img['G'].data
blue = img['B'].data
width = img.dataWindow.max.x - img.dataWindow.min.x + 1
buf = FloatVectorData( width * 3 )
for i in xrange( 0, img.dataWindow.max.y - img.dataWindow.min.y + 1 ):
self.__prepareBuf( buf, width, i*width, red, green, blue )
idd.imageData( Box2i( V2i( img.dataWindow.min.x, i + img.dataWindow.min.y ), V2i( img.dataWindow.max.x, i + img.dataWindow.min.y) ), buf )
idd.imageClose()
self.assertEqual( ImageDisplayDriver.storedImage( "myHandle" ), idd.image() )
self.assertEqual( ImageDisplayDriver.removeStoredImage( "myHandle" ), idd.image() )
self.assertEqual( ImageDisplayDriver.storedImage( "myHandle" ), None )
def testAcceptsRepeatedData( self ) :
window = Box2i( V2i( 0 ), V2i( 15 ) )
dd = ImageDisplayDriver( window, window, [ "Y" ], CompoundData() )
self.assertEqual( dd.acceptsRepeatedData(), True )
y = FloatVectorData( [ 1 ] * 16 * 16 )
dd.imageData( window, y )
y = FloatVectorData( [ 0.5 ] * 16 * 16 )
dd.imageData( window, y )
dd.imageClose()
i = dd.image()
self.assertEqual( i["Y"].data, y )
class TestClientServerDisplayDriver(unittest.TestCase):
def setUp( self ):
gc.collect()
RefCounted.collectGarbage()
# make sure we don't have symbols from previous tests
self.assertEqual( RefCounted.numWrappedInstances(), 0 )
# this is necessary so python will allow threads created by the display driver server
# to enter into python when those threads execute procedurals.
initThreads()
self.server = DisplayDriverServer( 1559 )
time.sleep(2)
def __prepareBuf( self, buf, width, offset, red, green, blue ):
for i in xrange( 0, width ):
buf[3*i] = blue[i+offset]
buf[3*i+1] = green[i+offset]
buf[3*i+2] = red[i+offset]
def testUsedPortException( self ):
self.assertRaises( RuntimeError, lambda : DisplayDriverServer( 1559 ) )
def testTransfer( self ):
img = Reader.create( "test/IECore/data/tiff/bluegreen_noise.400x300.tif" )()
self.assertEqual( img.keys(), [ 'B', 'G', 'R' ] )
red = img['R'].data
green = img['G'].data
blue = img['B'].data
width = img.dataWindow.max.x - img.dataWindow.min.x + 1
params = CompoundData()
params['displayHost'] = StringData('localhost')
params['displayPort'] = StringData( '1559' )
params["remoteDisplayType"] = StringData( "ImageDisplayDriver" )
params["handle"] = StringData( "myHandle" )
params["header:myMetadata"] = StringData( "Metadata!" )
idd = ClientDisplayDriver( img.displayWindow, img.dataWindow, list( img.channelNames() ), params )
buf = FloatVectorData( width * 3 )
for i in xrange( 0, img.dataWindow.max.y - img.dataWindow.min.y + 1 ):
self.__prepareBuf( buf, width, i*width, red, green, blue )
idd.imageData( Box2i( V2i( img.dataWindow.min.x, i + img.dataWindow.min.y ), V2i( img.dataWindow.max.x, i + img.dataWindow.min.y) ), buf )
idd.imageClose()
newImg = ImageDisplayDriver.removeStoredImage( "myHandle" )
params["clientPID"] = IntData( os.getpid() )
# only data prefixed by 'header:' will come through as blindData/metadata
self.assertEqual( newImg.blindData(), CompoundData({"myMetadata": StringData( "Metadata!" )}) )
# remove blindData for comparison
newImg.blindData().clear()
img.blindData().clear()
self.assertEqual( newImg, img )
def testWrongSocketException( self ) :
parameters = CompoundData( {
"displayHost" : "localhost",
"displayPort" : "1560", # wrong port
"remoteDisplayType" : "ImageDisplayDriver",
} )
dw = Box2i( V2i( 0 ), V2i( 255 ) )
self.assertRaises( RuntimeError, ClientDisplayDriver, dw, dw, [ "R", "G", "B" ], parameters )
try :
ClientDisplayDriver( dw, dw, [ "R", "G", "B" ], parameters )
except Exception, e :
pass
self.failUnless( "Could not connect to remote display driver server : Connection refused" in str( e ) )
def testWrongHostException( self ) :
parameters = CompoundData( {
"displayHost" : "thisHostDoesNotExist",
"displayPort" : "1559", # wrong port
"remoteDisplayType" : "ImageDisplayDriver",
} )
dw = Box2i( V2i( 0 ), V2i( 255 ) )
self.assertRaises( RuntimeError, ClientDisplayDriver, dw, dw, [ "R", "G", "B" ], parameters )
try :
ClientDisplayDriver( dw, dw, [ "R", "G", "B" ], parameters )
except Exception, e :
pass
self.failUnless( "Could not connect to remote display driver server : Host not found" in str( e ) )
def testAcceptsRepeatedData( self ) :
window = Box2i( V2i( 0 ), V2i( 15 ) )
dd = ClientDisplayDriver(
window, window,
[ "Y" ],
CompoundData( {
"displayHost" : "localhost",
"displayPort" : "1559",
"remoteDisplayType" : "ImageDisplayDriver",
"handle" : "myHandle"
} )
)
self.assertEqual( dd.acceptsRepeatedData(), True )
y = FloatVectorData( [ 1 ] * 16 * 16 )
dd.imageData( window, y )
y = FloatVectorData( [ 0.5 ] * 16 * 16 )
dd.imageData( window, y )
dd.imageClose()
i = ImageDisplayDriver.removeStoredImage( "myHandle" )
self.assertEqual( i["Y"].data, y )
def tearDown( self ):
self.server = None
# test if all symbols are gone after the tests.
gc.collect()
RefCounted.collectGarbage()
self.assertEqual( RefCounted.numWrappedInstances(), 0 )
if __name__ == "__main__":
unittest.main()
|
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TSIG support."""
import hmac
import struct
import sys
import dns.exception
import dns.hash
import dns.rdataclass
import dns.name
class BadTime(dns.exception.DNSException):
"""Raised if the current time is not within the TSIG's validity time."""
pass
class BadSignature(dns.exception.DNSException):
"""Raised if the TSIG signature fails to verify."""
pass
class PeerError(dns.exception.DNSException):
"""Base class for all TSIG errors generated by the remote peer"""
pass
class PeerBadKey(PeerError):
"""Raised if the peer didn't know the key we used"""
pass
class PeerBadSignature(PeerError):
"""Raised if the peer didn't like the signature we sent"""
pass
class PeerBadTime(PeerError):
"""Raised if the peer didn't like the time we sent"""
pass
class PeerBadTruncation(PeerError):
"""Raised if the peer didn't like amount of truncation in the TSIG we sent"""
pass
# TSIG Algorithms
HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT")
HMAC_SHA1 = dns.name.from_text("hmac-sha1")
HMAC_SHA224 = dns.name.from_text("hmac-sha224")
HMAC_SHA256 = dns.name.from_text("hmac-sha256")
HMAC_SHA384 = dns.name.from_text("hmac-sha384")
HMAC_SHA512 = dns.name.from_text("hmac-sha512")
default_algorithm = HMAC_MD5
BADSIG = 16
BADKEY = 17
BADTIME = 18
BADTRUNC = 22
def sign(wire, keyname, secret, time, fudge, original_id, error,
other_data, request_mac, ctx=None, multi=False, first=True,
algorithm=default_algorithm):
"""Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
for the input parameters, the HMAC MAC calculated by applying the
TSIG signature algorithm, and the TSIG digest context.
@rtype: (string, string, hmac.HMAC object)
@raises ValueError: I{other_data} is too long
@raises NotImplementedError: I{algorithm} is not supported
"""
(algorithm_name, digestmod) = get_algorithm(algorithm)
if first:
ctx = hmac.new(secret, digestmod=digestmod)
ml = len(request_mac)
if ml > 0:
ctx.update(struct.pack('!H', ml))
ctx.update(request_mac)
id = struct.pack('!H', original_id)
ctx.update(id)
ctx.update(wire[2:])
if first:
ctx.update(keyname.to_digestable())
ctx.update(struct.pack('!H', dns.rdataclass.ANY))
ctx.update(struct.pack('!I', 0))
long_time = time + 0L
upper_time = (long_time >> 32) & 0xffffL
lower_time = long_time & 0xffffffffL
time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
pre_mac = algorithm_name + time_mac
ol = len(other_data)
if ol > 65535:
raise ValueError('TSIG Other Data is > 65535 bytes')
post_mac = struct.pack('!HH', error, ol) + other_data
if first:
ctx.update(pre_mac)
ctx.update(post_mac)
else:
ctx.update(time_mac)
mac = ctx.digest()
mpack = struct.pack('!H', len(mac))
tsig_rdata = pre_mac + mpack + mac + id + post_mac
if multi:
ctx = hmac.new(secret, digestmod=digestmod)
ml = len(mac)
ctx.update(struct.pack('!H', ml))
ctx.update(mac)
else:
ctx = None
return (tsig_rdata, mac, ctx)
def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
other_data, request_mac, ctx=None, multi=False, first=True,
algorithm=default_algorithm):
return sign(wire, keyname, secret, time, fudge, original_id, error,
other_data, request_mac, ctx, multi, first, algorithm)
def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
tsig_rdlen, ctx=None, multi=False, first=True):
"""Validate the specified TSIG rdata against the other input parameters.
@raises FormError: The TSIG is badly formed.
@raises BadTime: There is too much time skew between the client and the
server.
@raises BadSignature: The TSIG signature did not validate
@rtype: hmac.HMAC object"""
(adcount,) = struct.unpack("!H", wire[10:12])
if adcount == 0:
raise dns.exception.FormError
adcount -= 1
new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
current = tsig_rdata
(aname, used) = dns.name.from_wire(wire, current)
current = current + used
(upper_time, lower_time, fudge, mac_size) = \
struct.unpack("!HIHH", wire[current:current + 10])
time = ((upper_time + 0L) << 32) + (lower_time + 0L)
current += 10
mac = wire[current:current + mac_size]
current += mac_size
(original_id, error, other_size) = \
struct.unpack("!HHH", wire[current:current + 6])
current += 6
other_data = wire[current:current + other_size]
current += other_size
if current != tsig_rdata + tsig_rdlen:
raise dns.exception.FormError
if error != 0:
if error == BADSIG:
raise PeerBadSignature
elif error == BADKEY:
raise PeerBadKey
elif error == BADTIME:
raise PeerBadTime
elif error == BADTRUNC:
raise PeerBadTruncation
else:
raise PeerError('unknown TSIG error code %d' % error)
time_low = time - fudge
time_high = time + fudge
if now < time_low or now > time_high:
raise BadTime
(junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
original_id, error, other_data,
request_mac, ctx, multi, first, aname)
if (our_mac != mac):
raise BadSignature
return ctx
_hashes = None
def _maybe_add_hash(tsig_alg, hash_alg):
try:
_hashes[tsig_alg] = dns.hash.get(hash_alg)
except KeyError:
pass
def _setup_hashes():
global _hashes
_hashes = {}
_maybe_add_hash(HMAC_SHA224, 'SHA224')
_maybe_add_hash(HMAC_SHA256, 'SHA256')
_maybe_add_hash(HMAC_SHA384, 'SHA384')
_maybe_add_hash(HMAC_SHA512, 'SHA512')
_maybe_add_hash(HMAC_SHA1, 'SHA1')
_maybe_add_hash(HMAC_MD5, 'MD5')
def get_algorithm(algorithm):
"""Returns the wire format string and the hash module to use for the
specified TSIG algorithm
@rtype: (string, hash constructor)
@raises NotImplementedError: I{algorithm} is not supported
"""
global _hashes
if _hashes is None:
_setup_hashes()
if isinstance(algorithm, (str, unicode)):
algorithm = dns.name.from_text(algorithm)
if sys.hexversion < 0x02050200 and \
(algorithm == HMAC_SHA384 or algorithm == HMAC_SHA512):
raise NotImplementedError("TSIG algorithm " + str(algorithm) +
" requires Python 2.5.2 or later")
try:
return (algorithm.to_digestable(), _hashes[algorithm])
except KeyError:
raise NotImplementedError("TSIG algorithm " + str(algorithm) +
" is not supported")
|
|
"""
This class is defined to override standard pickle functionality
The goals of it follow:
-Serialize lambdas and nested functions to compiled byte code
-Deal with main module correctly
-Deal with other non-serializable objects
It does not include an unpickler, as standard python unpickling suffices.
This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
<http://www.picloud.com>`_.
Copyright (c) 2012, Regents of the University of California.
Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California, Berkeley nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
import operator
import os
import io
import pickle
import struct
import sys
import types
from functools import partial
import itertools
import dis
import traceback
if sys.version < '3':
from pickle import Pickler
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PY3 = False
else:
types.ClassType = type
from pickle import _Pickler as Pickler
from io import BytesIO as StringIO
PY3 = True
#relevant opcodes
STORE_GLOBAL = dis.opname.index('STORE_GLOBAL')
DELETE_GLOBAL = dis.opname.index('DELETE_GLOBAL')
LOAD_GLOBAL = dis.opname.index('LOAD_GLOBAL')
GLOBAL_OPS = [STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL]
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
EXTENDED_ARG = dis.EXTENDED_ARG
def islambda(func):
return getattr(func,'__name__') == '<lambda>'
_BUILTIN_TYPE_NAMES = {}
for k, v in types.__dict__.items():
if type(v) is type:
_BUILTIN_TYPE_NAMES[v] = k
def _builtin_type(name):
return getattr(types, name)
class CloudPickler(Pickler):
dispatch = Pickler.dispatch.copy()
def __init__(self, file, protocol=None):
Pickler.__init__(self, file, protocol)
# set of modules to unpickle
self.modules = set()
# map ids to dictionary. used to ensure that functions can share global env
self.globals_ref = {}
def dump(self, obj):
self.inject_addons()
try:
return Pickler.dump(self, obj)
        except RuntimeError as e:
            if 'recursion' in e.args[0]:
                msg = """Could not pickle object as excessively deep recursion required."""
                raise pickle.PicklingError(msg)
            # re-raise anything else rather than silently returning a truncated pickle
            raise
def save_memoryview(self, obj):
"""Fallback to save_string"""
Pickler.save_string(self, str(obj))
def save_buffer(self, obj):
"""Fallback to save_string"""
Pickler.save_string(self,str(obj))
if PY3:
dispatch[memoryview] = save_memoryview
else:
dispatch[buffer] = save_buffer
def save_unsupported(self, obj):
raise pickle.PicklingError("Cannot pickle objects of type %s" % type(obj))
dispatch[types.GeneratorType] = save_unsupported
# itertools objects do not pickle!
for v in itertools.__dict__.values():
if type(v) is type:
dispatch[v] = save_unsupported
def save_module(self, obj):
"""
Save a module as an import
"""
self.modules.add(obj)
self.save_reduce(subimport, (obj.__name__,), obj=obj)
dispatch[types.ModuleType] = save_module
def save_codeobject(self, obj):
"""
Save a code object
"""
if PY3:
args = (
obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames,
obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
obj.co_cellvars
)
else:
args = (
obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code,
obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars
)
self.save_reduce(types.CodeType, args, obj=obj)
dispatch[types.CodeType] = save_codeobject
def save_function(self, obj, name=None):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
write = self.write
if name is None:
name = obj.__name__
modname = pickle.whichmodule(obj, name)
# print('which gives %s %s %s' % (modname, obj, name))
try:
themodule = sys.modules[modname]
except KeyError:
# eval'd items such as namedtuple give invalid items for their function __module__
modname = '__main__'
if modname == '__main__':
themodule = None
if themodule:
self.modules.add(themodule)
if getattr(themodule, name, None) is obj:
return self.save_global(obj, name)
# if func is lambda, def'ed at prompt, is in main, or is nested, then
# we'll pickle the actual function object rather than simply saving a
# reference (as is done in default pickler), via save_function_tuple.
if islambda(obj) or obj.__code__.co_filename == '<stdin>' or themodule is None:
#print("save global", islambda(obj), obj.__code__.co_filename, modname, themodule)
self.save_function_tuple(obj)
return
else:
# func is nested
klass = getattr(themodule, name, None)
if klass is None or klass is not obj:
self.save_function_tuple(obj)
return
if obj.__dict__:
# essentially save_reduce, but workaround needed to avoid recursion
self.save(_restore_attr)
write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
self.save(obj.__dict__)
write(pickle.TUPLE + pickle.REDUCE)
else:
write(pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
dispatch[types.FunctionType] = save_function
def save_function_tuple(self, func):
""" Pickles an actual func object.
A func comprises: code, globals, defaults, closure, and dict. We
extract and save these, injecting reducing functions at certain points
to recreate the func object. Keep in mind that some of these pieces
can contain a ref to the func itself. Thus, a naive save on these
pieces could trigger an infinite loop of save's. To get around that,
we first create a skeleton func object using just the code (this is
safe, since this won't contain a ref to the func), and memoize it as
soon as it's created. The other stuff can then be filled in later.
"""
save = self.save
write = self.write
code, f_globals, defaults, closure, dct, base_globals = self.extract_func_data(func)
save(_fill_function) # skeleton function updater
write(pickle.MARK) # beginning of tuple that _fill_function expects
# create a skeleton function object and memoize it
save(_make_skel_func)
save((code, closure, base_globals))
write(pickle.REDUCE)
self.memoize(func)
# save the rest of the func data needed by _fill_function
save(f_globals)
save(defaults)
save(dct)
write(pickle.TUPLE)
write(pickle.REDUCE) # applies _fill_function on the tuple
@staticmethod
def extract_code_globals(co):
"""
Find all globals names read or written to by codeblock co
"""
code = co.co_code
if not PY3:
code = [ord(c) for c in code]
names = co.co_names
out_names = set()
n = len(code)
i = 0
extended_arg = 0
while i < n:
op = code[i]
i += 1
if op >= HAVE_ARGUMENT:
oparg = code[i] + code[i+1] * 256 + extended_arg
extended_arg = 0
i += 2
if op == EXTENDED_ARG:
extended_arg = oparg*65536
if op in GLOBAL_OPS:
out_names.add(names[oparg])
# see if nested function have any global refs
if co.co_consts:
for const in co.co_consts:
if type(const) is types.CodeType:
out_names |= CloudPickler.extract_code_globals(const)
return out_names
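    # Illustrative note (not in the original source): for a module-level function
    # such as ``def f(): return os.getpid()``, extract_code_globals(f.__code__)
    # returns set(['os']) -- only the LOAD_GLOBAL of ``os`` counts; ``getpid`` is
    # reached via LOAD_ATTR and so is not a global reference. This assumes the
    # pre-3.6 bytecode layout that the scanner above decodes.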
def extract_func_data(self, func):
"""
Turn the function into a tuple of data necessary to recreate it:
code, globals, defaults, closure, dict
"""
code = func.__code__
# extract all global ref's
func_global_refs = self.extract_code_globals(code)
# process all variables referenced by global environment
f_globals = {}
for var in func_global_refs:
if var in func.__globals__:
f_globals[var] = func.__globals__[var]
# defaults requires no processing
defaults = func.__defaults__
# process closure
closure = [c.cell_contents for c in func.__closure__] if func.__closure__ else []
# save the dict
dct = func.__dict__
base_globals = self.globals_ref.get(id(func.__globals__), {})
self.globals_ref[id(func.__globals__)] = base_globals
return (code, f_globals, defaults, closure, dct, base_globals)
def save_builtin_function(self, obj):
if obj.__module__ is "__builtin__":
return self.save_global(obj)
return self.save_function(obj)
dispatch[types.BuiltinFunctionType] = save_builtin_function
def save_global(self, obj, name=None, pack=struct.pack):
if obj.__module__ == "__builtin__" or obj.__module__ == "builtins":
if obj in _BUILTIN_TYPE_NAMES:
return self.save_reduce(_builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
if name is None:
name = obj.__name__
modname = getattr(obj, "__module__", None)
if modname is None:
modname = pickle.whichmodule(obj, name)
if modname == '__main__':
themodule = None
else:
__import__(modname)
themodule = sys.modules[modname]
self.modules.add(themodule)
if hasattr(themodule, name) and getattr(themodule, name) is obj:
return Pickler.save_global(self, obj, name)
typ = type(obj)
if typ is not obj and isinstance(obj, (type, types.ClassType)):
d = dict(obj.__dict__) # copy dict proxy to a dict
if not isinstance(d.get('__dict__', None), property):
# don't extract dict that are properties
d.pop('__dict__', None)
d.pop('__weakref__', None)
# hack as __new__ is stored differently in the __dict__
new_override = d.get('__new__', None)
if new_override:
d['__new__'] = obj.__new__
# workaround for namedtuple (hijacked by PySpark)
if getattr(obj, '_is_namedtuple_', False):
self.save_reduce(_load_namedtuple, (obj.__name__, obj._fields))
return
self.save(_load_class)
self.save_reduce(typ, (obj.__name__, obj.__bases__, {"__doc__": obj.__doc__}), obj=obj)
d.pop('__doc__', None)
# handle property and staticmethod
dd = {}
for k, v in d.items():
if isinstance(v, property):
k = ('property', k)
v = (v.fget, v.fset, v.fdel, v.__doc__)
elif isinstance(v, staticmethod) and hasattr(v, '__func__'):
k = ('staticmethod', k)
v = v.__func__
elif isinstance(v, classmethod) and hasattr(v, '__func__'):
k = ('classmethod', k)
v = v.__func__
dd[k] = v
self.save(dd)
self.write(pickle.TUPLE2)
self.write(pickle.REDUCE)
else:
raise pickle.PicklingError("Can't pickle %r" % obj)
dispatch[type] = save_global
dispatch[types.ClassType] = save_global
def save_instancemethod(self, obj):
# Memoization rarely is ever useful due to python bounding
if PY3:
self.save_reduce(types.MethodType, (obj.__func__, obj.__self__), obj=obj)
else:
self.save_reduce(types.MethodType, (obj.__func__, obj.__self__, obj.__self__.__class__),
obj=obj)
dispatch[types.MethodType] = save_instancemethod
def save_inst(self, obj):
"""Inner logic to save instance. Based off pickle.save_inst
Supports __transient__"""
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
pickle._keep_alive(args, memo)
else:
args = ()
write(pickle.MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(pickle.OBJ)
else:
for arg in args:
save(arg)
write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
#remove items if transient
if hasattr(obj, '__transient__'):
transient = obj.__transient__
stuff = stuff.copy()
for k in list(stuff.keys()):
if k in transient:
del stuff[k]
else:
stuff = getstate()
pickle._keep_alive(stuff, memo)
save(stuff)
write(pickle.BUILD)
if not PY3:
dispatch[types.InstanceType] = save_inst
def save_property(self, obj):
# properties not correctly saved in python
self.save_reduce(property, (obj.fget, obj.fset, obj.fdel, obj.__doc__), obj=obj)
dispatch[property] = save_property
def save_itemgetter(self, obj):
"""itemgetter serializer (needed for namedtuple support)"""
class Dummy:
def __getitem__(self, item):
return item
items = obj(Dummy())
if not isinstance(items, tuple):
items = (items, )
return self.save_reduce(operator.itemgetter, items)
if type(operator.itemgetter) is type:
dispatch[operator.itemgetter] = save_itemgetter
def save_attrgetter(self, obj):
"""attrgetter serializer"""
class Dummy(object):
def __init__(self, attrs, index=None):
self.attrs = attrs
self.index = index
def __getattribute__(self, item):
attrs = object.__getattribute__(self, "attrs")
index = object.__getattribute__(self, "index")
if index is None:
index = len(attrs)
attrs.append(item)
else:
attrs[index] = ".".join([attrs[index], item])
return type(self)(attrs, index)
attrs = []
obj(Dummy(attrs))
return self.save_reduce(operator.attrgetter, tuple(attrs))
if type(operator.attrgetter) is type:
dispatch[operator.attrgetter] = save_attrgetter
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
"""Modified to support __transient__ on new objects
        Change only affects protocol level 2 (which is always used by PiCloud)."""
# Assert that args is a tuple or None
if not isinstance(args, tuple):
raise pickle.PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not hasattr(func, '__call__'):
raise pickle.PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
#Added fix to allow transient
cls = args[0]
if not hasattr(cls, "__new__"):
raise pickle.PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise pickle.PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
#Don't pickle transient entries
if hasattr(obj, '__transient__'):
transient = obj.__transient__
state = state.copy()
for k in list(state.keys()):
if k in transient:
del state[k]
save(args)
write(pickle.NEWOBJ)
else:
save(func)
save(args)
write(pickle.REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(pickle.BUILD)
def save_partial(self, obj):
"""Partial objects do not serialize correctly in python2.x -- this fixes the bugs"""
self.save_reduce(_genpartial, (obj.func, obj.args, obj.keywords))
if sys.version_info < (2,7): # 2.7 supports partial pickling
dispatch[partial] = save_partial
def save_file(self, obj):
"""Save a file"""
try:
import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
except ImportError:
import io as pystringIO
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
if obj is sys.stdout:
return self.save_reduce(getattr, (sys,'stdout'), obj=obj)
if obj is sys.stderr:
return self.save_reduce(getattr, (sys,'stderr'), obj=obj)
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if hasattr(obj, 'isatty') and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if 'r' not in obj.mode:
raise pickle.PicklingError("Cannot pickle files that are not opened for reading")
name = obj.name
try:
fsize = os.stat(name).st_size
except OSError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be stat" % name)
if obj.closed:
#create an empty closed string io
retval = pystringIO.StringIO("")
retval.close()
elif not fsize: #empty file
retval = pystringIO.StringIO("")
try:
tmpfile = file(name)
tst = tmpfile.read(1)
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
tmpfile.close()
if tst != '':
raise pickle.PicklingError("Cannot pickle file %s as it does not appear to map to a physical, real file" % name)
else:
try:
tmpfile = file(name)
contents = tmpfile.read()
tmpfile.close()
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval = pystringIO.StringIO(contents)
curloc = obj.tell()
retval.seek(curloc)
retval.name = name
self.save(retval)
self.memoize(obj)
if PY3:
dispatch[io.TextIOWrapper] = save_file
else:
dispatch[file] = save_file
"""Special functions for Add-on libraries"""
def inject_numpy(self):
numpy = sys.modules.get('numpy')
if not numpy or not hasattr(numpy, 'ufunc'):
return
self.dispatch[numpy.ufunc] = self.__class__.save_ufunc
def save_ufunc(self, obj):
"""Hack function for saving numpy ufunc objects"""
name = obj.__name__
numpy_tst_mods = ['numpy', 'scipy.special']
for tst_mod_name in numpy_tst_mods:
tst_mod = sys.modules.get(tst_mod_name, None)
if tst_mod and name in tst_mod.__dict__:
return self.save_reduce(_getobject, (tst_mod_name, name))
raise pickle.PicklingError('cannot save %s. Cannot resolve what module it is defined in'
% str(obj))
def inject_addons(self):
"""Plug in system. Register additional pickling functions if modules already loaded"""
self.inject_numpy()
# Shorthands for legacy support
def dump(obj, file, protocol=2):
CloudPickler(file, protocol).dump(obj)
def dumps(obj, protocol=2):
file = StringIO()
cp = CloudPickler(file,protocol)
cp.dump(obj)
return file.getvalue()
#hack for __import__ not working as desired
def subimport(name):
__import__(name)
return sys.modules[name]
# restores function attributes
def _restore_attr(obj, attr):
for key, val in attr.items():
setattr(obj, key, val)
return obj
def _get_module_builtins():
return pickle.__builtins__
def print_exec(stream):
ei = sys.exc_info()
traceback.print_exception(ei[0], ei[1], ei[2], None, stream)
def _modules_to_main(modList):
"""Force every module in modList to be placed into main"""
if not modList:
return
main = sys.modules['__main__']
for modname in modList:
if type(modname) is str:
try:
mod = __import__(modname)
except Exception as e:
                sys.stderr.write('warning: could not import %s.\n'
                                 'Your function may unexpectedly error due to this import failing; '
                                 'a version mismatch is likely. Specific error was:\n' % modname)
print_exec(sys.stderr)
else:
setattr(main, mod.__name__, mod)
#object generators:
def _genpartial(func, args, kwds):
if not args:
args = ()
if not kwds:
kwds = {}
return partial(func, *args, **kwds)
def _fill_function(func, globals, defaults, dict):
""" Fills in the rest of function data into the skeleton function object
that were created via _make_skel_func().
"""
func.__globals__.update(globals)
func.__defaults__ = defaults
func.__dict__ = dict
return func
def _make_cell(value):
return (lambda: value).__closure__[0]
def _reconstruct_closure(values):
return tuple([_make_cell(v) for v in values])
def _make_skel_func(code, closures, base_globals = None):
""" Creates a skeleton function object that contains just the provided
code and the correct number of cells in func_closure. All other
func attributes (e.g. func_globals) are empty.
"""
closure = _reconstruct_closure(closures) if closures else None
if base_globals is None:
base_globals = {}
base_globals['__builtins__'] = __builtins__
return types.FunctionType(code, base_globals,
None, None, closure)
def _load_class(cls, d):
"""
Loads additional properties into class `cls`.
"""
for k, v in d.items():
if isinstance(k, tuple):
typ, k = k
if typ == 'property':
v = property(*v)
elif typ == 'staticmethod':
v = staticmethod(v)
elif typ == 'classmethod':
v = classmethod(v)
setattr(cls, k, v)
return cls
def _load_namedtuple(name, fields):
"""
Loads a class generated by namedtuple
"""
from collections import namedtuple
return namedtuple(name, fields)
"""Constructors for 3rd party libraries
Note: These can never be renamed due to client compatibility issues"""
def _getobject(modname, attribute):
mod = __import__(modname, fromlist=[attribute])
return mod.__dict__[attribute]
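# --- Usage sketch (illustrative, not part of the original module) ---
# When this module is imported (e.g. as pyspark.cloudpickle), a lambda that the
# standard pickler rejects can be round-tripped with the stock unpickler:
#     import pickle
#     payload = dumps(lambda x: x * 2)
#     print(pickle.loads(payload)(21))   # 42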
|
|
"""
IOStore class originated here
https://github.com/BD2KGenomics/hgvm-graph-bakeoff-evaluations/blob/master/scripts/toillib.py
and was then here:
https://github.com/cmarkello/toil-lib/blob/master/src/toil_lib/toillib.py
In a perfect world, this would be deprecated and replaced with Toil's stores.
Actually did this here:
https://github.com/glennhickey/toil-vg/tree/issues/110-fix-iostore
But couldn't get Toil's multipart S3 uploader working on large files. Also,
the toil jobStore interface is a little less clean for our use.
So for now keep as part of toil-vg where it works. Could also consider merging
into the upstream toil-lib
https://github.com/BD2KGenomics/toil-lib
"""
import sys, os, os.path, json, collections, logging, logging.handlers
import SocketServer, struct, socket, threading, tarfile, shutil
import tempfile
import functools
import random
import time
import dateutil.tz  # needed for dateutil.tz.tzutc() in get_mtime below
import traceback
import stat
from toil.realtimeLogger import RealtimeLogger
import datetime
# Need stuff for Amazon s3
try:
import boto3
import botocore
have_s3 = True
except ImportError:
have_s3 = False
pass
# We need some stuff in order to have Azure
try:
import azure
# Make sure to get the 0.11 BlobService, in case the new azure storage
# module is also installed.
from azure.storage.blob import BlobService
import toil.jobStores.azureJobStore
have_azure = True
except ImportError:
have_azure = False
pass
def robust_makedirs(directory):
"""
Make a directory when other nodes may be trying to do the same on a shared
filesystem.
"""
if not os.path.exists(directory):
try:
# Make it if it doesn't exist
os.makedirs(directory)
except OSError:
# If you can't make it, maybe someone else did?
pass
# Make sure it exists and is a directory
assert(os.path.exists(directory) and os.path.isdir(directory))
def write_global_directory(file_store, path, cleanup=False, tee=None, compress=True):
"""
Write the given directory into the file store, and return an ID that can be
used to retrieve it. Writes the files in the directory and subdirectories
into a tar file in the file store.
Does not preserve the name or permissions of the given directory (only of
its contents).
If cleanup is true, directory will be deleted from the file store when this
job and its follow-ons finish.
If tee is passed, a tar.gz of the directory contents will be written to that
filename. The file thus created must not be modified after this function is
called.
"""
write_stream_mode = "w"
if compress:
write_stream_mode = "w|gz"
if tee is not None:
with open(tee, "w") as file_handle:
# We have a stream, so start taring into it
with tarfile.open(fileobj=file_handle, mode=write_stream_mode) as tar:
# Open it for streaming-only write (no seeking)
# We can't just add the root directory, since then we wouldn't be
# able to extract it later with an arbitrary name.
for file_name in os.listdir(path):
# Add each file in the directory to the tar, with a relative
# path
tar.add(os.path.join(path, file_name), arcname=file_name)
# Save the file on disk to the file store.
return file_store.writeGlobalFile(tee)
else:
with file_store.writeGlobalFileStream(cleanup=cleanup) as (file_handle,
file_id):
# We have a stream, so start taring into it
# TODO: don't duplicate this code.
with tarfile.open(fileobj=file_handle, mode=write_stream_mode) as tar:
# Open it for streaming-only write (no seeking)
# We can't just add the root directory, since then we wouldn't be
# able to extract it later with an arbitrary name.
for file_name in os.listdir(path):
# Add each file in the directory to the tar, with a relative
# path
tar.add(os.path.join(path, file_name), arcname=file_name)
# Spit back the ID to use to retrieve it
return file_id
def read_global_directory(file_store, directory_id, path):
"""
Reads a directory with the given tar file id from the global file store and
recreates it at the given path.
The given path, if it exists, must be a directory.
Do not use to extract untrusted directories, since they could sneakily plant
files anywhere on the filesystem.
"""
# Make the path
robust_makedirs(path)
with file_store.readGlobalFileStream(directory_id) as file_handle:
# We need to pull files out of this tar stream
with tarfile.open(fileobj=file_handle, mode="r|*") as tar:
# Open it for streaming-only read (no seeking)
# We need to extract the whole thing into that new directory
tar.extractall(path)
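# Illustrative note (not in the original source): within a Toil job the two
# helpers above are typically paired; assuming ``file_store`` is the Toil file
# store handle available to the job (a hypothetical name here):
#     dir_id = write_global_directory(file_store, "/tmp/workdir")
#     read_global_directory(file_store, dir_id, "/tmp/restored")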
class IOStore(object):
"""
A class that lets you get your input files and save your output files
to/from a local filesystem, Amazon S3, or Microsoft Azure storage
transparently.
This is the abstract base class; other classes inherit from this and fill in
the methods.
"""
def __init__(self):
"""
Make a new IOStore
"""
raise NotImplementedError()
def read_input_file(self, input_path, local_path):
"""
Read an input file from wherever the input comes from and send it to the
given path.
If the file at local_path already exists, it is overwritten.
If the file at local_path already exists and is a directory, behavior is
undefined.
"""
raise NotImplementedError()
def list_input_directory(self, input_path, recursive=False,
with_times=False):
"""
Yields each of the subdirectories and files in the given input path.
If recursive is false, yields files and directories in the given
directory. If recursive is true, yields all files contained within the
current directory, recursively, but does not yield folders.
If with_times is True, yields (name, modification time) pairs instead of
just names, with modification times represented as datetime objects in
the GMT timezone. Modification times may be None on objects that do not
support them.
Gives relative file/directory names.
"""
raise NotImplementedError()
def write_output_file(self, local_path, output_path):
"""
Save the given local file to the given output path. No output directory
needs to exist already.
If the output path already exists, it is overwritten.
If the output path already exists and is a directory, behavior is
undefined.
"""
raise NotImplementedError()
def exists(self, path):
"""
Returns true if the given input or output file exists in the store
already.
"""
raise NotImplementedError()
def get_mtime(self, path):
"""
        Returns the modification time of the given file if it exists, or None
otherwise.
"""
raise NotImplementedError()
def get_size(self, path):
"""
Returns the size in bytes of the given file if it exists, or None
otherwise.
"""
raise NotImplementedError()
@staticmethod
def absolute(store_string):
"""
Convert a relative path IOStore string to an absolute path one. Leaves
strings that aren't FileIOStore specifications alone.
Since new Toil versions change the working directory of SingleMachine
batch system jobs, we need to have absolute paths passed into jobs.
Recommended to be used as an argparse type, so that strings can be
directly be passed to IOStore.get on the nodes.
"""
if store_string == "":
return ""
if store_string[0] == ".":
# It's a relative ./ path
return os.path.abspath(store_string)
if store_string.startswith("file:"):
# It's a file:-prefixed thing that may be a relative path
# Normalize the part after "file:" (which is 5 characters)
return "file:" + os.path.abspath(store_string[5:])
return store_string
@staticmethod
def get(store_string):
"""
Get a concrete IOStore created from the given connection string.
Valid formats are just like for a Toil JobStore, except with container
names being specified on Azure.
Formats:
/absolute/filesystem/path
./relative/filesystem/path
file:filesystem/path
aws:region:bucket (TODO)
aws:region:bucket/path/prefix (TODO)
azure:account:container (instead of a container prefix) (gets keys like
Toil)
azure:account:container/path/prefix (trailing slash added automatically)
"""
# Code adapted from toil's common.py loadJobStore()
if store_string[0] in "/.":
            # Prepend file: to the path
store_string = "file:" + store_string
try:
# Break off the first colon-separated piece.
store_type, store_arguments = store_string.split(":", 1)
except ValueError:
# They probably forgot the . or /
raise RuntimeError("Incorrect IO store specification {}. "
"Local paths must start with . or /".format(store_string))
if store_type == "file":
return FileIOStore(store_arguments)
elif store_type == "aws":
# Break out the AWS arguments
region, bucket_name = store_arguments.split(":", 1)
if "/" in bucket_name:
# Split the bucket from the path
bucket_name, path_prefix = bucket_name.split("/", 1)
else:
# No path prefix
path_prefix = ""
return S3IOStore(region, bucket_name, path_prefix)
elif store_type == "azure":
# Break out the Azure arguments.
account, container = store_arguments.split(":", 1)
if "/" in container:
# Split the container from the path
container, path_prefix = container.split("/", 1)
else:
# No path prefix
path_prefix = ""
return AzureIOStore(account, container, path_prefix)
else:
raise RuntimeError("Unknown IOStore implementation {}".format(
store_type))
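# Illustrative note (not in the original source): IOStore.get() is the usual
# entry point, turning a connection string into a concrete store, e.g.
#     out_store = IOStore.get("./output")                    # FileIOStore
#     out_store = IOStore.get("azure:acct:container/run1")   # AzureIOStore
# (the account and container names are hypothetical). The returned object is
# then used through read_input_file(), write_output_file(), exists(), etc.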
class FileIOStore(IOStore):
"""
A class that lets you get input from and send output to filesystem files.
"""
def __init__(self, path_prefix=""):
"""
Make a new FileIOStore that just treats everything as local paths,
relative to the given prefix.
"""
self.path_prefix = path_prefix
def read_input_file(self, input_path, local_path):
"""
Get input from the filesystem.
"""
RealtimeLogger.debug("Loading {} from FileIOStore in {} to {}".format(
input_path, self.path_prefix, local_path))
if os.path.exists(local_path):
# Try deleting the existing item if it already exists
try:
os.unlink(local_path)
except:
# Don't fail here, fail complaining about the assertion, which
# will be more informative.
pass
# Make sure the path is clear for copying
assert(not os.path.exists(local_path))
# Where is the file actually?
real_path = os.path.abspath(os.path.join(self.path_prefix, input_path))
if not os.path.exists(real_path):
RealtimeLogger.error(
"Can't find {} from FileIOStore in {}!".format(input_path,
self.path_prefix))
raise RuntimeError("File {} missing!".format(real_path))
# Make a temporary file
temp_handle, temp_path = tempfile.mkstemp(dir=os.path.dirname(local_path))
os.close(temp_handle)
# Copy to the temp file
shutil.copy2(real_path, temp_path)
# Rename the temp file to the right place, atomically
RealtimeLogger.info("rename {} -> {}".format(temp_path, local_path))
os.rename(temp_path, local_path)
# Look at the file stats
file_stats = os.stat(real_path)
if (file_stats.st_uid == os.getuid() and
file_stats.st_mode & stat.S_IWUSR):
# We own this file and can write to it. We don't want the user
# script messing it up through the symlink.
try:
# Clear the user write bit, so the user can't accidentally
# clobber the file in the actual store through the symlink.
os.chmod(real_path, file_stats.st_mode ^ stat.S_IWUSR)
except OSError:
# If something goes wrong here (like us not having permission to
# change permissions), ignore it.
pass
def list_input_directory(self, input_path, recursive=False,
with_times=False):
"""
Loop over directories on the filesystem.
"""
RealtimeLogger.info("Enumerating {} from "
"FileIOStore in {}".format(input_path, self.path_prefix))
if not os.path.exists(os.path.join(self.path_prefix, input_path)):
# Nothing to list over
return
if not os.path.isdir(os.path.join(self.path_prefix, input_path)):
# Can't list a file, only a directory.
return
for item in os.listdir(os.path.join(self.path_prefix, input_path)):
if(recursive and os.path.isdir(os.path.join(self.path_prefix,
input_path, item))):
# We're recursing and this is a directory.
# Recurse on this.
for subitem in self.list_input_directory(
os.path.join(input_path, item), recursive):
# Make relative paths include this directory name and yield
# them
name_to_yield = os.path.join(item, subitem)
if with_times:
                        # What is the mtime in seconds since epoch? The real
                        # file lives under the store's path prefix.
                        mtime_epoch_seconds = os.path.getmtime(os.path.join(
                            self.path_prefix, input_path, item, subitem))
                        yield name_to_yield, mtime_epoch_seconds
else:
yield name_to_yield
else:
# This isn't a directory or we aren't being recursive
# Just report this individual item.
if with_times:
# What is the mtime in seconds since epoch?
                    mtime_epoch_seconds = os.path.getmtime(os.path.join(
                        self.path_prefix, input_path, item))
yield item, mtime_epoch_seconds
else:
yield item
def write_output_file(self, local_path, output_path):
"""
Write output to the filesystem
"""
RealtimeLogger.debug("Saving {} to FileIOStore in {}".format(
output_path, self.path_prefix))
# What's the real output path to write to?
real_output_path = os.path.join(self.path_prefix, output_path)
# What directory should this go in?
parent_dir = os.path.split(real_output_path)[0]
if parent_dir != "":
# Make sure the directory it goes in exists.
robust_makedirs(parent_dir)
# Make a temporary file
temp_handle, temp_path = tempfile.mkstemp(dir=self.path_prefix)
os.close(temp_handle)
# Copy to the temp file
shutil.copy2(local_path, temp_path)
if os.path.exists(real_output_path):
# At least try to get existing files out of the way first.
try:
os.unlink(real_output_path)
            except OSError:
pass
# Rename the temp file to the right place, atomically
os.rename(temp_path, real_output_path)
def exists(self, path):
"""
Returns true if the given input or output file exists in the file system
already.
"""
return os.path.exists(os.path.join(self.path_prefix, path))
def get_mtime(self, path):
"""
Returns the modification time of the given file if it exists, or None
otherwise.
"""
if not self.exists(path):
return None
# What is the mtime in seconds since epoch?
mtime_epoch_seconds = os.path.getmtime(os.path.join(self.path_prefix,
path))
# Convert it to datetime
mtime_datetime = datetime.datetime.utcfromtimestamp(
mtime_epoch_seconds).replace(tzinfo=dateutil.tz.tzutc())
# Return the modification time, timezoned, in UTC
return mtime_datetime
def get_size(self, path):
"""
Returns the size in bytes of the given file if it exists, or None
otherwise.
"""
if not self.exists(path):
return None
# Return the size in bytes of the backing file
return os.stat(os.path.join(self.path_prefix, path)).st_size
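# Hedged usage sketch (added for illustration; not part of the original
# module): a FileIOStore round-trip against temporary directories. The file
# and key names below are purely illustrative.
def _example_file_iostore_roundtrip():
    store_dir = tempfile.mkdtemp()
    scratch_dir = tempfile.mkdtemp()
    local_file = os.path.join(scratch_dir, "hello.txt")
    with open(local_file, "w") as out_file:
        out_file.write("hello")
    store = FileIOStore(store_dir)
    # Copy the file into the store under a relative key, then back out again.
    store.write_output_file(local_file, "results/hello.txt")
    fetched_file = os.path.join(scratch_dir, "fetched.txt")
    store.read_input_file("results/hello.txt", fetched_file)
    assert store.exists("results/hello.txt")
    assert store.get_size("results/hello.txt") == 5
    return fetched_file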
class BackoffError(RuntimeError):
"""
Represents an error from running out of retries during exponential back-off.
"""
def backoff_times(retries, base_delay):
"""
A generator that yields times for random exponential back-off. You have to
do the exception handling and sleeping yourself. Stops when the retries run
out.
"""
# Don't wait at all before the first try
yield 0
# What retry are we on?
try_number = 1
# Make a delay that increases
delay = float(base_delay) * 2
while try_number <= retries:
        # Wait a random amount between base_delay and 2^try_number * base_delay
yield random.uniform(base_delay, delay)
delay *= 2
try_number += 1
# If we get here, we're stopping iteration without succeeding. The caller
# will probably raise an error.
def backoff(original_function, retries=6, base_delay=10):
"""
We define a decorator that does randomized exponential back-off up to a
certain number of retries. Raises BackoffError if the operation doesn't
succeed after backing off for the specified number of retries (which may be
float("inf")).
Unfortunately doesn't really work on generators.
"""
# Make a new version of the function
@functools.wraps(original_function)
def new_function(*args, **kwargs):
# Call backoff times, overriding parameters with stuff from kwargs
for delay in backoff_times(retries=kwargs.get("retries", retries),
base_delay=kwargs.get("base_delay", base_delay)):
# Keep looping until it works or our iterator raises a
# BackoffError
if delay > 0:
# We have to wait before trying again
RealtimeLogger.error("Retry after {} seconds".format(
delay))
time.sleep(delay)
try:
return original_function(*args, **kwargs)
            except Exception:
# Report the formatted underlying exception with traceback
RealtimeLogger.error("{} failed due to: {}".format(
original_function.__name__,
"".join(traceback.format_exception(*sys.exc_info()))))
# If we get here, the function we're calling never ran through before we
# ran out of backoff times. Give an error.
raise BackoffError("Ran out of retries calling {}".format(
original_function.__name__))
return new_function
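# Hedged usage sketch (added for illustration; not part of the original
# module): decorating a flaky operation with the back-off wrapper above, the
# same way the AzureIOStore methods further down use it. The function below is
# illustrative only.
@backoff
def _example_fetch_with_retries(io_store, key, local_path):
    # Any exception raised here is logged and retried with randomized
    # exponential back-off; BackoffError is raised once the retries run out.
    io_store.read_input_file(key, local_path)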
class S3IOStore(IOStore):
"""
A class that lets you get input from and send output to AWS S3 Storage.
"""
def __init__(self, region, bucket_name, name_prefix=""):
"""
        Make a new S3IOStore that reads from and writes to the given
        bucket in the given region, adding the given prefix to keys. All
        paths will be interpreted as keys or key prefixes.
        """
        # Make sure the S3 libraries (boto3/botocore) actually loaded
assert(have_s3)
self.region = region
self.bucket_name = bucket_name
self.name_prefix = name_prefix
self.s3 = None
def __connect(self):
"""
Make sure we have an S3 Bucket connection, and set one up if we don't.
Creates the S3 bucket if it doesn't exist.
"""
if self.s3 is None:
            RealtimeLogger.debug("Connecting to bucket {} in region {}".format(
                self.bucket_name, self.region))
# Configure boto3 for caching assumed role credentials with the same cache Toil uses
botocore_session = botocore.session.get_session()
botocore_session.get_component('credential_provider').get_provider('assume-role').cache = botocore.credentials.JSONFileCache()
boto3_session = boto3.Session(botocore_session=botocore_session)
# Connect to the s3 bucket service where we keep everything
self.s3 = boto3_session.client('s3')
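            # Make sure the bucket exists, creating it in this store's region
            # if the HEAD check fails.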
try:
self.s3.head_bucket(Bucket=self.bucket_name)
except:
self.s3.create_bucket(Bucket=self.bucket_name,
CreateBucketConfiguration={'LocationConstraint':self.region})
def read_input_file(self, input_path, local_path):
"""
Get input from S3.
"""
self.__connect()
RealtimeLogger.debug("Loading {} from S3IOStore".format(
input_path))
# Download the file contents.
self.s3.download_file(self.bucket_name, os.path.join(self.name_prefix, input_path), local_path)
def list_input_directory(self, input_path, recursive=False,
with_times=False):
"""
Yields each of the subdirectories and files in the given input path.
If recursive is false, yields files and directories in the given
directory. If recursive is true, yields all files contained within the
current directory, recursively, but does not yield folders.
If with_times is True, yields (name, modification time) pairs instead of
just names, with modification times represented as datetime objects in
the GMT timezone. Modification times may be None on objects that do not
support them.
Gives relative file/directory names.
"""
raise NotImplementedError()
def write_output_file(self, local_path, output_path):
"""
Write output to S3.
"""
self.__connect()
RealtimeLogger.debug("Saving {} to S3IOStore".format(
output_path))
        # Upload the file contents.
self.s3.upload_file(local_path, self.bucket_name, os.path.join(self.name_prefix, output_path))
def exists(self, path):
"""
Returns true if the given input or output file exists in the store
already.
"""
raise NotImplementedError()
def get_mtime(self, path):
"""
Returns the modification time of the given file if it exists, or None
otherwise.
"""
raise NotImplementedError()
def get_size(self, path):
"""
Returns the size in bytes of the given file if it exists, or None
otherwise.
"""
raise NotImplementedError()
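# Hedged sketch (added for illustration; not part of the original module):
# exists() and get_size() above are left unimplemented. Assuming a boto3 S3
# client like the one built in __connect(), they could be backed by
# head_object, roughly as in this standalone helper. All names here are
# illustrative only.
def _example_s3_key_size(s3_client, bucket_name, key):
    """
    Return the size in bytes of the given key, or None if it does not exist.
    """
    from botocore.exceptions import ClientError
    try:
        response = s3_client.head_object(Bucket=bucket_name, Key=key)
    except ClientError as err:
        if err.response.get("Error", {}).get("Code") == "404":
            # HEAD on a missing key comes back as a plain 404.
            return None
        raise
    return response["ContentLength"]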
class AzureIOStore(IOStore):
"""
A class that lets you get input from and send output to Azure Storage.
"""
def __init__(self, account_name, container_name, name_prefix=""):
"""
Make a new AzureIOStore that reads from and writes to the given
container in the given account, adding the given prefix to keys. All
paths will be interpreted as keys or key prefixes.
If the name prefix does not end with a trailing slash, and is not empty,
one will be added automatically.
Account keys are retrieved from the AZURE_ACCOUNT_KEY environment
variable or from the ~/.toilAzureCredentials file, as in Toil itself.
"""
# Make sure azure libraries actually loaded
assert(have_azure)
self.account_name = account_name
self.container_name = container_name
self.name_prefix = name_prefix
if self.name_prefix != "" and not self.name_prefix.endswith("/"):
# Make sure it has the trailing slash required.
self.name_prefix += "/"
# Sneak into Toil and use the same keys it uses
self.account_key = toil.jobStores.azureJobStore._fetchAzureAccountKey(
self.account_name)
        # This will hold our Azure blob store connection
self.connection = None
def __getstate__(self):
"""
Return the state to use for pickling. We don't want to try and pickle
an open Azure connection.
"""
return (self.account_name, self.account_key, self.container_name,
self.name_prefix)
def __setstate__(self, state):
"""
Set up after unpickling.
"""
self.account_name = state[0]
self.account_key = state[1]
self.container_name = state[2]
self.name_prefix = state[3]
self.connection = None
def __connect(self):
"""
Make sure we have an Azure connection, and set one up if we don't.
"""
if self.connection is None:
RealtimeLogger.debug("Connecting to account {}, using "
"container {} and prefix {}".format(self.account_name,
self.container_name, self.name_prefix))
# Connect to the blob service where we keep everything
self.connection = BlobService(
account_name=self.account_name, account_key=self.account_key)
@backoff
def read_input_file(self, input_path, local_path):
"""
Get input from Azure.
"""
self.__connect()
RealtimeLogger.debug("Loading {} from AzureIOStore".format(
input_path))
# Download the blob. This is known to be synchronous, although it can
# call a callback during the process.
self.connection.get_blob_to_path(self.container_name,
self.name_prefix + input_path, local_path)
def list_input_directory(self, input_path, recursive=False,
with_times=False):
"""
        Loop over fake /-delimited directories on Azure. The prefix may or may
        not have a trailing slash; if not, one will be added automatically.
Returns the names of files and fake directories in the given input fake
directory, non-recursively.
If with_times is specified, will yield (name, time) pairs including
modification times as datetime objects. Times on directories are None.
"""
self.__connect()
RealtimeLogger.info("Enumerating {} from AzureIOStore".format(
input_path))
# Work out what the directory name to list is
fake_directory = self.name_prefix + input_path
if fake_directory != "" and not fake_directory.endswith("/"):
# We have a nonempty prefix, and we need to end it with a slash
fake_directory += "/"
# This will hold the marker that we need to send back to get the next
# page, if there is one. See <http://stackoverflow.com/a/24303682>
marker = None
# This holds the subdirectories we found; we yield each exactly once if
# we aren't recursing.
subdirectories = set()
while True:
# Get the results from Azure. We don't use delimiter since Azure
# doesn't seem to provide the placeholder entries it's supposed to.
result = self.connection.list_blobs(self.container_name,
prefix=fake_directory, marker=marker)
RealtimeLogger.info("Found {} files".format(len(result)))
for blob in result:
# Yield each result's blob name, but directory names only once
# Drop the common prefix
relative_path = blob.name[len(fake_directory):]
if (not recursive) and "/" in relative_path:
# We found a file in a subdirectory, and we aren't supposed
# to be recursing.
subdirectory, _ = relative_path.split("/", 1)
if subdirectory not in subdirectories:
# It's a new subdirectory. Yield and remember it
subdirectories.add(subdirectory)
if with_times:
yield subdirectory, None
else:
yield subdirectory
else:
# We found an actual file
if with_times:
mtime = blob.properties.last_modified
if isinstance(mtime, datetime.datetime):
# Make sure we're getting proper localized datetimes
# from the new Azure Storage API.
assert(mtime.tzinfo is not None and
mtime.tzinfo.utcoffset(mtime) is not None)
else:
# Convert mtime from a string as in the old API.
mtime = dateutil.parser.parse(mtime).replace(
tzinfo=dateutil.tz.tzutc())
yield relative_path, mtime
else:
yield relative_path
# Save the marker
marker = result.next_marker
if not marker:
break
@backoff
def write_output_file(self, local_path, output_path):
"""
Write output to Azure. Will create the container if necessary.
"""
self.__connect()
RealtimeLogger.debug("Saving {} to AzureIOStore".format(
output_path))
try:
# Make the container
self.connection.create_container(self.container_name)
except azure.WindowsAzureConflictError:
# The container probably already exists
pass
# Upload the blob (synchronously)
# TODO: catch no container error here, make the container, and retry
self.connection.put_block_blob_from_path(self.container_name,
self.name_prefix + output_path, local_path)
@backoff
def exists(self, path):
"""
Returns true if the given input or output file exists in Azure already.
"""
self.__connect()
marker = None
while True:
try:
# Make the container
self.connection.create_container(self.container_name)
except azure.WindowsAzureConflictError:
# The container probably already exists
pass
# Get the results from Azure.
result = self.connection.list_blobs(self.container_name,
prefix=self.name_prefix + path, marker=marker)
for blob in result:
# Look at each blob
if blob.name == self.name_prefix + path:
# Found it
return True
# Save the marker
marker = result.next_marker
if not marker:
break
return False
@backoff
def get_mtime(self, path):
"""
Returns the modification time of the given blob if it exists, or None
otherwise.
"""
self.__connect()
marker = None
while True:
# Get the results from Azure.
result = self.connection.list_blobs(self.container_name,
prefix=self.name_prefix + path, marker=marker)
for blob in result:
# Look at each blob
if blob.name == self.name_prefix + path:
# Found it
mtime = blob.properties.last_modified
if isinstance(mtime, datetime.datetime):
# Make sure we're getting proper localized datetimes
# from the new Azure Storage API.
assert(mtime.tzinfo is not None and
mtime.tzinfo.utcoffset(mtime) is not None)
else:
# Convert mtime from a string as in the old API.
mtime = dateutil.parser.parse(mtime).replace(
tzinfo=dateutil.tz.tzutc())
return mtime
# Save the marker
marker = result.next_marker
if not marker:
break
return None
@backoff
def get_size(self, path):
"""
Returns the size in bytes of the given blob if it exists, or None
otherwise.
"""
self.__connect()
marker = None
while True:
# Get the results from Azure.
result = self.connection.list_blobs(self.container_name,
prefix=self.name_prefix + path, marker=marker)
for blob in result:
# Look at each blob
if blob.name == self.name_prefix + path:
# Found it
size = blob.properties.content_length
return size
# Save the marker
marker = result.next_marker
if not marker:
break
return None
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for sparse operator"""
import numpy as np
import tvm
import topi
import topi.testing
from topi.util import get_const_tuple
import tvm.contrib.sparse as tvmsp
from collections import namedtuple
import time
import scipy.sparse as sp
def verify_dynamic_csrmv(batch, in_dim, out_dim, use_bias=True):
nr, nc, n = tvm.var("nr"), tvm.var("nc"), tvm.var("n")
dtype = 'float32'
A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name='A')
B = tvm.placeholder((in_dim, 1), name='B')
C = tvm.placeholder((nr,), name='C')
D = topi.sparse.csrmv(A, B, C if use_bias else None)
s = tvm.create_schedule(D.op)
dtype = A.dtype
# get the test data
def get_ref_data():
a_np = np.maximum(np.random.uniform(size=(batch, in_dim)).astype(dtype)-0.5, 0.)
b_np = np.random.uniform(size=(in_dim, 1)).astype(dtype)-0.5
c_np = np.random.uniform(size=(batch, )).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
else:
d_np = np.dot(a_np, b_np)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvmsp.array(a_np, ctx)
_nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
assert a.shape[0] == a.indptr.shape[0]-1
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(c_np, ctx)
d = tvm.nd.array(np.zeros((_nr, 1), dtype=dtype), ctx)
assert a.data.dtype == A.data.dtype
assert a.indices.dtype == A.indices.dtype
assert a.indptr.dtype == A.indptr.dtype
f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmv")
f(_nr, a.data, a.indices, a.indptr, b, c, d)
tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-4, atol=1e-4)
for device in ["llvm"]:
check_device(device)
def verify_dynamic_csrmm(batch, in_dim, out_dim, use_bias=True):
nr, nc, n = tvm.var("nr"), tvm.var("nc"), tvm.var("n")
dtype = 'float32'
A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name='A')
B = tvm.placeholder((in_dim, out_dim), name='B')
C = tvm.placeholder((nr,), name='C')
D = topi.sparse.csrmm(A, B, C if use_bias else None)
s = tvm.create_schedule(D.op)
dtype = A.dtype
# get the test data
def get_ref_data():
a_np = np.maximum(np.random.uniform(size=(batch, in_dim)).astype(dtype)-0.5, 0.)
b_np = np.random.uniform(size=(in_dim, out_dim)).astype(dtype)-0.5
c_np = np.random.uniform(size=(batch, )).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
else:
d_np = np.dot(a_np, b_np)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvmsp.array(a_np, ctx)
_nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
assert a.shape[0] == a.indptr.shape[0]-1
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(c_np, ctx)
d = tvm.nd.array(np.zeros((_nr, out_dim), dtype=dtype), ctx)
f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmm")
f(_nr, a.data, a.indices, a.indptr, b, c, d)
tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-2, atol=1e-2)
for device in ["llvm"]:
check_device(device)
def verify_dense_si(batch, in_dim, out_dim, use_bias=True, dtype='float32'):
nonzeros = tvm.var('nonzeros')
A = tvmsp.placeholder(shape=(batch, in_dim), nonzeros=nonzeros, dtype=dtype, name='A')
B = tvm.placeholder((out_dim, in_dim), dtype=dtype, name='B')
C = tvm.placeholder((out_dim,), dtype=dtype, name='C')
D = topi.sparse.dense(A, B, C if use_bias else None)
s = tvm.create_schedule(D.op)
# get the test data
def get_ref_data():
mag = 10.
a_np = np.maximum(mag*(np.random.uniform(size=(batch, in_dim)).astype('float32')-0.5), 0.).astype(dtype)
b_np = (mag*(np.random.uniform(size=(out_dim, in_dim)).astype('float32')-.5)).astype(dtype)
c_np = (mag*(np.random.uniform(size=(out_dim,)).astype('float32')-.5)).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np.T) + c_np
else:
d_np = np.dot(a_np, b_np.T)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvmsp.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(c_np, ctx)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), ctx)
f = tvm.build(s, [A.data, A.indices, A.indptr, B, C, D], device, name="dense")
f(a.data, a.indices, a.indptr, b, c, d)
tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-4, atol=1e-4)
check_device('llvm')
def verify_dense_sw(batch, in_dim, out_dim, use_bias=True, dtype='float32'):
nonzeros = tvm.var('nonzeros')
A = tvm.placeholder((batch, in_dim), dtype=dtype, name='A')
B = tvmsp.placeholder(shape=(out_dim, in_dim), nonzeros=nonzeros, dtype=dtype, name='B')
C = tvm.placeholder((out_dim,), dtype=dtype, name='C')
D = topi.sparse.dense(A, B, C if use_bias else None)
s = tvm.create_schedule(D.op)
# get the test data
def get_ref_data():
mag = 10.
a_np = (mag*(np.random.uniform(size=(batch, in_dim)).astype('float32')-.5)).astype(dtype)
b_np = np.maximum(mag*(np.random.uniform(size=(out_dim, in_dim)).astype('float32')-0.5), 0.).astype(dtype)
c_np = (mag*(np.random.uniform(size=(out_dim,)).astype('float32')-.5)).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np.T) + c_np
else:
d_np = np.dot(a_np, b_np.T)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvm.nd.array(a_np, ctx)
b = tvmsp.array(b_np, ctx)
c = tvm.nd.array(c_np, ctx)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), ctx)
f = tvm.build(s, [A, B.data, B.indices, B.indptr, C, D], device, name="dense")
f(a, b.data, b.indices, b.indptr, c, d)
tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-4, atol=1e-4)
check_device('llvm')
def test_csrmv():
verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, use_bias=False)
verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, use_bias=True)
def test_csrmm():
M, K, N = 5, 7, 2
verify_dynamic_csrmm(batch=M, in_dim=K, out_dim=N, use_bias=False)
verify_dynamic_csrmm(batch=M, in_dim=K, out_dim=N, use_bias=True)
def test_dense_si():
M, K, N = 3, 5, 2
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype='float32')
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype='float32')
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype='int32')
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype='int32')
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype='int16')
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype='int16')
def test_dense_sw():
M, K, N = 3, 5, 2
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype='float32')
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype='float32')
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype='int32')
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype='int32')
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype='int16')
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype='int16')
def test_dense():
test_dense_si()
test_dense_sw()
def test_sparse_dense_csr():
M, N, K, density = 1, 17, 47, 0.2
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = sp.random(N, K, density=density, format='csr', dtype="float32")
W_np = W_sp_np.todense()
Y_np = X_np.dot(W_np.T)
W_data = tvm.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = tvm.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = tvm.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = tvm.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr)
s = tvm.create_schedule(Y.op)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.ndarray.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
func(tvm.ndarray.array(X_np), tvm.ndarray.array(W_sp_np.data), tvm.ndarray.array(W_sp_np.indices), tvm.ndarray.array(W_sp_np.indptr), Y_tvm)
tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np, atol=1e-4, rtol=1e-4)
def test_sparse_transpose_csr():
N, density = 1023, 0.3
X_sp = sp.random(N, N, density=density, format='csr', dtype='float32')
X_sp_T = X_sp.transpose()
X_np_T = X_sp_T.todense()
X_data = tvm.placeholder(shape=X_sp.data.shape, dtype=str(X_sp.data.dtype))
X_indices = tvm.placeholder(shape=X_sp.indices.shape, dtype=str(X_sp.indices.dtype))
X_indptr = tvm.placeholder(shape=X_sp.indptr.shape, dtype=str(X_sp.indptr.dtype))
X_T_data, X_T_indices, X_T_indptr = topi.nn.sparse_transpose(X_data, X_indices, X_indptr)
s = tvm.create_schedule([X_T_data.op, X_T_indices.op, X_T_indptr.op])
func = tvm.build(s, [X_data, X_indices, X_indptr, X_T_data, X_T_indices, X_T_indptr])
X_T_data_tvm = tvm.ndarray.array(np.zeros(X_sp_T.data.shape, dtype=X_sp_T.data.dtype))
X_T_indices_tvm = tvm.ndarray.array(np.zeros(X_sp_T.indices.shape, dtype=X_sp_T.indices.dtype))
X_T_indptr_tvm = tvm.ndarray.array(np.zeros(X_sp_T.indptr.shape, dtype=X_sp_T.indptr.dtype))
func(tvm.ndarray.array(X_sp.data), tvm.ndarray.array(X_sp.indices), tvm.ndarray.array(X_sp.indptr),
X_T_data_tvm, X_T_indices_tvm, X_T_indptr_tvm)
X_T_out = sp.csr_matrix((X_T_data_tvm.asnumpy(), X_T_indices_tvm.asnumpy(), X_T_indptr_tvm.asnumpy()), shape=(N,N)).todense()
tvm.testing.assert_allclose(X_np_T, X_T_out, atol=1e-4, rtol=1e-4)
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype):
import itertools
Y = np.zeros((M, N), dtype=dtype)
assert M % BS_R == 0
assert N % BS_C == 0
nnz = int(density * M * N)
num_blocks = int(nnz / (BS_R * BS_C)) + 1
candidate_blocks = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C))))
assert candidate_blocks.shape[0] == M // BS_R * N // BS_C
chosen_blocks = candidate_blocks[np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)]
for i in range(len(chosen_blocks)):
r, c = chosen_blocks[i]
Y[r:r + BS_R, c:c + BS_C] = np.random.randn(BS_R, BS_C)
s = sp.bsr_matrix(Y, blocksize=(BS_R, BS_C))
assert s.data.shape == (num_blocks, BS_R, BS_C)
assert s.indices.shape == (num_blocks, )
assert s.indptr.shape == (M // BS_R + 1, )
return s
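# Hedged usage sketch (added for illustration; not part of the original
# tests): the helper above returns a scipy.sparse.bsr_matrix whose
# data/indices/indptr arrays are exactly what the BSR sparse_dense path
# consumes. The sizes below are illustrative only.
def _example_bsr_layout():
    w = random_bsr_matrix(16, 32, 4, 8, density=0.5, dtype="float32")
    # data holds one (BS_R, BS_C) block per stored block, indices holds each
    # block's block-column, and indptr has one entry per block-row plus one.
    return w.data.shape, w.indices.shape, w.indptr.shape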
def test_sparse_dense_bsr():
M, N, K, BS_R, BS_C, density = 1, 64, 128, 8, 16, 0.9
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
Y_np = X_np.dot(W_np.T)
W_data = tvm.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = tvm.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = tvm.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = tvm.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr)
s = tvm.create_schedule(Y.op)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.ndarray.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
func(tvm.ndarray.array(X_np),
tvm.ndarray.array(W_sp_np.data),
tvm.ndarray.array(W_sp_np.indices),
tvm.ndarray.array(W_sp_np.indptr),
Y_tvm)
tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np, atol=1e-4, rtol=1e-4)
def test_sparse_dense_bsr_randomized():
for _ in range(20):
BS_R = np.random.randint(1, 16)
BS_C = np.random.randint(1, 16)
M = np.random.randint(1, 32)
N = int(np.random.randint(1, 16) * BS_R)
K = int(np.random.randint(1, 16) * BS_C)
density = np.clip(np.random.random(), 0.1, 0.9)
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
Y_np = np.array(X_np.dot(W_np.T))
W_data = tvm.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = tvm.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = tvm.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = tvm.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr)
s = tvm.create_schedule(Y.op)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.ndarray.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
func(tvm.ndarray.array(X_np),
tvm.ndarray.array(W_sp_np.data),
tvm.ndarray.array(W_sp_np.indices),
tvm.ndarray.array(W_sp_np.indptr),
Y_tvm)
tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np, atol=1e-5, rtol=1e-5)
def test_sparse_dense():
test_sparse_dense_csr()
test_sparse_dense_bsr()
test_sparse_dense_bsr_randomized()
if __name__ == "__main__":
test_csrmv()
test_csrmm()
test_dense()
test_sparse_dense()
test_sparse_transpose_csr()
|
|
import os
import operator
import tensorflow as tf
import models
import time
import numpy as np
from datetime import datetime
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_string('train_dir', './multigpu-trained',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('batch_size', 1024, """Number of images to process in a batch.""")
tf.app.flags.DEFINE_integer('epochs', 40, """Max epochs for training.""")
tf.app.flags.DEFINE_integer('log_step', 10, """Log step""")
tf.app.flags.DEFINE_integer('eval_step', 1, """Evaluate step of epoch""")
tf.app.flags.DEFINE_string('device_ids', '', """Device ids, split by comma, e.g. 0,1""")
#tf.app.flags.DEFINE_string('data_dir', '/home/comp/csshshi/data/tensorflow/MNIST_data/',
tf.app.flags.DEFINE_string('data_dir', os.environ['HOME']+'/data/tensorflow/MNIST_data/',
#tf.app.flags.DEFINE_string('data_dir', '/home/comp/pengfeixu/Data/tensorflow/MNIST_data/',
"""Path to the data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
tf.app.flags.DEFINE_boolean('log_device_placement', True,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('num_gpus', 2, """How many GPUs to use.""")
tf.app.flags.DEFINE_string('local_ps_device', 'GPU', """Local parameter server device: GPU if GPUs are peered, otherwise CPU; try both.""")
tf.app.flags.DEFINE_boolean('use_dataset', False,
"""Whether to use datasets vs. feed_dict.""")
tf.app.flags.DEFINE_boolean('xla', False,
"""True to use XLA, which has to be compiled in.""")
EPOCH_SIZE = 60000
TEST_SIZE = 10000
def createFakeData(count, featureDim, labelDim):
features = np.random.randn(count, featureDim)
labels = np.random.randint(0, labelDim, size=(count, 1))
return features, labels
features, labels = createFakeData(1024, 32*32*3, 10)
def getFakeMinibatch(minibatchSize, labelDim):
feat = features[:minibatchSize]
l = labels[:minibatchSize]
lab = np.zeros((minibatchSize, labelDim))
for i in range(lab.shape[0]):
lab[i][l[i]] = 1
return feat, lab
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
def get_real_batch_data(batch_size, label_dim):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
return batch_xs, batch_ys
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for single_grads in zip(*tower_grads):
grads = [g for g, _ in single_grads]
grad = tf.add_n(grads)
grad = tf.multiply(grad, 1.0/len(grads))
v = single_grads[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
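# Hedged illustration (added for clarity; not part of the benchmark script):
# what average_gradients() computes for a single shared variable, written out
# with plain numpy. The values are made up.
def _example_average_gradients():
    g_tower0 = np.array([1.0, 2.0])  # gradient of some variable w on tower 0
    g_tower1 = np.array([3.0, 4.0])  # gradient of the same w on tower 1
    # tf.add_n followed by tf.multiply(grad, 1.0/len(grads)) is the element-wise mean.
    g_avg = (g_tower0 + g_tower1) / 2.0  # -> [2.0, 3.0]
    return g_avg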
def train(model='fcn5'):
config = tf.ConfigProto(allow_soft_placement=True,log_device_placement=FLAGS.log_device_placement)
if FLAGS.xla:
# Turns on XLA. XLA is not included in the standard build. For single GPU this shows ~5% improvement
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Graph().as_default(), tf.device("/" + FLAGS.local_ps_device + ":0"):
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
device_ids = FLAGS.device_ids
if not device_ids:
device_ids = [str(i) for i in range(FLAGS.num_gpus)]
else:
device_ids = device_ids.split(',')
lr = 0.05
#optimizer = tf.train.GradientDescentOptimizer(lr)
optimizer = tf.train.MomentumOptimizer(lr, 0.9)
def assign_to_device(device, ps_device=FLAGS.local_ps_device):
worker_device = device
ps_sizes = [0]
            if FLAGS.local_ps_device.lower() == 'gpu':
ps_sizes = [0] * FLAGS.num_gpus
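            # _assign greedily places each new variable on the least-loaded
            # parameter-server device (tracked by element count in ps_sizes)
            # and leaves every other op on the worker device.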
def _assign(op):
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return worker_device
device_index, _ = min(enumerate(
ps_sizes), key=operator.itemgetter(1))
device_name = '/' + FLAGS.local_ps_device +':' + str(device_index)
var_size = op.outputs[0].get_shape().num_elements()
ps_sizes[device_index] += var_size
return device_name
return _assign
images = None
labels = None
if FLAGS.use_dataset:
with tf.device('/CPU:0'):
d_features = mnist.train.images
d_labels = mnist.train.labels
dataset = tf.contrib.data.Dataset.from_tensor_slices((d_features, d_labels))
dataset = dataset.shuffle(buffer_size=60000)
dataset = dataset.repeat()
dataset = dataset.batch(FLAGS.batch_size)
                # Trick to get the dataset to buffer the next epoch. This is needed because
                # the data loading is occurring outside DataSets in python. Normally preprocessing
                # would occur in DataSets and this odd-looking line is not needed.
dataset = dataset.map(lambda x,y:(x,y),
num_threads=FLAGS.num_gpus,
output_buffer_size=FLAGS.num_gpus)
iterator = dataset.make_initializable_iterator()
images,labels = iterator.get_next()
tower_grads = []
feed_vars = []
average_loss_tensor = []
reuse_variables = False
accuracy = None
for i in xrange(FLAGS.num_gpus):
with tf.device(assign_to_device('/gpu:%s'%device_ids[i])):
with tf.name_scope('%s_%s' % ('TOWER', device_ids[i])) as scope:
if not FLAGS.use_dataset:
feature_dim = models.feature_dim
label_dim = models.label_dim
images = tf.placeholder(tf.float32, [None, feature_dim], name='images')
labels = tf.placeholder(tf.int64, [None, label_dim], name='labels')
feed_vars.append((images, labels))
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
logits = models.model_fcn5(images)
if i == 0:
# Prediction only on GPU:0
predictionCorrectness = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(predictionCorrectness, "float"))
loss = models.loss(logits, labels)
reuse_variables = True
average_loss_tensor.append(loss)
grads = optimizer.compute_gradients(loss)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
train_op = apply_gradient_op
average_op = tf.reduce_mean(average_loss_tensor)
saver = tf.train.Saver(tf.global_variables())
init = tf.global_variables_initializer()
sess = tf.Session(config=config)
sess.run(init)
if FLAGS.use_dataset:
sess.run(iterator.initializer)
real_batch_size = FLAGS.batch_size * FLAGS.num_gpus
num_batches_per_epoch = int((EPOCH_SIZE + real_batch_size - 1)/ real_batch_size)
iterations = FLAGS.epochs * num_batches_per_epoch
average_batch_time = 0.0
epochs_info = []
step = 0
average_loss = 0.0
for step in range(iterations):
start_time = time.time()
feed_dict = {}
if not FLAGS.use_dataset:
imgs, labs = get_real_batch_data(real_batch_size, 10)
for i in range(FLAGS.num_gpus):
feed_dict[feed_vars[i][0]] = imgs[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]
feed_dict[feed_vars[i][1]] = labs[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]
_, loss_value = sess.run([train_op, average_op], feed_dict=feed_dict)
duration = time.time() - start_time
average_batch_time += float(duration)
average_loss += loss_value
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % FLAGS.log_step == 0:
examples_per_sec = (FLAGS.batch_size * FLAGS.num_gpus) / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
print (format_str % (datetime.now(), step, loss_value, examples_per_sec, sec_per_batch))
if step > 0 and step % (FLAGS.eval_step * num_batches_per_epoch) == 0:
average_loss /= num_batches_per_epoch * FLAGS.eval_step
print ('epoch: %d, loss: %.2f' % (step/(FLAGS.eval_step*num_batches_per_epoch), average_loss))
epochs_info.append('%d:-:%s'%(step/(FLAGS.eval_step*num_batches_per_epoch), average_loss))
average_loss = 0.0
feed_dict = { images: mnist.test.images, labels :mnist.test.labels }
if not FLAGS.use_dataset:
feed_dict = {}
feed_dict[feed_vars[0][0]] = mnist.test.images
feed_dict[feed_vars[0][1]] = mnist.test.labels
accuracy_value = accuracy.eval(session=sess, feed_dict=feed_dict)
print("test accuracy %g"%accuracy_value)
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
average_batch_time /= iterations
        print('average_batch_time: %s' % average_batch_time)
print ('epoch_info: %s' % ','.join(epochs_info))
def main(argv=None):
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
train(model='fcn5')
if __name__ == '__main__':
tf.app.run()
|
|
# sqlite/pysqlite.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite+pysqlite
:name: pysqlite
:dbapi: sqlite3
:connectstring: sqlite+pysqlite:///file_path
:url: https://docs.python.org/library/sqlite3.html
Note that ``pysqlite`` is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
The ``sqlite3`` Python DBAPI is standard on all modern Python versions;
for CPython and PyPy, no additional installation is necessary.
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database"
portion of the URL. Note that the format of a SQLAlchemy url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to
the **right** of the third slash. So connecting to a relative filepath
looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\path\\to\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is
present. Specify ``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
.. _pysqlite_uri_connections:
URI Connections
^^^^^^^^^^^^^^^
Modern versions of SQLite support an alternative system of connecting using a
`driver level URI <https://www.sqlite.org/uri.html>`_, which has the advantage
that additional driver-level arguments can be passed including options such as
"read only". The Python sqlite3 driver supports this mode under modern Python
3 versions. The SQLAlchemy pysqlite driver supports this mode of use by
specifying "uri=true" in the URL query string. The SQLite-level "URI" is kept
as the "database" portion of the SQLAlchemy url (that is, following a slash)::
e = create_engine("sqlite:///file:path/to/database?mode=ro&uri=true")
.. note:: The "uri=true" parameter must appear in the **query string**
of the URL. It will not currently work as expected if it is only
present in the :paramref:`_sa.create_engine.connect_args`
parameter dictionary.
The logic reconciles the simultaneous presence of SQLAlchemy's query string and
SQLite's query string by separating out the parameters that belong to the
Python sqlite3 driver vs. those that belong to the SQLite URI. This is
achieved through the use of a fixed list of parameters known to be accepted by
the Python side of the driver. For example, to include a URL that indicates
the Python sqlite3 "timeout" and "check_same_thread" parameters, along with the
SQLite "mode" and "nolock" parameters, they can all be passed together on the
query string::
e = create_engine(
"sqlite:///file:path/to/database?"
"check_same_thread=true&timeout=10&mode=ro&nolock=1&uri=true"
)
Above, the pysqlite / sqlite3 DBAPI would be passed arguments as::
sqlite3.connect(
"file:path/to/database?mode=ro&nolock=1",
check_same_thread=True, timeout=10, uri=True
)
Regarding future parameters added to either the Python or native drivers, new
parameter names added to the SQLite URI scheme should be automatically
accommodated by this scheme. New parameter names added to the Python driver
side can be accommodated by specifying them in the
:paramref:`_sa.create_engine.connect_args` dictionary,
until dialect support is
added by SQLAlchemy. For the less likely case that the native SQLite driver
adds a new parameter name that overlaps with one of the existing, known Python
driver parameters (such as "timeout" perhaps), SQLAlchemy's dialect would
require adjustment for the URL scheme to continue to support this.
As is always the case for all SQLAlchemy dialects, the entire "URL" process
can be bypassed in :func:`_sa.create_engine` through the use of the
:paramref:`_sa.create_engine.creator`
parameter which allows for a custom callable
that creates a Python sqlite3 driver level connection directly.
.. versionadded:: 1.3.9
.. seealso::
`Uniform Resource Identifiers <https://www.sqlite.org/uri.html>`_ - in
the SQLite documentation
.. _pysqlite_regexp:
Regular Expression Support
---------------------------
.. versionadded:: 1.4
Support for the :meth:`_sql.ColumnOperators.regexp_match` operator is provided
using Python's re.search_ function. SQLite itself does not include a working
regular expression operator; instead, it includes a non-implemented placeholder
operator ``REGEXP`` that calls a user-defined function that must be provided.
SQLAlchemy's implementation makes use of the pysqlite create_function_ hook
as follows::
def regexp(a, b):
return re.search(a, b) is not None
sqlite_connection.create_function(
"regexp", 2, regexp,
)
There is currently no support for regular expression flags as a separate
argument, as these are not supported by SQLite's REGEXP operator; however, these
may be included inline within the regular expression string. See `Python regular expressions`_ for
details.
.. seealso::
`Python regular expressions`_: Documentation for Python's regular expression syntax.
.. _create_function: https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.create_function
.. _re.search: https://docs.python.org/3/library/re.html#re.search
.. _Python regular expressions: https://docs.python.org/3/library/re.html#re.search
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect that any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types':
sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types... confused yet?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result
processing.
.. _pysqlite_threading_pooling:
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is originally intended to work with older
versions of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that
pysqlite connections are still not safe to use concurrently in multiple
threads. In particular, any statement execution calls would need to be
externally mutexed, as Pysqlite does not provide for thread-safe propagation
of error messages among other things. So while even ``:memory:`` databases
can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.SingletonThreadPool`. This pool maintains a single
connection per thread, so that all access to the engine within the current
thread uses the same ``:memory:`` database - other threads would access a
different ``:memory:`` database.
* When a file-based database is specified, the dialect will use
:class:`.NullPool` as the source of connections. This pool closes and
discards connections which are returned to the pool immediately. SQLite
file-based connections have extremely low overhead, so pooling is not
necessary. The scheme also prevents a connection from being used again in
a different thread and works best with SQLite's coarse-grained file locking.
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same
connection object must be shared among threads, since the database exists
only within the scope of that connection. The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
:meth:`.Session.rollback` is called, a pool which maintains a single
connection must be used. Use :class:`.SingletonThreadPool` if the scope is
only needed within the current thread, or :class:`.StaticPool` if the scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
closed out in a non deterministic way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets,
never plain strings, and accommodates ``unicode`` objects within bound
parameter values in all cases. Regardless of the SQLAlchemy string type in
use, string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
Dealing with Mixed String / Binary Columns in Python 3
------------------------------------------------------
The SQLite database is weakly typed, and as such it is possible when using
binary values, which in Python 3 are represented as ``b'some string'``, that a
particular SQLite database can have data values within different rows where
some of them will be returned as a ``b''`` value by the Pysqlite driver, and
others will be returned as Python strings, e.g. ``''`` values. This situation
is not known to occur if the SQLAlchemy :class:`.LargeBinary` datatype is used
consistently, however if a particular SQLite database has data that was
inserted using the Pysqlite driver directly, or when using the SQLAlchemy
:class:`.String` type which was later changed to :class:`.LargeBinary`, the
table will not be consistently readable because SQLAlchemy's
:class:`.LargeBinary` datatype does not handle strings so it has no way of
"encoding" a value that is in string format.
To deal with a SQLite table that has mixed string / binary data in the
same column, use a custom type that will check each row individually::
# note this is Python 3 only
from sqlalchemy import String
from sqlalchemy import TypeDecorator
class MixedBinary(TypeDecorator):
impl = String
cache_ok = True
def process_result_value(self, value, dialect):
if isinstance(value, str):
value = bytes(value, 'utf-8')
elif value is not None:
value = bytes(value)
return value
Then use the above ``MixedBinary`` datatype in the place where
:class:`.LargeBinary` would normally be used.
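For example, a minimal sketch of such a table (the table and column names
here are purely illustrative) might look like::

    from sqlalchemy import Column, Integer, MetaData, Table

    metadata = MetaData()

    blobs = Table(
        "blobs",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("payload", MixedBinary),
    )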
.. _pysqlite_serializable:
Serializable isolation / Savepoints / Transactional DDL
-------------------------------------------------------
In the section :ref:`sqlite_concurrency`, we refer to the pysqlite
driver's assortment of issues that prevent several features of SQLite
from working correctly. The pysqlite DBAPI driver has several
long-standing bugs which impact the correctness of its transactional
behavior. In its default mode of operation, SQLite features such as
SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are
non-functional, and in order to use these features, workarounds must
be taken.
The issue is essentially that the driver attempts to second-guess the user's
intent, failing to start transactions and sometimes ending them prematurely, in
an effort to minimize the SQLite database's file locking behavior, even
though SQLite itself uses "shared" locks for read-only activities.
SQLAlchemy chooses to not alter this behavior by default, as it is the
long-expected behavior of the pysqlite driver; if and when the pysqlite
driver attempts to repair these issues, that will be more of a driver towards
changing SQLAlchemy's defaults.
The good news is that with a few events, we can implement transactional
support fully, by disabling pysqlite's feature entirely and emitting BEGIN
ourselves. This is achieved using two event listeners::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db")
@event.listens_for(engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.exec_driver_sql("BEGIN")
.. warning:: When using the above recipe, it is advised to not use the
:paramref:`.Connection.execution_options.isolation_level` setting on
:class:`_engine.Connection` and :func:`_sa.create_engine`
with the SQLite driver,
as this function necessarily will also alter the ".isolation_level" setting.
Above, we intercept a new pysqlite connection and disable any transactional
integration. Then, at the point at which SQLAlchemy knows that transaction
scope is to begin, we emit ``"BEGIN"`` ourselves.
When we take control of ``"BEGIN"``, we can also control directly SQLite's
locking modes, introduced at
`BEGIN TRANSACTION <https://sqlite.org/lang_transaction.html>`_,
by adding the desired locking mode to our ``"BEGIN"``::
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.exec_driver_sql("BEGIN EXCLUSIVE")
.. seealso::
`BEGIN TRANSACTION <https://sqlite.org/lang_transaction.html>`_ -
on the SQLite site
`sqlite3 SELECT does not BEGIN a transaction <https://bugs.python.org/issue9924>`_ -
on the Python bug tracker
`sqlite3 module breaks transactions and potentially corrupts data <https://bugs.python.org/issue10740>`_ -
on the Python bug tracker
""" # noqa
import os
import re
from .base import DATE
from .base import DATETIME
from .base import SQLiteDialect
from ... import exc
from ... import pool
from ... import types as sqltypes
from ... import util
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = "qmark"
supports_statement_cache = True
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date: _SQLite_pysqliteDate,
sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
},
)
if not util.py2k:
description_encoding = None
driver = "pysqlite"
@classmethod
def dbapi(cls):
if util.py2k:
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
try:
from sqlite3 import dbapi2 as sqlite
except ImportError as e:
raise e
else:
from sqlite3 import dbapi2 as sqlite
return sqlite
@classmethod
def _is_url_file_db(cls, url):
if (url.database and url.database != ":memory:") and (
url.query.get("mode", None) != "memory"
):
return True
else:
return False
@classmethod
def get_pool_class(cls, url):
if cls._is_url_file_db(url):
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
_isolation_lookup = SQLiteDialect._isolation_lookup.union(
{
"AUTOCOMMIT": None,
}
)
def set_isolation_level(self, connection, level):
if hasattr(connection, "dbapi_connection"):
dbapi_connection = connection.dbapi_connection
else:
dbapi_connection = connection
if level == "AUTOCOMMIT":
dbapi_connection.isolation_level = None
else:
dbapi_connection.isolation_level = ""
return super(SQLiteDialect_pysqlite, self).set_isolation_level(
connection, level
)
def on_connect(self):
connect = super(SQLiteDialect_pysqlite, self).on_connect()
def regexp(a, b):
if b is None:
return None
return re.search(a, b) is not None
def set_regexp(connection):
if hasattr(connection, "dbapi_connection"):
dbapi_connection = connection.dbapi_connection
else:
dbapi_connection = connection
dbapi_connection.create_function(
"regexp",
2,
regexp,
)
fns = [set_regexp]
if self.isolation_level is not None:
def iso_level(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(iso_level)
def connect(conn):
for fn in fns:
fn(conn)
return connect
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,)
)
# theoretically, this list can be augmented, at least as far as
# parameter names accepted by sqlite3/pysqlite, using
# inspect.getfullargspec(). for the moment this seems like overkill
# as these parameters don't change very often, and as always,
# parameters passed to connect_args will always go to the
# sqlite3/pysqlite driver.
pysqlite_args = [
("uri", bool),
("timeout", float),
("isolation_level", str),
("detect_types", int),
("check_same_thread", bool),
("cached_statements", int),
]
opts = url.query
pysqlite_opts = {}
for key, type_ in pysqlite_args:
util.coerce_kw_type(opts, key, type_, dest=pysqlite_opts)
if pysqlite_opts.get("uri", False):
uri_opts = dict(opts)
# here, we are actually separating the parameters that go to
            # sqlite3/pysqlite vs. those that go to the SQLite URI. What if
# two names conflict? again, this seems to be not the case right
# now, and in the case that new names are added to
# either side which overlap, again the sqlite3/pysqlite parameters
# can be passed through connect_args instead of in the URL.
# If SQLite native URIs add a parameter like "timeout" that
# we already have listed here for the python driver, then we need
# to adjust for that here.
for key, type_ in pysqlite_args:
uri_opts.pop(key, None)
filename = url.database
if uri_opts:
# sorting of keys is for unit test support
filename += "?" + (
"&".join(
"%s=%s" % (key, uri_opts[key])
for key in sorted(uri_opts)
)
)
else:
filename = url.database or ":memory:"
if filename != ":memory:":
filename = os.path.abspath(filename)
return ([filename], pysqlite_opts)
def is_disconnect(self, e, connection, cursor):
return isinstance(
e, self.dbapi.ProgrammingError
) and "Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
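
# Illustrative sketch (not part of the original dialect module): how the URL forms
# validated in create_connect_args() above, the connect_args passthrough for the
# options listed in pysqlite_args, and the "AUTOCOMMIT" isolation level fit together
# at engine-creation time. Paths and option values below are placeholders.
def _example_pysqlite_engines():
    from sqlalchemy import create_engine

    # in-memory database: get_pool_class() above selects SingletonThreadPool
    memory_engine = create_engine("sqlite://")

    # file database: get_pool_class() selects NullPool; driver-level options such as
    # "timeout" and "check_same_thread" can be passed through connect_args
    file_engine = create_engine(
        "sqlite:///relative/path/to/file.db",
        connect_args={"timeout": 30.0, "check_same_thread": False},
    )

    # "AUTOCOMMIT" maps to isolation_level=None on the DBAPI connection,
    # as handled in set_isolation_level() above
    autocommit_engine = file_engine.execution_options(isolation_level="AUTOCOMMIT")
    return memory_engine, file_engine, autocommit_engine
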
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CustomLocationsOperations:
"""CustomLocationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.extendedlocation.v2021_08_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_operations(
self,
**kwargs: Any
) -> AsyncIterable["_models.CustomLocationOperationsList"]:
"""Lists all available Custom Locations operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomLocationOperationsList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.extendedlocation.v2021_08_15.models.CustomLocationOperationsList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomLocationOperationsList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_operations.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CustomLocationOperationsList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_operations.metadata = {'url': '/providers/Microsoft.ExtendedLocation/operations'} # type: ignore
def list_by_subscription(
self,
**kwargs: Any
) -> AsyncIterable["_models.CustomLocationListResult"]:
"""Gets a list of Custom Locations in a subscription.
Gets a list of Custom Locations in the specified subscription. The operation returns properties
of each Custom Location.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomLocationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.extendedlocation.v2021_08_15.models.CustomLocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomLocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CustomLocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ExtendedLocation/customLocations'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.CustomLocationListResult"]:
"""Gets a list of Custom Locations in the specified subscription and resource group.
Gets a list of Custom Locations in the specified subscription and resource group. The operation
returns properties of each Custom Location.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomLocationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.extendedlocation.v2021_08_15.models.CustomLocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomLocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CustomLocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ExtendedLocation/customLocations'} # type: ignore
async def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CustomLocation":
"""Gets a Custom Location.
Gets the details of the customLocation with a specified resource group and name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: Custom Locations name.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomLocation, or the result of cls(response)
:rtype: ~azure.mgmt.extendedlocation.v2021_08_15.models.CustomLocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomLocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-15"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomLocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ExtendedLocation/customLocations/{resourceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.CustomLocation",
**kwargs: Any
) -> "_models.CustomLocation":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomLocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CustomLocation')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CustomLocation', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('CustomLocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ExtendedLocation/customLocations/{resourceName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.CustomLocation",
**kwargs: Any
) -> AsyncLROPoller["_models.CustomLocation"]:
"""Creates or updates a Custom Location.
Creates or updates a Custom Location in the specified Subscription and Resource Group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: Custom Locations name.
:type resource_name: str
:param parameters: Parameters supplied to create or update a Custom Location.
:type parameters: ~azure.mgmt.extendedlocation.v2021_08_15.models.CustomLocation
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CustomLocation or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.extendedlocation.v2021_08_15.models.CustomLocation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomLocation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CustomLocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ExtendedLocation/customLocations/{resourceName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-15"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ExtendedLocation/customLocations/{resourceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a Custom Location.
Deletes the Custom Location with the specified Resource Name, Resource Group, and Subscription
Id.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: Custom Locations name.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ExtendedLocation/customLocations/{resourceName}'} # type: ignore
async def update(
self,
resource_group_name: str,
resource_name: str,
identity: Optional["_models.Identity"] = None,
tags: Optional[Dict[str, str]] = None,
authentication: Optional["_models.CustomLocationPropertiesAuthentication"] = None,
cluster_extension_ids: Optional[List[str]] = None,
display_name: Optional[str] = None,
host_resource_id: Optional[str] = None,
host_type: Optional[Union[str, "_models.HostType"]] = None,
namespace: Optional[str] = None,
provisioning_state: Optional[str] = None,
**kwargs: Any
) -> "_models.CustomLocation":
"""Updates a Custom Location.
Updates a Custom Location with the specified Resource Name in the specified Resource Group and
Subscription.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: Custom Locations name.
:type resource_name: str
:param identity: Identity for the resource.
:type identity: ~azure.mgmt.extendedlocation.v2021_08_15.models.Identity
:param tags: Resource tags.
:type tags: dict[str, str]
:param authentication: This is optional input that contains the authentication that should be
used to generate the namespace.
:type authentication: ~azure.mgmt.extendedlocation.v2021_08_15.models.CustomLocationPropertiesAuthentication
:param cluster_extension_ids: Contains the reference to the add-on that contains charts to
deploy CRDs and operators.
:type cluster_extension_ids: list[str]
:param display_name: Display name for the Custom Locations location.
:type display_name: str
:param host_resource_id: Connected Cluster or AKS Cluster. The Custom Locations RP will perform
a checkAccess API for listAdminCredentials permissions.
:type host_resource_id: str
:param host_type: Type of host the Custom Locations is referencing (Kubernetes, etc...).
:type host_type: str or ~azure.mgmt.extendedlocation.v2021_08_15.models.HostType
:param namespace: Kubernetes namespace that will be created on the specified cluster.
:type namespace: str
:param provisioning_state: Provisioning State for the Custom Location.
:type provisioning_state: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomLocation, or the result of cls(response)
:rtype: ~azure.mgmt.extendedlocation.v2021_08_15.models.CustomLocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomLocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_parameters = _models.PatchableCustomLocations(identity=identity, tags=tags, authentication=authentication, cluster_extension_ids=cluster_extension_ids, display_name=display_name, host_resource_id=host_resource_id, host_type=host_type, namespace=namespace, provisioning_state=provisioning_state)
api_version = "2021-08-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_parameters, 'PatchableCustomLocations')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomLocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ExtendedLocation/customLocations/{resourceName}'} # type: ignore
def list_enabled_resource_types(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.EnabledResourceTypesListResult"]:
"""Gets the list of Enabled Resource Types.
Gets the list of the Enabled Resource Types.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: Custom Locations name.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnabledResourceTypesListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.extendedlocation.v2021_08_15.models.EnabledResourceTypesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnabledResourceTypesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_enabled_resource_types.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('EnabledResourceTypesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_enabled_resource_types.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ExtendedLocation/customLocations/{resourceName}/enabledResourceTypes'} # type: ignore
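
# Illustrative sketch (not part of the generated code): consuming the operations class
# above through the async management client. The client and credential class names
# (CustomLocationsManagementClient, DefaultAzureCredential) and the `custom_locations`
# attribute are assumptions about the surrounding SDK packages, not defined in this file.
async def _example_custom_locations_usage(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.extendedlocation.aio import CustomLocationsManagementClient

    credential = DefaultAzureCredential()
    async with CustomLocationsManagementClient(credential, subscription_id) as client:
        # list_by_subscription() returns an AsyncItemPaged; pages are fetched lazily
        async for location in client.custom_locations.list_by_subscription():
            print(location.name)

        # begin_delete() returns an AsyncLROPoller; awaiting result() waits for the
        # long-running operation to finish
        poller = await client.custom_locations.begin_delete(
            resource_group_name="example-rg",
            resource_name="example-location",
        )
        await poller.result()
    await credential.close()
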
|
|
import datetime
import functools
import json
import urllib.parse
import pytz
from django.conf import settings
from django.contrib.staticfiles import finders
from django.urls import reverse, reverse_lazy
from django.db import models
from django.utils.timezone import make_naive
from django.utils.translation import get_language, gettext, gettext_lazy as _
from core.models import (
BigForeignKey, EventInfo,
ConferenceRelated, DefaultConferenceManagerMixin,
)
from core.utils import format_html_lazy
from proposals.models import TalkProposal, TutorialProposal, PrimarySpeaker
from sponsors.models import Sponsor
MIDNIGHT_TIME = datetime.time(tzinfo=pytz.timezone('Asia/Taipei'))
EVENT_DATETIME_START_END = (
datetime.datetime.combine(
min(settings.EVENTS_DAY_NAMES.keys()),
MIDNIGHT_TIME,
),
datetime.datetime.combine(
max(settings.EVENTS_DAY_NAMES.keys()) + datetime.timedelta(days=1),
MIDNIGHT_TIME,
),
)
class TimeManager(models.Manager):
def get(self, value):
"""We only has one field, so let's make it available without keyword.
"""
return super().get(value=value)
class LimitedTimeManager(TimeManager):
def get_queryset(self):
"""Limit times to those in the current conference's time.
"""
qs = super().get_queryset()
return qs.filter(value__range=EVENT_DATETIME_START_END)
@functools.total_ordering
class Time(models.Model):
value = models.DateTimeField(
primary_key=True,
verbose_name=_('value'),
)
objects = LimitedTimeManager()
all_objects = TimeManager()
class Meta:
verbose_name = _('time')
verbose_name_plural = _('times')
ordering = ['value']
def __str__(self):
return str(make_naive(self.value))
def __lt__(self, other):
if not isinstance(other, Time):
return NotImplemented
if (not isinstance(self.value, datetime.datetime) or
not isinstance(other.value, datetime.datetime)):
return NotImplemented
return self.value < other.value
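
# Illustrative sketch (not part of the original module): TimeManager.get() above takes
# the datetime positionally and forwards it as value=..., so both lookups below resolve
# the same row (assumes a configured Django project with a matching Time record).
def _example_time_lookup(dt):
    return Time.all_objects.get(dt), Time.all_objects.get(value=dt)
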
class Location:
"""All possible location combinations.
The numbering prefix helps to order events by locations. We need this
information when resolving events in the same time period.
Rules:
1. The R3 events are put first.
2. Belt and partial belt events are next, in that order.
3. Block events in R0-2 are next, in that order.
"""
R3 = '1-r3'
ALL = '2-all'
R012 = '3-r012'
R0 = '4-r0'
R1 = '5-r1'
R2 = '6-r2'
R4 = '7-r4'
OTHER = '8-oth'
@classmethod
def get_md_width(cls, value):
return {
'2-all': 4,
'3-r012': 3,
'4-r0': 1,
'5-r1': 1,
'6-r2': 1,
'1-r3': 1,
'7-r4': 1,
'8-oth': 1,
}[value]
EVENT_ROOMS = {Location.R0, Location.R1, Location.R2, Location.R3, Location.R4}
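
# Illustrative sketch (not part of the original module): the numeric prefixes on the
# Location values make a plain sort reproduce the precedence documented in the class
# docstring, which is how events sharing a time period are ordered deterministically.
def _example_location_ordering():
    values = [Location.R2, Location.OTHER, Location.ALL, Location.R3]
    assert sorted(values) == [Location.R3, Location.ALL, Location.R2, Location.OTHER]
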
class BaseEvent(ConferenceRelated):
"""Base interface for all events in the schedule.
"""
LOCATION_CHOICES = [
(Location.ALL, _('All rooms')),
(Location.R012, _('R1, R2, R3')),
(Location.R0, _('R1')),
(Location.R1, _('R2')),
(Location.R2, _('R3')),
(Location.R3, _('Multifunction room')),
(Location.R4, _('Goodideas Studio')),
(Location.OTHER, _('Other')),
]
location = models.CharField(
max_length=6,
choices=LOCATION_CHOICES,
blank=True,
null=True,
db_index=True,
verbose_name=_('location'),
)
begin_time = models.ForeignKey(
to=Time,
blank=True,
null=True,
related_name='begined_%(class)s_set',
verbose_name=_('begin time'),
on_delete=models.CASCADE,
)
end_time = models.ForeignKey(
to=Time,
blank=True,
null=True,
related_name='ended_%(class)s_set',
verbose_name=_('end time'),
on_delete=models.CASCADE,
)
class Meta:
abstract = True
class CustomEvent(BaseEvent):
title = models.CharField(
verbose_name=_('title'),
max_length=140,
)
break_event = models.BooleanField(
verbose_name=_('is break event'),
default=False,
help_text=_(
"Whether this event is displays as a break. A break can be "
"visually distinguished from \"real\" conference sessions, such "
"as keynotes, talks, etc.",
)
)
description = models.TextField(
verbose_name=_('event description'), blank=True,
)
link_path = models.CharField(
verbose_name=_('link path'),
max_length=255, blank=True,
)
class Meta:
verbose_name = _('custom event')
verbose_name_plural = _('custom events')
def __str__(self):
return self.title
class KeynoteEvent(BaseEvent):
speaker_name = models.CharField(
verbose_name=_('speaker name'),
max_length=100,
)
slug = models.SlugField(
verbose_name=_('slug'),
help_text=format_html_lazy(
_("This is used to link to the speaker's introduction on the "
"Keynote page, e.g. 'liang2' will link to "
"'{link}#keynote-speaker-liang2'."),
link=reverse_lazy('page', kwargs={'path': 'conference/keynotes'}),
)
)
is_remote = models.BooleanField(
verbose_name=_('is remote'),
default=False,
)
class Meta:
verbose_name = _('keynote event')
verbose_name_plural = _('keynote events')
def __str__(self):
        return gettext('Keynote: {speaker}').format(
            speaker=self.speaker_name,
        )
def get_absolute_url(self):
url = reverse('page', kwargs={'path': 'conference/keynotes'})
split = urllib.parse.urlsplit(url)
frag = 'keynote-speaker-{slug}'.format(slug=self.slug)
return urllib.parse.urlunsplit(split._replace(fragment=frag))
def get_static_data(self):
path = '/'.join([
settings.CONFERENCE_DEFAULT_SLUG,
'assets/keynotes',
f'{self.slug}.json',
])
keynote_info = finders.find(path)
if not keynote_info:
raise FileNotFoundError(path)
with open(keynote_info) as f:
data = json.load(f)
return data
def get_static_data_for_locale(self, code=None):
if code is None:
code = get_language()
code = code.split('-', 1)[0]
data = self.get_static_data()
data = {k: v[code] if isinstance(v, dict) and code in v else v for k, v in data.items()}
return data
class JobListingsEvent(BaseEvent):
sponsor = BigForeignKey(
to=Sponsor,
verbose_name=_("sponsor"),
on_delete=models.CASCADE,
)
class Meta:
verbose_name = _('Job Listings')
verbose_name_plural = _('Job Listings')
def __str__(self):
        return gettext('Open Role of Sponsor: {sponsor}').format(
            sponsor=self.sponsor,
        )
class SponsoredEvent(EventInfo, BaseEvent):
host = BigForeignKey(
to=settings.AUTH_USER_MODEL,
verbose_name=_('host'),
on_delete=models.CASCADE,
)
slug = models.SlugField(
allow_unicode=True,
verbose_name=_('slug'),
)
class Meta:
verbose_name = _('sponsored event')
verbose_name_plural = _('sponsored events')
def get_absolute_url(self):
return reverse('events_sponsored_event_detail', kwargs={
'slug': self.slug,
})
@property
def speakers(self):
yield PrimarySpeaker(user=self.host)
class ProposedEventManager(DefaultConferenceManagerMixin, models.Manager):
proposal_attr = 'proposal'
conference_attr = 'proposal__conference'
def get_queryset(self):
"""We almost always need the proposal info, so let's always JOIN it.
"""
return super().get_queryset().select_related(self.proposal_attr)
class ProposedTalkEvent(BaseEvent):
proposal = BigForeignKey(
to=TalkProposal,
limit_choices_to={'accepted': True},
verbose_name=_('proposal'),
on_delete=models.CASCADE,
unique=True,
)
is_remote = models.BooleanField(
verbose_name=_('is remote'),
default=False,
)
objects = ProposedEventManager()
class Meta:
verbose_name = _('talk event')
verbose_name_plural = _('talk events')
def __str__(self):
return self.proposal.title
def get_absolute_url(self):
return reverse('events_talk_detail', kwargs={'pk': self.proposal.pk})
class ProposedTutorialEvent(BaseEvent):
proposal = BigForeignKey(
to=TutorialProposal,
verbose_name=_('proposal'),
on_delete=models.CASCADE,
unique=True,
)
registration_link = models.URLField(
verbose_name=_('registration link'),
blank=True,
default='',
)
is_remote = models.BooleanField(
verbose_name=_('is remote'),
default=False,
)
objects = ProposedEventManager()
class Meta:
verbose_name = _('tutorial event')
verbose_name_plural = _('tutorial events')
def __str__(self):
return self.proposal.title
def get_absolute_url(self):
return reverse('events_tutorial_detail', kwargs={
'pk': self.proposal.pk,
})
class Schedule(ConferenceRelated):
html = models.TextField(
verbose_name=_('HTML'),
)
created_at = models.DateTimeField(
verbose_name=_('created at'),
auto_now_add=True,
)
class Meta:
verbose_name = _('Schedule')
verbose_name_plural = _('Schedules')
ordering = ['-created_at']
get_latest_by = 'created_at'
def __str__(self):
return gettext('Schedule created at {}').format(self.created_at)
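
# Illustrative sketch (not part of the original module): a typical read path over the
# models above (assumes a configured Django project). ProposedEventManager always
# select_related()s the proposal, so accessing proposal.title below adds no extra queries.
def _example_talks_in_room(location=Location.R0):
    talks = (
        ProposedTalkEvent.objects
        .filter(location=location)
        .order_by('begin_time')
    )
    return [
        (str(talk.begin_time), talk.proposal.title, talk.get_absolute_url())
        for talk in talks
    ]
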
|
|
# -*- coding: utf-8 -*-
from io import open
import os.path
import jinja2
import pystache
from statik.errors import *
from statik.utils import *
from statik import templatetags
import logging
logger = logging.getLogger(__name__)
__all__ = [
'StatikTemplateEngine',
'StatikTemplate',
'StatikTemplateProvider',
'StatikJinjaTemplate',
'StatikJinjaTemplateProvider',
'StatikMustacheTemplateProvider',
'StatikMustacheTemplate',
'DEFAULT_TEMPLATE_PROVIDERS',
'SAFER_TEMPLATE_PROVIDERS'
]
# our default template engine providers, in order of precedence
DEFAULT_TEMPLATE_PROVIDERS = [
"jinja2",
"mustache"
]
# template providers that we consider to be "safe"
SAFER_TEMPLATE_PROVIDERS = [
"mustache"
]
TEMPLATE_PROVIDER_EXTS = {
"jinja2": [".html.jinja2", ".jinja2", ".html"],
"mustache": [".html.mustache", ".mustache", ".html"]
}
def get_template_provider_class(provider):
provider_classes = {
"jinja2": StatikJinjaTemplateProvider,
"mustache": StatikMustacheTemplateProvider
}
return provider_classes[provider]
def template_exception_handler(fn, error_context, filename=None):
"""Calls the given function, attempting to catch any template-related errors, and
converts the error to a Statik TemplateError instance. Returns the result returned
by the function itself."""
error_message = None
if filename:
error_context.update(filename=filename)
try:
return fn()
except jinja2.TemplateSyntaxError as exc:
error_context.update(filename=exc.filename, line_no=exc.lineno)
error_message = exc.message
except jinja2.TemplateError as exc:
error_message = exc.message
except Exception as exc:
error_message = "%s" % exc
raise TemplateError(message=error_message, context=error_context)
class StatikTemplateEngine(object):
"""Provides a common interface to different underlying template engines. At present,
Jinja2 and Mustache templates are supported."""
def __init__(self, project, error_context=None):
"""Constructor.
Args:
project: The project to which this template engine relates.
"""
self.project = project
self.error_context = error_context or StatikErrorContext()
self.supported_providers = project.config.template_providers
if project.safe_mode:
self.supported_providers = [provider for provider in self.supported_providers \
if provider in SAFER_TEMPLATE_PROVIDERS]
if len(self.supported_providers) == 0:
raise NoSupportedTemplateProvidersError(
SAFER_TEMPLATE_PROVIDERS if project.safe_mode else DEFAULT_TEMPLATE_PROVIDERS,
project.safe_mode
)
self.provider_classes = dict()
self.providers_by_ext = dict()
self.exts = []
for provider in self.supported_providers:
self.provider_classes[provider] = get_template_provider_class(provider)
# track which provider to use for which file extension
for ext in TEMPLATE_PROVIDER_EXTS[provider]:
if ext not in self.providers_by_ext:
self.providers_by_ext[ext] = provider
self.exts.append(ext)
self.providers = dict()
self.cached_templates = dict()
# build up our expected template paths
# we allow the templates/ folder to take highest precedence
self.template_paths = [os.path.join(project.path, project.TEMPLATES_DIR)]
# if this project has a theme associated with it
if project.config.theme is not None:
self.template_paths.append(os.path.join(
project.path,
project.THEMES_DIR,
project.config.theme,
project.TEMPLATES_DIR
))
logger.debug(
"Looking in the following path(s) (in the following order) for templates:\n%s",
"\n".join(self.template_paths)
)
# now make sure that all of the relevant template paths actually exist
for path in self.template_paths:
if not os.path.exists(path) or not os.path.isdir(path):
raise MissingProjectFolderError(path)
logger.debug(
"Configured the following template providers: %s",
", ".join(self.supported_providers)
)
def get_provider(self, name):
"""Allows for lazy instantiation of providers (Jinja2 templating is heavy, so only instantiate it if
necessary)."""
if name not in self.providers:
cls = self.provider_classes[name]
# instantiate the provider
self.providers[name] = cls(self)
return self.providers[name]
def find_template_details(self, name):
base_path = None
name_with_ext = name
found_ext = None
for ext in self.exts:
if name.endswith(ext):
found_ext = ext
if found_ext is None:
base_path, found_ext = find_first_file_with_ext(self.template_paths, name, self.exts)
if base_path is None or found_ext is None:
raise MissingTemplateError(name=name)
name_with_ext = "%s%s" % (name, found_ext)
return name_with_ext, self.providers_by_ext[found_ext], base_path
def load_template(self, name):
"""Attempts to load the relevant template from our templating system/environment.
Args:
name: The name of the template to load.
Return:
On success, a StatikTemplate object that can be used to render content.
"""
# hopefully speeds up loading of templates a little, especially when loaded multiple times
if name in self.cached_templates:
logger.debug("Using cached template: %s", name)
return self.cached_templates[name]
logger.debug("Attempting to find template by name: %s", name)
name_with_ext, provider_name, base_path = self.find_template_details(name)
full_path = None
if base_path is not None:
full_path = os.path.join(base_path, name_with_ext)
# load it with the relevant provider
template = template_exception_handler(
lambda: self.get_provider(provider_name).load_template(
name_with_ext,
full_path=full_path
),
self.error_context,
filename=full_path
)
# cache it for potential later use
self.cached_templates[name] = template
return template
def create_template(self, s, provider_name=None):
"""Creates a template from the given string based on the specified provider or the provider with
highest precedence.
Args:
s: The string to convert to a template.
provider_name: The name of the provider to use to create the template.
"""
if provider_name is None:
provider_name = self.supported_providers[0]
return template_exception_handler(
lambda: self.get_provider(provider_name).create_template(s),
self.error_context
)
class StatikTemplate(object):
"""Abstract class to act as an interface to the underlying templating engine's templates."""
def __init__(self, filename, error_context=None):
self.filename = filename
self.error_context = error_context or StatikErrorContext()
def render(self, context):
return template_exception_handler(
lambda: self.do_render(context),
self.error_context,
filename=self.filename
)
def do_render(self, context):
"""Renders this template using the given context data."""
raise NotImplementedError("Must be implemented in subclasses")
class StatikTemplateProvider(object):
"""Abstract base class for all template providers."""
def __init__(self, engine, error_context=None):
"""Constructor."""
if not isinstance(engine, StatikTemplateEngine):
raise TypeError(
"Expecting a StatikTemplateEngine instance to initialise template provider"
)
self.engine = engine
self.error_context = error_context or StatikErrorContext()
def load_template(self, name, full_path=None):
"""Loads the template with the given name/filename."""
raise NotImplementedError("Must be implemented in subclasses")
def create_template(self, s):
"""Creates a template from the given string."""
raise NotImplementedError("Must be implemented in subclasses")
class StatikJinjaTemplateProvider(StatikTemplateProvider):
"""Template provider specifically for Jinja2."""
expected_template_exts = TEMPLATE_PROVIDER_EXTS["jinja2"]
def __init__(self, engine):
"""Constructor.
Args:
engine: The StatikTemplateEngine to which this template provider belongs.
"""
super(StatikJinjaTemplateProvider, self).__init__(engine)
project = engine.project
logger.debug("Instantiating Jinja2 template provider")
# now load our template tags
self.templatetags_path = os.path.join(project.path, project.TEMPLATETAGS_DIR)
if os.path.exists(self.templatetags_path) and os.path.isdir(self.templatetags_path):
# dynamically import modules; they register themselves with our template tag store
import_python_modules_by_path(self.templatetags_path)
extensions = [
'statik.jinja2ext.StatikUrlExtension',
'statik.jinja2ext.StatikAssetExtension',
'statik.jinja2ext.StatikLoremIpsumExtension',
'statik.jinja2ext.StatikTemplateTagsExtension',
'jinja2.ext.do',
'jinja2.ext.loopcontrols',
'jinja2.ext.with_',
'jinja2.ext.autoescape',
]
jinja2_config = project.config.vars.get('jinja2', dict())
extensions.extend(jinja2_config.get('extensions', list()))
self.env = jinja2.Environment(
loader=jinja2.FileSystemLoader(
engine.template_paths,
encoding=project.config.encoding
),
extensions=extensions
)
if templatetags.store.filters:
logger.debug(
"Loaded custom template tag filters: %s",
", ".join(templatetags.store.filters)
)
self.env.filters.update(templatetags.store.filters)
# configure views for the Jinja2 templating environment
self.env.statik_views = project.views
self.env.statik_base_url = project.config.base_path
self.env.statik_base_asset_url = add_url_path_component(
project.config.base_path,
project.config.assets_dest_path
)
def reattach_project_views(self):
if len(self.env.statik_views) == 0:
self.env.statik_views = self.engine.project.views
def load_template(self, name, full_path=None):
logger.debug("Attempting to load Jinja2 template: %s", name)
return StatikJinjaTemplate(self, self.env.get_template(name))
def create_template(self, s):
return StatikJinjaTemplate(self, self.env.from_string(s))
class StatikJinjaTemplate(StatikTemplate):
"""Wraps a simple Jinja2 template."""
def __init__(self, provider, template, **kwargs):
"""Constructor.
Args:
provider: The provider that created this template.
template: The Jinja2 template to wrap.
"""
super(StatikJinjaTemplate, self).__init__(template.filename, **kwargs)
self.provider = provider
self.template = template
def __repr__(self):
return "StatikJinjaTemplate(template=%s)" % self.template
def __str__(self):
return repr(self)
def do_render(self, context):
# make sure we lazily reattach our provider's environment to the project's views
self.provider.reattach_project_views()
return self.template.render(**context)
class StatikMustachePartialGetter(object):
def __init__(self, provider):
self.provider = provider
self.cache = dict()
def get(self, partial_name):
if partial_name in self.cache:
return self.cache[partial_name]
logger.debug("Attempting to load Mustache partial: %s", partial_name)
content = self.provider.load_template_content(partial_name)
self.cache[partial_name] = content
return content
class StatikMustacheTemplateProvider(StatikTemplateProvider):
"""Template provider specifically for Mustache templates."""
expected_template_exts = TEMPLATE_PROVIDER_EXTS["mustache"]
def __init__(self, engine, **kwargs):
super(StatikMustacheTemplateProvider, self).__init__(engine, **kwargs)
logger.debug("Instantiating Mustache template provider")
self.renderer = pystache.Renderer(partials=StatikMustachePartialGetter(self))
def load_template_content(self, name, full_path=None):
logger.debug("Attempting to load Mustache template: %s", name)
if full_path is None:
base_path, ext = find_first_file_with_ext(
self.engine.template_paths,
name,
self.expected_template_exts
)
if base_path is None or ext is None:
raise MissingTemplateError(
name=name,
kind="Mustache",
context=self.error_context
)
full_path = os.path.join(base_path, "%s%s" % (name, ext))
if not os.path.exists(full_path) or not os.path.isfile(full_path):
raise MissingTemplateError(
path=full_path,
kind="Mustache",
context=self.error_context
)
# read the template's content from the file
with open(full_path, encoding=self.engine.project.config.encoding) as f:
template_content = f.read()
return template_content
def load_template(self, name, full_path=None):
return self.create_template(
self.load_template_content(name, full_path=full_path),
filename=full_path
)
def create_template(self, s, filename=None):
return StatikMustacheTemplate(
pystache.parse(s),
self.renderer,
filename=filename
)
class StatikMustacheTemplate(StatikTemplate):
"""Wraps a simple Mustache template."""
def __init__(self, parsed_template, renderer, filename=None, error_context=None):
super(StatikMustacheTemplate, self).__init__(
filename,
error_context=error_context
)
self.parsed_template = parsed_template
self.renderer = renderer
def __repr__(self):
return "StatikMustacheTemplate(parsed_template=%s)" % self.parsed_template
def __str__(self):
return repr(self)
def do_render(self, context):
return self.renderer.render(self.parsed_template, context)
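
# Illustrative sketch (not part of the original module): rendering a one-off template
# string through the engine facade above. `project` is assumed to be an already-loaded
# statik project instance (with config, template paths, etc.) whose configured
# template_providers include "mustache" (one of DEFAULT_TEMPLATE_PROVIDERS).
def _example_render(project):
    engine = StatikTemplateEngine(project)
    # create_template() routes to the named provider; omitting provider_name falls back
    # to the highest-precedence supported provider.
    template = engine.create_template("Hello, {{ name }}!", provider_name="mustache")
    return template.render({"name": "Statik"})
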
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import keyword_only, since
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.param.shared import *
__all__ = ["FPGrowth", "FPGrowthModel"]
class HasMinSupport(Params):
"""
Mixin for param minSupport.
"""
minSupport = Param(
Params._dummy(),
"minSupport",
"Minimal support level of the frequent pattern. [0.0, 1.0]. " +
"Any pattern that appears more than (minSupport * size-of-the-dataset) " +
"times will be output in the frequent itemsets.",
typeConverter=TypeConverters.toFloat)
def setMinSupport(self, value):
"""
Sets the value of :py:attr:`minSupport`.
"""
return self._set(minSupport=value)
def getMinSupport(self):
"""
Gets the value of minSupport or its default value.
"""
return self.getOrDefault(self.minSupport)
class HasNumPartitions(Params):
"""
Mixin for param numPartitions: Number of partitions (at least 1) used by parallel FP-growth.
"""
numPartitions = Param(
Params._dummy(),
"numPartitions",
"Number of partitions (at least 1) used by parallel FP-growth. " +
"By default the param is not set, " +
"and partition number of the input dataset is used.",
typeConverter=TypeConverters.toInt)
def setNumPartitions(self, value):
"""
Sets the value of :py:attr:`numPartitions`.
"""
return self._set(numPartitions=value)
def getNumPartitions(self):
"""
Gets the value of :py:attr:`numPartitions` or its default value.
"""
return self.getOrDefault(self.numPartitions)
class HasMinConfidence(Params):
"""
Mixin for param minConfidence.
"""
minConfidence = Param(
Params._dummy(),
"minConfidence",
"Minimal confidence for generating Association Rule. [0.0, 1.0]. " +
"minConfidence will not affect the mining for frequent itemsets, " +
"but will affect the association rules generation.",
typeConverter=TypeConverters.toFloat)
def setMinConfidence(self, value):
"""
Sets the value of :py:attr:`minConfidence`.
"""
return self._set(minConfidence=value)
def getMinConfidence(self):
"""
Gets the value of minConfidence or its default value.
"""
return self.getOrDefault(self.minConfidence)
class HasItemsCol(Params):
"""
Mixin for param itemsCol: items column name.
"""
itemsCol = Param(Params._dummy(), "itemsCol",
"items column name", typeConverter=TypeConverters.toString)
def setItemsCol(self, value):
"""
Sets the value of :py:attr:`itemsCol`.
"""
return self._set(itemsCol=value)
def getItemsCol(self):
"""
Gets the value of itemsCol or its default value.
"""
return self.getOrDefault(self.itemsCol)
class FPGrowthModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Model fitted by FPGrowth.
.. versionadded:: 2.2.0
"""
@property
@since("2.2.0")
def freqItemsets(self):
"""
DataFrame with two columns:
* `items` - Itemset of the same type as the input column.
* `freq` - Frequency of the itemset (`LongType`).
"""
return self._call_java("freqItemsets")
@property
@since("2.2.0")
def associationRules(self):
"""
        DataFrame with three columns:
* `antecedent` - Array of the same type as the input column.
* `consequent` - Array of the same type as the input column.
* `confidence` - Confidence for the rule (`DoubleType`).
"""
return self._call_java("associationRules")
class FPGrowth(JavaEstimator, HasItemsCol, HasPredictionCol,
HasMinSupport, HasNumPartitions, HasMinConfidence,
JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
A parallel FP-growth algorithm to mine frequent itemsets. The algorithm is described in
Li et al., PFP: Parallel FP-Growth for Query Recommendation [LI2008]_.
PFP distributes computation in such a way that each worker executes an
independent group of mining tasks. The FP-Growth algorithm is described in
Han et al., Mining frequent patterns without candidate generation [HAN2000]_
.. [LI2008] http://dx.doi.org/10.1145/1454008.1454027
.. [HAN2000] http://dx.doi.org/10.1145/335191.335372
    .. note:: null values in the items column (:py:attr:`itemsCol`) are ignored during fit().
.. note:: Internally `transform` `collects` and `broadcasts` association rules.
>>> from pyspark.sql.functions import split
>>> data = (spark.read
... .text("data/mllib/sample_fpgrowth.txt")
... .select(split("value", "\s+").alias("items")))
>>> data.show(truncate=False)
+------------------------+
|items |
+------------------------+
|[r, z, h, k, p] |
|[z, y, x, w, v, u, t, s]|
|[s, x, o, n, r] |
|[x, z, y, m, t, s, q, e]|
|[z] |
|[x, z, y, r, q, t, p] |
+------------------------+
>>> fp = FPGrowth(minSupport=0.2, minConfidence=0.7)
>>> fpm = fp.fit(data)
>>> fpm.freqItemsets.show(5)
+---------+----+
| items|freq|
+---------+----+
| [s]| 3|
| [s, x]| 3|
|[s, x, z]| 2|
| [s, z]| 2|
| [r]| 3|
+---------+----+
only showing top 5 rows
>>> fpm.associationRules.show(5)
+----------+----------+----------+
|antecedent|consequent|confidence|
+----------+----------+----------+
| [t, s]| [y]| 1.0|
| [t, s]| [x]| 1.0|
| [t, s]| [z]| 1.0|
| [p]| [r]| 1.0|
| [p]| [z]| 1.0|
+----------+----------+----------+
only showing top 5 rows
>>> new_data = spark.createDataFrame([(["t", "s"], )], ["items"])
>>> sorted(fpm.transform(new_data).first().prediction)
['x', 'y', 'z']
.. versionadded:: 2.2.0
"""
@keyword_only
def __init__(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
predictionCol="prediction", numPartitions=None):
"""
__init__(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
predictionCol="prediction", numPartitions=None)
"""
super(FPGrowth, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.fpm.FPGrowth", self.uid)
self._setDefault(minSupport=0.3, minConfidence=0.8,
itemsCol="items", predictionCol="prediction")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
predictionCol="prediction", numPartitions=None):
"""
setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
predictionCol="prediction", numPartitions=None)
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return FPGrowthModel(java_model)
|
|
"""
Geographically weighted regression
"""
import numpy as np
from .gwr.base.gwr import GWR as PySAL_GWR
from .gwr.base.sel_bw import Sel_BW
import json
from crankshaft.analysis_data_provider import AnalysisDataProvider
import plpy
class GWR:
def __init__(self, data_provider=None):
if data_provider:
self.data_provider = data_provider
else:
self.data_provider = AnalysisDataProvider()
def gwr(self, subquery, dep_var, ind_vars,
bw=None, fixed=False, kernel='bisquare',
geom_col='the_geom', id_col='cartodb_id'):
"""
subquery: 'select * from demographics'
dep_var: 'pctbachelor'
ind_vars: ['intercept', 'pctpov', 'pctrural', 'pctblack']
bw: value of bandwidth, if None then select optimal
fixed: False (kNN) or True ('distance')
kernel: 'bisquare' (default), or 'exponential', 'gaussian'
"""
params = {'geom_col': geom_col,
'id_col': id_col,
'subquery': subquery,
'dep_var': dep_var,
'ind_vars': ind_vars}
# get data from data provider
query_result = self.data_provider.get_gwr(params)
# exit if data to analyze is empty
if len(query_result) == 0:
plpy.error('No data passed to analysis or independent variables '
'are all null-valued')
# unique ids and variable names list
rowid = np.array(query_result[0]['rowid'], dtype=np.int)
# x, y are centroids of input geometries
x = np.array(query_result[0]['x'], dtype=np.float)
y = np.array(query_result[0]['y'], dtype=np.float)
coords = list(zip(x, y))
# extract dependent variable
Y = np.array(query_result[0]['dep_var'], dtype=np.float).reshape((-1, 1))
n = Y.shape[0]
k = len(ind_vars)
X = np.zeros((n, k))
# extract query result
for attr in range(0, k):
attr_name = 'attr' + str(attr + 1)
X[:, attr] = np.array(
query_result[0][attr_name], dtype=np.float).flatten()
# add intercept variable name
ind_vars.insert(0, 'intercept')
# calculate bandwidth if none is supplied
if bw is None:
bw = Sel_BW(coords, Y, X,
fixed=fixed, kernel=kernel).search()
model = PySAL_GWR(coords, Y, X, bw,
fixed=fixed, kernel=kernel).fit()
# containers for outputs
coeffs = []
stand_errs = []
t_vals = []
filtered_t_vals = []
# extracted model information
c_alpha = model.adj_alpha
filtered_t = model.filter_tvals(c_alpha[1])
predicted = model.predy.flatten()
residuals = model.resid_response
r_squared = model.localR2.flatten()
bw = np.repeat(float(bw), n)
# create lists of json objs for model outputs
for idx in range(n):
coeffs.append(json.dumps({var: model.params[idx, k]
for k, var in enumerate(ind_vars)}))
stand_errs.append(json.dumps({var: model.bse[idx, k]
for k, var in enumerate(ind_vars)}))
t_vals.append(json.dumps({var: model.tvalues[idx, k]
for k, var in enumerate(ind_vars)}))
filtered_t_vals.append(
json.dumps({var: filtered_t[idx, k]
for k, var in enumerate(ind_vars)}))
return list(zip(coeffs, stand_errs, t_vals, filtered_t_vals,
predicted, residuals, r_squared, bw, rowid))
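    # gwr_predict() below follows the same pattern as gwr(), but rows whose
    # dependent variable is null mark the locations to predict; it returns one
    # (coeffs, stand_errs, t_vals, r_squared, predicted, rowid) tuple per
    # predicted row.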
def gwr_predict(self, subquery, dep_var, ind_vars,
bw=None, fixed=False, kernel='bisquare',
geom_col='the_geom', id_col='cartodb_id'):
"""
subquery: 'select * from demographics'
dep_var: 'pctbachelor'
ind_vars: ['intercept', 'pctpov', 'pctrural', 'pctblack']
bw: value of bandwidth, if None then select optimal
fixed: False (kNN) or True ('distance')
kernel: 'bisquare' (default), or 'exponential', 'gaussian'
"""
params = {'geom_col': geom_col,
'id_col': id_col,
'subquery': subquery,
'dep_var': dep_var,
'ind_vars': ind_vars}
# get data from data provider
query_result = self.data_provider.get_gwr_predict(params)
# exit if data to analyze is empty
if len(query_result) == 0:
plpy.error('No data passed to analysis or independent variables '
'are all null-valued')
# unique ids and variable names list
rowid = np.array(query_result[0]['rowid'], dtype=np.int)
x = np.array(query_result[0]['x'], dtype=np.float)
y = np.array(query_result[0]['y'], dtype=np.float)
coords = np.array(list(zip(x, y)), dtype=np.float)
# extract dependent variable
Y = np.array(query_result[0]['dep_var']).reshape((-1, 1))
n = Y.shape[0]
k = len(ind_vars)
X = np.empty((n, k), dtype=np.float)
for attr in range(0, k):
attr_name = 'attr' + str(attr + 1)
X[:, attr] = np.array(
query_result[0][attr_name], dtype=np.float).flatten()
# add intercept variable name
ind_vars.insert(0, 'intercept')
# split data into "training" and "test" for predictions
# create index to split based on null y values
train = np.where(Y != np.array(None))[0]
test = np.where(Y == np.array(None))[0]
# report error if there is no data to predict
if len(test) < 1:
plpy.error('No rows flagged for prediction: verify that rows '
'denoting prediction locations have a dependent '
'variable value of `null`')
# split dependent variable (only need training which is non-Null's)
Y_train = Y[train].reshape((-1, 1))
Y_train = Y_train.astype(np.float)
# split coords
coords_train = coords[train]
coords_test = coords[test]
# split explanatory variables
X_train = X[train]
X_test = X[test]
# calculate bandwidth if none is supplied
if bw is None:
bw = Sel_BW(coords_train, Y_train, X_train,
fixed=fixed, kernel=kernel).search()
# estimate model and predict at new locations
model = PySAL_GWR(coords_train, Y_train, X_train,
bw, fixed=fixed,
kernel=kernel).predict(coords_test, X_test)
coeffs = []
stand_errs = []
t_vals = []
r_squared = model.localR2.flatten()
predicted = model.predy.flatten()
m = len(model.predy)
for idx in range(m):
coeffs.append(json.dumps({var: model.params[idx, k]
for k, var in enumerate(ind_vars)}))
stand_errs.append(json.dumps({var: model.bse[idx, k]
for k, var in enumerate(ind_vars)}))
t_vals.append(json.dumps({var: model.tvalues[idx, k]
for k, var in enumerate(ind_vars)}))
return list(zip(coeffs, stand_errs, t_vals,
r_squared, predicted, rowid[test]))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
functions for mathematical calculations:
* transfer matrix for quad, drift, undulator, chicane, etc.
.. Tong Zhang
.. Aug. 11, 2015
.. Revised: 2016-05-11 11:12:20 AM CST
.. 1: expand 2 x 2 transport matrices to 6 x 6
"""
import numpy as np
from functools import reduce
def funTransQuadF(k, s):
""" Focusing quad in X, defocusing in Y
:param k: k1, in [T/m]
:param s: width, in [m]
:return: 2x2 numpy array
"""
sqrtk = np.sqrt(complex(k))
a = np.cos(sqrtk * s)
b = np.sin(sqrtk * s) / sqrtk
c = -sqrtk * np.sin(sqrtk * s)
d = np.cos(sqrtk * s)
return np.matrix([[a.real, b.real], [c.real, d.real]], dtype=np.double)
def funTransQuadD(k, s):
""" Defocusing quad in X, focusing in Y
:param k: k1, in [T/m]
:param s: width, in [m]
:return: 2x2 numpy array
"""
sqrtk = np.sqrt(complex(k))
a = np.cosh(sqrtk * s)
b = np.sinh(sqrtk * s) / sqrtk
c = sqrtk * np.sinh(sqrtk * s)
d = np.cosh(sqrtk * s)
return np.matrix([[a.real, b.real], [c.real, d.real]], dtype=np.double)
def funTransDrift(s):
""" Drift space
:param s: drift length, in [m]
:return: 2x2 numpy array
"""
return np.matrix([[1, s], [0, 1]], dtype=np.double)
def funTransUnduV(k, s):
""" Planar undulator transport matrix in vertical direction
:param k: equivalent k1, in [T/m], i.e. natural focusing
:param s: horizontal width, in [m]
:return: 2x2 numpy array
"""
m = funTransQuadF(k, s)
return m
def funTransUnduH(s):
""" Planar undulator transport matrix in horizontal direction
:param s: horizontal width, in [m]
:return: 2x2 numpy array
"""
return np.matrix([[1, s], [0, 1]], dtype=np.double)
def funTransEdgeX(theta, rho):
""" Fringe matrix in X
:param theta: fringe angle, in [rad]
:param rho: bend radius, in [m]
:return: 2x2 numpy array
"""
return np.matrix([[1, 0], [np.tan(theta) / rho, 1]], dtype=np.double)
def funTransEdgeY(theta, rho):
""" Fringe matrix in Y
:param theta: fringe angle, in [rad]
:param rho: bend radius, in [m]
:return: 2x2 numpy array
"""
return np.matrix([[1, 0], [-np.tan(theta) / rho, 1]], dtype=np.double)
def funTransSectX(theta, rho):
""" Sector matrix in X
:param theta: bend angle, in [rad]
:param rho: bend radius, in [m]
:return: 2x2 numpy array
"""
return np.matrix([[np.cos(theta), rho * np.sin(theta)], [-np.sin(theta) / rho, np.cos(theta)]], dtype=np.double)
def funTransSectY(theta, rho):
""" Sector matrix in Y
:param theta: bend angle, in [rad]
:param rho: bend radius, in [m]
:return: 2x2 numpy array
"""
return np.matrix([[1, rho * theta], [0, 1]], dtype=np.double)
def funTransChica(imagl, idril, ibfield, gamma0, xoy='x'):
""" Chicane matrix, composed of four rbends, seperated by drifts
:param imagl: rbend width, in [m]
:param idril: drift length between two adjacent rbends, in [m]
:param ibfield: rbend magnetic strength, in [T]
:param gamma0: electron energy, gamma
:param xoy: ``'x'`` or ``'y'``, matrix in X or Y direction, ``'x'`` by default
:return: 2x2 numpy array
"""
m0 = 9.10938215e-31
e0 = 1.602176487e-19
c0 = 299792458
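    # bending radius from p = e * B * rho with p = sqrt(gamma^2 - 1) * m0 * c,
    # i.e. rho = p / (e * B) for a relativistic electron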
rho = np.sqrt(gamma0 ** 2 - 1) * m0 * c0 / ibfield / e0
theta = np.arcsin(imagl / rho)
ld = idril
mx = reduce(np.dot, [funTransDrift(idril),
funTransSectX(theta, rho), funTransEdgeX(theta, rho),
funTransDrift(ld),
funTransEdgeX(-theta, -rho), funTransSectX(-theta, -rho),
funTransDrift(ld),
funTransSectX(-theta, -rho), funTransEdgeX(-theta, -rho),
funTransDrift(ld),
funTransEdgeX(theta, rho), funTransSectX(theta, rho),
funTransDrift(idril)])
my = reduce(np.dot, [funTransDrift(idril),
funTransSectY(theta, rho), funTransEdgeY(theta, rho),
funTransDrift(ld),
funTransEdgeY(-theta, -rho), funTransSectY(-theta, -rho),
funTransDrift(ld),
funTransSectY(-theta, -rho), funTransEdgeY(-theta, -rho),
funTransDrift(ld),
funTransEdgeY(theta, rho), funTransSectY(theta, rho),
funTransDrift(idril)])
if xoy == 'x':
m = mx
else:
m = my
return m
# 6 x 6 transport matrices
def transDrift(length=0.0, gamma=None):
""" Transport matrix of drift
:param length: drift length in [m]
:param gamma: electron energy, gamma value
:return: 6x6 numpy array
"""
m = np.eye(6, 6, dtype=np.float64)
if length == 0.0:
print("warning: 'length' should be a positive float number.")
elif gamma is not None and gamma != 0.0:
m[0, 1] = m[2, 3] = length
m[4, 5] = float(length) / gamma / gamma
else:
print("warning: 'gamma' should be a positive float number.")
return m
def transQuad(length=0.0, k1=0.0, gamma=None):
""" Transport matrix of quadrupole
:param length: quad width in [m]
:param k1: quad k1 strength in [T/m]
:param gamma: electron energy, gamma value
:return: 6x6 numpy array
"""
m = np.eye(6, 6, dtype=np.float64)
if length == 0.0:
print("warning: 'length' should be a positive float number.")
elif gamma is not None and gamma != 0.0:
if k1 == 0:
print("warning: 'k1' should be a positive float number.")
m[0, 1] = m[2, 3] = 1.0
m[4, 5] = float(length) / gamma / gamma
else:
sqrtk = np.sqrt(complex(k1))
sqrtkl = sqrtk * length
m[0, 0] = m[1, 1] = (np.cos(sqrtkl)).real
m[0, 1] = (np.sin(sqrtkl) / sqrtk).real
m[1, 0] = (-np.sin(sqrtkl) * sqrtk).real
m[2, 2] = m[3, 3] = (np.cosh(sqrtkl)).real
m[2, 3] = (np.sinh(sqrtkl) / sqrtk).real
m[3, 2] = (-np.sinh(sqrtkl) * sqrtk).real
m[4, 5] = float(length) / gamma / gamma
else:
print("warning: 'gamma' should be a positive float number.")
return m
def transSect(theta=None, rho=None, gamma=None):
""" Transport matrix of sector dipole
:param theta: bending angle in [RAD]
:param rho: bending radius in [m]
:param gamma: electron energy, gamma value
:return: 6x6 numpy array
"""
m = np.eye(6, 6, dtype=np.float64)
if None in (theta, rho, gamma):
print("warning: 'theta', 'rho', 'gamma' should be positive float numbers.")
return m
else:
rc = rho * np.cos(theta)
rs = rho * np.sin(theta)
m[0, 0] = m[1, 1] = rc / rho
m[0, 1] = rs
m[0, 5] = rho - rc
m[1, 0] = -np.sin(theta) / rho
m[1, 5] = rs / rho
m[2, 3] = rho * np.sin(theta)
m[4, 0] = m[1, 5]
m[4, 1] = m[0, 5]
m[4, 5] = rho * np.sin(theta) / gamma / gamma - rho * theta + rs
return m
def transRbend(theta=None, rho=None, gamma=None, incsym=-1):
""" Transport matrix of rectangle dipole
:param theta: bending angle in [RAD]
:param incsym: incident symmetry, -1 by default,
available options:
* -1: left half symmetry,
* 0: full symmetry,
* 1: right half symmetry
:param rho: bending radius in [m]
:param gamma: electron energy, gamma value
:return: 6x6 numpy array
"""
if None in (theta, rho, gamma):
print("warning: 'theta', 'rho', 'gamma' should be positive float numbers.")
m = np.eye(6, 6, dtype=np.float64)
return m
else:
beta12d = {'-1': (0, theta), '0': (theta * 0.5, theta * 0.5), '1': (theta, 0)}
(beta1, beta2) = beta12d[str(incsym)]
mf1 = transFringe(beta=beta1, rho=rho)
mf2 = transFringe(beta=beta2, rho=rho)
ms = transSect(theta=theta, rho=rho, gamma=gamma)
m = reduce(np.dot, [mf1, ms, mf2])
return m
def transFringe(beta=None, rho=None):
""" Transport matrix of fringe field
:param beta: angle of rotation of pole-face in [RAD]
:param rho: bending radius in [m]
:return: 6x6 numpy array
"""
m = np.eye(6, 6, dtype=np.float64)
if None in (beta, rho):
print("warning: 'theta', 'rho' should be positive float numbers.")
return m
else:
m[1, 0] = np.tan(beta) / rho
m[3, 2] = -np.tan(beta) / rho
return m
def transChicane(bend_length=None, bend_field=None, drift_length=None, gamma=None):
""" Transport matrix of chicane
composed of four rbends and three drifts between them
:param bend_length: rbend width in [m]
:param bend_field: rbend magnetic field in [T]
:param drift_length: drift length, list or tuple of three elements, in [m]
single float number stands for same length for three drifts
:param gamma: electron energy, gamma value
:return: 6x6 numpy array
"""
if None in (bend_length, bend_field, drift_length, gamma):
print("warning: 'bend_length', 'bend_field', 'drift_length', 'gamma' should be positive float numbers.")
m = np.eye(6, 6, dtype=np.float64)
return m
else:
if isinstance(drift_length, tuple) or isinstance(drift_length, list):
if len(drift_length) == 1:
dflist = drift_length * 3
elif len(drift_length) == 2:
dflist = []
dflist.extend(drift_length)
dflist.append(drift_length[0])
elif len(drift_length) >= 3:
dflist = drift_length[0:3]
if dflist[0] != dflist[-1]:
print("warning: chicane is not symmetric.")
else:
print("drift_length is not a valid list or tuple.")
else:
dflist = []
dflist.extend([drift_length, drift_length, drift_length])
m0 = 9.10938215e-31
e0 = 1.602176487e-19
c0 = 299792458.0
rho = np.sqrt(gamma ** 2 - 1) * m0 * c0 / bend_field / e0
theta = np.arcsin(bend_length / rho)
m_rb_1 = transRbend(theta, rho, gamma, -1)
m_rb_2 = transRbend(-theta, -rho, gamma, 1)
m_rb_3 = transRbend(-theta, -rho, gamma, -1)
m_rb_4 = transRbend(theta, rho, gamma, 1)
m_df_12 = transDrift(dflist[0], gamma)
m_df_23 = transDrift(dflist[1], gamma)
m_df_34 = transDrift(dflist[2], gamma)
m = reduce(np.dot, [m_rb_1, m_df_12, m_rb_2, m_df_23, m_rb_3, m_df_34, m_rb_4])
return m
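# Illustrative sketch (not part of the original module): reading the R56
# (longitudinal dispersion) element out of a chicane matrix built by
# transChicane(); for these parameters the Chicane.setDriftLength docstring
# below quotes an R56 of about -0.432 m.
def _example_chicane_r56():
    m = transChicane(bend_length=1.0, bend_field=0.5, drift_length=1.0, gamma=1000)
    return m[4, 5]  # R56, i.e. element (5, 6) with 1-based indexing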
class Chicane(object):
""" Chicane class
    transport configuration of a chicane, composed of four dipoles with three drift sections
.. warning:: it's better to issue ``getMatrix()`` before ``getR()``, ``getAngle()``
:param bend_length: bend length, [m]
:param bend_field: bend field, [T]
:param drift_length: drift length [m], list: [1,2,1], [1], [1,2], 1
:param gamma: electron energy, gamma value
"""
def __init__(self, bend_length=None, bend_field=None, drift_length=None, gamma=None):
self.transM = np.eye(6, 6, dtype=np.float64)
self.setParams(bend_length, bend_field, drift_length, gamma)
self.mflag = True # if calculate m or return eye matrix
self.refresh = False # refresh or not
def setParams(self, bend_length, bend_field, drift_length, gamma):
""" set chicane parameters
:param bend_length: bend length, [m]
:param bend_field: bend field, [T]
:param drift_length: drift length, [m], list
:param gamma: electron energy, gamma
:return: None
"""
if None in (bend_length, bend_field, drift_length, gamma):
print("warning: 'bend_length', 'bend_field', 'drift_length', 'gamma' should be positive float numbers.")
self.mflag = False
else:
self._setDriftList(drift_length)
self.gamma = gamma
self.bend_length = bend_length
self.bend_field = bend_field
def _setDriftList(self, drift_length):
""" set drift length list of three elements
:param drift_length: input drift_length in [m], single float, or list/tuple of float numbers
"""
if isinstance(drift_length, tuple) or isinstance(drift_length, list):
if len(drift_length) == 1:
self.dflist = drift_length * 3
elif len(drift_length) == 2:
self.dflist = []
self.dflist.extend(drift_length)
self.dflist.append(drift_length[0])
elif len(drift_length) >= 3:
self.dflist = drift_length[0:3]
if self.dflist[0] != self.dflist[-1]:
print("warning: chicane is not symmetric.")
else:
print("drift_length is not a valid list or tuple.")
self.mflag = False
else:
self.dflist = []
self.dflist.extend([drift_length, drift_length, drift_length])
def getMatrix(self):
""" get transport matrix with ``mflag`` flag,
if ``mflag`` is True, return calculated matrix, else return unity matrix
:return: transport matrix
"""
if self.mflag:
m0 = 9.10938215e-31
e0 = 1.602176487e-19
c0 = 299792458.0
rho = np.sqrt(self.gamma ** 2 - 1) * m0 * c0 / self.bend_field / e0
theta = np.arcsin(self.bend_length / rho)
self.bangle = theta
m_rb_1 = transRbend(theta, rho, self.gamma, -1)
m_rb_2 = transRbend(-theta, -rho, self.gamma, 1)
m_rb_3 = transRbend(-theta, -rho, self.gamma, -1)
m_rb_4 = transRbend(theta, rho, self.gamma, 1)
m_df_12 = transDrift(self.dflist[0], self.gamma)
m_df_23 = transDrift(self.dflist[1], self.gamma)
m_df_34 = transDrift(self.dflist[2], self.gamma)
self.transM = reduce(np.dot, [m_rb_1, m_df_12, m_rb_2, m_df_23, m_rb_3, m_df_34, m_rb_4])
return self.transM
def getAngle(self, mode='deg'):
""" return bend angle
:param mode: 'deg' or 'rad'
        :return: deflecting angle, in degrees if ``mode='deg'`` (default) or radians if ``mode='rad'``
"""
if self.refresh is True:
self.getMatrix()
try:
if self.mflag:
if mode == 'deg':
return self.bangle / np.pi * 180
else: # rad
return self.bangle
else:
return 0
except AttributeError:
print("Please execute getMatrix() first.")
def getR(self, i=5, j=6):
""" return transport matrix element, indexed by i, j,
        by default, returns the dispersion value, i.e. getR(5,6) in [m]
        :param i: row index, with initial index of 1
        :param j: col index, with initial index of 1
:return: transport matrix element
"""
if self.refresh is True:
self.getMatrix()
return self.transM[i - 1, j - 1]
def setBendLength(self, x):
""" set bend length
:param x: new bend length to be assigned, [m]
:return: None
"""
if x != self.bend_length:
self.bend_length = x
self.refresh = True
def getBendLength(self):
"""
:return: bend length
"""
return self.bend_length
def setBendField(self, x):
""" set bend magnetic field
:param x: new bend field to be assigned, [T]
:return: None
"""
if x != self.bend_field:
self.bend_field = x
self.refresh = True
def getBendField(self):
"""
:return: bend magnetic field
"""
return self.bend_field
def setDriftLength(self, x):
""" set lengths for drift sections
:param x: single double or list
:return: None
:Example:
>>> import beamline
>>> chi = beamline.mathutils.Chicane(bend_length=1,bend_field=0.5,drift_length=1,gamma=1000)
>>> chi.getMatrix()
>>> r56 = chi.getR(5,6) # r56 = -0.432
>>> chi.setDriftLength([2,4,2])
>>> # same effect (to R56) as ``chi.setDriftLength([2,4])`` or ``chi.setDriftLength([2])``
>>> # or ``chi.setDriftLength(2)``
>>> r56 = chi.getR(5,6) # r56 = -0.620
"""
if x != self.getDriftLength():
self._setDriftList(x)
self.refresh = True
def getDriftLength(self):
"""
:return: drift lengths list
"""
return self.dflist
def setGamma(self, x):
""" set electron energy, gamma value
:param x: new energy, gamma value
:return: None
"""
if x != self.gamma:
self.gamma = x
self.refresh = True
def getGamma(self):
"""
:return: gamma value
"""
return self.gamma
def test():
k = -10
s = 1
theta = 2
rho = 9
imagl = 0.5
idril = 1.0
ibfield = 0.8
gamma0 = 500
xoy = 'y'
f1 = funTransQuadF(k, s)
f2 = funTransQuadD(k, s)
f3 = funTransUnduV(k, s)
f4 = funTransChica(imagl, idril, ibfield, gamma0, xoy)
print(f1)
print(f2)
print(f3)
print("-" * 40)
print(f1.dot(f2).dot(f3))
print(reduce(np.dot, [f1, f2, f3]))
print("-" * 40)
print(f4)
print("-" * 40)
print(funTransQuadF(k, s) - funTransQuadD(-k, s))
if __name__ == "__main__":
test()
|
|
#!/usr/bin/env python3
'''
Source code metrics for C programs
Copyright 2013-2018 RIKEN
Copyright 2018-2020 Chiba Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = 'Masatomo Hashimoto <[email protected]>'
import pathsetup
import dp
import sparql
from factutils.entity import SourceCodeEntity
from virtuoso import VIRTUOSO_PW, VIRTUOSO_PORT
from sourcecode_metrics_for_survey_base import get_lver, get_proj_list, ftbl_list_to_orange, MetricsBase
from metrics_queries_cpp import QUERY_TBL
FOP_TBL = { # number of FP operations (for SPARC64 VIIIfx)
'nint' : 2,
'jnint' : 2,
'cos' : 29,
'dcos' : 31,
'exp' : 19,
'dexp' : 23,
'log' : 19,
'alog' : 19,
'dlog' : 23,
'mod' : 8,
'amod' : 8,
'dmod' : 8,
'sign' : 2,
'dsign' : 2,
'sin' : 29,
'dsin' : 31,
'sqrt' : 11,
'dsqrt' : 21,
'tan' : 58,
'dtan' : 64,
}
FOP_TBL_DBL_EXTRA = {
'cos' : 2,
'exp' : 4,
'log' : 4,
'sin' : 2,
'sqrt' : 10,
'tan' : 6,
}
FOP_TBL_VA = {
'max' : lambda n: n-1,
'amax1' : lambda n: n-1,
'dmax1' : lambda n: n-1,
'min' : lambda n: n-1,
'amin1' : lambda n: n-1,
'dmin1' : lambda n: n-1,
}
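# The three tables above drive get_nfops() below: FOP_TBL gives the estimated
# FP-operation cost per call of an intrinsic, FOP_TBL_DBL_EXTRA adds the
# double-precision surcharge, and FOP_TBL_VA handles variadic intrinsics whose
# cost depends on the argument count.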
LINES_OF_CODE = 'lines_of_code'
MAX_LOOP_DEPTH = 'max_loop_depth'
MAX_FUSIBLE_LOOPS = 'max_fusible_loops'
MAX_MERGEABLE_ARRAYS = 'max_mergeable_arrays'
MAX_ARRAY_RANK = 'max_array_rank'
MAX_LOOP_LEVEL = 'max_loop_level'
N_BRANCHES = 'branches'
N_STMTS = 'stmts'
N_FP_OPS = 'fp_ops'
N_OPS = 'ops'
N_CALLS = 'calls'
N_A_REFS = ['array_refs0','array_refs1','array_refs2']
N_IND_A_REFS = ['indirect_array_refs0','indirect_array_refs1','indirect_array_refs2']
N_DBL_A_REFS = ['dbl_array_refs0','dbl_array_refs1','dbl_array_refs2']
BF = ['bf0','bf1','bf2']
META_KEYS = ['proj', 'ver', 'path', 'sub', 'lnum', 'digest']
abbrv_tbl = {
LINES_OF_CODE : 'LOC',
MAX_LOOP_DEPTH : 'LpD',
MAX_FUSIBLE_LOOPS : 'FLp',
MAX_MERGEABLE_ARRAYS : 'MA',
MAX_ARRAY_RANK : 'ARk',
MAX_LOOP_LEVEL : 'LLv',
N_BRANCHES : 'Br',
N_STMTS : 'St',
N_FP_OPS : 'FOp',
N_OPS : 'Op',
N_CALLS : 'Ca',
N_A_REFS[0] : 'AR0',
N_IND_A_REFS[0] : 'IAR0',
N_DBL_A_REFS[0] : 'DAR0',
N_A_REFS[1] : 'AR1',
N_IND_A_REFS[1] : 'IAR1',
N_DBL_A_REFS[1] : 'DAR1',
N_A_REFS[2] : 'AR2',
N_IND_A_REFS[2] : 'IAR2',
N_DBL_A_REFS[2] : 'DAR2',
BF[0] : 'BF0',
BF[1] : 'BF1',
BF[2] : 'BF2',
}
###
def count_aas(aas):
c = 0
for aa in aas:
if aa.startswith(','):
c += 2
else:
c += 1
return c
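# e.g. count_aas(['a(i)', ',b(i,j)']) == 3: entries beginning with ',' are
# counted twice.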
def get_nfops(name, nargs, double=False):
nfop = 1
try:
nfop = FOP_TBL_VA[name](nargs)
except KeyError:
nfop = FOP_TBL.get(name, 1)
if double:
nfop += FOP_TBL_DBL_EXTRA.get(name, 0)
prec = 's'
if double:
prec = 'd'
dp.debug('%s{%s}(%d) --> %d' % (name, prec, nargs, nfop))
return nfop
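# Examples of the lookup above: get_nfops('dsqrt', 1) -> 21 (FOP_TBL),
# get_nfops('sqrt', 1, double=True) -> 11 + 10 = 21 (FOP_TBL + FOP_TBL_DBL_EXTRA),
# get_nfops('max', 3) -> 2 (variadic: n - 1 from FOP_TBL_VA).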
def make_feature_tbl():
v = { 'meta' : {'proj' : '',
'ver' : '',
'path' : '',
'sub' : '',
'lnum' : '',
},
BF[0] : 0.0,
BF[1] : 0.0,
BF[2] : 0.0,
N_FP_OPS : 0,
N_OPS : 0,
N_A_REFS[0] : 0,
N_IND_A_REFS[0] : 0,
N_DBL_A_REFS[0] : 0,
N_A_REFS[1] : 0,
N_IND_A_REFS[1] : 0,
N_DBL_A_REFS[1] : 0,
N_A_REFS[2] : 0,
N_IND_A_REFS[2] : 0,
N_DBL_A_REFS[2] : 0,
N_BRANCHES : 0,
N_STMTS : 0,
N_CALLS : 0,
LINES_OF_CODE : 0,
MAX_LOOP_LEVEL : 0,
MAX_ARRAY_RANK : 0,
MAX_LOOP_DEPTH : 0,
MAX_FUSIBLE_LOOPS : 0,
MAX_MERGEABLE_ARRAYS : 0,
}
return v
def ftbl_to_string(ftbl):
meta_str = '%(proj)s:%(ver)s:%(path)s:%(sub)s:%(lnum)s' % ftbl['meta']
cpy = ftbl.copy()
cpy['meta'] = meta_str
    ks = list(ftbl.keys())  # keys() returns a view in Python 3; copy before removing
    ks.remove('meta')
fmt = '%(meta)s ('
fmt += ','.join(['%s:%%(%s)s' % (abbrv_tbl[k], k) for k in ks])
fmt += ')'
s = fmt % cpy
return s
class Metrics(MetricsBase):
def __init__(self, proj_id, method='odbc',
pw=VIRTUOSO_PW, port=VIRTUOSO_PORT):
MetricsBase.__init__(self, proj_id, method, pw, port)
def find_ftbl(self, key):
md = self.get_metadata(key)
fn = md['fn']
digest = md['digest']
(ver, path, lnum) = key
ftbl = make_feature_tbl()
ftbl['meta'] = {
'proj' : self._proj_id,
'ver' : ver,
'path' : path,
'fn' : fn,
'lnum' : str(lnum),
'digest' : digest,
}
fop = self.get_value(N_FP_OPS, key)
if fop > 0:
for lv in range(3):
if BF[lv] in ftbl:
aa = self.get_value(N_A_REFS[lv], key)
daa = self.get_value(N_DBL_A_REFS[lv], key)
saa = aa - daa
bf = float(saa * 4 + daa * 8) / float(fop)
print('!!! {} -> fop={} aa[{}]={} daa[{}]={} bf[{}]={}'.format(key, fop, lv, aa, lv, daa, lv, bf))
ftbl[BF[lv]] = bf
for item in ftbl.keys():
try:
ftbl[item] = self._result_tbl[item][key]
except KeyError:
pass
return ftbl
def key_to_string(self, key):
(ver, loc, fn, loop, vname) = key
e = SourceCodeEntity(uri=loop)
lnum = e.get_range().get_start_line()
s = '%s:%s:%s:%s' % (ver, loc, fn, lnum)
return s
def set_metrics(self, name, _key, value, add=False):
#print('!!! set_metrics: name={} key={} value={} add={}'.format(name, _key, value, add))
(ver, loc, fn, loop, vname) = _key
ent = SourceCodeEntity(uri=loop)
lnum = ent.get_range().get_start_line()
key = (ver, loc, str(lnum))
key_str = '%s:%s:%s' % key
self.debug('%s(%s): %s -> %s' % (self.key_to_string(_key), key_str, name, value))
loop_d = self.get_loop_digest(_key)
self._metadata_tbl[key] = {'fn':fn,'digest':loop_d}
try:
tbl = self._result_tbl[name]
except KeyError:
tbl = {}
self._result_tbl[name] = tbl
if add:
v = tbl.get(key, 0)
tbl[key] = v + value
else:
tbl[key] = value
def finalize_ipp(self):
self.message('finalizing call graph...')
query = QUERY_TBL['fd_fd'] % { 'proj' : self._graph_uri }
for qvs, row in self._sparql.query(query):
callee = row['callee']
fd = row['fd']
self.ipp_add(callee, fd)
query = QUERY_TBL['loop_fd'] % { 'proj' : self._graph_uri }
for qvs, row in self._sparql.query(query):
callee = row['callee']
loop = row['loop']
self.ipp_add(callee, loop, is_loop=True)
def build_tree(self, f=None):
query = QUERY_TBL['loop_loop'] % { 'proj' : self._graph_uri }
children_tbl = {}
parent_tbl = {}
for qvs, row in self._sparql.query(query):
ver = row['ver']
loc = row['loc']
fn = row.get('fn', '')
loop = row['loop']
loop_d = row['loop_d']
vname = ''
child_loop = row.get('child_loop', None)
child_loop_d = row.get('child_loop_d', '')
child_vname = ''
lver = get_lver(ver)
key = (lver, loc, fn, loop, vname)
self.set_loop_digest(key, loop_d)
if f:
f(key, row)
try:
child_loops = children_tbl[key]
except KeyError:
child_loops = []
children_tbl[key] = child_loops
if child_loop:
child_key = (lver, loc, fn, child_loop, child_vname)
self.set_loop_digest(child_key, child_loop_d)
if child_key not in child_loops:
child_loops.append(child_key)
parent_tbl[child_key] = key
self.ipp_add(child_loop, loop, is_loop=True)
roots = []
for k in children_tbl.keys():
if k not in parent_tbl:
roots.append(k)
r = SourceCodeEntity(uri=self.get_loop_of_key(k)).get_range()
lines = r.get_end_line() - r.get_start_line() + 1
self.set_metrics(LINES_OF_CODE, k, lines)
self.message('%d top loops found' % len(roots))
tree = {'children':children_tbl,'parent':parent_tbl,'roots':roots}
self.set_tree(tree)
return tree
def get_key(self, row):
ver = row['ver']
loc = row['loc']
fn = row.get('fn', '')
loop = row['loop']
vname = ''
lver = get_lver(ver)
key = (lver, loc, fn, loop, vname)
return key
def calc_array_metrics(self):
self.message('calculating array metrics...')
try:
query = QUERY_TBL['arrays'] % { 'proj' : self._graph_uri }
tbl = {}
for qvs, row in self._sparql.query(query):
key = self.get_key(row)
array = row['dtor']
tyc = row['tyc']
rank = int(row['rank'])
try:
arrays = tbl[key]
except KeyError:
arrays = []
tbl[key] = arrays
arrays.append((array, (tyc, rank)))
def get(key):
arrays = tbl.get(key, [])
max_rank = 0
t = {}
for (a, spec) in arrays:
(tyc, rank) = spec
if rank > max_rank:
max_rank = rank
try:
t[spec] += 1
except KeyError:
t[spec] = 1
max_mergeable_arrays = 0
for spec in t.keys():
if t[spec] > max_mergeable_arrays:
max_mergeable_arrays = t[spec]
return {'max_rank':max_rank, 'max_mergeable_arrays':max_mergeable_arrays}
tree = self.get_tree()
for key in tree['roots']:
data = {'max_rank':0, 'max_mergeable_arrays':0}
def f(k):
d = get(k)
if d['max_rank'] > data['max_rank']:
data['max_rank'] = d['max_rank']
if d['max_mergeable_arrays'] > data['max_mergeable_arrays']:
data['max_mergeable_arrays'] = d['max_mergeable_arrays']
self.iter_tree(tree, key, f)
self.debug('key=%s' % (self.key_to_string(key)))
self.debug('max_mergeable_arrays=%(max_mergeable_arrays)d max_rank=%(max_rank)d' % data)
self.set_metrics(MAX_MERGEABLE_ARRAYS, key, data['max_mergeable_arrays'])
self.set_metrics(MAX_ARRAY_RANK, key, data['max_rank'])
except KeyError:
pass
self.message('done.')
def calc_in_loop_metrics(self):
self.message('calculating other in_loop metrics...')
try:
query = QUERY_TBL['in_loop'] % { 'proj' : self._graph_uri }
def make_data():
return { 'nbr' : 0,
'nes' : 0,
'nop' : 0,
'nc' : 0,
}
tbl = {}
for qvs, row in self._sparql.query(query):
key = self.get_key(row)
data = make_data()
data['nbr'] = int(row['nbr'] or '0')
data['nes'] = int(row['nes'] or '0')
data['nop'] = int(row['nop'] or '0')
data['nc'] = int(row['nc'] or '0')
tbl[key] = data
fd = row['fd']
if fd:
self.ipp_add(row['loop'], fd)
tree = self.get_tree()
for key in tree['roots']:
data = make_data()
def f(k):
d = tbl.get(k, None)
if d:
data['nbr'] += d['nbr']
data['nes'] += d['nes']
data['nop'] += d['nop']
data['nc'] += d['nc']
self.iter_tree(tree, key, f)
self.set_metrics(N_BRANCHES, key, data['nbr'])
self.set_metrics(N_STMTS, key, data['nes'])
self.set_metrics(N_OPS, key, data['nop'])
self.set_metrics(N_CALLS, key, data['nc'])
except KeyError:
raise
self.message('done.')
# end of calc_in_loop_metrics
def calc_aref_in_loop_metrics(self, lv): # level: 0, 1, 2
self.message('calculating other aref_in_loop metrics (lv=%d)...' % lv)
try:
if lv == 0:
qtbl = QUERY_TBL['aref0_in_loop']
elif lv == 1 or lv == 2:
qtbl = QUERY_TBL['aref12_in_loop']
else:
self.warning('illegal level: %d' % lv)
return
tbl = {}
kinds = ['aa','iaa','daa']
def make_data():
d = {}
for k in kinds:
d[k] = set()
return d
for kind in kinds:
query = qtbl[kind] % {'proj':self._graph_uri,'level':lv}
for qvs, row in self._sparql.query(query):
key = self.get_key(row)
sig = row.get('sig')
if sig:
try:
data = tbl[key]
except KeyError:
data = make_data()
tbl[key] = data
data[kind].add(sig)
tree = self.get_tree()
for key in tree['roots']:
data = make_data()
def f(k):
d = tbl.get(k, None)
if d:
for kind in kinds:
data[kind] |= d.get(kind, set())
self.iter_tree(tree, key, f)
self.set_metrics(N_A_REFS[lv], key, count_aas(data['aa']))
self.set_metrics(N_IND_A_REFS[lv], key, count_aas(data['iaa']))
self.set_metrics(N_DBL_A_REFS[lv], key, count_aas(data['daa']))
except KeyError:
raise
self.message('done.')
# end of calc_aref_in_loop_metrics
def calc_fop_in_loop_metrics(self):
self.message('calculating fop metrics...')
try:
query = QUERY_TBL['fop_in_loop'] % { 'proj' : self._graph_uri }
def make_data():
return {
'nfop' : 0,
}
tbl = {}
for qvs, row in self._sparql.query(query):
key = self.get_key(row)
data = make_data()
data['nfop'] = int(row['nfop'] or '0')
tbl[key] = data
fd = row['fd']
if fd:
self.ipp_add(row['loop'], fd)
tree = self.get_tree()
for key in tree['roots']:
data = make_data()
def f(k):
d = tbl.get(k, None)
if d:
data['nfop'] += d['nfop']
self.iter_tree(tree, key, f)
self.set_metrics(N_FP_OPS, key, data['nfop'])
except KeyError:
raise
self.message('done.')
# end of calc_fop_in_loop_metrics
def calc_ffr_in_loop_metrics(self):
self.message('calculating ffr metrics...')
try:
query = QUERY_TBL['ffr_in_loop'] % { 'proj' : self._graph_uri }
tbl = {} # key -> hash -> fname * nargs * is_dbl
for qvs, row in self._sparql.query(query):
key = self.get_key(row)
try:
fref_tbl = tbl[key] # hash -> fname * nargs * is_dbl
except KeyError:
fref_tbl = {}
tbl[key] = fref_tbl
h = row['h']
fname = row['fname']
nargs = row['nargs']
fref_tbl[h] = (fname, nargs, False)
fd = row['fd']
if fd:
self.ipp_add(row['loop'], fd)
#
query = QUERY_TBL['dfr_in_loop'] % { 'proj' : self._graph_uri }
for qvs, row in self._sparql.query(query):
key = self.get_key(row)
fref_tbl = tbl.get(key, None)
if fref_tbl:
h = row['h']
fname = row['fname']
try:
(fn, na, b) = fref_tbl[h]
if fn == fname:
fref_tbl[h] = (fn, na, True)
else:
self.warning('function name mismatch (%s != %s)' % (fname, fn))
except KeyError:
self.warning('reference of %s not found (hash=%s)' % (fname, h))
#
tree = self.get_tree()
def make_data():
return {
'nfop' : 0,
}
for key in tree['roots']:
data = make_data()
def f(k):
fref_tbl = tbl.get(k, None)
if fref_tbl:
for (h, (fn, na, dbl)) in fref_tbl.items():
data['nfop'] += get_nfops(fn, na, double=dbl)
self.iter_tree(tree, key, f)
self.set_metrics(N_FP_OPS, key, data['nfop'], add=True)
except KeyError:
raise
self.message('done.')
# end of calc_ffr_in_loop_metrics
def filter_results(self):
self.message('filtering results...')
to_be_removed = set()
for item in (MAX_ARRAY_RANK, N_FP_OPS, N_A_REFS[0]):
for (k, v) in self._result_tbl.get(item, {}).items():
if v == 0:
to_be_removed.add(k)
for (item, tbl) in self._result_tbl.items():
for k in to_be_removed:
del tbl[k]
def calc(self):
self.message('calculating for "%s"...' % self._proj_id)
self.calc_loop_metrics()
self.calc_array_metrics()
self.calc_fop_in_loop_metrics()
self.calc_ffr_in_loop_metrics()
for lv in range(3):
self.calc_aref_in_loop_metrics(lv)
self.calc_in_loop_metrics()
self.finalize_ipp()
self.calc_max_loop_level()
self.filter_results()
#self.dump()
if __name__ == '__main__':
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description='get source code metrics')
parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='enable debug printing')
parser.add_argument('-k', '--key', dest='key', default=None,
metavar='KEY', type=str, help='show metrics for KEY=VER:PATH:LNUM')
parser.add_argument('-o', '--outfile', dest='outfile', default=None,
metavar='FILE', type=str, help='dump feature vector into FILE')
parser.add_argument('-m', '--method', dest='method', default='odbc',
metavar='METHOD', type=str, help='execute query via METHOD (odbc|http)')
parser.add_argument('proj_list', nargs='*', default=[],
metavar='PROJ', type=str, help='project id (default: all projects)')
args = parser.parse_args()
dp.debug_flag = args.debug
proj_list = []
if args.key:
l = args.key.split(':')
if len(l) != 3:
print('invalid key: %s' % args.key)
exit(1)
else:
try:
int(l[2])
except:
print('invalid key: %s' % args.key)
exit(1)
if args.proj_list:
proj_list = args.proj_list
else:
proj_list = get_proj_list()
ftbl_list = []
for proj_id in proj_list:
m = Metrics(proj_id, method=args.method)
m.calc()
if args.key:
ftbl_list += m.search(args.key)
else:
ftbl_list += m.get_ftbl_list()
if ftbl_list:
if args.outfile:
ftbl_list_to_orange(ftbl_list, args.outfile, META_KEYS)
else:
for ftbl in sorted(ftbl_list,
key=lambda x: (x['meta']['ver'],
x['meta']['fn'],
x['meta']['lnum'])):
print('%s' % ftbl_to_string(ftbl))
else:
print('not found')
|
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006-2007 XenSource Inc.
#============================================================================
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import gettext
import xmlrpclib
import httplib
import socket
translation = gettext.translation('xen-xm', fallback = True)
API_VERSION_1_1 = '1.1'
API_VERSION_1_2 = '1.2'
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception, exn:
import sys
print >>sys.stderr, exn
return "Xen-API failure: %s" % str(self.details)
def _details_map(self):
return dict([(str(i), self.details[i])
for i in range(len(self.details))])
_RECONNECT_AND_RETRY = (lambda _ : ())
class UDSHTTPConnection(httplib.HTTPConnection):
"""HTTPConnection subclass to allow HTTP over Unix domain sockets. """
def connect(self):
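        # The socket path is encoded in the URL's host part with '_' standing
        # in for '/', e.g. "_var_xapi_xapi" -> "/var/xapi/xapi" (see
        # xapi_local() below).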
path = self.host.replace("_", "/")
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(path)
class UDSHTTP(httplib.HTTP):
_connection_class = UDSHTTPConnection
class UDSTransport(xmlrpclib.Transport):
def __init__(self, use_datetime=0):
self._use_datetime = use_datetime
self._extra_headers=[]
def add_extra_header(self, key, value):
self._extra_headers += [ (key,value) ]
def make_connection(self, host):
return UDSHTTP(host)
def send_request(self, connection, handler, request_body):
connection.putrequest("POST", handler)
for key, value in self._extra_headers:
connection.putheader(key, value)
class Session(xmlrpclib.ServerProxy):
"""A server proxy and session manager for communicating with xapi using
the Xen-API.
Example:
session = Session('http://localhost/')
session.login_with_password('me', 'mypassword')
session.xenapi.VM.start(vm_uuid)
session.xenapi.session.logout()
"""
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=1):
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
verbose, allow_none)
self.transport = transport
self._session = None
self.last_login_method = None
self.last_login_params = None
self.API_version = API_VERSION_1_1
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
self._logout()
return None
else:
retry_count = 0
while retry_count < 3:
full_params = (self._session,) + params
result = _parse_result(getattr(self, methodname)(*full_params))
if result == _RECONNECT_AND_RETRY:
retry_count += 1
if self.last_login_method:
self._login(self.last_login_method,
self.last_login_params)
else:
raise xmlrpclib.Fault(401, 'You must log in')
else:
return result
raise xmlrpclib.Fault(
500, 'Tried 3 times to get a valid session, but failed')
def _login(self, method, params):
result = _parse_result(getattr(self, 'session.%s' % method)(*params))
if result == _RECONNECT_AND_RETRY:
raise xmlrpclib.Fault(
500, 'Received SESSION_INVALID when logging in')
self._session = result
self.last_login_method = method
self.last_login_params = params
self.API_version = self._get_api_version()
def _logout(self):
try:
if self.last_login_method.startswith("slave_local"):
return _parse_result(self.session.local_logout(self._session))
else:
return _parse_result(self.session.logout(self._session))
finally:
self._session = None
self.last_login_method = None
self.last_login_params = None
self.API_version = API_VERSION_1_1
def _get_api_version(self):
pool = self.xenapi.pool.get_all()[0]
host = self.xenapi.pool.get_master(pool)
major = self.xenapi.host.get_API_version_major(host)
minor = self.xenapi.host.get_API_version_minor(host)
return "%s.%s"%(major,minor)
def __getattr__(self, name):
if name == 'handle':
return self._session
elif name == 'xenapi':
return _Dispatcher(self.API_version, self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
return lambda *params: self._login(name, params)
else:
return xmlrpclib.ServerProxy.__getattr__(self, name)
def xapi_local():
return Session("http://_var_xapi_xapi/", transport=UDSTransport())
def _parse_result(result):
if type(result) != dict or 'Status' not in result:
        raise xmlrpclib.Fault(500, 'Missing Status in response from server: ' + str(result))
if result['Status'] == 'Success':
if 'Value' in result:
return result['Value']
else:
raise xmlrpclib.Fault(500,
'Missing Value in response from server')
else:
if 'ErrorDescription' in result:
if result['ErrorDescription'][0] == 'SESSION_INVALID':
return _RECONNECT_AND_RETRY
else:
raise Failure(result['ErrorDescription'])
else:
raise xmlrpclib.Fault(
500, 'Missing ErrorDescription in response from server')
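# _parse_result() expects xapi responses of the form
#   {'Status': 'Success', 'Value': ...}   or
#   {'Status': 'Failure', 'ErrorDescription': ['SESSION_INVALID', ...]},
# returning the sentinel _RECONNECT_AND_RETRY only for SESSION_INVALID so that
# xenapi_request() can re-login and retry.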
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, API_version, send, name):
self.__API_version = API_version
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<XenAPI._Dispatcher for %s>' % self.__name
else:
return '<XenAPI._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__API_version, self.__send, name)
else:
return _Dispatcher(self.__API_version, self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
|
|
# -*- coding: utf-8 -*-
"""
h2/settings
~~~~~~~~~~~
This module contains a HTTP/2 settings object. This object provides a simple
API for manipulating HTTP/2 settings, keeping track of both the current active
state of the settings and the unacknowledged future values of the settings.
"""
import collections
import enum
from hyperframe.frame import SettingsFrame
from h2.errors import ErrorCodes
from h2.exceptions import InvalidSettingsValueError
try:
from collections.abc import MutableMapping
except ImportError: # pragma: no cover
# Python 2.7 compatibility
from collections import MutableMapping
class SettingCodes(enum.IntEnum):
"""
All known HTTP/2 setting codes.
.. versionadded:: 2.6.0
"""
#: Allows the sender to inform the remote endpoint of the maximum size of
#: the header compression table used to decode header blocks, in octets.
HEADER_TABLE_SIZE = SettingsFrame.HEADER_TABLE_SIZE
#: This setting can be used to disable server push. To disable server push
#: on a client, set this to 0.
ENABLE_PUSH = SettingsFrame.ENABLE_PUSH
#: Indicates the maximum number of concurrent streams that the sender will
#: allow.
MAX_CONCURRENT_STREAMS = SettingsFrame.MAX_CONCURRENT_STREAMS
#: Indicates the sender's initial window size (in octets) for stream-level
#: flow control.
INITIAL_WINDOW_SIZE = SettingsFrame.INITIAL_WINDOW_SIZE
#: Indicates the size of the largest frame payload that the sender is
#: willing to receive, in octets.
MAX_FRAME_SIZE = SettingsFrame.MAX_FRAME_SIZE
#: This advisory setting informs a peer of the maximum size of header list
#: that the sender is prepared to accept, in octets. The value is based on
#: the uncompressed size of header fields, including the length of the name
#: and value in octets plus an overhead of 32 octets for each header field.
MAX_HEADER_LIST_SIZE = SettingsFrame.MAX_HEADER_LIST_SIZE
#: This setting can be used to enable the connect protocol. To enable on a
#: client set this to 1.
ENABLE_CONNECT_PROTOCOL = SettingsFrame.ENABLE_CONNECT_PROTOCOL
def _setting_code_from_int(code):
"""
Given an integer setting code, returns either one of :class:`SettingCodes
<h2.settings.SettingCodes>` or, if not present in the known set of codes,
returns the integer directly.
"""
try:
return SettingCodes(code)
except ValueError:
return code
class ChangedSetting:
def __init__(self, setting, original_value, new_value):
#: The setting code given. Either one of :class:`SettingCodes
#: <h2.settings.SettingCodes>` or ``int``
#:
#: .. versionchanged:: 2.6.0
self.setting = setting
#: The original value before being changed.
self.original_value = original_value
#: The new value after being changed.
self.new_value = new_value
def __repr__(self):
return (
"ChangedSetting(setting=%s, original_value=%s, "
"new_value=%s)"
) % (
self.setting,
self.original_value,
self.new_value
)
class Settings(MutableMapping):
"""
An object that encapsulates HTTP/2 settings state.
HTTP/2 Settings are a complex beast. Each party, remote and local, has its
own settings and a view of the other party's settings. When a settings
frame is emitted by a peer it cannot assume that the new settings values
are in place until the remote peer acknowledges the setting. In principle,
multiple settings changes can be "in flight" at the same time, all with
different values.
This object encapsulates this mess. It provides a dict-like interface to
settings, which return the *current* values of the settings in question.
Additionally, it keeps track of the stack of proposed values: each time an
acknowledgement is sent/received, it updates the current values with the
stack of proposed values. On top of all that, it validates the values to
make sure they're allowed, and raises :class:`InvalidSettingsValueError
<h2.exceptions.InvalidSettingsValueError>` if they are not.
Finally, this object understands what the default values of the HTTP/2
settings are, and sets those defaults appropriately.
.. versionchanged:: 2.2.0
Added the ``initial_values`` parameter.
.. versionchanged:: 2.5.0
Added the ``max_header_list_size`` property.
:param client: (optional) Whether these settings should be defaulted for a
client implementation or a server implementation. Defaults to ``True``.
:type client: ``bool``
:param initial_values: (optional) Any initial values the user would like
set, rather than RFC 7540's defaults.
    :type initial_values: ``MutableMapping``
"""
def __init__(self, client=True, initial_values=None):
# Backing object for the settings. This is a dictionary of
# (setting: [list of values]), where the first value in the list is the
# current value of the setting. Strictly this doesn't use lists but
# instead uses collections.deque to avoid repeated memory allocations.
#
# This contains the default values for HTTP/2.
self._settings = {
SettingCodes.HEADER_TABLE_SIZE: collections.deque([4096]),
SettingCodes.ENABLE_PUSH: collections.deque([int(client)]),
SettingCodes.INITIAL_WINDOW_SIZE: collections.deque([65535]),
SettingCodes.MAX_FRAME_SIZE: collections.deque([16384]),
SettingCodes.ENABLE_CONNECT_PROTOCOL: collections.deque([0]),
}
if initial_values is not None:
for key, value in initial_values.items():
invalid = _validate_setting(key, value)
if invalid:
raise InvalidSettingsValueError(
"Setting %d has invalid value %d" % (key, value),
error_code=invalid
)
self._settings[key] = collections.deque([value])
def acknowledge(self):
"""
The settings have been acknowledged, either by the user (remote
settings) or by the remote peer (local settings).
:returns: A dict of {setting: ChangedSetting} that were applied.
"""
changed_settings = {}
# If there is more than one setting in the list, we have a setting
# value outstanding. Update them.
for k, v in self._settings.items():
if len(v) > 1:
old_setting = v.popleft()
new_setting = v[0]
changed_settings[k] = ChangedSetting(
k, old_setting, new_setting
)
return changed_settings
# Provide easy-access to well known settings.
@property
def header_table_size(self):
"""
The current value of the :data:`HEADER_TABLE_SIZE
<h2.settings.SettingCodes.HEADER_TABLE_SIZE>` setting.
"""
return self[SettingCodes.HEADER_TABLE_SIZE]
@header_table_size.setter
def header_table_size(self, value):
self[SettingCodes.HEADER_TABLE_SIZE] = value
@property
def enable_push(self):
"""
The current value of the :data:`ENABLE_PUSH
<h2.settings.SettingCodes.ENABLE_PUSH>` setting.
"""
return self[SettingCodes.ENABLE_PUSH]
@enable_push.setter
def enable_push(self, value):
self[SettingCodes.ENABLE_PUSH] = value
@property
def initial_window_size(self):
"""
The current value of the :data:`INITIAL_WINDOW_SIZE
<h2.settings.SettingCodes.INITIAL_WINDOW_SIZE>` setting.
"""
return self[SettingCodes.INITIAL_WINDOW_SIZE]
@initial_window_size.setter
def initial_window_size(self, value):
self[SettingCodes.INITIAL_WINDOW_SIZE] = value
@property
def max_frame_size(self):
"""
The current value of the :data:`MAX_FRAME_SIZE
<h2.settings.SettingCodes.MAX_FRAME_SIZE>` setting.
"""
return self[SettingCodes.MAX_FRAME_SIZE]
@max_frame_size.setter
def max_frame_size(self, value):
self[SettingCodes.MAX_FRAME_SIZE] = value
@property
def max_concurrent_streams(self):
"""
The current value of the :data:`MAX_CONCURRENT_STREAMS
<h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS>` setting.
"""
return self.get(SettingCodes.MAX_CONCURRENT_STREAMS, 2**32+1)
@max_concurrent_streams.setter
def max_concurrent_streams(self, value):
self[SettingCodes.MAX_CONCURRENT_STREAMS] = value
@property
def max_header_list_size(self):
"""
The current value of the :data:`MAX_HEADER_LIST_SIZE
<h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE>` setting. If not set,
returns ``None``, which means unlimited.
.. versionadded:: 2.5.0
"""
return self.get(SettingCodes.MAX_HEADER_LIST_SIZE, None)
@max_header_list_size.setter
def max_header_list_size(self, value):
self[SettingCodes.MAX_HEADER_LIST_SIZE] = value
@property
def enable_connect_protocol(self):
"""
The current value of the :data:`ENABLE_CONNECT_PROTOCOL
<h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL>` setting.
"""
return self[SettingCodes.ENABLE_CONNECT_PROTOCOL]
@enable_connect_protocol.setter
def enable_connect_protocol(self, value):
self[SettingCodes.ENABLE_CONNECT_PROTOCOL] = value
# Implement the MutableMapping API.
def __getitem__(self, key):
val = self._settings[key][0]
# Things that were created when a setting was received should stay
# KeyError'd.
if val is None:
raise KeyError
return val
def __setitem__(self, key, value):
invalid = _validate_setting(key, value)
if invalid:
raise InvalidSettingsValueError(
"Setting %d has invalid value %d" % (key, value),
error_code=invalid
)
try:
items = self._settings[key]
except KeyError:
items = collections.deque([None])
self._settings[key] = items
items.append(value)
def __delitem__(self, key):
del self._settings[key]
def __iter__(self):
return self._settings.__iter__()
def __len__(self):
return len(self._settings)
def __eq__(self, other):
if isinstance(other, Settings):
return self._settings == other._settings
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, Settings):
return not self == other
else:
return NotImplemented
def _validate_setting(setting, value): # noqa: C901
"""
Confirms that a specific setting has a well-formed value. If the setting is
invalid, returns an error code. Otherwise, returns 0 (NO_ERROR).
"""
if setting == SettingCodes.ENABLE_PUSH:
if value not in (0, 1):
return ErrorCodes.PROTOCOL_ERROR
elif setting == SettingCodes.INITIAL_WINDOW_SIZE:
if not 0 <= value <= 2147483647: # 2^31 - 1
return ErrorCodes.FLOW_CONTROL_ERROR
elif setting == SettingCodes.MAX_FRAME_SIZE:
if not 16384 <= value <= 16777215: # 2^14 and 2^24 - 1
return ErrorCodes.PROTOCOL_ERROR
elif setting == SettingCodes.MAX_HEADER_LIST_SIZE:
if value < 0:
return ErrorCodes.PROTOCOL_ERROR
elif setting == SettingCodes.ENABLE_CONNECT_PROTOCOL:
if value not in (0, 1):
return ErrorCodes.PROTOCOL_ERROR
return 0
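if __name__ == '__main__':  # pragma: no cover
    # Illustrative sketch (not part of the original module): the two-phase
    # update described in the Settings docstring, where a new value only
    # becomes current once acknowledge() is called.
    s = Settings(client=True)
    assert s.max_frame_size == 16384        # RFC 7540 default
    s.max_frame_size = 32768                # proposed, not yet active
    assert s.max_frame_size == 16384
    changed = s.acknowledge()               # peer ACKs the SETTINGS frame
    assert s.max_frame_size == 32768
    assert changed[SettingCodes.MAX_FRAME_SIZE].original_value == 16384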
|
|
from unittest import mock
import pytest
from astropy import units as u
from astropy.coordinates import (
ICRS,
BarycentricMeanEcliptic,
CartesianDifferential,
CartesianRepresentation,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from poliastro.bodies import Earth, Venus
from poliastro.ephem import Ephem, InterpolationMethods
from poliastro.frames import Planes
from poliastro.warnings import TimeScaleWarning
AVAILABLE_INTERPOLATION_METHODS = InterpolationMethods.__members__.values()
AVAILABLE_PLANES = Planes.__members__.values()
def assert_coordinates_allclose(actual, desired, rtol=1e-7, atol_scale=None, **kwargs):
if atol_scale is None:
atol_scale = 0
assert_quantity_allclose(
actual.xyz, desired.xyz, rtol, atol=atol_scale * desired.xyz.unit, **kwargs
)
if "s" in desired.differentials:
assert_quantity_allclose(
actual.differentials["s"].d_xyz,
desired.differentials["s"].d_xyz,
rtol=rtol,
atol=atol_scale * desired.differentials["s"].d_xyz.unit,
**kwargs,
)
@pytest.fixture
def epochs():
return Time(
[
"2020-03-01 12:00:00",
"2020-03-02 12:00:00",
"2020-03-03 12:00:00",
"2020-03-04 12:00:00",
],
scale="tdb",
)
@pytest.fixture
def coordinates():
return CartesianRepresentation(
[(1, 0, 0), (0.9, 0.1, 0), (0.8, 0.2, 0), (0.7, 0.3, 0)] * u.au,
xyz_axis=1,
differentials=CartesianDifferential(
[(0, 1, 0), (-0.1, 0.9, 0), (-0.2, 0.8, 0), (-0.3, 0.7, 0)]
* (u.au / u.day),
xyz_axis=1,
),
)
@pytest.mark.parametrize("plane", AVAILABLE_PLANES)
def test_ephem_has_given_plane(epochs, coordinates, plane):
    ephem = Ephem(coordinates, epochs, plane)
assert ephem.plane is plane
def test_ephem_fails_if_dimensions_are_not_correct(epochs, coordinates):
unused_plane = Planes.EARTH_EQUATOR
with pytest.raises(ValueError) as excinfo:
Ephem(epochs[0], coordinates, unused_plane)
assert (
"Coordinates and epochs must have dimension 1, got 0 and 1" in excinfo.exconly()
)
@pytest.mark.parametrize("method", AVAILABLE_INTERPOLATION_METHODS)
def test_ephem_sample_no_arguments_returns_exactly_same_input(
epochs, coordinates, method
):
unused_plane = Planes.EARTH_EQUATOR
ephem = Ephem(coordinates, epochs, unused_plane)
result_coordinates = ephem.sample(method=method)
# Exactly the same
assert result_coordinates == coordinates
@pytest.mark.parametrize("method", AVAILABLE_INTERPOLATION_METHODS)
def test_ephem_sample_scalar_epoch_returns_1_dimensional_coordinates(
epochs, coordinates, method
):
unused_plane = Planes.EARTH_EQUATOR
ephem = Ephem(coordinates, epochs, unused_plane)
result_coordinates = ephem.sample(epochs[0], method=method)
    # A scalar epoch should still yield one-dimensional coordinates
    assert result_coordinates.ndim == 1
def test_ephem_str_matches_expected_representation(epochs, coordinates):
plane = Planes.EARTH_EQUATOR
ephem = Ephem(coordinates, epochs, plane)
expected_str = (
"Ephemerides at 4 epochs "
"from 2020-03-01 12:00:00.000 (TDB) to 2020-03-04 12:00:00.000 (TDB)"
)
assert repr(ephem) == str(ephem) == expected_str
@pytest.mark.parametrize("method", AVAILABLE_INTERPOLATION_METHODS)
def test_ephem_sample_scalar_epoch_and_coordinates_returns_exactly_same_input(
epochs, coordinates, method
):
unused_plane = Planes.EARTH_EQUATOR
coordinates = coordinates[0].reshape(-1)
epochs = epochs[0].reshape(-1)
ephem = Ephem(coordinates, epochs, unused_plane)
result_coordinates = ephem.sample(epochs[0], method=method)
# Exactly the same
assert result_coordinates == coordinates
@pytest.mark.parametrize("method", AVAILABLE_INTERPOLATION_METHODS)
def test_ephem_sample_same_epochs_returns_same_input(epochs, coordinates, method):
unused_plane = Planes.EARTH_EQUATOR
ephem = Ephem(coordinates, epochs, unused_plane)
result_coordinates = ephem.sample(epochs, method=method)
# TODO: Should it return exactly the same?
assert_coordinates_allclose(result_coordinates, coordinates, atol_scale=1e-17)
@pytest.mark.parametrize("method", AVAILABLE_INTERPOLATION_METHODS)
def test_ephem_sample_existing_epochs_returns_corresponding_input(
epochs, coordinates, method
):
unused_plane = Planes.EARTH_EQUATOR
ephem = Ephem(coordinates, epochs, unused_plane)
result_coordinates = ephem.sample(epochs[::2], method=method)
# Exactly the same
assert_coordinates_allclose(result_coordinates, coordinates[::2], atol_scale=1e-17)
def test_rv_no_parameters_returns_input_vectors(coordinates, epochs):
unused_plane = Planes.EARTH_EQUATOR
ephem = Ephem(coordinates, epochs, unused_plane)
expected_r = coordinates.get_xyz(xyz_axis=1)
expected_v = coordinates.differentials["s"].get_d_xyz(xyz_axis=1)
r, v = ephem.rv()
assert_quantity_allclose(r, expected_r)
assert_quantity_allclose(v, expected_v)
def test_rv_scalar_epoch_returns_scalar_vectors(coordinates, epochs):
unused_plane = Planes.EARTH_EQUATOR
ephem = Ephem(coordinates, epochs, unused_plane)
expected_r = coordinates.get_xyz(xyz_axis=1)[0]
expected_v = coordinates.differentials["s"].get_d_xyz(xyz_axis=1)[0]
r, v = ephem.rv(epochs[0])
assert_quantity_allclose(r, expected_r)
assert_quantity_allclose(v, expected_v)
@pytest.mark.parametrize("method", AVAILABLE_INTERPOLATION_METHODS)
@pytest.mark.parametrize(
"plane, FrameClass, rtol",
[
(Planes.EARTH_EQUATOR, ICRS, 1e-7),
(Planes.EARTH_ECLIPTIC, BarycentricMeanEcliptic, 1e-5),
],
)
def test_ephem_from_body_has_expected_properties(method, plane, FrameClass, rtol):
epochs = Time(
["2020-03-01 12:00:00", "2020-03-17 00:00:00.000", "2020-04-01 12:00:00.000"],
scale="tdb",
)
equatorial_coordinates = CartesianRepresentation(
[
(-1.40892271e08, 45067626.83900666, 19543510.68386639),
(-1.4925067e08, 9130104.71634121, 3964948.59999307),
(-1.46952333e08, -27413113.24215863, -11875983.21773582),
]
* u.km,
xyz_axis=1,
differentials=CartesianDifferential(
[
(-10.14262131, -25.96929533, -11.25810932),
(-2.28639444, -27.3906416, -11.87218591),
(5.67814544, -26.84316701, -11.63720607),
]
* (u.km / u.s),
xyz_axis=1,
),
)
expected_coordinates = (
ICRS(equatorial_coordinates)
.transform_to(FrameClass)
.represent_as(CartesianRepresentation, CartesianDifferential)
)
earth = Ephem.from_body(Earth, epochs, plane=plane)
coordinates = earth.sample(method=method)
assert earth.epochs is epochs
assert_coordinates_allclose(coordinates, expected_coordinates, rtol=rtol)
def test_from_body_non_tdb_epochs_warning(epochs):
unused_body = Earth
epochs = Time.now() # This uses UTC scale
with pytest.warns(TimeScaleWarning) as record:
Ephem.from_body(unused_body, epochs)
assert len(record) == 1
assert "Input time was converted to scale='tdb'" in record[0].message.args[0]
assert "Use Time(..., scale='tdb') instead" in record[0].message.args[0]
def test_from_body_scalar_epoch_uses_reshaped_epochs():
expected_epochs = Time(["2020-03-01 12:00:00"], scale="tdb")
epochs = expected_epochs[0]
unused_plane = Planes.EARTH_EQUATOR
ephem = Ephem.from_body(Earth, epochs, plane=unused_plane)
assert ephem.epochs == expected_epochs
@mock.patch("poliastro.ephem.Horizons")
@pytest.mark.parametrize(
"attractor,location_str", [(None, "@ssb"), (Earth, "500@399"), (Venus, "500@299")]
)
@pytest.mark.parametrize(
"plane,refplane_str",
[(Planes.EARTH_EQUATOR, "earth"), (Planes.EARTH_ECLIPTIC, "ecliptic")],
)
def test_ephem_from_horizons_calls_horizons_with_correct_parameters(
horizons_mock, attractor, location_str, plane, refplane_str
):
unused_name = "Strange Object"
unused_id_type = "id_type"
epochs = Time(["2020-03-01 12:00:00"], scale="tdb")
horizons_mock().vectors.return_value = {
"x": [1] * u.au,
"y": [0] * u.au,
"z": [0] * u.au,
"vx": [0] * (u.au / u.day),
"vy": [1] * (u.au / u.day),
"vz": [0] * (u.au / u.day),
}
expected_coordinates = CartesianRepresentation(
[(1, 0, 0)] * u.au,
xyz_axis=1,
differentials=CartesianDifferential([(0, 1, 0)] * (u.au / u.day), xyz_axis=1),
)
ephem = Ephem.from_horizons(
unused_name, epochs, attractor=attractor, plane=plane, id_type=unused_id_type
)
horizons_mock.assert_called_with(
id=unused_name, location=location_str, epochs=epochs.jd, id_type=unused_id_type
)
horizons_mock().vectors.assert_called_once_with(refplane=refplane_str)
coordinates = ephem.sample()
assert_coordinates_allclose(coordinates, expected_coordinates)
@mock.patch("poliastro.ephem.Horizons")
def test_from_horizons_scalar_epoch_uses_reshaped_epochs(horizons_mock):
unused_name = "Strange Object"
unused_id_type = "id_type"
unused_plane = Planes.EARTH_EQUATOR
unused_location_str = "500@399"
unused_attractor = Earth
expected_epochs = Time(["2020-03-01 12:00:00"], scale="tdb")
epochs = expected_epochs[0]
horizons_mock().vectors.return_value = {
"x": [1] * u.au,
"y": [0] * u.au,
"z": [0] * u.au,
"vx": [0] * (u.au / u.day),
"vy": [1] * (u.au / u.day),
"vz": [0] * (u.au / u.day),
}
Ephem.from_horizons(
unused_name,
epochs,
attractor=unused_attractor,
plane=unused_plane,
id_type=unused_id_type,
)
horizons_mock.assert_called_with(
id=unused_name,
location=unused_location_str,
epochs=expected_epochs.jd,
id_type=unused_id_type,
)
|
|
"""
XML serializer.
"""
from django.conf import settings
from django.core.serializers import base
from django.db import models
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import smart_unicode
from xml.dom import pulldom
class Serializer(base.Serializer):
"""
Serializes a QuerySet to XML.
"""
def indent(self, level):
if self.options.get('indent', None) is not None:
self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent', None) * level)
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version" : "1.0"})
def end_serialization(self):
"""
End serialization -- end the document.
"""
self.indent(0)
self.xml.endElement("django-objects")
self.xml.endDocument()
def start_object(self, obj):
"""
Called as each object is handled.
"""
if not hasattr(obj, "_meta"):
raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
self.indent(1)
self.xml.startElement("object", {
"pk" : smart_unicode(obj._get_pk_val()),
"model" : smart_unicode(obj._meta),
})
def end_object(self, obj):
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Called to handle each field on an object (except for ForeignKeys and
ManyToManyFields)
"""
self.indent(2)
self.xml.startElement("field", {
"name" : field.name,
"type" : field.get_internal_type()
})
# Get a "string version" of the object's data (this is handled by the
# serializer base class).
if getattr(obj, field.name) is not None:
value = self.get_string_value(obj, field)
self.xml.characters(smart_unicode(value))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey (we need to treat them slightly
differently from regular fields).
"""
self._start_relational_field(field)
related = getattr(obj, field.name)
if related is not None:
# TODO: can we remove the field_name part?
            if field.rel.field_name == related._meta.pk.name:
# Related to remote object via primary key
related = related._get_pk_val()
else:
# Related to remote object via other field
related = getattr(related, field.rel.field_name)
self.xml.characters(smart_unicode(related))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField. Related objects are only
serialized as references to the object's PK (i.e. the related *data*
is not dumped, just the relation).
"""
if field.creates_table:
self._start_relational_field(field)
for relobj in getattr(obj, field.name).iterator():
self.xml.addQuickElement("object", attrs={"pk" : smart_unicode(relobj._get_pk_val())})
self.xml.endElement("field")
def _start_relational_field(self, field):
"""
Helper to output the <field> element for relational fields
"""
self.indent(2)
self.xml.startElement("field", {
"name" : field.name,
"rel" : field.rel.__class__.__name__,
"to" : smart_unicode(field.rel.to._meta),
})
class Deserializer(base.Deserializer):
"""
Deserialize XML.
"""
def __init__(self, stream_or_string, **options):
super(Deserializer, self).__init__(stream_or_string, **options)
self.event_stream = pulldom.parse(self.stream)
def next(self):
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "object":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
def _handle_object(self, node):
"""
Convert an <object> node to a DeserializedObject.
"""
# Look up the model using the model loading mechanism. If this fails,
# bail.
Model = self._get_model_from_node(node, "model")
# Start building a data dictionary from the object. If the node is
# missing the pk attribute, bail.
pk = node.getAttribute("pk")
if not pk:
raise base.DeserializationError("<object> node is missing the 'pk' attribute")
data = {Model._meta.pk.attname : Model._meta.pk.to_python(pk)}
# Also start building a dict of m2m data (this is saved as
# {m2m_accessor_attribute : [list_of_related_objects]})
m2m_data = {}
        # Deserialize each field.
for field_node in node.getElementsByTagName("field"):
# If the field is missing the name attribute, bail (are you
# sensing a pattern here?)
field_name = field_node.getAttribute("name")
if not field_name:
raise base.DeserializationError("<field> node is missing the 'name' attribute")
# Get the field from the Model. This will raise a
# FieldDoesNotExist if, well, the field doesn't exist, which will
# be propagated correctly.
field = Model._meta.get_field(field_name)
# As is usually the case, relation fields get the special treatment.
if field.rel and isinstance(field.rel, models.ManyToManyRel):
m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
elif field.rel and isinstance(field.rel, models.ManyToOneRel):
data[field.attname] = self._handle_fk_field_node(field_node, field)
else:
if field_node.getElementsByTagName('None'):
value = None
else:
value = field.to_python(getInnerText(field_node).strip())
data[field.name] = value
# Return a DeserializedObject so that the m2m data has a place to live.
return base.DeserializedObject(Model(**data), m2m_data)
def _handle_fk_field_node(self, node, field):
"""
Handle a <field> node for a ForeignKey
"""
# Check if there is a child node named 'None', returning None if so.
if node.getElementsByTagName('None'):
return None
else:
return field.rel.to._meta.get_field(field.rel.field_name).to_python(
getInnerText(node).strip())
def _handle_m2m_field_node(self, node, field):
"""
Handle a <field> node for a ManyToManyField.
"""
return [field.rel.to._meta.pk.to_python(
c.getAttribute("pk"))
for c in node.getElementsByTagName("object")]
def _get_model_from_node(self, node, attr):
"""
Helper to look up a model from a <object model=...> or a <field
rel=... to=...> node.
"""
model_identifier = node.getAttribute(attr)
if not model_identifier:
raise base.DeserializationError(
"<%s> node is missing the required '%s' attribute" \
% (node.nodeName, attr))
try:
Model = models.get_model(*model_identifier.split("."))
except TypeError:
Model = None
if Model is None:
raise base.DeserializationError(
"<%s> node has invalid model identifier: '%s'" % \
(node.nodeName, model_identifier))
return Model
def getInnerText(node):
"""
Get all the inner text of a DOM node (recursively).
"""
# inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
inner_text = []
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
inner_text.append(child.data)
elif child.nodeType == child.ELEMENT_NODE:
inner_text.extend(getInnerText(child))
else:
pass
return u"".join(inner_text)
|
|
"""
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by Lev Givon were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, Lev Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import datetime, date, timedelta
from dateutil.parser import parse
import os
from textwrap import dedent
import warnings
import numpy as np
from pandas import compat
from pandas.compat import u, u_safe
from pandas import (Timestamp, Period, Series, DataFrame, # noqa
Index, MultiIndex, Float64Index, Int64Index,
Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT,
Categorical)
from pandas.tslib import NaTType
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.core.common import (PerformanceWarning,
is_categorical_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
from pandas.io.common import get_filepath_or_buffer
from pandas.core.internals import BlockManager, make_block
import pandas.core.internals as internals
from pandas.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
from pandas.util._move import (
BadMove as _BadMove,
move_into_mutable_buffer as _move_into_mutable_buffer,
)
# check which compression libs we have installed
try:
import zlib
def _check_zlib():
pass
except ImportError:
def _check_zlib():
raise ImportError('zlib is not installed')
_check_zlib.__doc__ = dedent(
"""\
Check if zlib is installed.
Raises
------
ImportError
Raised when zlib is not installed.
""",
)
try:
import blosc
def _check_blosc():
pass
except ImportError:
def _check_blosc():
raise ImportError('blosc is not installed')
_check_blosc.__doc__ = dedent(
"""\
Check if blosc is installed.
Raises
------
ImportError
Raised when blosc is not installed.
""",
)
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
args : an object or objects to serialize
encoding: encoding for unicode objects
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
"""
global compressor
compressor = kwargs.pop('compress', None)
if compressor:
compressor = u(compressor)
append = kwargs.pop('append', None)
if append:
mode = 'a+b'
else:
mode = 'wb'
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, mode) as fh:
writer(fh)
elif path_or_buf is None:
buf = compat.BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
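# Hedged usage sketch (illustrative only): serialising a DataFrame with to_msgpack().
# Passing None as path_or_buf returns the packed bytes; the 'compress' keyword selects
# the zlib/blosc branches implemented in convert() further down. The function name is
# hypothetical.
def _to_msgpack_example():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    packed = to_msgpack(None, df, compress='zlib')
    return packed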
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, BytesIO like or string
encoding: Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : type of object stored in file
"""
path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
l = list(unpack(fh, encoding=encoding, **kwargs))
if len(l) == 1:
return l[0]
return l
# see if we have an actual file
if isinstance(path_or_buf, compat.string_types):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
# treat as a binary-like
if isinstance(path_or_buf, compat.binary_type):
fh = None
try:
fh = compat.BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
# a buffer like
if hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
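# Hedged usage sketch (illustrative only): reading back the bytes produced by
# to_msgpack(). A single packed object is returned as-is, several come back as a list,
# and iterator=True defers the work to the Iterator class defined at the end of this
# module. The function name is hypothetical.
def _read_msgpack_example(packed_bytes):
    df = read_msgpack(packed_bytes)
    for obj in read_msgpack(packed_bytes, iterator=True):
        pass  # stream the packed objects one at a time
    return df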
dtype_dict = {21: np.dtype('M8[ns]'),
u('datetime64[ns]'): np.dtype('M8[ns]'),
u('datetime64[us]'): np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
u('timedelta64[ns]'): np.dtype('m8[ns]'),
u('timedelta64[us]'): np.dtype('m8[us]'),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype('int64'),
'category': 'category'
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
c2f_dict = {'complex': np.float64,
'complex128': np.float64,
'complex64': np.float32}
# numpy 1.6.1 compat
if hasattr(np, 'float128'):
c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
if compressor == 'zlib':
_check_zlib()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
_check_blosc()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
def unconvert(values, dtype, compress=None):
as_is_ext = isinstance(values, ExtType) and values.code == 0
if as_is_ext:
values = values.data
if is_categorical_dtype(dtype):
return values
elif is_object_dtype(dtype):
return np.array(values, dtype=object)
dtype = pandas_dtype(dtype).base
if not as_is_ext:
values = values.encode('latin1')
if compress:
if compress == u'zlib':
_check_zlib()
decompress = zlib.decompress
elif compress == u'blosc':
_check_blosc()
decompress = blosc.decompress
else:
raise ValueError("compress must be one of 'zlib' or 'blosc'")
try:
return np.frombuffer(
_move_into_mutable_buffer(decompress(values)),
dtype=dtype,
)
except _BadMove as e:
# Pull the decompressed data off of the `_BadMove` exception.
# We don't just store this in the locals because we want to
# minimize the risk of giving users access to a `bytes` object
# whose data is also given to a mutable buffer.
values = e.args[0]
if len(values) > 1:
# The empty string and single characters are memoized in many
# string creating functions in the capi. This case should not
# warn even though we need to make a copy because we are only
# copying at most 1 byte.
warnings.warn(
'copying data after decompressing; this may mean that'
' decompress is caching its result',
PerformanceWarning,
)
# fall through to copying `np.fromstring`
# Copy the string into a numpy array.
return np.fromstring(values, dtype=dtype)
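# Hedged sketch (illustrative only): a convert()/unconvert() round trip for a float64
# array with the module-level `compressor` left at its default of None, so the values
# travel as a raw ExtType(0, ...) byte payload. The function name is hypothetical.
def _convert_roundtrip_example():
    arr = np.arange(4, dtype='float64')
    ext = convert(arr)                            # ExtType wrapping arr.tostring()
    back = unconvert(ext, np.dtype('float64'))    # decoded back into an ndarray
    assert (back == arr).all()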
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {u'typ': u'range_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'start': getattr(obj, '_start', None),
u'stop': getattr(obj, '_stop', None),
u'step': getattr(obj, '_step', None)}
elif isinstance(obj, PeriodIndex):
return {u'typ': u'period_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'compress': compressor}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, 'tz', None)
# store tz info and data as UTC
if tz is not None:
tz = u(tz.zone)
obj = obj.tz_convert('UTC')
return {u'typ': u'datetime_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'tz': tz,
u'compress': compressor}
elif isinstance(obj, MultiIndex):
return {u'typ': u'multi_index',
u'klass': u(obj.__class__.__name__),
u'names': getattr(obj, 'names', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
else:
return {u'typ': u'index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif isinstance(obj, Categorical):
return {u'typ': u'category',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'codes': obj.codes,
u'categories': obj.categories,
u'ordered': obj.ordered,
u'compress': compressor}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError(
'msgpack sparse series is not implemented'
)
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {u'typ': u'series',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'index': obj.index,
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in compat.iteritems(obj)])
# return d
elif isinstance(obj, SparsePanel):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
# d = {'typ': 'sparse_panel',
# 'klass': obj.__class__.__name__,
# 'items': obj.items}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, df)
# for name, df in compat.iteritems(obj)])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {u'typ': u'block_manager',
u'klass': u(obj.__class__.__name__),
u'axes': data.axes,
u'blocks': [{u'locs': b.mgr_locs.as_array,
u'values': convert(b.values),
u'shape': b.values.shape,
u'dtype': u(b.dtype.name),
u'klass': u(b.__class__.__name__),
u'compress': compressor} for b in data.blocks]
}
elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
np.timedelta64, NaTType)):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = u(tz.zone)
offset = obj.offset
if offset is not None:
offset = u(offset.freqstr)
return {u'typ': u'timestamp',
u'value': obj.value,
u'offset': offset,
u'tz': tz}
if isinstance(obj, NaTType):
return {u'typ': u'nat'}
elif isinstance(obj, np.timedelta64):
return {u'typ': u'timedelta64',
u'data': obj.view('i8')}
elif isinstance(obj, timedelta):
return {u'typ': u'timedelta',
u'data': (obj.days, obj.seconds, obj.microseconds)}
elif isinstance(obj, np.datetime64):
return {u'typ': u'datetime64',
u'data': u(str(obj))}
elif isinstance(obj, datetime):
return {u'typ': u'datetime',
u'data': u(obj.isoformat())}
elif isinstance(obj, date):
return {u'typ': u'date',
u'data': u(obj.isoformat())}
raise Exception("cannot encode this datetimelike object: %s" % obj)
elif isinstance(obj, Period):
return {u'typ': u'period',
u'ordinal': obj.ordinal,
u'freq': u(obj.freq)}
elif isinstance(obj, BlockIndex):
return {u'typ': u'block_index',
u'klass': u(obj.__class__.__name__),
u'blocs': obj.blocs,
u'blengths': obj.blengths,
u'length': obj.length}
elif isinstance(obj, IntIndex):
return {u'typ': u'int_index',
u'klass': u(obj.__class__.__name__),
u'indices': obj.indices,
u'length': obj.length}
elif isinstance(obj, np.ndarray):
return {u'typ': u'ndarray',
u'shape': obj.shape,
u'ndim': obj.ndim,
u'dtype': u(obj.dtype.name),
u'data': convert(obj),
u'compress': compressor}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {u'typ': u'np_scalar',
u'sub_typ': u'np_complex',
u'dtype': u(obj.dtype.name),
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
else:
return {u'typ': u'np_scalar',
u'dtype': u(obj.dtype.name),
u'data': u(obj.__repr__())}
elif isinstance(obj, complex):
return {u'typ': u'np_complex',
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
return obj
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get(u'typ')
if typ is None:
return obj
elif typ == u'timestamp':
return Timestamp(obj[u'value'], tz=obj[u'tz'], offset=obj[u'offset'])
elif typ == u'nat':
return NaT
elif typ == u'period':
return Period(ordinal=obj[u'ordinal'], freq=obj[u'freq'])
elif typ == u'index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
return globals()[obj[u'klass']](data, dtype=dtype, name=obj[u'name'])
elif typ == u'range_index':
return globals()[obj[u'klass']](obj[u'start'],
obj[u'stop'],
obj[u'step'],
name=obj[u'name'])
elif typ == u'multi_index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
data = [tuple(x) for x in data]
return globals()[obj[u'klass']].from_tuples(data, names=obj[u'names'])
elif typ == u'period_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'])
return globals()[obj[u'klass']](data, **d)
elif typ == u'datetime_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'], verify_integrity=False)
result = globals()[obj[u'klass']](data, **d)
tz = obj[u'tz']
# reverse tz conversion
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
elif typ == u'category':
from_codes = globals()[obj[u'klass']].from_codes
return from_codes(codes=obj[u'codes'],
categories=obj[u'categories'],
ordered=obj[u'ordered'],
name=obj[u'name'])
elif typ == u'series':
dtype = dtype_for(obj[u'dtype'])
pd_dtype = pandas_dtype(dtype)
np_dtype = pandas_dtype(dtype).base
index = obj[u'index']
result = globals()[obj[u'klass']](unconvert(obj[u'data'], dtype,
obj[u'compress']),
index=index,
dtype=np_dtype,
name=obj[u'name'])
tz = getattr(pd_dtype, 'tz', None)
if tz:
result = result.dt.tz_localize('UTC').dt.tz_convert(tz)
return result
elif typ == u'block_manager':
axes = obj[u'axes']
def create_block(b):
values = unconvert(b[u'values'], dtype_for(b[u'dtype']),
b[u'compress']).reshape(b[u'shape'])
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if u'locs' in b:
placement = b[u'locs']
else:
placement = axes[0].get_indexer(b[u'items'])
return make_block(values=values,
klass=getattr(internals, b[u'klass']),
placement=placement,
dtype=b[u'dtype'])
blocks = [create_block(b) for b in obj[u'blocks']]
return globals()[obj[u'klass']](BlockManager(blocks, axes))
elif typ == u'datetime':
return parse(obj[u'data'])
elif typ == u'datetime64':
return np.datetime64(parse(obj[u'data']))
elif typ == u'date':
return parse(obj[u'data']).date()
elif typ == u'timedelta':
return timedelta(*obj[u'data'])
elif typ == u'timedelta64':
return np.timedelta64(int(obj[u'data']))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == u'block_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'blocs'],
obj[u'blengths'])
elif typ == u'int_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'indices'])
elif typ == u'ndarray':
return unconvert(obj[u'data'], np.typeDict[obj[u'dtype']],
obj.get(u'compress')).reshape(obj[u'shape'])
elif typ == u'np_scalar':
if obj.get(u'sub_typ') == u'np_complex':
return c2f(obj[u'real'], obj[u'imag'], obj[u'dtype'])
else:
dtype = dtype_for(obj[u'dtype'])
try:
return dtype(obj[u'data'])
except:
return dtype.type(obj[u'data'])
elif typ == u'np_complex':
return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j')
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
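# Hedged sketch (illustrative only): an encode()/decode() round trip for a Timestamp,
# mirroring what pack()/unpack() below do through the msgpack default/object_hook hooks.
# The function name is hypothetical.
def _encode_decode_example():
    ts = Timestamp('2016-01-01', tz='UTC')
    payload = encode(ts)      # {'typ': 'timestamp', 'value': ..., 'offset': ..., 'tz': 'UTC'}
    assert decode(payload) == ts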
def pack(o, default=encode,
encoding='utf-8', unicode_errors='strict', use_single_float=False,
autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type).pack(o)
def unpack(packed, object_hook=decode,
list_hook=None, use_list=False, encoding='utf-8',
unicode_errors='strict', object_pairs_hook=None,
max_buffer_size=0, ext_hook=ExtType):
"""
Unpack a packed object, return an iterator
Note: packed lists will be returned as tuples
"""
return Unpacker(packed, object_hook=object_hook,
list_hook=list_hook,
use_list=use_list, encoding=encoding,
unicode_errors=unicode_errors,
object_pairs_hook=object_pairs_hook,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
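# Hedged sketch (illustrative only): a pack()/unpack() round trip. As the unpack()
# docstring notes, packed lists come back as tuples because use_list defaults to False.
# The function name is hypothetical.
def _pack_unpack_example():
    packed = pack([1, 2, 3])
    (value,) = list(unpack(compat.BytesIO(packed)))
    assert value == (1, 2, 3)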
class Packer(_Packer):
def __init__(self, default=encode,
encoding='utf-8',
unicode_errors='strict',
use_single_float=False,
autoreset=1,
use_bin_type=1):
super(Packer, self).__init__(default=default,
encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type)
class Unpacker(_Unpacker):
def __init__(self, file_like=None, read_size=0, use_list=False,
object_hook=decode,
object_pairs_hook=None, list_hook=None, encoding='utf-8',
unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType):
super(Unpacker, self).__init__(file_like=file_like,
read_size=read_size,
use_list=use_list,
object_hook=object_hook,
object_pairs_hook=object_pairs_hook,
list_hook=list_hook,
encoding=encoding,
unicode_errors=unicode_errors,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Iterator(object):
""" manage the unpacking iteration,
close the file on completion """
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
needs_closing = True
try:
# see if we have an actual file
if isinstance(self.path, compat.string_types):
try:
path_exists = os.path.exists(self.path)
except TypeError:
path_exists = False
if path_exists:
fh = open(self.path, 'rb')
else:
fh = compat.BytesIO(self.path)
else:
if not hasattr(self.path, 'read'):
fh = compat.BytesIO(self.path)
else:
# a file-like
needs_closing = False
fh = self.path
unpacker = unpack(fh)
for o in unpacker:
yield o
finally:
if needs_closing:
fh.close()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import zmq
from supervisor.loggers import Logger
from supvisors.ttypes import Payload
from supvisors.utils import *
# Constant for Zmq sockets
INPROC_NAME = 'supvisors'
ZMQ_LINGER = 0
# reference to the Zmq Context instance
ZmqContext = zmq.Context.instance()
class InternalEventPublisher(object):
""" This class is the wrapper of the ZeroMQ socket that publishes the events
to the Supvisors instances.
Attributes are:
- logger: a reference to the Supvisors logger,
- address: the address name where this process is running,
- socket: the ZeroMQ socket with a PUBLISH pattern, bound on the internal_port defined in the ['supvisors'] section of the Supervisor configuration file.
"""
def __init__(self, address: str, port: int, logger: Logger) -> None:
""" Initialization of the attributes. """
# keep a reference to supvisors
self.logger = logger
# get local address
self.address = address
# create ZMQ socket
self.socket = ZmqContext.socket(zmq.PUB)
url = 'tcp://*:{}'.format(port)
self.logger.info('binding InternalEventPublisher to %s' % url)
self.socket.bind(url)
def close(self) -> None:
""" This method closes the PyZMQ socket. """
self.socket.close(ZMQ_LINGER)
def send_tick_event(self, payload: Payload) -> None:
""" Publishes the tick event with ZeroMQ. """
self.logger.trace('send TickEvent {}'.format(payload))
self.socket.send_pyobj((InternalEventHeaders.TICK, self.address, payload))
def send_process_event(self, payload: Payload) -> None:
""" Publishes the process event with ZeroMQ. """
self.logger.trace('send ProcessEvent {}'.format(payload))
self.socket.send_pyobj((InternalEventHeaders.PROCESS, self.address, payload))
def send_statistics(self, payload: Payload) -> None:
""" Publishes the statistics with ZeroMQ. """
self.logger.trace('send Statistics {}'.format(payload))
self.socket.send_pyobj((InternalEventHeaders.STATISTICS, self.address, payload))
class InternalEventSubscriber(object):
""" Class for subscription to Listener events.
Attributes:
- port: the port number used for internal events,
- socket: the PyZMQ subscriber.
"""
def __init__(self, addresses, port: int):
""" Initialization of the attributes. """
self.port = port
self.socket = ZmqContext.socket(zmq.SUB)
# connect all addresses
for address in addresses:
url = 'tcp://{}:{}'.format(address, self.port)
self.socket.connect(url)
self.socket.setsockopt(zmq.SUBSCRIBE, b'')
def close(self) -> None:
""" This method closes the PyZMQ socket. """
self.socket.close(ZMQ_LINGER)
def receive(self):
""" Reception and pyobj de-serialization of one message. """
return self.socket.recv_pyobj(zmq.NOBLOCK)
def disconnect(self, addresses) -> None:
""" This method disconnects from the PyZMQ socket all addresses passed in parameter. """
for address in addresses:
url = 'tcp://{}:{}'.format(address, self.port)
self.socket.disconnect(url)
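# Hedged usage sketch (illustrative only): wiring an InternalEventPublisher to an
# InternalEventSubscriber on the loopback address. The port number is hypothetical and
# `logger` is expected to be a Supervisor-style Logger instance.
def _internal_event_example(logger, port=65001):
    import time
    publisher = InternalEventPublisher('127.0.0.1', port, logger)
    subscriber = InternalEventSubscriber(['127.0.0.1'], port)
    time.sleep(0.5)  # give the PUB/SUB pair time to complete the subscription handshake
    publisher.send_tick_event({'when': 1234567890})
    time.sleep(0.1)
    header, address, payload = subscriber.receive()  # (InternalEventHeaders.TICK, address, payload)
    subscriber.close()
    publisher.close()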
class EventPublisher(object):
""" Class for ZMQ publication of Supvisors events. """
def __init__(self, port, logger):
""" Initialization of the attributes. """
self.logger = logger
self.socket = ZmqContext.socket(zmq.PUB)
# WARN: this is a local binding, only visible to processes located on the same address
url = 'tcp://127.0.0.1:%d' % port
self.logger.info('binding local Supvisors EventPublisher to %s' % url)
self.socket.bind(url)
def close(self) -> None:
""" This method closes the PyZMQ socket. """
self.socket.close(ZMQ_LINGER)
def send_supvisors_status(self, status: Payload) -> None:
""" This method sends a serialized form of the supvisors status through the socket. """
self.logger.trace('send SupvisorsStatus {}'.format(status))
self.socket.send_string(EventHeaders.SUPVISORS, zmq.SNDMORE)
self.socket.send_json(status)
def send_address_status(self, status: Payload) -> None:
""" This method sends a serialized form of the address status through the socket. """
self.logger.trace('send AddressStatus {}'.format(status))
self.socket.send_string(EventHeaders.ADDRESS, zmq.SNDMORE)
self.socket.send_json(status)
def send_application_status(self, status: Payload) -> None:
""" This method sends a serialized form of the application status through the socket. """
self.logger.trace('send ApplicationStatus {}'.format(status))
self.socket.send_string(EventHeaders.APPLICATION, zmq.SNDMORE)
self.socket.send_json(status)
def send_process_event(self, address: str, event: Payload) -> None:
""" This method sends a process event through the socket. """
# build the event before it is sent
evt = event.copy()
evt['address'] = address
self.logger.trace('send Process Event {}'.format(evt))
self.socket.send_string(EventHeaders.PROCESS_EVENT, zmq.SNDMORE)
self.socket.send_json(evt)
def send_process_status(self, status: Payload) -> None:
""" This method sends a serialized form of the process status through the socket. """
self.logger.trace('send Process Status {}'.format(status))
self.socket.send_string(EventHeaders.PROCESS_STATUS, zmq.SNDMORE)
self.socket.send_json(status)
class EventSubscriber(object):
""" The EventSubscriber wraps the ZeroMQ socket that connects
to **Supvisors**.
The TCP socket is configured with a ZeroMQ ``SUBSCRIBE`` pattern.
It is connected to the **Supvisors** instance running on the localhost
and bound on the event port.
The EventSubscriber requires:
- the event port number used by **Supvisors** to publish its events,
- a logger reference to log traces.
Attributes:
- logger: the reference to the logger,
- socket: the ZeroMQ socket connected to **Supvisors**.
"""
def __init__(self, zmq_context, port, logger):
""" Initialization of the attributes. """
self.logger = logger
# create ZeroMQ socket
self.socket = zmq_context.socket(zmq.SUB)
# WARN: this is a local binding, only visible to processes
# located on the same address
url = 'tcp://127.0.0.1:%d' % port
self.logger.info('connecting EventSubscriber to Supvisors at %s' % url)
self.socket.connect(url)
self.logger.debug('EventSubscriber connected')
def close(self):
""" Close the ZeroMQ socket. """
self.socket.close(ZMQ_LINGER)
# subscription part
def subscribe_all(self):
""" Subscription to all events. """
self.socket.setsockopt(zmq.SUBSCRIBE, b'')
def subscribe_supvisors_status(self):
""" Subscription to Supvisors Status messages. """
self.subscribe(EventHeaders.SUPVISORS)
def subscribe_address_status(self):
""" Subscription to Address Status messages. """
self.subscribe(EventHeaders.ADDRESS)
def subscribe_application_status(self):
""" Subscription to Application Status messages. """
self.subscribe(EventHeaders.APPLICATION)
def subscribe_process_event(self):
""" Subscription to Process Event messages. """
self.subscribe(EventHeaders.PROCESS_EVENT)
def subscribe_process_status(self):
""" Subscription to Process Status messages. """
self.subscribe(EventHeaders.PROCESS_STATUS)
def subscribe(self, code):
""" Subscription to the event named code. """
self.socket.setsockopt(zmq.SUBSCRIBE, code.encode('utf-8'))
# unsubscription part
    def unsubscribe_all(self):
        """ Unsubscription from all events. """
        self.socket.setsockopt(zmq.UNSUBSCRIBE, b'')
    def unsubscribe_supvisors_status(self):
        """ Unsubscription from Supvisors Status messages. """
        self.unsubscribe(EventHeaders.SUPVISORS)
    def unsubscribe_address_status(self):
        """ Unsubscription from Address Status messages. """
        self.unsubscribe(EventHeaders.ADDRESS)
    def unsubscribe_application_status(self):
        """ Unsubscription from Application Status messages. """
        self.unsubscribe(EventHeaders.APPLICATION)
    def unsubscribe_process_event(self):
        """ Unsubscription from Process Event messages. """
        self.unsubscribe(EventHeaders.PROCESS_EVENT)
    def unsubscribe_process_status(self):
        """ Unsubscription from Process Status messages. """
        self.unsubscribe(EventHeaders.PROCESS_STATUS)
def unsubscribe(self, code):
""" Remove subscription to the event named code. """
self.socket.setsockopt(zmq.UNSUBSCRIBE, code.encode('utf-8'))
# reception part
def receive(self):
""" Reception of two-parts message:
- header as an unicode string,
- data encoded in JSON.
"""
return self.socket.recv_string(), self.socket.recv_json()
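# Hedged usage sketch (illustrative only): a minimal client loop built on EventSubscriber.
# It assumes a Supvisors instance is publishing on the event port configured in the
# Supervisor configuration; the port value below is hypothetical.
def _event_subscriber_example(logger, event_port=60002):
    subscriber = EventSubscriber(zmq.Context.instance(), event_port, logger)
    subscriber.subscribe_supvisors_status()
    try:
        header, body = subscriber.receive()  # blocks until a two-part message arrives
        logger.info('received {}: {}'.format(header, body))
    finally:
        subscriber.close()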
class RequestPuller(object):
""" Class for pulling deferred XML-RPC.
Attributes:
- socket: the PyZMQ puller.
As it uses an inproc transport, this implies the following conditions:
- the RequestPusher instance and the RequestPuller instance MUST share the same ZMQ context,
- the RequestPusher instance MUST be created before the RequestPuller instance.
"""
def __init__(self):
""" Initialization of the attributes. """
self.socket = ZmqContext.socket(zmq.PULL)
url = 'inproc://' + INPROC_NAME
self.socket.connect(url)
def close(self):
""" This method closes the PyZMQ socket. """
self.socket.close(ZMQ_LINGER)
def receive(self):
""" Reception and pyobj deserialization of one message. """
return self.socket.recv_pyobj()
class RequestPusher(object):
""" Class for pushing deferred XML-RPC.
Attributes:
- logger: a reference to the Supvisors logger,
- socket: the PyZMQ pusher.
As it uses an inproc transport, this implies the following conditions:
- the RequestPusher instance and the RequestPuller instance MUST share the same ZMQ context,
- the RequestPusher instance MUST be created before the RequestPuller instance.
"""
def __init__(self, logger):
""" Initialization of the attributes. """
self.logger = logger
self.socket = ZmqContext.socket(zmq.PUSH)
url = 'inproc://' + INPROC_NAME
        self.logger.info('binding RequestPusher to %s' % url)
self.socket.bind(url)
def close(self):
""" This method closes the PyZMQ socket. """
self.socket.close(ZMQ_LINGER)
def send_check_address(self, address_name: str) -> None:
""" Send request to check address. """
self.logger.debug('RequestPusher.send_check_address: address_name={}'.format(address_name))
try:
self.socket.send_pyobj((DeferredRequestHeaders.CHECK_ADDRESS, (address_name,)),
zmq.NOBLOCK)
except zmq.error.Again:
self.logger.error('RequestPusher.send_check_address: CHECK_ADDRESS not sent')
def send_isolate_addresses(self, address_names):
""" Send request to isolate address. """
self.logger.trace('RequestPusher.send_isolate_addresses: address_names={}'.format(address_names))
try:
self.socket.send_pyobj((DeferredRequestHeaders.ISOLATE_ADDRESSES, address_names),
zmq.NOBLOCK)
except zmq.error.Again:
self.logger.error('RequestPusher.send_isolate_addresses: ISOLATE_ADDRESSES not sent')
def send_start_process(self, address_name, namespec, extra_args):
""" Send request to start process. """
self.logger.trace('send START_PROCESS {} to {} with {}'.format(namespec, address_name, extra_args))
try:
self.socket.send_pyobj((DeferredRequestHeaders.START_PROCESS, (address_name, namespec, extra_args)),
zmq.NOBLOCK)
except zmq.error.Again:
self.logger.error('START_PROCESS not sent')
def send_stop_process(self, address_name, namespec):
""" Send request to stop process. """
self.logger.trace('send STOP_PROCESS {} to {}'.format(namespec, address_name))
try:
self.socket.send_pyobj((DeferredRequestHeaders.STOP_PROCESS, (address_name, namespec)),
zmq.NOBLOCK)
except zmq.error.Again:
self.logger.error('STOP_PROCESS not sent')
def send_restart(self, address_name):
""" Send request to restart a Supervisor. """
self.logger.trace('send RESTART {}'.format(address_name))
try:
self.socket.send_pyobj((DeferredRequestHeaders.RESTART, (address_name,)),
zmq.NOBLOCK)
except zmq.error.Again:
self.logger.error('RESTART not sent')
def send_shutdown(self, address_name):
""" Send request to shutdown a Supervisor. """
self.logger.trace('send SHUTDOWN {}'.format(address_name))
try:
self.socket.send_pyobj((DeferredRequestHeaders.SHUTDOWN, (address_name,)),
zmq.NOBLOCK)
except zmq.error.Again:
self.logger.error('SHUTDOWN not sent')
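# Hedged sketch (illustrative only): the creation order imposed by the inproc transport,
# as stated in the RequestPuller/RequestPusher docstrings - the pusher binds first, then
# the puller connects, both sockets coming from the shared ZmqContext. The function name
# is hypothetical.
def _push_pull_order_example(logger):
    pusher = RequestPusher(logger)      # binds inproc://supvisors
    puller = RequestPuller()            # connects to the same endpoint
    pusher.send_check_address('127.0.0.1')
    header, payload = puller.receive()  # (DeferredRequestHeaders.CHECK_ADDRESS, ('127.0.0.1',))
    puller.close()
    pusher.close()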
class SupervisorZmq(object):
""" Class for PyZmq context and sockets used from the Supervisor thread.
This instance owns the PyZmq context that is shared between the Supervisor thread and the Supvisors thread.
"""
def __init__(self, supvisors):
""" Create the sockets. """
self.publisher = EventPublisher(supvisors.options.event_port, supvisors.logger)
self.internal_publisher = InternalEventPublisher(supvisors.address_mapper.local_address,
supvisors.options.internal_port,
supvisors.logger)
self.pusher = RequestPusher(supvisors.logger)
def close(self):
""" Close the sockets. """
self.pusher.close()
self.internal_publisher.close()
self.publisher.close()
class SupvisorsZmq(object):
""" Class for PyZmq context and sockets used from the Supvisors thread.
"""
def __init__(self, supvisors):
""" Create the sockets.
The Supervisor logger cannot be used here (not thread-safe). """
self.internal_subscriber = InternalEventSubscriber(supvisors.address_mapper.addresses,
supvisors.options.internal_port)
self.puller = RequestPuller()
def close(self):
""" Close the sockets. """
self.puller.close()
self.internal_subscriber.close()
|
|
"""BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21)
This is both an example of how WSGI can be implemented, and a basis for running
simple web applications on a local machine, such as might be done when testing
or debugging an application. It has not been reviewed for security issues,
however, and we strongly recommend that you use a "real" web server for
production use.
For example usage, see the 'if __name__=="__main__"' block at the end of the
module. See also the BaseHTTPServer module docs for other API information.
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import sys
import urllib.parse
from wsgiref.handlers import SimpleHandler
__version__ = "0.1"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
server_software = software_version
def close(self):
try:
self.request_handler.log_request(
self.status.split(' ',1)[0], self.bytes_sent
)
finally:
SimpleHandler.close(self)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
HTTPServer.server_bind(self)
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.parse.unquote(path)
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.get('content-type') is None:
env['CONTENT_TYPE'] = self.headers.get_content_type()
else:
env['CONTENT_TYPE'] = self.headers['content-type']
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
for k, v in self.headers.items():
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def demo_app(environ,start_response):
from io import StringIO
stdout = StringIO()
print("Hello world!", file=stdout)
print(file=stdout)
h = sorted(environ.items())
for k,v in h:
print(k,'=',repr(v), file=stdout)
start_response(b"200 OK", [(b'Content-Type',b'text/plain; charset=utf-8')])
return [stdout.getvalue().encode("utf-8")]
def make_server(
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
"""Create a new WSGI server listening on `host` and `port` for `app`"""
server = server_class((host, port), handler_class)
server.set_app(app)
return server
if __name__ == '__main__':
httpd = make_server('', 8000, demo_app)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
import webbrowser
webbrowser.open('http://localhost:8000/xyz?abc')
httpd.handle_request() # serve one request, then exit
#
|
|
import os
import six
from twisted.trial import unittest
from twisted.protocols.policies import WrappingFactory
from twisted.python.filepath import FilePath
from twisted.internet import reactor, defer, error
from twisted.web import server, static, util, resource
from twisted.web.test.test_webclient import ForeverTakingResource, \
NoLengthResource, HostHeaderResource, \
PayloadResource, BrokenDownloadResource
from twisted.cred import portal, checkers, credentials
from w3lib.url import path_to_file_uri
from scrapy import twisted_version
from scrapy.core.downloader.handlers import DownloadHandlers
from scrapy.core.downloader.handlers.file import FileDownloadHandler
from scrapy.core.downloader.handlers.http import HTTPDownloadHandler, HttpDownloadHandler
from scrapy.core.downloader.handlers.http10 import HTTP10DownloadHandler
from scrapy.core.downloader.handlers.http11 import HTTP11DownloadHandler
from scrapy.core.downloader.handlers.s3 import S3DownloadHandler
from scrapy.spiders import Spider
from scrapy.http import Request
from scrapy.settings import Settings
from scrapy.utils.test import get_crawler
from scrapy.utils.python import to_bytes
from scrapy.exceptions import NotConfigured
from tests.mockserver import MockServer, ssl_context_factory
from tests.spiders import SingleRequestSpider
class DummyDH(object):
def __init__(self, crawler):
pass
class OffDH(object):
def __init__(self, crawler):
raise NotConfigured
class LoadTestCase(unittest.TestCase):
def test_enabled_handler(self):
handlers = {'scheme': 'tests.test_downloader_handlers.DummyDH'}
crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
dh = DownloadHandlers(crawler)
self.assertIn('scheme', dh._schemes)
for scheme in handlers: # force load handlers
dh._get_handler(scheme)
self.assertIn('scheme', dh._handlers)
self.assertNotIn('scheme', dh._notconfigured)
def test_not_configured_handler(self):
handlers = {'scheme': 'tests.test_downloader_handlers.OffDH'}
crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
dh = DownloadHandlers(crawler)
self.assertIn('scheme', dh._schemes)
for scheme in handlers: # force load handlers
dh._get_handler(scheme)
self.assertNotIn('scheme', dh._handlers)
self.assertIn('scheme', dh._notconfigured)
def test_disabled_handler(self):
handlers = {'scheme': None}
crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
dh = DownloadHandlers(crawler)
self.assertNotIn('scheme', dh._schemes)
for scheme in handlers: # force load handlers
dh._get_handler(scheme)
self.assertNotIn('scheme', dh._handlers)
self.assertIn('scheme', dh._notconfigured)
class FileTestCase(unittest.TestCase):
def setUp(self):
self.tmpname = self.mktemp()
fd = open(self.tmpname + '^', 'w')
fd.write('0123456789')
fd.close()
self.download_request = FileDownloadHandler(Settings()).download_request
def test_download(self):
def _test(response):
self.assertEquals(response.url, request.url)
self.assertEquals(response.status, 200)
self.assertEquals(response.body, b'0123456789')
request = Request(path_to_file_uri(self.tmpname + '^'))
assert request.url.upper().endswith('%5E')
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_non_existent(self):
request = Request('file://%s' % self.mktemp())
d = self.download_request(request, Spider('foo'))
return self.assertFailure(d, IOError)
class HttpTestCase(unittest.TestCase):
scheme = 'http'
download_handler_cls = HTTPDownloadHandler
def setUp(self):
name = self.mktemp()
os.mkdir(name)
FilePath(name).child("file").setContent(b"0123456789")
r = static.File(name)
r.putChild(b"redirect", util.Redirect(b"/file"))
r.putChild(b"wait", ForeverTakingResource())
r.putChild(b"hang-after-headers", ForeverTakingResource(write=True))
r.putChild(b"nolength", NoLengthResource())
r.putChild(b"host", HostHeaderResource())
r.putChild(b"payload", PayloadResource())
r.putChild(b"broken", BrokenDownloadResource())
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
self.host = 'localhost'
if self.scheme == 'https':
self.port = reactor.listenSSL(
0, self.wrapper, ssl_context_factory(), interface=self.host)
else:
self.port = reactor.listenTCP(0, self.wrapper, interface=self.host)
self.portno = self.port.getHost().port
self.download_handler = self.download_handler_cls(Settings())
self.download_request = self.download_handler.download_request
@defer.inlineCallbacks
def tearDown(self):
yield self.port.stopListening()
if hasattr(self.download_handler, 'close'):
yield self.download_handler.close()
def getURL(self, path):
return "%s://%s:%d/%s" % (self.scheme, self.host, self.portno, path)
def test_download(self):
request = Request(self.getURL('file'))
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEquals, b"0123456789")
return d
def test_download_head(self):
request = Request(self.getURL('file'), method='HEAD')
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEquals, b'')
return d
def test_redirect_status(self):
request = Request(self.getURL('redirect'))
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.status)
d.addCallback(self.assertEquals, 302)
return d
def test_redirect_status_head(self):
request = Request(self.getURL('redirect'), method='HEAD')
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.status)
d.addCallback(self.assertEquals, 302)
return d
@defer.inlineCallbacks
def test_timeout_download_from_spider(self):
if self.scheme == 'https':
raise unittest.SkipTest(
'test_timeout_download_from_spider skipped under https')
spider = Spider('foo')
meta = {'download_timeout': 0.2}
# client connects but no data is received
request = Request(self.getURL('wait'), meta=meta)
d = self.download_request(request, spider)
yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)
# client connects, server send headers and some body bytes but hangs
request = Request(self.getURL('hang-after-headers'), meta=meta)
d = self.download_request(request, spider)
yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)
def test_host_header_not_in_request_headers(self):
def _test(response):
self.assertEquals(
response.body, to_bytes('%s:%d' % (self.host, self.portno)))
self.assertEquals(request.headers, {})
request = Request(self.getURL('host'))
return self.download_request(request, Spider('foo')).addCallback(_test)
    def test_host_header_set_in_request_headers(self):
def _test(response):
self.assertEquals(response.body, b'example.com')
self.assertEquals(request.headers.get('Host'), b'example.com')
request = Request(self.getURL('host'), headers={'Host': 'example.com'})
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_payload(self):
body = b'1'*100 # PayloadResource requires body length to be 100
request = Request(self.getURL('payload'), method='POST', body=body)
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEquals, body)
return d
class DeprecatedHttpTestCase(HttpTestCase):
"""HTTP 1.0 test case"""
download_handler_cls = HttpDownloadHandler
class Http10TestCase(HttpTestCase):
"""HTTP 1.0 test case"""
download_handler_cls = HTTP10DownloadHandler
class Https10TestCase(Http10TestCase):
scheme = 'https'
class Http11TestCase(HttpTestCase):
"""HTTP 1.1 test case"""
download_handler_cls = HTTP11DownloadHandler
if twisted_version < (11, 1, 0):
skip = 'HTTP1.1 not supported in twisted < 11.1.0'
def test_download_without_maxsize_limit(self):
request = Request(self.getURL('file'))
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEquals, b"0123456789")
return d
@defer.inlineCallbacks
def test_download_with_maxsize(self):
request = Request(self.getURL('file'))
# 10 is minimal size for this request and the limit is only counted on
# response body. (regardless of headers)
d = self.download_request(request, Spider('foo', download_maxsize=10))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEquals, b"0123456789")
yield d
d = self.download_request(request, Spider('foo', download_maxsize=9))
yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)
@defer.inlineCallbacks
def test_download_with_maxsize_per_req(self):
meta = {'download_maxsize': 2}
request = Request(self.getURL('file'), meta=meta)
d = self.download_request(request, Spider('foo'))
yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)
@defer.inlineCallbacks
def test_download_with_small_maxsize_per_spider(self):
request = Request(self.getURL('file'))
d = self.download_request(request, Spider('foo', download_maxsize=2))
yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)
def test_download_with_large_maxsize_per_spider(self):
request = Request(self.getURL('file'))
d = self.download_request(request, Spider('foo', download_maxsize=100))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEquals, b"0123456789")
return d
class Https11TestCase(Http11TestCase):
scheme = 'https'
class Http11MockServerTestCase(unittest.TestCase):
"""HTTP 1.1 test case with MockServer"""
if twisted_version < (11, 1, 0):
skip = 'HTTP1.1 not supported in twisted < 11.1.0'
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
def tearDown(self):
self.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def test_download_with_content_length(self):
crawler = get_crawler(SingleRequestSpider)
        # http://localhost:8998/partial sets Content-Length to 1024; use
        # download_maxsize=1000 to avoid downloading it
yield crawler.crawl(seed=Request(url='http://localhost:8998/partial', meta={'download_maxsize': 1000}))
failure = crawler.spider.meta['failure']
self.assertIsInstance(failure.value, defer.CancelledError)
@defer.inlineCallbacks
def test_download(self):
crawler = get_crawler(SingleRequestSpider)
yield crawler.crawl(seed=Request(url='http://localhost:8998'))
failure = crawler.spider.meta.get('failure')
        self.assertIsNone(failure)
        reason = crawler.spider.meta['close_reason']
        self.assertEqual(reason, 'finished')
@defer.inlineCallbacks
def test_download_gzip_response(self):
if twisted_version > (12, 3, 0):
crawler = get_crawler(SingleRequestSpider)
body = b'1'*100 # PayloadResource requires body length to be 100
request = Request('http://localhost:8998/payload', method='POST', body=body, meta={'download_maxsize': 50})
yield crawler.crawl(seed=request)
failure = crawler.spider.meta['failure']
# download_maxsize < 100, hence the CancelledError
self.assertIsInstance(failure.value, defer.CancelledError)
if six.PY2:
request.headers.setdefault(b'Accept-Encoding', b'gzip,deflate')
request = request.replace(url='http://localhost:8998/xpayload')
yield crawler.crawl(seed=request)
# download_maxsize = 50 is enough for the gzipped response
failure = crawler.spider.meta.get('failure')
                self.assertIsNone(failure)
                reason = crawler.spider.meta['close_reason']
                self.assertEqual(reason, 'finished')
else:
# See issue https://twistedmatrix.com/trac/ticket/8175
raise unittest.SkipTest("xpayload only enabled for PY2")
else:
raise unittest.SkipTest("xpayload and payload endpoint only enabled for twisted > 12.3.0")
class UriResource(resource.Resource):
"""Return the full uri that was requested"""
def getChild(self, path, request):
return self
def render(self, request):
return request.uri
class HttpProxyTestCase(unittest.TestCase):
download_handler_cls = HTTPDownloadHandler
def setUp(self):
site = server.Site(UriResource(), timeout=None)
wrapper = WrappingFactory(site)
self.port = reactor.listenTCP(0, wrapper, interface='127.0.0.1')
self.portno = self.port.getHost().port
self.download_handler = self.download_handler_cls(Settings())
self.download_request = self.download_handler.download_request
@defer.inlineCallbacks
def tearDown(self):
yield self.port.stopListening()
if hasattr(self.download_handler, 'close'):
yield self.download_handler.close()
def getURL(self, path):
return "http://127.0.0.1:%d/%s" % (self.portno, path)
def test_download_with_proxy(self):
def _test(response):
self.assertEquals(response.status, 200)
self.assertEquals(response.url, request.url)
self.assertEquals(response.body, b'http://example.com')
http_proxy = self.getURL('')
request = Request('http://example.com', meta={'proxy': http_proxy})
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_download_with_proxy_https_noconnect(self):
def _test(response):
self.assertEquals(response.status, 200)
self.assertEquals(response.url, request.url)
self.assertEquals(response.body, b'https://example.com')
http_proxy = '%s?noconnect' % self.getURL('')
request = Request('https://example.com', meta={'proxy': http_proxy})
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_download_without_proxy(self):
def _test(response):
self.assertEquals(response.status, 200)
self.assertEquals(response.url, request.url)
self.assertEquals(response.body, b'/path/to/resource')
request = Request(self.getURL('path/to/resource'))
return self.download_request(request, Spider('foo')).addCallback(_test)
class DeprecatedHttpProxyTestCase(HttpProxyTestCase):
"""Old deprecated reference to http10 downloader handler"""
download_handler_cls = HttpDownloadHandler
class Http10ProxyTestCase(HttpProxyTestCase):
download_handler_cls = HTTP10DownloadHandler
class Http11ProxyTestCase(HttpProxyTestCase):
download_handler_cls = HTTP11DownloadHandler
if twisted_version < (11, 1, 0):
skip = 'HTTP1.1 not supported in twisted < 11.1.0'
@defer.inlineCallbacks
def test_download_with_proxy_https_timeout(self):
""" Test TunnelingTCP4ClientEndpoint """
http_proxy = self.getURL('')
domain = 'https://no-such-domain.nosuch'
request = Request(
domain, meta={'proxy': http_proxy, 'download_timeout': 0.2})
d = self.download_request(request, Spider('foo'))
timeout = yield self.assertFailure(d, error.TimeoutError)
self.assertIn(domain, timeout.osError)
class HttpDownloadHandlerMock(object):
def __init__(self, settings):
pass
def download_request(self, request, spider):
return request
class S3AnonTestCase(unittest.TestCase):
try:
import boto
except ImportError:
skip = 'missing boto library'
def setUp(self):
self.s3reqh = S3DownloadHandler(Settings(),
httpdownloadhandler=HttpDownloadHandlerMock,
#anon=True, # is implicit
)
self.download_request = self.s3reqh.download_request
self.spider = Spider('foo')
def test_anon_request(self):
req = Request('s3://aws-publicdatasets/')
httpreq = self.download_request(req, self.spider)
self.assertEqual(hasattr(self.s3reqh.conn, 'anon'), True)
self.assertEqual(self.s3reqh.conn.anon, True)
class S3TestCase(unittest.TestCase):
download_handler_cls = S3DownloadHandler
try:
import boto
except ImportError:
skip = 'missing boto library'
    # these tests use the same example keys as the amazon developer guide
# http://s3.amazonaws.com/awsdocs/S3/20060301/s3-dg-20060301.pdf
# and the tests described here are the examples from that manual
AWS_ACCESS_KEY_ID = '0PN5J17HBGZHT7JJ3X82'
AWS_SECRET_ACCESS_KEY = 'uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o'
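    # For reference (a summary of the S3 REST auth scheme described in the guide above):
    # the Authorization header is 'AWS <AccessKeyId>:<Signature>', where Signature is
    # Base64(HMAC-SHA1(SecretAccessKey, StringToSign)) and StringToSign concatenates the
    # HTTP verb, Content-MD5, Content-Type, Date and the canonicalized amz headers and
    # resource. The expected header values in the tests below come from that manual.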
def setUp(self):
s3reqh = S3DownloadHandler(Settings(), self.AWS_ACCESS_KEY_ID,
self.AWS_SECRET_ACCESS_KEY,
httpdownloadhandler=HttpDownloadHandlerMock)
self.download_request = s3reqh.download_request
self.spider = Spider('foo')
def test_request_signing1(self):
# gets an object from the johnsmith bucket.
req = Request('s3://johnsmith/photos/puppy.jpg',
headers={'Date': 'Tue, 27 Mar 2007 19:36:42 +0000'})
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
'AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=')
def test_request_signing2(self):
# puts an object into the johnsmith bucket.
req = Request('s3://johnsmith/photos/puppy.jpg', method='PUT', headers={
'Content-Type': 'image/jpeg',
'Date': 'Tue, 27 Mar 2007 21:15:45 +0000',
'Content-Length': '94328',
})
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
'AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=')
def test_request_signing3(self):
# lists the content of the johnsmith bucket.
req = Request('s3://johnsmith/?prefix=photos&max-keys=50&marker=puppy', \
method='GET', headers={
'User-Agent': 'Mozilla/5.0',
'Date': 'Tue, 27 Mar 2007 19:42:41 +0000',
})
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
'AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=')
def test_request_signing4(self):
# fetches the access control policy sub-resource for the 'johnsmith' bucket.
req = Request('s3://johnsmith/?acl', \
method='GET', headers={'Date': 'Tue, 27 Mar 2007 19:44:46 +0000'})
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
'AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=')
def test_request_signing5(self):
# deletes an object from the 'johnsmith' bucket using the
# path-style and Date alternative.
req = Request('s3://johnsmith/photos/puppy.jpg', \
method='DELETE', headers={
'Date': 'Tue, 27 Mar 2007 21:20:27 +0000',
'x-amz-date': 'Tue, 27 Mar 2007 21:20:26 +0000',
})
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
'AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk=')
def test_request_signing6(self):
# uploads an object to a CNAME style virtual hosted bucket with metadata.
req = Request('s3://static.johnsmith.net:8080/db-backup.dat.gz', \
method='PUT', headers={
'User-Agent': 'curl/7.15.5',
'Host': 'static.johnsmith.net:8080',
'Date': 'Tue, 27 Mar 2007 21:06:08 +0000',
'x-amz-acl': 'public-read',
'content-type': 'application/x-download',
'Content-MD5': '4gJE4saaMU4BqNR0kLY+lw==',
'X-Amz-Meta-ReviewedBy': '[email protected],[email protected]',
'X-Amz-Meta-FileChecksum': '0x02661779',
'X-Amz-Meta-ChecksumAlgorithm': 'crc32',
'Content-Disposition': 'attachment; filename=database.dat',
'Content-Encoding': 'gzip',
'Content-Length': '5913339',
})
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
'AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=')
def test_request_signing7(self):
# ensure that spaces are quoted properly before signing
req = Request(
("s3://johnsmith/photos/my puppy.jpg"
"?response-content-disposition=my puppy.jpg"),
method='GET',
headers={
'Date': 'Tue, 27 Mar 2007 19:42:41 +0000',
})
httpreq = self.download_request(req, self.spider)
self.assertEqual(
httpreq.headers['Authorization'],
'AWS 0PN5J17HBGZHT7JJ3X82:+CfvG8EZ3YccOrRVMXNaK2eKZmM=')
class FTPTestCase(unittest.TestCase):
username = "scrapy"
password = "passwd"
if twisted_version < (10, 2, 0):
skip = "Twisted pre 10.2.0 doesn't allow to set home path other than /home"
if six.PY3:
skip = "Twisted missing ftp support for PY3"
def setUp(self):
from twisted.protocols.ftp import FTPRealm, FTPFactory
from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler
# setup dirs and test file
self.directory = self.mktemp()
os.mkdir(self.directory)
userdir = os.path.join(self.directory, self.username)
os.mkdir(userdir)
fp = FilePath(userdir)
fp.child('file.txt').setContent("I have the power!")
fp.child('file with spaces.txt').setContent("Moooooooooo power!")
# setup server
realm = FTPRealm(anonymousRoot=self.directory, userHome=self.directory)
p = portal.Portal(realm)
users_checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
users_checker.addUser(self.username, self.password)
p.registerChecker(users_checker, credentials.IUsernamePassword)
self.factory = FTPFactory(portal=p)
self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
self.portNum = self.port.getHost().port
self.download_handler = FTPDownloadHandler(Settings())
self.addCleanup(self.port.stopListening)
def _add_test_callbacks(self, deferred, callback=None, errback=None):
def _clean(data):
self.download_handler.client.transport.loseConnection()
return data
deferred.addCallback(_clean)
if callback:
deferred.addCallback(callback)
if errback:
deferred.addErrback(errback)
return deferred
def test_ftp_download_success(self):
request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
meta={"ftp_user": self.username, "ftp_password": self.password})
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'I have the power!')
self.assertEqual(r.headers, {'Local Filename': [''], 'Size': ['17']})
return self._add_test_callbacks(d, _test)
def test_ftp_download_path_with_spaces(self):
request = Request(
url="ftp://127.0.0.1:%s/file with spaces.txt" % self.portNum,
meta={"ftp_user": self.username, "ftp_password": self.password}
)
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'Moooooooooo power!')
self.assertEqual(r.headers, {'Local Filename': [''], 'Size': ['18']})
return self._add_test_callbacks(d, _test)
def test_ftp_download_notexist(self):
request = Request(url="ftp://127.0.0.1:%s/notexist.txt" % self.portNum,
meta={"ftp_user": self.username, "ftp_password": self.password})
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.status, 404)
return self._add_test_callbacks(d, _test)
def test_ftp_local_filename(self):
local_fname = "/tmp/file.txt"
request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
meta={"ftp_user": self.username, "ftp_password": self.password, "ftp_local_filename": local_fname})
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.body, local_fname)
self.assertEqual(r.headers, {'Local Filename': ['/tmp/file.txt'], 'Size': ['17']})
self.assertTrue(os.path.exists(local_fname))
with open(local_fname) as f:
self.assertEqual(f.read(), "I have the power!")
os.remove(local_fname)
return self._add_test_callbacks(d, _test)
def test_invalid_credentials(self):
from twisted.protocols.ftp import ConnectionLost
request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
meta={"ftp_user": self.username, "ftp_password": 'invalid'})
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.type, ConnectionLost)
return self._add_test_callbacks(d, errback=_test)
|
|
"""
This module provides the GUI for the neutron scattering length density calculator
"""
import wx
import math
import sys
from sas.sasgui.guiframe.panel_base import PanelBase
from sas.sasgui.guiframe.utils import format_number
from sas.sasgui.guiframe.utils import check_float
from sas.sasgui.guiframe.events import StatusEvent
# the calculator default value for wavelength is 6
#import periodictable
from periodictable import formula
from periodictable.xsf import xray_energy
from periodictable.xsf import xray_sld_from_atoms
from periodictable.nsf import neutron_scattering
from sas.sasgui.perspectives.calculator import calculator_widgets as widget
from sas.sasgui.guiframe.documentation_window import DocumentationWindow
WAVELENGTH = 6.0
_BOX_WIDTH = 76
_STATICBOX_WIDTH = 350
_SCALE = 1e-6
#SLD panel size
if sys.platform.count("win32") > 0:
PANEL_TOP = 0
_STATICBOX_WIDTH = 350
PANEL_SIZE = 400
FONT_VARIANT = 0
else:
PANEL_TOP = 60
_STATICBOX_WIDTH = 380
PANEL_SIZE = 410
FONT_VARIANT = 1
class SldPanel(wx.Panel, PanelBase):
"""
Provides the SLD calculator GUI.
"""
## Internal nickname for the window, used by the AUI manager
window_name = "SLD Calculator"
## Name to appear on the window title bar
window_caption = "SLD Calculator"
## Flag to tell the AUI manager to put this panel in the center pane
CENTER_PANE = True
def __init__(self, parent, base=None, *args, **kwds):
"""
"""
wx.Panel.__init__(self, parent, *args, **kwds)
PanelBase.__init__(self)
#Font size
self.SetWindowVariant(variant=FONT_VARIANT)
        # Object that receives status events
self.base = base
self.neutron_wavelength = WAVELENGTH
self.xray_source_input = WAVELENGTH
self.parent = parent
#layout attribute
self.compound_ctl = None
self.density_ctl = None
self.compound = ""
self.density = ""
self.neutron_wavelength_ctl = None
self.xray_source_input_ctl = None
self.xray_cbox = None
self.neutron_sld_real_ctl = None
self.neutron_sld_im_ctl = None
self.xray_sld_real_ctl = None
self.xray_sld_im_ctl = None
self.neutron_abs_ctl = None
self.neutron_inc_ctl = None
self.neutron_length_ctl = None
self.button_calculate = None
self.xray_source = None
#Draw the panel
self._do_layout()
self.SetAutoLayout(True)
self.Layout()
self.fill_xray_cbox()
def _do_layout(self):
"""
Draw window content
"""
unit_a = '[A]'
unit_density = '[g/cm^(3)]'
unit_sld = '[1/A^(2)]'
unit_cm1 = '[1/cm]'
unit_cm = '[cm]'
sizer_input = wx.GridBagSizer(5, 5)
sizer_output = wx.GridBagSizer(5, 5)
sizer_button = wx.BoxSizer(wx.HORIZONTAL)
sizer1 = wx.BoxSizer(wx.HORIZONTAL)
sizer2 = wx.BoxSizer(wx.HORIZONTAL)
sizer3 = wx.BoxSizer(wx.HORIZONTAL)
#---------inputs----------------
inputbox = wx.StaticBox(self, -1, "Input")
boxsizer1 = wx.StaticBoxSizer(inputbox, wx.VERTICAL)
boxsizer1.SetMinSize((_STATICBOX_WIDTH, -1))
compound_txt = wx.StaticText(self, -1, 'Compound ')
self.compound_ctl = wx.TextCtrl(self, -1, size=(_BOX_WIDTH * 2, -1))
density_txt = wx.StaticText(self, -1, 'Density ')
self.density_ctl = wx.TextCtrl(self, -1, size=(_BOX_WIDTH, -1))
unit_density_txt = wx.StaticText(self, -1, unit_density)
neutron_wavelength_txt = wx.StaticText(self, -1, 'Neutron wavelength')
self.neutron_wavelength_ctl = wx.TextCtrl(self, -1, size=(_BOX_WIDTH, -1))
self.neutron_wavelength_ctl.SetValue(str(self.neutron_wavelength))
self.xray_source_input_txt = wx.StaticText(self, -1, 'X-ray wavelength')
self.xray_source_input_ctl = wx.TextCtrl(self, -1, size=(_BOX_WIDTH, -1))
self.xray_source_input_ctl.SetValue(str(self.xray_source_input))
neutron_unit_a_txt = wx.StaticText(self, -1, unit_a)
self.xray_cbox = wx.ComboBox(self, -1, size=(70, 20), style=wx.CB_READONLY)
xray_cbox_tip = "Select an element, wavelength or energy"
self.xray_cbox.SetToolTipString(xray_cbox_tip)
wx.EVT_COMBOBOX(self.xray_cbox, -1, self.on_select_xray)
iy = 0
ix = 0
sizer_input.Add(compound_txt, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ix += 1
sizer_input.Add(self.compound_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
iy += 1
ix = 0
sizer_input.Add(density_txt, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ix += 1
sizer_input.Add(self.density_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 1
sizer_input.Add(unit_density_txt, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
iy += 1
ix = 0
sizer_input.Add(neutron_wavelength_txt, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ix += 1
sizer_input.Add(self.neutron_wavelength_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 1
sizer_input.Add(neutron_unit_a_txt, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
iy += 1
ix = 0
sizer_input.Add(self.xray_source_input_txt, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ix += 1
sizer_input.Add(self.xray_source_input_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 1
sizer_input.Add(self.xray_cbox, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
boxsizer1.Add(sizer_input)
sizer1.Add(boxsizer1, 0, wx.EXPAND | wx.ALL, 10)
#---------Outputs sizer--------
outputbox = wx.StaticBox(self, -1, "Output")
boxsizer2 = wx.StaticBoxSizer(outputbox, wx.VERTICAL)
boxsizer2.SetMinSize((_STATICBOX_WIDTH, -1))
i_complex = '- i'
neutron_sld_txt = wx.StaticText(self, -1, 'Neutron SLD')
self.neutron_sld_real_ctl = wx.TextCtrl(self, -1,
size=(_BOX_WIDTH, -1))
self.neutron_sld_real_ctl.SetEditable(False)
self.neutron_sld_real_ctl.SetToolTipString("Neutron SLD real")
self.neutron_sld_im_ctl = wx.TextCtrl(self, -1,
size=(_BOX_WIDTH, -1))
self.neutron_sld_im_ctl.SetEditable(False)
self.neutron_sld_im_ctl.SetToolTipString("Neutron SLD imaginary")
neutron_sld_units_txt = wx.StaticText(self, -1, unit_sld)
xray_sld_txt = wx.StaticText(self, -1, 'X-ray SLD')
self.xray_sld_real_ctl = wx.TextCtrl(self, -1,
size=(_BOX_WIDTH, -1))
self.xray_sld_real_ctl.SetEditable(False)
self.xray_sld_real_ctl.SetToolTipString("X-ray SLD real")
self.xray_sld_im_ctl = wx.TextCtrl(self, -1,
size=(_BOX_WIDTH, -1))
self.xray_sld_im_ctl.SetEditable(False)
self.xray_sld_im_ctl.SetToolTipString("X-ray SLD imaginary")
xray_sld_units_txt = wx.StaticText(self, -1, unit_sld)
neutron_inc_txt = wx.StaticText(self, -1, 'Neutron Inc. Xs')
self.neutron_inc_ctl = wx.TextCtrl(self, -1,
size=(_BOX_WIDTH, -1))
self.neutron_inc_ctl.SetEditable(False)
self.neutron_inc_ctl.SetToolTipString("Neutron Inc. Xs")
neutron_inc_units_txt = wx.StaticText(self, -1, unit_cm1)
neutron_abs_txt = wx.StaticText(self, -1, 'Neutron Abs. Xs')
self.neutron_abs_ctl = wx.TextCtrl(self, -1,
size=(_BOX_WIDTH, -1))
self.neutron_abs_ctl.SetEditable(False)
self.neutron_abs_ctl.SetToolTipString("Neutron Abs. Xs")
neutron_abs_units_txt = wx.StaticText(self, -1, unit_cm1)
neutron_length_txt = wx.StaticText(self, -1, 'Neutron 1/e length')
self.neutron_length_ctl = wx.TextCtrl(self, -1,
size=(_BOX_WIDTH, -1))
self.neutron_length_ctl.SetEditable(False)
self.neutron_length_ctl.SetToolTipString("Neutron 1/e length")
neutron_length_units_txt = wx.StaticText(self, -1, unit_cm)
iy = 0
ix = 0
sizer_output.Add(neutron_sld_txt, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ix += 1
sizer_output.Add(self.neutron_sld_real_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 1
sizer_output.Add(wx.StaticText(self, -1, i_complex),
(iy, ix), (1, 1), wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 1
sizer_output.Add(self.neutron_sld_im_ctl,
(iy, ix), (1, 1), wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 1
sizer_output.Add(neutron_sld_units_txt,
(iy, ix), (1, 1), wx.EXPAND | wx.ADJUST_MINSIZE, 0)
iy += 1
ix = 0
sizer_output.Add(xray_sld_txt, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ix += 1
sizer_output.Add(self.xray_sld_real_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 1
sizer_output.Add(wx.StaticText(self, -1, i_complex),
(iy, ix), (1, 1), wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 1
sizer_output.Add(self.xray_sld_im_ctl,
(iy, ix), (1, 1), wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 1
sizer_output.Add(xray_sld_units_txt,
(iy, ix), (1, 1), wx.EXPAND | wx.ADJUST_MINSIZE, 0)
iy += 1
ix = 0
sizer_output.Add(neutron_inc_txt, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ix += 1
sizer_output.Add(self.neutron_inc_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 2
sizer_output.Add(neutron_inc_units_txt, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
iy += 1
ix = 0
sizer_output.Add(neutron_abs_txt, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ix += 1
sizer_output.Add(self.neutron_abs_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 2
sizer_output.Add(neutron_abs_units_txt, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
iy += 1
ix = 0
sizer_output.Add(neutron_length_txt, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
ix += 1
sizer_output.Add(self.neutron_length_ctl, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
ix += 2
sizer_output.Add(neutron_length_units_txt, (iy, ix), (1, 1),
wx.EXPAND | wx.ADJUST_MINSIZE, 0)
boxsizer2.Add(sizer_output)
sizer2.Add(boxsizer2, 0, wx.EXPAND | wx.ALL, 10)
#-----Button sizer------------
id = wx.NewId()
self.button_calculate = wx.Button(self, id, "Calculate")
self.button_calculate.SetToolTipString("Calculate SLD.")
self.Bind(wx.EVT_BUTTON, self.calculateSld, id=id)
id = wx.NewId()
self.button_help = wx.Button(self, id, "HELP")
self.button_help.SetToolTipString("help on SLD calculator.")
self.Bind(wx.EVT_BUTTON, self.on_help, id=id)
self.button_close = wx.Button(self, wx.ID_CANCEL, 'Close')
self.button_close.Bind(wx.EVT_BUTTON, self.on_close)
self.button_close.SetToolTipString("Close this window.")
sizer_button.Add((150, 20), 1, wx.EXPAND | wx.ADJUST_MINSIZE, 0)
sizer_button.Add(self.button_calculate, 0, wx.RIGHT | wx.ADJUST_MINSIZE, 20)
sizer_button.Add(self.button_help, 0, wx.RIGHT | wx.ADJUST_MINSIZE, 20)
sizer_button.Add(self.button_close, 0, wx.RIGHT | wx.ADJUST_MINSIZE, 20)
sizer3.Add(sizer_button)
#---------layout----------------
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(sizer1)
vbox.Add(sizer2)
vbox.Add(sizer3)
vbox.Fit(self)
self.SetSizer(vbox)
def fill_xray_cbox(self):
"""
fill the x-ray combobox with the sources
"""
source_list = ['[A]', '[keV]', 'Element']
for source in source_list:
pos = self.xray_cbox.Append(str(source))
self.xray_cbox.SetClientData(pos, str(source.strip()))
self.xray_cbox.SetSelection(0)
self.xray_source = source_list[0]
def on_select_xray(self, event=None):
"""
On Selecting a source
"""
item = event.GetEventObject()
self.xray_source = item.GetValue().strip()
if self.xray_source == "[A]":
self.xray_source_input_txt.SetLabel("X-ray wavelength")
elif self.xray_source == "[keV]":
self.xray_source_input_txt.SetLabel("X-ray energy")
elif self.xray_source == "Element":
self.xray_source_input_txt.SetLabel("X-ray source")
def on_help(self, event):
"""
Bring up the SLD Documentation whenever
the HELP button is clicked.
Calls DocumentationWindow with the path of the location within the
        documentation tree (after /doc/...). Note that when using old
        versions of Wx (before 2.9), and thus not the release version of the
        installers, the help comes up at the top level of the file because
        webbrowser does not pass anything past the # to the browser when it is
        running "file:///...".
        :param event: Triggers on clicking the help button
"""
_TreeLocation = "user/sasgui/perspectives/calculator/"
_TreeLocation += "sld_calculator_help.html"
_doc_viewer = DocumentationWindow(self, -1, _TreeLocation, "",
"General Scattering Calculator Help")
def on_close(self, event):
"""
close the window containing this panel
"""
self.parent.Close()
def calculate_xray_sld(self, element):
"""
        Compute the X-ray SLD of the current compound at the K_alpha energy
        of the given element.
        :param element: chemical symbol of an existing element
"""
myformula = formula(str(element))
if len(myformula.atoms) != 1:
return
element = myformula.atoms.keys()[0]
energy = xray_energy(element.K_alpha)
self.sld_formula = formula(str(self.compound), density=self.density)
atom = self.sld_formula.atoms
return xray_sld_from_atoms(atom, density=self.density, energy=energy)
def check_inputs(self):
"""Check validity user inputs"""
flag = True
msg = ""
if check_float(self.density_ctl):
self.density = float(self.density_ctl.GetValue())
else:
flag = False
msg += "Error for Density value :expect float"
self.neutron_wavelength = self.neutron_wavelength_ctl.GetValue()
self.xray_source_input = self.xray_source_input_ctl.GetValue()
if str(self.neutron_wavelength).lstrip().rstrip() == "":
self.neutron_wavelength = WAVELENGTH
self.neutron_wavelength_ctl.SetValue(str(WAVELENGTH))
self.neutron_wavelength_ctl.SetBackgroundColour(wx.WHITE)
self.neutron_wavelength_ctl.Refresh()
msg += "Default value for wavelength is 6.0"
else:
if check_float(self.neutron_wavelength_ctl):
self.neutron_wavelength = float(self.neutron_wavelength)
else:
flag = False
msg += "Error for wavelength value :expect float"
if str(self.xray_source_input).lstrip().rstrip() == "":
self.xray_source_input = WAVELENGTH
self.xray_source_input_ctl.SetValue(str(WAVELENGTH))
self.xray_source_input_ctl.SetBackgroundColour(wx.WHITE)
self.xray_source_input_ctl.Refresh()
msg += "Default value for wavelength is 6.0"
else:
if (self.xray_source == '[A]') or (self.xray_source == '[keV]'):
if check_float(self.xray_source_input_ctl):
self.xray_source_input = float(self.xray_source_input)
else:
flag = False
msg += "Error for wavelength value :expect float"
elif (self.xray_source == 'Element'):
try:
import periodictable
exec("periodictable." + self.xray_source_input)
except AttributeError:
flag = False
msg += "X-ray element supplied isn't in the database"
self.compound = self.compound_ctl.GetValue().lstrip().rstrip()
if self.compound != "":
try :
formula(self.compound)
self.compound_ctl.SetBackgroundColour(wx.WHITE)
self.compound_ctl.Refresh()
except:
self.compound_ctl.SetBackgroundColour("pink")
self.compound_ctl.Refresh()
flag = False
msg += "Enter correct formula"
else:
self.compound_ctl.SetBackgroundColour("pink")
self.compound_ctl.Refresh()
flag = False
msg += "Enter a formula"
return flag, msg
def calculate_sld_helper(self, element, density, molecule_formula):
"""
        Compute the X-ray SLD of a molecule formula at the K_alpha energy of
        the given element.
        :param element: chemical symbol of an existing element
        :param density: density of the molecule in g/cm^3
        :param molecule_formula: periodictable formula object for the molecule
"""
element_formula = formula(str(element))
if len(element_formula.atoms) != 1:
return
element = element_formula.atoms.keys()[0]
energy = xray_energy(element.K_alpha)
atom = molecule_formula.atoms
return xray_sld_from_atoms(atom, density=density, energy=energy)
def calculateSld(self, event):
"""
        Calculate the neutron scattering length density of a molecule
"""
self.clear_outputs()
try:
#Check validity user inputs
flag, msg = self.check_inputs()
if self.base is not None and msg.lstrip().rstrip() != "":
msg = "SLD Calculator: %s" % str(msg)
wx.PostEvent(self.base, StatusEvent(status=msg))
if not flag:
return
#get ready to compute
self.sld_formula = formula(self.compound,
density=self.density)
(sld_real, sld_im, _), (_, absorp, incoh), \
length = neutron_scattering(compound=self.compound,
density=self.density,
wavelength=self.neutron_wavelength)
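            # neutron_scattering (periodictable.nsf) is expected to return
            # ((sld_real, sld_imag, sld_incoh), (coh_xs, abs_xs, incoh_xs), 1/e length);
            # only the values displayed in this panel are unpacked above.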
if self.xray_source == "[A]":
energy = xray_energy(self.xray_source_input)
xray_real, xray_im = xray_sld_from_atoms(self.sld_formula.atoms,
density=self.density,
energy=energy)
elif self.xray_source == "[keV]":
xray_real, xray_im = xray_sld_from_atoms(self.sld_formula.atoms,
density=self.density,
energy=self.xray_source_input)
elif self.xray_source == "Element":
xray_real, xray_im = self.calculate_sld_helper(element=self.xray_source_input,
density=self.density,
molecule_formula=self.sld_formula)
# set neutron sld values
val = format_number(sld_real * _SCALE)
self.neutron_sld_real_ctl.SetValue(val)
val = format_number(math.fabs(sld_im) * _SCALE)
self.neutron_sld_im_ctl.SetValue(val)
            # set x-ray sld values
self.xray_sld_real_ctl.SetValue(format_number(xray_real * _SCALE))
val = format_number(math.fabs(xray_im) * _SCALE)
self.xray_sld_im_ctl.SetValue(val)
# set incoherence and absorption
self.neutron_inc_ctl.SetValue(format_number(incoh))
self.neutron_abs_ctl.SetValue(format_number(absorp))
# Neutron length
self.neutron_length_ctl.SetValue(format_number(length))
# display wavelength
#self.wavelength_ctl.SetValue(str(self.wavelength))
except:
if self.base is not None:
msg = "SLD Calculator: %s" % (sys.exc_value)
wx.PostEvent(self.base, StatusEvent(status=msg))
if event is not None:
event.Skip()
def clear_outputs(self):
"""
Clear the outputs textctrl
"""
self.neutron_sld_real_ctl.SetValue("")
self.neutron_sld_im_ctl.SetValue("")
self.xray_sld_real_ctl.SetValue("")
self.xray_sld_im_ctl.SetValue("")
self.neutron_abs_ctl.SetValue("")
self.neutron_inc_ctl.SetValue("")
self.neutron_length_ctl.SetValue("")
class SldWindow(widget.CHILD_FRAME):
"""
"""
def __init__(self, parent=None, title="SLD Calculator",
base=None, manager=None,
size=(PANEL_SIZE, PANEL_SIZE), *args, **kwds):
"""
"""
kwds['title'] = title
kwds['size'] = size
widget.CHILD_FRAME.__init__(self, parent, *args, **kwds)
"""
"""
self.parent = parent
self.base = base
self.manager = manager
self.panel = SldPanel(self, base=base)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.SetPosition((wx.LEFT, PANEL_TOP))
self.Show(True)
def on_close(self, event):
"""
On close event
"""
if self.manager is not None:
self.manager.sld_frame = None
self.Destroy()
class ViewApp(wx.App):
"""
"""
def OnInit(self):
"""
"""
widget.CHILD_FRAME = wx.Frame
frame = SldWindow(None, title='SLD Calculator')
frame.Show(True)
self.SetTopWindow(frame)
return True
if __name__ == "__main__":
app = ViewApp(0)
app.MainLoop()
|
|
"""
Base classes are defined here.
"""
__docformat__='reStructuredText'
__author__ = 'Anand Patil, [email protected]'
import os, sys, pdb
import numpy as np
import types
def logp_of_set(s):
exc = None
logp = 0.
for obj in s:
try:
logp += obj.logp
except:
cls, inst, tb = sys.exc_info()
if cls is ZeroProbability:
raise cls, inst, tb
elif exc is None:
exc = (cls, inst, tb)
if exc is None:
return logp
else:
raise exc[0], exc[1], exc[2]
def batchsd(trace, batches=5):
"""
Calculates the simulation standard error, accounting for non-independent
samples. The trace is divided into batches, and the standard deviation of
the batch means is calculated.
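    Example (an illustrative sketch, assuming a 1-D trace of roughly independent
    draws, for which this estimate approaches std/sqrt(n)):
        >>> import numpy as np
        >>> trace = np.random.normal(size=1000)
        >>> mc_error = batchsd(trace, batches=5)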
"""
if len(np.shape(trace)) > 1:
dims = np.shape(trace)
#ttrace = np.transpose(np.reshape(trace, (dims[0], sum(dims[1:]))))
ttrace = np.transpose([t.ravel() for t in trace])
return np.reshape([batchsd(t, batches) for t in ttrace], dims[1:])
else:
if batches == 1: return np.std(trace)/np.sqrt(len(trace))
try:
batched_traces = np.resize(trace, (batches, len(trace)/batches))
except ValueError:
# If batches do not divide evenly, trim excess samples
resid = len(trace) % batches
batched_traces = np.resize(trace[:-resid], (batches, len(trace)/batches))
means = np.mean(batched_traces, 1)
return np.std(means)/np.sqrt(batches)
class ZeroProbability(ValueError):
"Log-probability is undefined or negative infinity"
pass
class Node(object):
"""
The base class for Stochastic, Deterministic and Potential.
:Parameters:
doc : string
The docstring for this node.
name : string
The name of this node.
parents : dictionary
A dictionary containing the parents of this node.
cache_depth : integer
An integer indicating how many of this node's
value computations should be 'memorized'.
verbose (optional) : integer
Level of output verbosity: 0=none, 1=low, 2=medium, 3=high
.. seealso::
:class:`Stochastic`
The class defining *random* variables, or unknown parameters.
:class:`Deterministic`
The class defining deterministic values, ie the result of a function.
:class:`Potential`
An arbitrary log-probability term to multiply into the joint
distribution.
:class:`Variable`
The base class for :class:`Stochastics` and :class:`Deterministics`.
"""
def __init__(self, doc, name, parents, cache_depth, verbose=None):
# Name and docstrings
self.__doc__ = doc
self.__name__ = name
# Level of feedback verbosity
self.verbose = verbose
# Number of memorized values
self._cache_depth = cache_depth
# Initialize
self.parents = parents
def _get_parents(self):
# Get parents of this object
return self._parents
def _set_parents(self, new_parents):
# Define parents of this object
# Remove from current parents
if hasattr(self,'_parents'):
self._parents.detach_children()
# Specify new parents
self._parents = self.ParentDict(regular_dict = new_parents, owner = self)
# Add self as child of parents
self._parents.attach_parents()
# Get new lazy function
self.gen_lazy_function()
parents = property(_get_parents, _set_parents, doc="Self's parents: the variables referred to in self's declaration.")
def __str__(self):
return self.__repr__()
def __repr__(self):
return object.__repr__(self).replace(' object ', " '%s' "%self.__name__)
def gen_lazy_function(self):
pass
class Variable(Node):
"""
The base class for Stochastics and Deterministics.
:Parameters:
doc : string
The docstring for this node.
name : string
The name of this node.
parents : dictionary
A dictionary containing the parents of this node.
cache_depth : integer
An integer indicating how many of this node's
value computations should be 'memorized'.
trace : boolean
Indicates whether a trace should be kept for this variable
if its model is fit using a Monte Carlo method.
plot : boolean
Indicates whether summary plots should be prepared for this
variable if summary plots of its model are requested.
dtype : numpy dtype
If the value of this variable's numpy dtype can be known in
advance, it is advantageous to specify it here.
verbose (optional) : integer
Level of output verbosity: 0=none, 1=low, 2=medium, 3=high
:SeeAlso:
Stochastic, Deterministic, Potential, Node
"""
def __init__(self, doc, name, parents, cache_depth, trace=False, dtype=None, plot=None, verbose=None):
self.dtype=dtype
self.trace=trace
self._plot=plot
self.children = set()
self.extended_children = set()
Node.__init__(self, doc, name, parents, cache_depth, verbose=verbose)
if self.dtype is None:
if hasattr(self.value, 'dtype'):
self.dtype = self.value.dtype
else:
self.dtype = np.dtype(type(self.value))
def __str__(self):
return self.__name__
def _get_plot(self):
# Get plotting flag
return self._plot
def _set_plot(self, true_or_false):
# Set plotting flag
self._plot = true_or_false
plot = property(_get_plot, _set_plot, doc='A flag indicating whether self should be plotted.')
def stats(self, alpha=0.05, start=0, batches=100):
"""
Generate posterior statistics for node.
"""
from utils import hpd, quantiles
from numpy import sqrt
try:
trace = np.squeeze(np.array(self.trace(), float)[start:])
n = len(trace)
if not n:
print 'Cannot generate statistics for zero-length trace in', self.__name__
return
return {
'n': n,
'standard deviation': trace.std(0),
'mean': trace.mean(0),
'%s%s HPD interval' % (int(100*(1-alpha)),'%'): hpd(trace, alpha),
'mc error': batchsd(trace, batches),
'quantiles': quantiles(trace)
}
except:
print 'Could not generate output statistics for', self.__name__
return
ContainerRegistry = []
class ContainerMeta(type):
def __init__(cls, name, bases, dict):
type.__init__(cls, name, bases, dict)
def change_method(self, *args, **kwargs):
raise NotImplementedError, name + ' instances cannot be changed.'
if cls.register:
ContainerRegistry.append((cls, cls.containing_classes))
for meth in cls.change_methods:
setattr(cls, meth, types.UnboundMethodType(change_method, None, cls))
cls.register=False
class ContainerBase(object):
"""
Abstract base class.
:SeeAlso:
ListContainer, SetContainer, DictContainer, TupleContainer, ArrayContainer
"""
register = False
__metaclass__ = ContainerMeta
change_methods = []
containing_classes = []
def __init__(self, input):
# ContainerBase class initialization
# Look for name attributes
if hasattr(input, '__file__'):
_filename = os.path.split(input.__file__)[-1]
self.__name__ = os.path.splitext(_filename)[0]
elif hasattr(input, '__name__'):
self.__name__ = input.__name__
else:
try:
self.__name__ = input['__name__']
except:
self.__name__ = 'container'
def assimilate(self, new_container):
self.containers.append(new_container)
self.variables.update(new_container.variables)
self.stochastics.update(new_container.stochastics)
self.potentials.update(new_container.potentials)
self.deterministics.update(new_container.deterministics)
self.observed_stochastics.update(new_container.observed_stochastics)
def _get_logp(self):
# Return total log-probabilities from all elements
return logp_of_set(self.stochastics | self.potentials | self.observed_stochastics)
# Define log-probability property
logp = property(_get_logp, doc='The summed log-probability of all stochastic variables (data\nor otherwise) and factor potentials in self.')
StochasticRegistry = []
class StochasticMeta(type):
def __init__(cls, name, bases, dict):
type.__init__(cls, name, bases, dict)
StochasticRegistry.append(cls)
class StochasticBase(Variable):
"""
Abstract base class.
:SeeAlso:
Stochastic, Variable
"""
__metaclass__ = StochasticMeta
DeterministicRegistry = []
class DeterministicMeta(type):
def __init__(cls, name, bases, dict):
type.__init__(cls, name, bases, dict)
DeterministicRegistry.append(cls)
class DeterministicBase(Variable):
"""
Abstract base class.
:SeeAlso:
Deterministic, Variable
"""
__metaclass__ = DeterministicMeta
PotentialRegistry = []
class PotentialMeta(type):
def __init__(cls, name, bases, dict):
type.__init__(cls, name, bases, dict)
PotentialRegistry.append(cls)
class PotentialBase(Node):
"""
Abstract base class.
:SeeAlso:
Potential, Variable
"""
__metaclass__ = PotentialMeta
|
|
import Op, Interface
import numpy as np
from GCore import Calibrate
class Cameras(Op.Op):
def __init__(self, name='/Calibrate Cameras', locations='', detections='', x3ds='', solve_focal_length=True, solve_distortion=True,
error_threshold=0.05, min_samples=100, jumpFrames=5, showDetections=False, frameRange=''):
fields = [
('name', 'Name', 'Name', 'string', name, {}),
('locations', 'Camera locations', 'Camera locations', 'string', locations, {}),
('detections', '2D Detections', '2D Detections', 'string', detections, {}),
('x3ds', '3D Points', '3D Points', 'string', x3ds, {}),
('solve_focal_length', 'Solve Focal Length', 'Solve Focal Length', 'bool', solve_focal_length, {}),
('solve_distortion', 'Solve Distortion', 'Solve Distortion', 'bool', solve_distortion, {}),
('error_threshold', 'Error Threshold', 'Error Threshold', 'float', error_threshold, {}),
('min_samples', 'Min. samples', 'Min. samples to solve distortion', 'int', min_samples, {}),
('jumpFrames', 'Jump Frames', 'Handle every Nth frame', 'int', jumpFrames, {}),
('showDetections', 'Show detections', 'Show all collected detections', 'bool', showDetections, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {})
]
super(self.__class__, self).__init__(name, fields)
def cook(self, location, interface, attrs):
if not self.useFrame(interface.frame(), attrs['frameRange']):
interface.setAttr('updateMats', False)
return
# We need 2D data e.g. wand detections from a wand op
# We need 3D wand data from e.g. c3d or a 3D wand detector
dets_location = attrs['detections']
x3ds_location = attrs['x3ds']
if not dets_location or not x3ds_location: return
# Get the 2D and 3D data
x2ds = interface.attr('rx2ds', atLocation=dets_location)
x2d_splits = interface.attr('x2ds_splits', atLocation=dets_location)
x3ds = interface.attr('x3ds', atLocation=x3ds_location)
if x2ds is None or x2d_splits is None or x3ds is None: return
numCameras = len(x2d_splits) - 1
error_threshold = attrs['error_threshold']
# Get the data we've collected already so we can add to it
frame = interface.frame()
dets_colours = interface.attr('x2ds_colours', atLocation=dets_location)
collectedDets = interface.attr('collect_rx2ds')
collectedX3ds = interface.attr('collect_x3ds')
lastFrame = interface.attr('lastFrame', [frame] * numCameras)
emptyFrame3d = np.array([[]], dtype=np.float32).reshape(-1, 3)
# This is potentially used by other ops so we only set it when we have some confidence
# (and we might reset or tweak the values to indicate confidence levels at some point)
cameraErrors = interface.attr('cameraErrors', [-1] * numCameras)
# This is never modified to allow checking the camera rms values regardless of what we make of them
rmsValues = interface.attr('rms', [-1] * numCameras)
# Get the width and height for the videos
vwidth = interface.attr('vwidth', [1920] * numCameras)
vheight = interface.attr('vheight', [1080] * numCameras)
# Get the frame mapping for x3ds
x3ds_frames = interface.attr('x3ds_frames', {})
x2ds_frames = interface.attr('x2ds_frames', [[] for i in xrange(numCameras)])
# Get the camera matrices. We initialise them with default settings if we don't find any
mats = interface.attr('mats', atLocation=location)
if mats is None:
mats = []
for ci in range(numCameras):
mats.append(Calibrate.makeUninitialisedMat(ci, (vheight[ci], vwidth[ci])))
# Allow overriding the error threshold using an attribute (on the cooked location)
error_threshold_attr = interface.attr('error_threshold')
if error_threshold_attr is not None:
error_threshold = error_threshold_attr
Ps = interface.attr('Ps')
if Ps is None: Ps = [np.array([], dtype=np.float32) for n in range(numCameras)]
# Get the minimum number of samples we need to start solving distortion etc. as specified by the user
minSamples = attrs['min_samples']
# Prepare the collected data for further processing (or initialise if nothing has been collected)
if collectedDets is not None:
c_x2ds, c_splits = collectedDets
cams_collected = [c_x2ds[c0:c1] for ci, (c0, c1) in enumerate(zip(c_splits[:-1], c_splits[1:]))]
else:
cams_collected = [[] for ci, (c0, c1) in enumerate(zip(x2d_splits[:-1], x2d_splits[1:]))]
collectedX3ds = []
for ci, (c0, c1) in enumerate(zip(x2d_splits[:-1], x2d_splits[1:])):
collectedX3ds.append(emptyFrame3d)
# Process each camera by looking for a wand and attempt a calibration. If we're happy with the results we'll
# add it to our collection
for ci, (c0, c1) in enumerate(zip(x2d_splits[:-1], x2d_splits[1:])):
elapsed = frame - lastFrame[ci]
if 0 < elapsed < attrs['jumpFrames']: continue
# Get the 2Ds and 3Ds for the wand in this camera (if any)
cameraDetections = x2ds[c0:c1]
cameraX3ds = x3ds
if not cameraDetections.any() or not cameraX3ds.any(): continue
# Add the new detection to the existing collection as a candidate for a new collection
if cams_collected[ci] is None or len(cams_collected[ci]) == 0:
proposalDets, proposalX3ds = cameraDetections, cameraX3ds
else:
proposalDets = np.concatenate((cams_collected[ci], cameraDetections))
proposalX3ds = np.concatenate((collectedX3ds[ci], cameraX3ds))
# Check if we want to solve for distortion and focal length by looking at the number of samples
# we've got already compared to our minimum number of samples required
numSamples = len(proposalDets) / 5
# if numSamples == minSamples: self.logger.info('Camera %d reached min samples of %d' % (ci, minSamples))
solveTrigger = True if numSamples > minSamples else False
solve_focal_length = attrs['solve_focal_length'] if solveTrigger else False
solve_distortion = attrs['solve_distortion'] if solveTrigger else False
# The wand is assumed to have 5 points so we make sure we've got at least one wand before attempting
# to calibrate
if len(proposalDets) >= 5 and len(proposalX3ds) >= 5:
P, ks, rms = Calibrate.cv2_solve_camera_from_3d(proposalX3ds, proposalDets,
solve_focal_length=solve_focal_length,
solve_distortion=solve_distortion)
if ks[0] < -3. or ks[0] > 3.: ks[0] = 0.
if ks[1] < -3. or ks[1] > 3.: ks[1] = 0.
                # This shouldn't happen but if we lose confidence in the camera we can visualise it
# by resetting the camera error (this will change the colour in the UI)
if rms > error_threshold:
cameraErrors[ci] = -1
continue
# See how the rms for the calibration compares to the last recorded value for this camera
prevRms = rms if rmsValues[ci] == -1 else rmsValues[ci]
rmsDelta = rms - prevRms
# If the rms is lower than the last recorded error for this camera then
# we want to keep this data
if rmsDelta <= 0 or not solveTrigger:
cams_collected[ci] = proposalDets
collectedX3ds[ci] = proposalX3ds
if frame not in x3ds_frames:
x3ds_frames[frame] = proposalX3ds[-5:]
x2ds_frames[ci] += ([frame] * 5)
else:
continue
# Record the rms value for the camera
rmsValues[ci] = rms
# Once we've solved for distortion etc. we are more confident with the accuracy of our
                # error so we start reporting it, where the value can be used for visualisation etc.
if solveTrigger: cameraErrors[ci] = rms
lastFrame[ci] = frame
# Everything has gone well so far so we create and add the new camera matrix
mat = Calibrate.makeMat(P, ks, (vheight[ci], vwidth[ci]))
mats[ci] = mat
Ps[ci] = P
# Concatenate the results from all the cameras
cams = [np.concatenate((cc)) for cc in cams_collected if len(cc)]
if not cams:
# We haven't found a wand in any camera so we just keep calm and return
return
# Build our collections and write to the interface
collectedDets = np.array(np.concatenate(cams), dtype=np.float32).reshape(-1, 2), \
Interface.makeSplitBoundaries(map(len, cams_collected))
interface.setAttr('collect_rx2ds', collectedDets)
interface.setAttr('collect_x3ds', collectedX3ds)
interface.setAttr('x2ds_frames', x2ds_frames)
interface.setAttr('x3ds_frames', x3ds_frames)
interface.setAttr('lastFrame', lastFrame)
# Write the calibration data to the interface and request an update at render time
interface.setAttr('mats', mats)
interface.setAttr('Ps', Ps)
interface.setAttr('rms', rmsValues)
interface.setAttr('cameraErrors', cameraErrors)
interface.setAttr('updateMats', True)
# Optionally display all the collected wand detections
if 'showDetections' in attrs and attrs['showDetections']:
colours = np.tile(dets_colours, (len(collectedDets[0]) / 5, 1))
allAttrs = {'x2ds': collectedDets[0], 'x2ds_splits': collectedDets[1],
'x2ds_colours': colours}
interface.createChild('collected', 'detections', attrs=allAttrs)
class WandCorrespondences(Op.Op):
def __init__(self, name='/Wand Correspondences', detections='', matsLocation=''):
fields = [
('name', 'Name', 'Name', 'string', name, {}),
('detections', 'Detections', 'Detections', 'string', detections, {}),
('matsLocation', 'Mats Location', 'Mats locations', 'string', matsLocation, {}),
]
super(self.__class__, self).__init__(name, fields)
def cook(self, location, interface, attrs):
detections = attrs['detections']
matsLocation = attrs['matsLocation']
if not detections or not matsLocation: return
wand_frames = interface.attr('x2ds', atLocation=detections)
print wand_frames[1:2]
vicon_mats = interface.attr('mats', atLocation=matsLocation)
vicon_solved = [not (m[1][1,3] == 0.0 and m[1][2,3] == 0.0 and m[1][0,3] != 0.0) for m in vicon_mats]
x2s_cameras, x3s_cameras, frames_cameras, num_kept_frames = Calibrate.generate_wand_correspondences(wand_frames, vicon_mats, vicon_solved)
# TODO: Finish this bit of code
import numpy as np
import scipy.optimize as sci_opt
from scipy.sparse import lil_matrix
from GCore import Calibrate
import cv2
import ISCV
'''
Implements Bundle Adjustment with Scipy
Cameras are represented as 11 floats: 3 rotation angles, 3 translations, focal length, 2 for the optical centre and 2 distortion coefficients
Points3d are just a 3 vector in world space
Points2d are a 2 vector of the pixel location
camera_indices is the camera id for each observation
point_indices is the index of the 3d point for each observation
'''
def matToVec(P, distortion):
outVec = np.zeros(11, dtype=np.float32)
K, RT = Calibrate.decomposeKRT(P)
outVec[:3] = cv2.Rodrigues(RT[:3, :3])[0].ravel()
outVec[3:6] = RT[:3, 3]
outVec[6] = K[0, 0] # Focal Length
outVec[7:9] = distortion
outVec[9:] = K[:2, 2] # Optical Centre
return outVec
def vecToMat(vec):
f, k1, k2, ox, oy = vec[6:]
rot = vec[:3]
trans = vec[3:6]
K = np.eye(3)
K[[0,1],[0,1]] = f
K[:2, 2] = [ox, oy]
R = cv2.Rodrigues(rot)[0]
RT = np.zeros((3, 4), dtype=np.float32)
RT[:3, :3] = R
RT[:3, 3] = trans
P = Calibrate.composeKRT(K, RT)[:3,:]
return np.float32(P), (k1, k2)
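# Illustrative round-trip sketch (assumes an existing calibrated 3x4 projection
# matrix P and a (k1, k2) distortion pair, e.g. taken from one of the camera mats):
#   vec = matToVec(P, (k1, k2))       # 11 floats: rvec(3), t(3), f, k1, k2, ox, oy
#   P2, (k1b, k2b) = vecToMat(vec)    # reconstructs P up to numerical error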
def bundle_adjustment_sparsity(n_cameras, n_points, x2ds_splits, point_indices):
camera_indices = np.zeros(x2ds_splits[-1], dtype=int)
for i, (c0, c1) in enumerate(zip(x2ds_splits[:-1], x2ds_splits[1:])):
camera_indices[c0:c1] = i
m = camera_indices.size * 2
n = n_cameras * 11 + n_points * 3
A = lil_matrix((m, n), dtype=int)
i = np.arange(camera_indices.size)
for s in range(11):
A[2 * i, camera_indices * 11 + s] = 1
A[2 * i + 1, camera_indices * 11 + s] = 1
for s in range(3):
A[2 * i, n_cameras * 11 + point_indices * 3 + s] = 1
A[2 * i + 1, n_cameras * 11 + point_indices * 3 + s] = 1
return A
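# Illustrative sketch (hypothetical tiny problem): 2 cameras observing the same 3
# points once each gives 6 observations, so the sparsity pattern has shape
# (2 * 6, 2 * 11 + 3 * 3) = (12, 31):
#   A = bundle_adjustment_sparsity(2, 3, np.array([0, 3, 6]),
#                                  np.array([0, 1, 2, 0, 1, 2]))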
def errorFunction(X, n_cameras, n_points, x2d_splits, x2ds_labels, x2ds):
camera_params = X[:n_cameras * 11].reshape((n_cameras, 11))
x3ds = X[n_cameras * 11:].reshape((n_points, 3))
projected_x2ds = np.zeros_like(x2ds)
for camVec, c0, c1 in zip(camera_params, x2d_splits[:-1], x2d_splits[1:]):
P, distortion = vecToMat(camVec)
x3d_labels = np.int32([x2ds_labels[i] for i in xrange(c0, c1)])
proj_x2ds, proj_splits, proj_labels = ISCV.project(np.float32(x3ds[x3d_labels]), x3d_labels, np.float32([P]))
assert np.all(x3d_labels == proj_labels)
ISCV.distort_points(proj_x2ds, float(camVec[9]), float(camVec[10]), float(distortion[0]), float(distortion[1]), proj_x2ds)
projected_x2ds[c0:c1, :] = proj_x2ds
return (projected_x2ds - x2ds).ravel()
def printProblemDetails(n_cameras, n_points, x2ds):
    n = 11 * n_cameras + 3 * n_points
m = 2 * x2ds.shape[0]
print("n_cameras: {}".format(n_cameras))
print("n_points: {}".format(n_points))
print("Total number of parameters: {}".format(n))
print("Total number of residuals: {}".format(m))
def adjustBundles(x3ds, x2ds, x2ds_splits, x2ds_labels, Ps, distortions):
n_cameras = x2ds_splits.shape[0] - 1
n_points = x3ds.shape[0]
printProblemDetails(n_cameras, n_points, x2ds)
camera_params = np.float32([matToVec(P, distortion) for P, distortion in zip(Ps, distortions)])
x0 = np.hstack((camera_params.ravel(), x3ds.ravel()))
sparsity = bundle_adjustment_sparsity(n_cameras, n_points, x2ds_splits, x2ds_labels)
res = sci_opt.least_squares(errorFunction, x0, jac_sparsity=sparsity, verbose=2, x_scale='jac', ftol=1e-10,
method='trf', args=(n_cameras, n_points, x2ds_splits, x2ds_labels, x2ds))
X = res.x
error = res.fun
camera_params = X[:n_cameras * 11].reshape((n_cameras, 11))
Ps, distortions = [], []
for vec in camera_params:
P, distortion = vecToMat(vec)
Ps.append(P)
distortions.append(distortion)
return Ps, distortions, X, error
class BundleAdjust_Wand(Op.Op):
def __init__(self, name='/BundleAdjust_Wand', locations='', frameRange=''):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {})
]
super(self.__class__, self).__init__(name, self.fields)
def cook(self, location, interface, attrs):
if not self.useFrame(interface.frame(), attrs['frameRange']): return
mats = interface.attr('mats')
collect_rx2ds = interface.attr('collect_rx2ds')
x2ds_frames = interface.attr('x2ds_frames')
x3ds_frames = interface.attr('x3ds_frames')
frames = sorted(x3ds_frames.keys())
frameIndex = {frame: fi for fi, frame in enumerate(frames)}
x3ds = np.array([x3ds_frames[frame] for frame in frames], dtype=np.float32).reshape(-1, 3)
x2ds = collect_rx2ds[0].copy()
x2ds_splits = collect_rx2ds[1].copy()
x2ds_labels = []
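        # Label every 2D detection with the index of its 3D wand point,
        # assuming five wand points per frame stored consecutively.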
for ci in xrange(len(x2ds_frames)):
for fi, frame in enumerate(x2ds_frames[ci]):
x2ds_labels.append(5 * frameIndex[frame] + (fi % 5))
assert np.max(x2ds_labels) < x3ds.shape[0]
x2ds_labels = np.array(x2ds_labels, dtype=int)
Ps = [mat[2] for mat in mats]
distortions = [mat[3] for mat in mats]
new_Ps, new_distortions, _, _ = adjustBundles(x3ds, x2ds, x2ds_splits, x2ds_labels, Ps, distortions)
for i in xrange(len(mats)):
new_mat = list(mats[i])
print "\n----\n{}\n{}\n----\n".format(Ps[i], new_Ps[i])
new_mat[2] = new_Ps[i]
new_mat[3] = new_distortions[i]
print "\n----\n{}\n{}\n----\n".format(distortions[i], new_distortions[i])
mats[i] = tuple(new_mat)
interface.setAttr('mats', mats)
interface.setAttr('Ps', new_Ps)
interface.setAttr('updateMats', True)
# Register Ops
import Registry
Registry.registerOp('Calibrate Cameras', Cameras)
Registry.registerOp('Generate Wand Correspondences', WandCorrespondences)
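# Presumably the bundle-adjust op above would be registered in the same way, e.g.:
# Registry.registerOp('Bundle Adjust Wand', BundleAdjust_Wand)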
|
|
from __future__ import absolute_import
from __future__ import print_function
from .._abstract.abstract import BaseAGSServer
import json
########################################################################
class GlobeServiceLayer(BaseAGSServer):
"""
Represents a single globe layer
"""
_url = None
_proxy_url = None
_proxy_port = None
_securityHandler = None
_json = None
_json_dict = None
_extent = None
_displayField = None
_baseOption = None
_name = None
_baseID = None
_dataType = None
_fields = None
_cullMode = None
_defaultVisibility = None
_copyrightText = None
_extrusionExpression = None
_currentVersion = None
_subLayers = None
_minDistance = None
_type = None
_samplingMode = None
_maxDistance = None
_id = None
_layerFolderName = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
if self._securityHandler is not None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" inializes the properties """
params = {
"f" : "json",
}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = json_dict
self._json = json.dumps(self._json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, v)
else:
print (k, " - attribute not implemented for Globe Service Layer.")
del k,v
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""
returns key/value pair
"""
        attributes = json.loads(str(self))
        for att in attributes.keys():
            yield (att, getattr(self, att))
#----------------------------------------------------------------------
@property
def extent(self):
"""returns the globe layer extent"""
if self._extent is None:
self.__init()
return self._extent
#----------------------------------------------------------------------
@property
def displayField(self):
"""returns the layer's display field"""
if self._displayField is None:
self.__init()
return self._displayField
#----------------------------------------------------------------------
@property
def baseOption(self):
"""returns the base option"""
if self._baseOption is None:
self.__init()
return self._baseOption
#----------------------------------------------------------------------
@property
def name(self):
"""returns the layers' name"""
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def baseID(self):
"""returns the layers' base ID"""
if self._baseID is None:
self.__init()
return self._baseID
#----------------------------------------------------------------------
@property
def dataType(self):
"""returns the data type for the layer"""
if self._dataType is None:
self.__init()
return self._dataType
#----------------------------------------------------------------------
@property
def fields(self):
"""returns the fields"""
if self._fields is None:
self.__init()
return self._fields
#----------------------------------------------------------------------
@property
def cullMode(self):
"""returns cull mode"""
if self._cullMode is None:
self.__init()
return self._cullMode
#----------------------------------------------------------------------
@property
def defaultVisibility(self):
"""returns the defaultVisibility value"""
if self._defaultVisibility is None:
self.__init()
return self._defaultVisibility
#----------------------------------------------------------------------
@property
def copyrightText(self):
"""returns the copyright text"""
if self._copyrightText is None:
self.__init()
return self._copyrightText
#----------------------------------------------------------------------
@property
def extrusionExpression(self):
"""returns the extrusionExpression value"""
if self._extrusionExpression is None:
self.__init()
return self._extrusionExpression
#----------------------------------------------------------------------
@property
def currentVersion(self):
"""returns the currentVersion value"""
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def subLayers(self):
"""returns the subLayers value"""
if self._subLayers is None:
self.__init()
return self._subLayers
#----------------------------------------------------------------------
@property
def minDistance(self):
"""returns the min distance value"""
if self._minDistance is None:
self.__init()
return self._minDistance
#----------------------------------------------------------------------
@property
def type(self):
"""returns the type"""
if self._type is None:
self.__init()
return self._type
#----------------------------------------------------------------------
@property
def samplingMode(self):
"""returns the sampling mode"""
if self._samplingMode is None:
self.__init()
return self._samplingMode
#----------------------------------------------------------------------
@property
def maxDistance(self):
"""returns the maximum distance"""
if self._maxDistance is None:
self.__init()
return self._maxDistance
#----------------------------------------------------------------------
@property
def id(self):
"""returns the id value"""
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def layerFolderName(self):
"""returns the layer folder name"""
if self._layerFolderName is None:
self.__init()
return self._layerFolderName
########################################################################
class GlobeService(BaseAGSServer):
"""
The Globe Service resource represents a globe service published with
ArcGIS for Server. The resource provides information about the service
such as the service description and the various layers contained in the
published globe document.
"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
_currentVersion = None
_layers = None
_serviceDescription = None
_documentInfo = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
self._proxy_url = proxy_url
self._proxy_port = proxy_port
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" inializes the properties """
params = {
"f" : "json",
}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = json_dict
self._json = json.dumps(self._json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, v)
else:
print (k, " - attribute not implemented for Globe Service.")
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""
returns key/value pair
"""
attributes = ["currentVersion",
"documentInfo",
"layers",
"serviceDescription"]
for att in attributes:
yield [att, getattr(self, att)]
#----------------------------------------------------------------------
@property
def layers(self):
"""gets the globe service layers"""
if self._layers is None:
self.__init()
lyrs = []
for lyr in self._layers:
lyr['object'] = GlobeServiceLayer(url=self._url + "/%s" % lyr['id'],
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
lyrs.append(lyr)
return lyrs
#----------------------------------------------------------------------
@property
def currentVersion(self):
"""returns the service current version"""
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def serviceDescription(self):
"""returns the service current version"""
if self._serviceDescription is None:
self.__init()
return self._serviceDescription
#----------------------------------------------------------------------
@property
def documentInfo(self):
"""returns the service document information"""
if self._documentInfo is None:
self.__init()
return self._documentInfo
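# Rough usage sketch (illustrative only; the service URL below is hypothetical
# and a securityHandler may be required for secured services):
#
#   gs = GlobeService(url="https://example.com/arcgis/rest/services/MyGlobe/GlobeServer")
#   print(gs.serviceDescription)
#   for lyr in gs.layers:
#       print(lyr['id'], lyr['object'].name)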
|
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class ScenePlugTest( GafferSceneTest.SceneTestCase ) :
def testRunTimeTyped( self ) :
p = GafferScene.ScenePlug()
self.failUnless( p.isInstanceOf( Gaffer.ValuePlug.staticTypeId() ) )
self.assertEqual( IECore.RunTimeTyped.baseTypeId( p.typeId() ), Gaffer.ValuePlug.staticTypeId() )
def testDynamicSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = GafferScene.ScenePlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
def testFullTransform( self ) :
translation = imath.M44f().translate( imath.V3f( 1 ) )
scaling = imath.M44f().scale( imath.V3f( 10 ) )
n = GafferSceneTest.CompoundObjectSource()
n["in"].setValue(
IECore.CompoundObject( {
"children" : {
"group" : {
"transform" : IECore.M44fData( translation ),
"children" : {
"ball" : {
"transform" : IECore.M44fData( scaling ),
}
}
},
},
} )
)
self.assertEqual( n["out"].transform( "/" ), imath.M44f() )
self.assertEqual( n["out"].transform( "/group" ), translation )
self.assertEqual( n["out"].transform( "/group/ball" ), scaling )
self.assertEqual( n["out"].fullTransform( "/" ), imath.M44f() )
self.assertEqual( n["out"].fullTransform( "/group" ), translation )
m = n["out"].fullTransform( "/group/ball" )
self.assertEqual( m.translation(), imath.V3f( 1 ) )
extractedScaling = imath.V3f()
m.extractScaling( extractedScaling )
self.assertEqual( extractedScaling, imath.V3f( 10 ) )
self.assertEqual( m, scaling * translation )
def testFullAttributes( self ) :
n = GafferSceneTest.CompoundObjectSource()
n["in"].setValue(
IECore.CompoundObject( {
"children" : {
"group" : {
"attributes" : {
"a" : IECore.StringData( "a" ),
"b" : IECore.StringData( "b" ),
},
"children" : {
"ball" : {
"attributes" : {
"b" : IECore.StringData( "bOverride" ),
"c" : IECore.StringData( "c" ),
},
}
}
},
},
} )
)
self.assertEqual(
n["out"].fullAttributes( "/group" ),
IECore.CompoundObject( {
"a" : IECore.StringData( "a" ),
"b" : IECore.StringData( "b" ),
} )
)
self.assertEqual(
n["out"].fullAttributes( "/group/ball" ),
IECore.CompoundObject( {
"a" : IECore.StringData( "a" ),
"b" : IECore.StringData( "bOverride" ),
"c" : IECore.StringData( "c" ),
} )
)
def testCreateCounterpart( self ) :
s1 = GafferScene.ScenePlug( "a", Gaffer.Plug.Direction.Out )
s2 = s1.createCounterpart( "b", Gaffer.Plug.Direction.In )
self.assertEqual( s2.getName(), "b" )
self.assertEqual( s2.getFlags(), s1.getFlags() )
self.assertEqual( s2.direction(), Gaffer.Plug.Direction.In )
def testAccessorOverloads( self ) :
p = GafferScene.Plane()
self.assertEqual( p["out"].attributes( "/plane" ), p["out"].attributes( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertEqual( p["out"].transform( "/plane" ), p["out"].transform( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertEqual( p["out"].object( "/plane" ), p["out"].object( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertEqual( p["out"].bound( "/plane" ), p["out"].bound( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertEqual( p["out"].childNames( "/plane" ), p["out"].childNames( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertEqual( p["out"].attributesHash( "/plane" ), p["out"].attributesHash( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertEqual( p["out"].transformHash( "/plane" ), p["out"].transformHash( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertEqual( p["out"].objectHash( "/plane" ), p["out"].objectHash( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertEqual( p["out"].boundHash( "/plane" ), p["out"].boundHash( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertEqual( p["out"].childNamesHash( "/plane" ), p["out"].childNamesHash( IECore.InternedStringVectorData( [ "plane" ] ) ) )
self.assertRaises( TypeError, p["out"].boundHash, 10 )
def testBoxPromotion( self ) :
b = Gaffer.Box()
b["n"] = GafferScene.StandardAttributes()
self.assertTrue( Gaffer.PlugAlgo.canPromote( b["n"]["in"] ) )
self.assertTrue( Gaffer.PlugAlgo.canPromote( b["n"]["out"] ) )
i = Gaffer.PlugAlgo.promote( b["n"]["in"] )
o = Gaffer.PlugAlgo.promote( b["n"]["out"] )
self.assertEqual( b["n"]["in"].getInput(), i )
self.assertEqual( o.getInput(), b["n"]["out"] )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( b["n"]["in"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( b["n"]["out"] ) )
def testNoneAsPath( self ) :
p = GafferScene.Plane()
self.assertRaises( Exception, p["out"].transform, None )
def testStringToPath( self ) :
self.assertEqual( GafferScene.ScenePlug.stringToPath( "" ), IECore.InternedStringVectorData() )
self.assertEqual( GafferScene.ScenePlug.stringToPath( "/" ), IECore.InternedStringVectorData() )
self.assertEqual( GafferScene.ScenePlug.stringToPath( "/a" ), IECore.InternedStringVectorData( [ "a" ] ) )
self.assertEqual( GafferScene.ScenePlug.stringToPath( "a" ), IECore.InternedStringVectorData( [ "a" ] ) )
self.assertEqual( GafferScene.ScenePlug.stringToPath( "/a/b" ), IECore.InternedStringVectorData( [ "a", "b" ] ) )
self.assertEqual( GafferScene.ScenePlug.stringToPath( "/a/b/" ), IECore.InternedStringVectorData( [ "a", "b" ] ) )
self.assertEqual( GafferScene.ScenePlug.stringToPath( "//a//b//" ), IECore.InternedStringVectorData( [ "a", "b" ] ) )
self.assertEqual( GafferScene.ScenePlug.stringToPath( "/foo/bar/" ), IECore.InternedStringVectorData( [ "foo", "bar" ] ) )
self.assertEqual( GafferScene.ScenePlug.stringToPath( "foo/bar/" ), IECore.InternedStringVectorData( [ "foo", "bar" ] ) )
def testPathToString( self ) :
self.assertEqual( GafferScene.ScenePlug.pathToString( IECore.InternedStringVectorData() ), "/" )
self.assertEqual( GafferScene.ScenePlug.pathToString( IECore.InternedStringVectorData( [ "a" ] ) ), "/a" )
self.assertEqual( GafferScene.ScenePlug.pathToString( IECore.InternedStringVectorData( [ "a", "b" ] ) ), "/a/b" )
def testManyStringToPathCalls( self ) :
GafferSceneTest.testManyStringToPathCalls()
def testSetPlugs( self ) :
p = GafferScene.ScenePlug()
self.assertTrue( isinstance( p["setNames"], Gaffer.InternedStringVectorDataPlug ) )
self.assertEqual( p["setNames"].defaultValue(), IECore.InternedStringVectorData() )
self.assertTrue( isinstance( p["set"], Gaffer.PathMatcherDataPlug ) )
self.assertEqual( p["set"].defaultValue(), IECore.PathMatcherData() )
def testGlobalsAccessors( self ) :
p = GafferScene.ScenePlug()
self.assertEqual( p.globals(), p["globals"].getValue() )
self.assertFalse( p.globals().isSame( p["globals"].getValue() ) )
self.assertTrue( p.globals( _copy = False ).isSame( p["globals"].getValue( _copy = False ) ) )
self.assertEqual( p.setNames(), p["setNames"].getValue() )
self.assertFalse( p.setNames().isSame( p["setNames"].getValue() ) )
self.assertTrue( p.setNames( _copy = False ).isSame( p["setNames"].getValue( _copy = False ) ) )
self.assertEqual( p.globalsHash(), p["globals"].hash() )
self.assertEqual( p.setNamesHash(), p["setNames"].hash() )
if __name__ == "__main__":
unittest.main()
|
|
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://matthewdhoffman.com/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
import warnings
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..exceptions import NotFittedError
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
        Stopping tolerance for updating the document topic distribution in the
        E-step.
    cal_sstats : boolean
        Parameter that indicates whether to calculate sufficient statistics
        or not. Set `cal_sstats` to `True` when we need to run the M-step.
    random_state : RandomState instance or None
        Parameter that indicates how to initialize the document topic
        distribution. Setting `random_state` to None will initialize the
        document topic distribution to a constant value.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
# diff on `component_` (only calculate it when `cal_diff` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
        Method used to update `components_`. Only used in the `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
The default learning method is going to be changed to 'batch' in the 0.20 release.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
It is a parameter that control learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
    evaluate_every : int, optional (default=-1)
        How often to evaluate perplexity. Only used in the `fit` method.
        Set it to 0 or a negative number to not evaluate perplexity in
        training at all. Evaluating perplexity can help you check convergence
        in the training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_ : array, [n_topics, n_features]
Variational parameters for topic word distribution. Since the complete
conditional for topic word distribution is a Dirichlet,
``components_[i, j]`` can be viewed as pseudocount that represents the
number of times word `j` was assigned to topic `i`.
It can also be viewed as distribution over the words for each topic
after normalization:
``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://matthewdhoffman.com//code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method=None,
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online", None):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
            Parameter that indicates whether to calculate sufficient statistics
            or not. Set ``cal_sstats`` to True when we need to run the M-step.
random_init : boolean
            Parameter that indicates whether to initialize the document topic
            distribution randomly in the E-step. Set it to True in training
            steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
        Update `components_` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
            Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
        # update `components_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
def _check_non_neg_array(self, X, whom):
"""check X format
        check X format and make sure there are no negative values in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
        if learning_method is None:
warnings.warn("The default value for 'learning_method' will be "
"changed from 'online' to 'batch' in the release 0.20. "
"This warning was introduced in 0.18.",
DeprecationWarning)
learning_method = 'online'
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
bound = self._perplexity_precomp_distr(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
# calculate final perplexity value on train set
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
self.bound_ = self._perplexity_precomp_distr(X, doc_topics_distr,
sub_sampling=False)
return self
def _unnormalized_transform(self, X):
"""Transform data X according to fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
return doc_topic_distr
def transform(self, X):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
*doc_topic_distr* is now normalized
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
doc_topic_distr = self._unnormalized_transform(X)
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
            It is used when calculating the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self._unnormalized_transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def _perplexity_precomp_distr(self, X, doc_topic_distr=None,
sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self._unnormalized_transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
def perplexity(self, X, doc_topic_distr='deprecated', sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
            *doc_topic_distr* argument has been deprecated and is ignored
            because the user no longer has access to the unnormalized
            distribution.
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
This argument is deprecated and is currently being ignored.
.. deprecated:: 0.19
Returns
-------
score : float
Perplexity score.
"""
if doc_topic_distr != 'deprecated':
warnings.warn("Argument 'doc_topic_distr' is deprecated and is "
"being ignored as of 0.19. Support for this "
"argument will be removed in 0.21.",
DeprecationWarning)
return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
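# Rough usage sketch (illustrative comments only, not part of this module;
# assumes the class is imported from sklearn.decomposition and X is a
# document-word count matrix, e.g. produced by CountVectorizer):
#
#   lda = LatentDirichletAllocation(n_topics=10, learning_method='online',
#                                   random_state=0)
#   doc_topics = lda.fit_transform(X)   # shape (n_samples, n_topics)
#   topic_words = lda.components_       # shape (n_topics, n_features)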
|
|
from epiphany.isa import reg_map
def make_zero_operand_factory(opcode):
def factory():
return opcode
return factory
gie16 = make_zero_operand_factory(0b0000000110010010)
gid16 = make_zero_operand_factory(0b0000001110010010)
nop16 = make_zero_operand_factory(0b0000000110100010)
idle16 = make_zero_operand_factory(0b0000000110110010)
bkpt16 = make_zero_operand_factory(0b0000000111000010)
mbkpt16 = make_zero_operand_factory(0b0000001111000010)
sync16 = make_zero_operand_factory(0b0000000111110010)
rti16 = make_zero_operand_factory(0b0000000111010010)
wand16 = make_zero_operand_factory(0b0000000110000010)
unimpl = make_zero_operand_factory(0b00000000000011110000000000001111)
def trap16(trap=0):
return 0b1111100010 | (trap << 10)
def make_arith32_immediate_factory(name):
def arith32_immediate(rd=0, rn=0, imm=0):
if name == 'add':
opcode = 0b0011011
elif name == 'sub':
opcode = 0b0111011
else:
raise NotImplementedError()
return (opcode | ((imm & 7) << 7) | ((rn & 7) << 10) |
((rd & 7) << 13) | ((imm & (0xFF << 3)) << 13) |
((rn & 56) << 23) | ((rd & 56) << 26))
return arith32_immediate
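# In the 32-bit encodings above and below, each 6-bit register number is split
# across the instruction word: bits 0-2 go in the lower half-word (the `& 7`
# terms) and bits 3-5 go in the upper half-word (the `& 56` terms).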
add32_immediate = make_arith32_immediate_factory('add')
sub32_immediate = make_arith32_immediate_factory('sub')
def make_bitwise32_factory(name):
def arith32(rd=0, rn=0, rm=0):
bits_16_20 = 0b1010
if name == 'add':
opcode = 0b0011111
elif name == 'sub':
opcode = 0b0111111
elif name == 'and':
opcode = 0b1011111
elif name == 'orr':
opcode = 0b1111111
elif name == 'eor':
opcode = 0b0001111
elif name == 'asr':
opcode = 0b1101111
elif name == 'lsr':
opcode = 0b1001111
elif name == 'lsl':
opcode = 0b0101111
else:
raise NotImplementedError()
return (opcode | ((rm & 7) << 7) | ((rn & 7) << 10) |
((rd & 7) << 13) | (bits_16_20 << 16) |
((rm & 56) << 20) | ((rn & 56) << 23) | ((rd & 56) << 26))
return arith32
add32 = make_bitwise32_factory('add')
sub32 = make_bitwise32_factory('sub')
and32 = make_bitwise32_factory('and')
orr32 = make_bitwise32_factory('orr')
eor32 = make_bitwise32_factory('eor')
asr32 = make_bitwise32_factory('asr')
lsr32 = make_bitwise32_factory('lsr')
lsl32 = make_bitwise32_factory('lsl')
def make_bitwise32_immediate_factory(name):
def bit32_immediate(rd=0, rn=0, imm=0):
if name == 'lsr':
opcode = 0b01111
bits_16_20 = 0b0110
elif name == 'lsl':
opcode = 0b11111
bits_16_20 = 0b0110
elif name == 'asr':
opcode = 0b01111
bits_16_20 = 0b1110
elif name == 'bitr':
opcode = 0b11111
bits_16_20 = 0b1110
else:
raise NotImplementedError()
return (opcode | (imm << 5) | ((rn & 7) << 10) | ((rd & 7) << 13) |
(bits_16_20 << 16) | ((rn & 56) << 23) | ((rd & 56) << 26))
return bit32_immediate
lsr32_immediate = make_bitwise32_immediate_factory('lsr')
lsl32_immediate = make_bitwise32_immediate_factory('lsl')
asr32_immediate = make_bitwise32_immediate_factory('asr')
bitr32_immediate = make_bitwise32_immediate_factory('bitr')
def make_bitwise16_immediate_factory(name):
def bit16_immediate(rd=0, rn=0, imm=0):
if name == 'lsr':
opcode = 0b00110
elif name == 'lsl':
opcode = 0b10110
elif name == 'asr':
opcode = 0b01110
elif name == 'bitr': # No immediate on pp 81 of reference manual.
opcode = 0b11110
else:
raise NotImplementedError()
return (opcode | (imm << 5) | (rn << 10) | (rd << 13))
return bit16_immediate
lsr16_immediate = make_bitwise16_immediate_factory('lsr')
lsl16_immediate = make_bitwise16_immediate_factory('lsl')
asr16_immediate = make_bitwise16_immediate_factory('asr')
bitr16_immediate = make_bitwise16_immediate_factory('bitr')
def make_bitwise16_factory(name):
def bit16(rd=0, rn=0, rm=0):
assert rd <= 0b111
assert rn <= 0b111
assert rm <= 0b111
if name == 'add':
opcode = 0b0011010
elif name == 'sub':
opcode = 0b0111010
elif name == 'and':
opcode = 0b1011010
elif name == 'orr':
opcode = 0b1111010
elif name == 'eor':
opcode = 0b0001010
elif name == 'asr':
opcode = 0b1101010
elif name == 'lsr':
opcode = 0b1001010
elif name == 'lsl':
opcode = 0b0101010
else:
raise NotImplementedError()
return (opcode | ((rm & 7) << 7) | ((rn & 7) << 10) | ((rd & 7) << 13))
return bit16
add16 = make_bitwise16_factory('add')
sub16 = make_bitwise16_factory('sub')
and16 = make_bitwise16_factory('and')
orr16 = make_bitwise16_factory('orr')
eor16 = make_bitwise16_factory('eor')
asr16 = make_bitwise16_factory('asr')
lsr16 = make_bitwise16_factory('lsr')
lsl16 = make_bitwise16_factory('lsl')
def make_arith16_immediate_factory(name):
def arith16_immediate(rd=0, rn=0, imm=0):
if name == 'add':
opcode = 0b0010011
elif name == 'sub':
opcode = 0b0110011
else:
raise NotImplementedError()
return (opcode | ((imm & 7) << 7) | ((rn & 7) << 10) | ((rd & 7) << 13))
return arith16_immediate
add16_immediate = make_arith16_immediate_factory('add')
sub16_immediate = make_arith16_immediate_factory('sub')
def make_jump32_factory(and_link):
def jump(rn=0):
opcode = 0b0101011111 if and_link else 0b0101001111
bits_16_20 = 0b0010
return (opcode | ((rn & 7) << 10) | (bits_16_20 << 16) | ((rn & 56) << 23))
return jump
jr32 = make_jump32_factory(False)
jalr32 = make_jump32_factory(True)
def make_jump16_factory(and_link):
def jump(rn=0):
opcode = 0b0101010010 if and_link else 0b0101000010
return (opcode | (rn << 10))
return jump
jr16 = make_jump16_factory(False)
jalr16 = make_jump16_factory(True)
def bcond_factory(is16bit):
def bcond(condition=0, imm=0):
opcode = 0b0000 if is16bit else 0b1000
return (opcode | (condition << 4) | (imm << 8))
return bcond
bcond32 = bcond_factory(False)
bcond16 = bcond_factory(True)
def make_farith32_factory(name):
def farith32(rd=0, rn=0, rm=0):
bits_16_20 = 0b0111
if name == 'add':
opcode = 0b0001111
elif name == 'sub':
opcode = 0b0011111
elif name == 'mul':
opcode = 0b0101111
elif name == 'madd':
opcode = 0b0111111
elif name == 'msub':
opcode = 0b1001111
elif name == 'float':
opcode = 0b1011111
elif name == 'fix':
opcode = 0b1101111
elif name == 'abs':
opcode = 0b1111111
else:
raise NotImplementedError()
return (opcode | ((rm & 7) << 7) | ((rn & 7) << 10) |
((rd & 7) << 13) | (bits_16_20 << 16) |
((rm & 56) << 20) | ((rn & 56) << 23) | ((rd & 56) << 26))
return farith32
fadd32 = make_farith32_factory('add')
fsub32 = make_farith32_factory('sub')
fmul32 = make_farith32_factory('mul')
fmadd32 = make_farith32_factory('madd')
fmsub32 = make_farith32_factory('msub')
float32 = make_farith32_factory('float')
fix32 = make_farith32_factory('fix')
fabs32 = make_farith32_factory('abs')
def make_farith16_factory(name):
def farith16(rd=0, rn=0, rm=0):
if name == 'add':
opcode = 0b0000111
elif name == 'sub':
opcode = 0b0010111
elif name == 'mul':
opcode = 0b0100111
elif name == 'madd':
opcode = 0b0110111
elif name == 'msub':
opcode = 0b1000111
elif name == 'float':
opcode = 0b1010111
elif name == 'fix':
opcode = 0b1100111
elif name == 'abs':
opcode = 0b1110111
else:
raise NotImplementedError()
return (opcode | ((rm & 7) << 7) | ((rn & 7) << 10) |
((rd & 7) << 13))
return farith16
fadd16 = make_farith16_factory('add')
fsub16 = make_farith16_factory('sub')
fmul16 = make_farith16_factory('mul')
fmadd16 = make_farith16_factory('madd')
fmsub16 = make_farith16_factory('msub')
float16 = make_farith16_factory('float')
fix16 = make_farith16_factory('fix')
fabs16 = make_farith16_factory('abs')
def movcond32(condition=0, rd=0, rn=0):
opcode = 0b1111
bits_16_20 = 0b0010
return (opcode | (condition << 4) | ((rn & 7) << 10) |
((rd & 7) << 13) | (bits_16_20 << 16) |
((rn & 56) << 23) | ((rd & 56) << 26))
def movcond16(condition, rd=0, rn=0):
opcode = 0b0010
bits_9_10 = 0b00
return (opcode | (condition << 4) | (bits_9_10 << 8) | (rn << 10) | (rd << 13))
def make_movimm32(is_t):
def mov32_immediate(rd=0, imm=0):
opcode = 0b01011
bit28 = 1 if is_t else 0
return (opcode | ((imm & 255) << 5) | ((rd & 7) << 13) |
((imm & 65280) << 12) | (bit28 << 28) | ((rd & 56) << 26))
return mov32_immediate
movimm32 = make_movimm32(False)
movtimm32 = make_movimm32(True)
def make_mov_special_factory(is16bit, is_from):
# Note that in the MOV 'special' instructions rd and rn are swapped.
# TODO: Find out what M0 and M1 are for.
def mov(rd=0, rn=0):
rn = (reg_map[rn] - 64) if not is_from else rn
rd = (reg_map[rd] - 64) if is_from else rd
if is16bit and is_from:
opcode = 0b0100010010
elif is16bit and not is_from:
opcode = 0b0100000010
elif not is16bit and is_from:
opcode = 0b0100011111
elif not is16bit and not is_from:
opcode = 0b0100001111
bits_16_20 = 0b0000 if is16bit else 0b0010
return (opcode | ((rn & 7) << 10) |
((rd & 7) << 13) | (bits_16_20 << 16) |
((rn & 56) << 23) | ((rd & 56) << 26))
return mov
movts16 = make_mov_special_factory(True, False)
movts32 = make_mov_special_factory(False, False)
movfs16 = make_mov_special_factory(True, True)
movfs32 = make_mov_special_factory(False, True)
def movimm16(rd=0, imm=0):
opcode = 0b00011
return (opcode | (imm << 5) | (rd << 13))
def ldstrdisp16(rd=0, rn=0, imm=0, bb=0, s=0):
# Data size
# 00=byte, 01=half-word, 10=word, 11=double-word
opcode = 0b0100
return (opcode | (s << 4) | (bb << 5) | ((imm & 7) << 7) |
((rn & 7) << 10) | ((rd & 7) << 13))
def make_ldstrdisp32_factory(with_postmodify):
def ldstrdisp32(rd=0, rn=0, sub=0, imm=0, bb=0, s=0):
# Data size
# 00=byte, 01=half-word, 10=word, 11=double-word
opcode = 0b1100
bit25 = 1 if with_postmodify else 0
return (opcode | (s << 4) | (bb << 5) | ((imm & 7) << 7) |
((rn & 7) << 10) | ((rd & 7) << 13) |
((imm & (0xFF << 3)) << 13) | (sub << 24) | (bit25 << 25) |
((rn & 56) << 23) | ((rd & 56) << 26))
return ldstrdisp32
ldstrpmd32 = make_ldstrdisp32_factory(True)
ldstrdisp32 = make_ldstrdisp32_factory(False)
def make_ldstrindex_factory(is16bit):
def ldstrindex(rd=0, rn=0, rm=0, sub=0, bb=0, s=0):
# Data size
# 00=byte, 01=half-word, 10=word, 11=double-word
opcode = 0b0001 if is16bit else 0b1001
bits_22_23 = 0b00
if is16bit:
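            # The 16-bit indexed form has no subtract bit, so force it to zero.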
sub &= 0
return (opcode | (s << 4) | (bb << 5) | ((rm & 7) << 7) |
((rn & 7) << 10) | ((rd & 7) << 13) |
(sub << 20) | (bits_22_23 << 21) |
((rm & 56) << 20) | ((rn & 56) << 23) | ((rd & 56) << 26))
return ldstrindex
ldstrind16 = make_ldstrindex_factory(True)
ldstrind32 = make_ldstrindex_factory(False)
def make_ldstrpm_factory(is16bit):
def ldstrpm(rd=0, rn=0, rm=0, sub=0, bb=0, s=0):
# Data size
# 00=byte, 01=half-word, 10=word, 11=double-word
opcode = 0b0101 if is16bit else 0b1101
bits_22_23 = 0b00
return (opcode | (s << 4) | (bb << 5) | ((rm & 7) << 7) |
((rn & 7) << 10) | ((rd & 7) << 13) |
(sub << 20) | (bits_22_23 << 21) |
((rm & 56) << 20) | ((rn & 56) << 23) | ((rd & 56) << 26))
return ldstrpm
ldstrpm16 = make_ldstrpm_factory(True)
ldstrpm32 = make_ldstrpm_factory(False)
def testset32(rd=0, rn=0, rm=0, sub=0, bb=0):
opcode = 0b01001
bits_22_23 = 0b01
return (opcode | (bb << 5) | ((rm & 7) << 7) |
((rn & 7) << 10) | ((rd & 7) << 13) |
(sub << 20) | (bits_22_23 << 21) |
((rm & 56) << 20) | ((rn & 56) << 23) | ((rd & 56) << 26))
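# A minimal, optional sanity check (illustrative only; register numbers and
# immediates are arbitrary example values, not taken from any test suite):
if __name__ == '__main__':
    print('{:032b}'.format(add32(rd=1, rn=2, rm=3)))
    print('{:016b}'.format(movimm16(rd=1, imm=42)))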
|
|
# -*- encoding: utf-8 -*-
'''
Hubble Nova plugin for running arbitrary commands and checking the output of
those commands
This module is deprecated, and must be explicitly enabled in pillar/minion
config via the hubblestack:nova:enable_command_module (should be set to True
to enable this module). This allows nova to run arbitrary commands via yaml
profiles.
:maintainer: HubbleStack / basepi
:maturity: 2016.7.0
:platform: All
:requires: SaltStack
Sample YAML data, with inline comments:
# Top level key lets the module know it should look at this data
command:
# Unique ID for this set of audits
nodev:
data:
# 'osfinger' grain, for multiplatform support
'Red Hat Enterprise Linux Server-6':
# tag is required
tag: CIS-1.1.10
# `commands` is a list of commands with individual flags
commands:
# Command to be run
- 'grep "[[:space:]]/home[[:space:]]" /etc/fstab':
# Check the output for this pattern
# If match_output not provided, any output will be a match
match_output: nodev
# Use regex when matching the output (default False)
match_output_regex: False
# Invert the success criteria. If True, a match will cause failure (default False)
fail_if_matched: False
- 'mount | grep /home':
match_output: nodev
match_output_regex: False
# Match each line of the output against our pattern
# Any that don't match will make the audit fail (default False)
match_output_by_line: True
- ?
|
echo 'this is a multi-line'
echo 'bash script'
echo 'note the special ? syntax'
:
# Shell through which the script will be run, must be abs path
shell: /bin/bash
match_output: this
# Aggregation strategy for multiple commands. Defaults to 'and', other option is 'or'
aggregation: 'and'
# Catch-all, if no other osfinger match was found
'*':
tag: generic_tag
commands:
- 'grep "[[:space:]]/home[[:space:]]" /etc/fstab':
match_output: nodev
match_output_regex: False
fail_if_matched: False
- 'mount | grep /home':
match_output: nodev
match_output_regex: False
match_output_by_line: True
aggregation: 'and'
# Description will be output with the results
description: '/home should be nodev'
'''
from __future__ import absolute_import
import logging
import fnmatch
import re
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
return True
def audit(data_list, tags, labels, **kwargs):
'''
Run the command audits contained in the data_list
'''
# Consume any module_params from kwargs (Setting False as a fallback)
debug = kwargs.get('nova_debug', False)
cmd_raw = kwargs.get('cmd_raw', False)
__data__ = {}
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__tags__ = _get_tags(__data__)
if debug:
log.debug('command audit __data__:')
log.debug(__data__)
log.debug('command audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
if __tags__ and not __salt__['config.get']('hubblestack:nova:enable_command_module',
False):
ret['Errors'] = ['command module has not been explicitly enabled in '
'config. Please set hubblestack:nova:enable_command_module '
'to True in pillar or minion config to allow this module.']
return ret
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
if 'commands' not in tag_data:
continue
command_results = []
for command_data in tag_data['commands']:
for command, command_args in command_data.iteritems():
if 'shell' in command_args:
cmd_ret = __salt__['cmd.run'](command,
python_shell=True,
shell=command_args['shell'])
else:
cmd_ret = __salt__['cmd.run'](command,
python_shell=True)
found = False
if cmd_ret:
if cmd_raw:
tag_data['raw'] = cmd_ret
found = True
if 'match_output' in command_args:
if command_args.get('match_output_by_line'):
cmd_ret_lines = cmd_ret.splitlines()
else:
cmd_ret_lines = [cmd_ret]
for line in cmd_ret_lines:
if command_args.get('match_output_regex'):
if not re.match(command_args['match_output'], line):
found = False
else: # match without regex
if command_args['match_output'] not in line:
found = False
if command_args.get('fail_if_matched'):
found = not found
command_results.append(found)
aggregation = tag_data.get('aggregation', 'and')
if aggregation.lower() == 'or':
if any(command_results):
ret['Success'].append(tag_data)
else:
ret['Failure'].append(tag_data)
else: # assume 'and' if it's not 'or'
if all(command_results):
ret['Success'].append(tag_data)
else:
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the command level
'''
if 'command' not in ret:
ret['command'] = []
if 'command' in data:
for key, val in data['command'].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret['command'].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfinger')
for audit_dict in data.get('command', []):
# command:0
for audit_id, audit_data in audit_dict.iteritems():
# command:0:nodev
tags_dict = audit_data.get('data', {})
# command:0:nodev:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', {})
# command:0:nodev:data:Debian-8
if 'tag' not in tags:
tags['tag'] = ''
tag = tags['tag']
if tag not in ret:
ret[tag] = []
formatted_data = {'tag': tag,
'module': 'command'}
formatted_data.update(audit_data)
formatted_data.update(tags)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
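def _example_merge_yaml():
    # Illustrative sketch (added for clarity, not part of the original module;
    # running it assumes the Salt / Python 2 environment this module targets):
    # _merge_yaml flattens per-profile data into a list of single-key dicts and
    # records the originating profile name. 'example_profile' is a hypothetical
    # placeholder name.
    sample = {
        'command': {
            'nodev': {
                'data': {'*': {'tag': 'generic_tag', 'commands': []}},
                'description': '/home should be nodev',
            }
        }
    }
    merged = _merge_yaml({}, sample, profile='example_profile')
    # merged == {'command': [{'nodev': {..., 'nova_profile': 'example_profile'}}]}
    return merged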
|
|
# vim:set ff=unix expandtab ts=4 sw=4:
from typing import Callable, Tuple, Sequence, Set, Dict
from functools import lru_cache, _CacheInfo, _lru_cache_wrapper
import numpy as np
import matplotlib.pyplot as plt
import inspect
from collections import namedtuple
from numbers import Number
from scipy.integrate import odeint, quad
from scipy.interpolate import lagrange
from scipy.optimize import brentq
from scipy.stats import norm
from string import Template
from sympy import (
gcd,
diag,
lambdify,
DiracDelta,
solve,
Matrix,
Symbol,
Expr,
diff,
simplify,
eye,
ImmutableMatrix
)
from sympy.polys.polyerrors import PolynomialError
from sympy.core.function import UndefinedFunction, Function, sympify
from sympy import Symbol
from collections.abc import Iterable
import networkx as nx
import igraph as ig
from frozendict import frozendict
from .BlockOde import BlockOde
from .myOdeResult import solve_ivp_pwc
ALPHA_14C = 1.18e-12
DECAY_RATE_14C_YEARLY = np.log(2) / 5730
DECAY_RATE_14C_DAILY = DECAY_RATE_14C_YEARLY / 365.25
def combine(m1, m2, m1_to_m2, m2_to_m1, intersect=False):
m1_sv_set, m1_in_fluxes, m1_out_fluxes, m1_internal_fluxes = m1
m2_sv_set, m2_in_fluxes, m2_out_fluxes, m2_internal_fluxes = m2
intersect_sv_set = m1_sv_set & m2_sv_set
if intersect_sv_set and not intersect:
raise(ValueError("How to handle pools %s?" % str(intersect_sv_set)))
sv_set = m1_sv_set | m2_sv_set
# create external in_fluxes
in_fluxes = dict()
# add all external in_fluxes of m1
for k, v in m1_in_fluxes.items():
if k in in_fluxes.keys():
in_fluxes[k] += v
else:
in_fluxes[k] = v
# remove flux from in_flux if it becomes internal
for pool_to in in_fluxes.keys():
for (pool_from, a), flux in m2_to_m1.items():
if a == pool_to:
in_fluxes[pool_to] -= flux
# add all external in_fluxes of m2
for k, v in m2_in_fluxes.items():
if k in in_fluxes.keys():
in_fluxes[k] += v
else:
in_fluxes[k] = v
# remove flux from in_flux if it becomes internal
for pool_to in in_fluxes.keys():
for (pool_from, a), flux in m1_to_m2.items():
if a == pool_to:
in_fluxes[pool_to] -= flux
# create external out_fluxes
out_fluxes = dict()
# add all external out_fluxes from m1
for k, v in m1_out_fluxes.items():
if k in out_fluxes.keys():
out_fluxes[k] += v
else:
out_fluxes[k] = v
# remove flux from out_flux if it becomes internal
for pool_from in out_fluxes.keys():
for (a, pool_to), flux in m1_to_m2.items():
if a == pool_from:
out_fluxes[pool_from] -= flux
# add all external out_fluxes from m2
for k, v in m2_out_fluxes.items():
if k in out_fluxes.keys():
out_fluxes[k] += v
else:
out_fluxes[k] = v
# remove flux from out_flux if it becomes internal
for pool_from in out_fluxes.keys():
for (a, pool_to), flux in m2_to_m1.items():
if a == pool_from:
out_fluxes[pool_from] -= flux
# create internal fluxes
internal_fluxes = dict()
dicts = [m1_internal_fluxes, m2_internal_fluxes, m1_to_m2, m2_to_m1]
for d in dicts:
for k, v in d.items():
if k in internal_fluxes.keys():
internal_fluxes[k] += v
else:
internal_fluxes[k] = v
# overwrite in_fluxes and out_fluxes for intersection pools
for sv in intersect_sv_set:
in_fluxes[sv] = intersect[0][sv]
out_fluxes[sv] = intersect[1][sv]
clean_in_fluxes = {k: v for k, v in in_fluxes.items() if v != 0}
clean_out_fluxes = {k: v for k, v in out_fluxes.items() if v != 0}
clean_internal_fluxes = {k: v for k, v in internal_fluxes.items() if v != 0}
return sv_set, clean_in_fluxes, clean_out_fluxes, clean_internal_fluxes
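def _example_combine():
    # Illustrative sketch (added for clarity, not part of the original API):
    # combine two one-pool models A and B that are connected by the flux k*A
    # from A to B. The symbols A, B and k are arbitrary example names.
    from sympy import symbols
    A, B, k = symbols('A B k')
    m1 = ({A}, {A: 1}, {A: k * A}, {})   # (pools, in, out, internal)
    m2 = ({B}, {}, {B: k * B}, {})
    m1_to_m2 = {(A, B): k * A}
    m2_to_m1 = {}
    combined = combine(m1, m2, m1_to_m2, m2_to_m1)
    # the former outflux k*A of m1 has become an internal flux:
    # combined == ({A, B}, {A: 1}, {B: k*B}, {(A, B): k*A})
    return combined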
def extract(m, sv_set, ignore_other_pools=False, supersede=False):
m_sv_set, m_in_fluxes, m_out_fluxes, m_internal_fluxes = m
assert(sv_set.issubset(m_sv_set))
in_fluxes = {pool: flux for pool, flux in m_in_fluxes.items() if pool in sv_set}
out_fluxes = {pool: flux for pool, flux in m_out_fluxes.items() if pool in sv_set}
internal_fluxes = {
(pool_from, pool_to): flux
for (pool_from, pool_to), flux in m_internal_fluxes.items()
if (pool_from in sv_set) and (pool_to in sv_set)
}
for (pool_from, pool_to), flux in m_internal_fluxes.items():
# internal flux becomes influx if not ignored
if not ignore_other_pools:
if (pool_from not in sv_set) and (pool_to in sv_set):
if pool_to in in_fluxes.keys():
in_fluxes[pool_to] += flux
else:
in_fluxes[pool_to] = flux
# internal flux becomes outflux if not ignored
if not ignore_other_pools:
if (pool_from in sv_set) and (pool_to not in sv_set):
if pool_from in out_fluxes.keys():
out_fluxes[pool_from] += flux
else:
out_fluxes[pool_from] = flux
# overwrite in_fluxes and out_fluxes if desired
if supersede:
for sv, flux in supersede[0].items():
in_fluxes[sv] = flux
for sv, flux in supersede[1].items():
out_fluxes[sv] = flux
for (pool_from, pool_to), flux in supersede[2].items():
internal_fluxes[pool_from, pool_to] = flux
clean_in_fluxes = {k: v for k, v in in_fluxes.items() if v != 0}
clean_out_fluxes = {k: v for k, v in out_fluxes.items() if v != 0}
clean_internal_fluxes = {k: v for k, v in internal_fluxes.items() if v != 0}
return sv_set, clean_in_fluxes, clean_out_fluxes, clean_internal_fluxes
def nxgraphs(
state_vector: Tuple[Symbol],
in_fluxes: Dict[Symbol, Expr],
internal_fluxes: Dict[Tuple[Symbol, Symbol], Expr],
out_fluxes: Dict[Symbol, Expr],
) -> nx.DiGraph:
G = nx.DiGraph()
node_names = [str(sv) for sv in state_vector]
G.add_nodes_from(node_names)
in_flux_targets, out_flux_sources = [
[str(k) for k in d.keys()]
for d in (in_fluxes, out_fluxes)
]
virtual_in_flux_sources = ["virtual_in_" + str(t) for t in in_flux_targets]
for n in virtual_in_flux_sources:
G.add_node(n, virtual=True)
for i in range(len(in_flux_targets)):
G.add_edge(
virtual_in_flux_sources[i],
in_flux_targets[i],
expr=in_fluxes[Symbol(in_flux_targets[i])]
)
virtual_out_flux_targets = [
"virtual_out_" + str(t)
for t in out_flux_sources
]
for n in virtual_out_flux_targets:
G.add_node(n, virtual=True)
for i in range(len(out_flux_sources)):
G.add_edge(
out_flux_sources[i],
virtual_out_flux_targets[i],
expr=out_fluxes[Symbol(out_flux_sources[i])]
)
#for c in internal_connections:
for c in internal_fluxes.keys():
G.add_edge(str(c[0]), str(c[1]),expr=internal_fluxes[c])
return G
def igraph_func_plot(
Gnx: nx.DiGraph,  # note that Gnx has to have a 'virtual' attribute on some vertices
node_color_func: Callable[[nx.DiGraph,str],str],
edge_color_func: Callable[[nx.DiGraph,str,str],str],
) -> ig.drawing.Plot:
G = ig.Graph.from_networkx(Gnx)
vertex_size = [1 if v['virtual'] else 50 for v in G.vs]
vertex_color = [node_color_func(Gnx, v) for v in Gnx.nodes]
vertex_label = [v['_nx_name'] if not v['virtual'] else '' for v in G.vs]
edge_color = [edge_color_func(Gnx, s, t) for s, t in Gnx.edges]
edge_label = [Gnx.get_edge_data(s, t)['expr'] for s, t in Gnx.edges]
layout = G.layout('sugiyama')
pl = ig.plot(
G,
layout=layout,
vertex_size=vertex_size,
vertex_label=vertex_label,
vertex_color=vertex_color,
vertex_label_size=9,
edge_color=edge_color,
edge_label=edge_label,
edge_label_size=4,
)
return pl
def igraph_plot(
state_vector: Matrix,
in_fluxes: frozendict,
internal_fluxes: frozendict,
out_fluxes: frozendict
) -> ig.drawing.Plot:
Gnx = nxgraphs(state_vector, in_fluxes, internal_fluxes, out_fluxes)
def n_color(
G: nx.DiGraph,
node_name: str
) -> str:
return 'grey'
def e_color(
G: nx.DiGraph,
s: str,
t: str
) -> str:
return "blue" if G.in_degree(s) ==0 else (
'red' if G.out_degree(t) == 0 else 'black'
)
return igraph_func_plot(
Gnx,
node_color_func=n_color,
edge_color_func=e_color
)
def igraph_part_plot(
state_vector: Tuple[Symbol],
in_fluxes: Dict[Symbol, Expr],
internal_fluxes: Dict[Tuple[Symbol, Symbol], Expr],
out_fluxes: Dict[Symbol, Expr],
part_dict: Dict[Set[str], str]
) -> ig.drawing.Plot:
Gnx = nxgraphs(state_vector, in_fluxes, internal_fluxes, out_fluxes)
def n_color(G,node_name):
cs=set({})
for var_set, color in part_dict.items():
var_set_str = frozenset({str(v) for v in var_set})
# we could have multicolored nodes if the variable sets overlap,
# but igraph does not support that
cs = cs.union(set({color})) if node_name in var_set_str else cs
return 'grey' if len(cs) == 0 else list(cs)[0]
def e_color(
G: nx.DiGraph,
s: str,
t: str
) -> str:
return "blue" if G.in_degree(s) ==0 else (
'red' if G.out_degree(t) == 0 else 'black'
)
return igraph_func_plot(
Gnx,
node_color_func=n_color,
edge_color_func=e_color
)
def to_int_keys_1(flux_by_sym, state_vector):
return {list(state_vector).index(k):v for k,v in flux_by_sym.items()}
def to_int_keys_2(fluxes_by_sym_tup, state_vector):
return{
(list(state_vector).index(k[0]),list(state_vector).index(k[1])):v
for k,v in fluxes_by_sym_tup.items()
}
def in_or_out_flux_tuple(
state_vector,
in_or_out_fluxes_by_symbol
):
keys = in_or_out_fluxes_by_symbol.keys()
def f(ind):
v = state_vector[ind]
return in_or_out_fluxes_by_symbol[v] if v in keys else 0
return Matrix([f(ind) for ind in range(len(state_vector))])
def release_operator_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
):
decomp_rates = []
for pool in range(len(state_vector)):
if pool in out_fluxes_by_index.keys():
decomp_flux = out_fluxes_by_index[pool]
else:
decomp_flux = 0
decomp_flux += sum([flux for (i,j), flux in internal_fluxes_by_index.items()
if i == pool])
decomp_rates.append(simplify(decomp_flux/state_vector[pool]))
R = diag(*decomp_rates)
return R
def release_operator_2(
out_fluxes_by_symbol,
internal_fluxes_by_symbol,
state_vector
):
return release_operator_1(
to_int_keys_1(out_fluxes_by_symbol, state_vector),
to_int_keys_2(internal_fluxes_by_symbol,state_vector),
state_vector
)
def tranfer_operator_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
):
R = release_operator_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
)
# calculate transition operator
return transfer_operator_3(
internal_fluxes_by_index,
R,
state_vector
)
def transfer_operator_2(
out_fluxes_by_symbol,
internal_fluxes_by_symbol,
state_vector
):
return tranfer_operator_1(
to_int_keys_1( out_fluxes_by_symbol, state_vector),
to_int_keys_2( internal_fluxes_by_symbol, state_vector),
state_vector
)
def transfer_operator_3(
# this is just a shortcut if we know R already
internal_fluxes_by_index,
release_operator,
state_vector
):
# calculate transition operator
T = -eye(len(state_vector))
for (i, j), flux in internal_fluxes_by_index.items():
T[j, i] = flux/state_vector[i]/release_operator[i, i]
return T
def compartmental_matrix_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
):
C = -1*release_operator_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
)
for (i, j), flux in internal_fluxes_by_index.items():
C[j, i] = flux/state_vector[i]
return C
def compartmental_matrix_2(
out_fluxes_by_symbol,
internal_fluxes_by_symbol,
state_vector
):
return compartmental_matrix_1(
to_int_keys_1( out_fluxes_by_symbol, state_vector),
to_int_keys_2( internal_fluxes_by_symbol, state_vector),
state_vector
)
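def _example_compartmental_matrix():
    # Illustrative sketch (added for clarity, not part of the original API):
    # build the compartmental matrix of a two-pool chain x1 -> x2 with symbolic
    # rates k1 (outflux of x1) and a (transfer from x1 to x2). All names are
    # arbitrary example symbols.
    from sympy import symbols
    x1, x2, k1, a = symbols('x1 x2 k1 a')
    state_vector = Matrix([x1, x2])
    out_fluxes_by_symbol = {x1: k1 * x1}
    internal_fluxes_by_symbol = {(x1, x2): a * x1}
    C = compartmental_matrix_2(
        out_fluxes_by_symbol,
        internal_fluxes_by_symbol,
        state_vector
    )
    # C == Matrix([[-k1 - a, 0], [a, 0]])
    return C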
def in_fluxes_by_index(state_vector, u):
return {
pool_nr: u[pool_nr]
for pool_nr in range(state_vector.rows)
}
def in_fluxes_by_symbol(state_vector,u):
return {
state_vector[pool_nr]: u[pool_nr]
for pool_nr in range(state_vector.rows)
if u[pool_nr] != 0
}
def out_fluxes_by_index(state_vector,B):
output_fluxes = dict()
# calculate outputs
for pool in range(state_vector.rows):
outp = -sum(B[:, pool]) * state_vector[pool]
s_outp = simplify(outp)
if s_outp:
output_fluxes[pool] = s_outp
return output_fluxes
def out_fluxes_by_symbol(state_vector,B):
fbi = out_fluxes_by_index(state_vector,B)
return {
state_vector[pool_nr]: flux
for pool_nr, flux in fbi.items()
}
def internal_fluxes_by_index(state_vector,B):
# calculate internal fluxes
internal_fluxes = dict()
pipes = [(i,j) for i in range(state_vector.rows)
for j in range(state_vector.rows) if i != j]
for pool_from, pool_to in pipes:
flux = B[pool_to, pool_from] * state_vector[pool_from]
s_flux = simplify(flux)
if s_flux:
internal_fluxes[(pool_from, pool_to)] = s_flux
return internal_fluxes
def internal_fluxes_by_symbol(state_vector,B):
fbi = internal_fluxes_by_index(state_vector,B)
return {
(state_vector[tup[0]],state_vector[tup[1]]): flux
for tup,flux in fbi.items()
}
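def _example_fluxes_from_matrix():
    # Illustrative sketch (added for clarity, not part of the original API):
    # recover the symbolic fluxes from the compartmental matrix of the same
    # two-pool chain used in the sketch above, i.e. the inverse direction of
    # compartmental_matrix_2.
    from sympy import symbols
    x1, x2, k1, a = symbols('x1 x2 k1 a')
    sv = Matrix([x1, x2])
    B = Matrix([[-k1 - a, 0], [a, 0]])
    out_fluxes = out_fluxes_by_symbol(sv, B)          # {x1: k1*x1}
    internal_fluxes = internal_fluxes_by_symbol(sv, B)  # {(x1, x2): a*x1}
    return out_fluxes, internal_fluxes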
#def fluxes_by_symbol(state_vector, fluxes_by_index):
# internal_fluxes, out_fluxes = fluxes_by_index
def warning(txt):
print('############################################')
calling_frame = inspect.getouterframes(inspect.currentframe(), 2)
func_name = calling_frame[1][3]
print("Warning in function {0}:".format(func_name))
print(txt)
def deprecation_warning(txt):
print('############################################')
calling_frame = inspect.getouterframes(inspect.currentframe(), 2)
func_name = calling_frame[1][3]
print("The function {0} is deprecated".format(func_name))
print(txt)
def flux_dict_string(d, indent=0):
s = ""
for k, val in d.items():
s += ' '*indent+str(k)+": "+str(val)+"\n"
return s
def func_subs(t, Func_expr, func, t0):
"""
Returns the function part_func, where
part_func(_, _, ...) = func(_, t=t0, _, ...) (func partially applied to t0).
The position of the argument t in the argument list is found
by examining the Func_expr argument.
Args:
t (sympy.Symbol): the symbol to be replaced by t0
t0 (value): the value substituted for t
func (function): a python function
Func_expr (sympy.Function): an expression involving an undefined Function
"""
assert(isinstance(type(Func_expr), UndefinedFunction))
pos = Func_expr.args.index(t)
def frozen(*args):
# tuples are immutable
L = list(args)
L.insert(pos, t0)
new_args = tuple(L)
return func(*new_args)
return frozen
def jacobian(vec, state_vec):
dim1 = vec.rows
dim2 = state_vec.rows
return Matrix(dim1, dim2, lambda i, j: diff(vec[i], state_vec[j]))
# fixme: test
def has_pw(expr):
if expr.is_Matrix:
for c in list(expr):
if has_pw(c):
return True
return False
if expr.is_Piecewise:
return True
for a in expr.args:
if has_pw(a):
return True
return False
def is_DiracDelta(expr):
"""Check if expr is a Dirac delta function."""
if len(expr.args) != 1:
return False
arg = expr.args[0]
return DiracDelta(arg) == expr
def parse_input_function(u_i, time_symbol):
"""Return an ordered list of jumps in the input function u.
Args:
u (SymPy expression): input function in :math:`\\dot{x} = B\\,x + u`
Returns:
ascending list of jumps in u
"""
impulse_times = []
pieces = []
def rek(expr, imp_t, p):
if hasattr(expr, 'args'):
for arg in expr.args:
if is_DiracDelta(arg):
dirac_arg = arg.args[0]
zeros = solve(dirac_arg)
imp_t += zeros
if arg.is_Piecewise:
for pw_arg in arg.args:
cond = pw_arg[1]
# 'if not cond' led to strange behavior
if cond != True: # noqa: E712
atoms = cond.args
zeros = solve(atoms[0] - atoms[1])
p += zeros
rek(arg, imp_t, p)
rek(u_i, impulse_times, pieces)
impulses = []
impulse_times = sorted(impulse_times)
for impulse_time in impulse_times:
intensity = u_i.coeff(DiracDelta(impulse_time-time_symbol))
impulses.append({'time': impulse_time, 'intensity': intensity})
jump_times = sorted(pieces + impulse_times)
return (impulses, jump_times)
def factor_out_from_matrix(M):
if has_pw(M):
return(1)
try:
return gcd(list(M))
except(PolynomialError):
# print('no factoring out possible')
# fixme: does not work if a function of X, t is in the expressions,
# we could make it work...if we really wanted to
return 1
def numerical_function_from_expression(expr, tup, parameter_dict, func_set):
# the function returns a function that, given numeric arguments,
# returns a numeric result.
# This is a stricter requirement than for a function returned by lambdify,
# which can still return symbolic results if the tuple argument to
# lambdify does not contain all free symbols of the lambdified expression.
# To avoid that case we check here that all free symbols are covered.
expr_par = expr.subs(parameter_dict)
ss_expr = expr_par.free_symbols
cut_func_set = make_cut_func_set(func_set)
ss_allowed = set(
[s for s in tup]
)
if not(ss_expr.issubset(ss_allowed)):
raise Exception(
"""The following free symbols: {1} of the expression: {0}
are not arguments.
""".format(ss_expr, ss_expr.difference(ss_allowed))
)
expr_func = lambdify(tup, expr_par, modules=[cut_func_set, 'numpy'])
def expr_func_safe_0_over_0(*val):
with np.errstate(invalid='raise'):
try:
res = expr_func(*val)
except FloatingPointError as e:
if e.args[0] == 'invalid value encountered in double_scalars':
with np.errstate(invalid='ignore'):
res = expr_func(*val)
res = np.nan_to_num(res, copy=False)
return res
return expr_func_safe_0_over_0
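def _example_numerical_function():
    # Illustrative sketch (added for clarity, not part of the original API):
    # turn the symbolic expression x*y into a plain numeric function of x by
    # substituting the parameter y = 2 and passing an empty function set.
    from sympy import symbols
    x, y = symbols('x y')
    f = numerical_function_from_expression(x * y, (x,), {y: 2}, {})
    return f(3.0)  # -> 6.0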
def numerical_rhs(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_dict
):
FL = numerical_function_from_expression(
rhs,
(time_symbol,)+tuple(state_vector),
parameter_dict,
func_dict
)
# 2.) Write a wrapper that transforms Matrices to numpy.ndarrays and
# accepts an array instead of the separate arguments for the states
def num_rhs(t, X):
Y = np.array([x for x in X]) #
Fval = FL(t, *Y)
return Fval.reshape(X.shape,)
return num_rhs
def numerical_array_func(
state_vector,
time_symbol, # could also be the iteration symbol
expr,
parameter_dict,
func_dict
):
FL = numerical_function_from_expression(
expr,
(time_symbol,)+tuple(state_vector),
parameter_dict,
func_dict
)
# 2.) Write a wrapper that transforms Matrices to numpy.ndarrays and
# accepts an array instead of the separate arguments for the states
def num_arr_fun(t, X):
Y = np.array([x for x in X]) #
Fval = FL(t, *Y)
return Fval.reshape(expr.shape,)
return num_arr_fun
def numerical_rhs_old(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_set,
times
):
FL = numerical_function_from_expression(
rhs,
tuple(state_vector) + (time_symbol,),
parameter_dict,
func_set
)
# 2.) Write a wrapper that transforms Matrices to numpy.ndarrays and accepts
# an array instead of the separate arguments for the states
def num_rhs(X, t):
Fval = FL(*X, t)
return Fval.reshape(X.shape,)
def bounded_num_rhs(X, t):
# fixme 1:
# maybe odeint (or another integrator)
# can be told >>not<< to look outside
# the interval
# fixme 2:
# actually the times vector is not the smallest
# possible allowed set but the intersection of
# all the intervals where the
# time dependent functions are defined
# this should be tested in init
t_max = times[-1]
# fixme: we should die hard here, because now we think we can compute
# the state transition operator till any time in the future,
# but it is actually biased by the fact that we use the last value
# over and over again
# and hence assume some "constant" future
if t > t_max:
res = num_rhs(X, t_max)
else:
res = num_rhs(X, t)
# print('brhs', 't', t, 'X', X, 'res', res)
# print('t', t)
return res
return bounded_num_rhs
def numsol_symbolic_system_old(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_set,
start_values,
times
):
nr_pools = len(state_vector)
if times[0] == times[-1]:
return start_values.reshape((1, nr_pools))
num_rhs = numerical_rhs_old(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_set,
times
)
return odeint(num_rhs, start_values, times, mxstep=10000)
def numsol_symbolical_system(
state_vector,
time_symbol,
rhs,
parameter_dicts,
func_dicts,
start_values,
times,
disc_times=()
):
assert(isinstance(parameter_dicts, Iterable))
assert(isinstance(func_dicts, Iterable))
nr_pools = len(state_vector)
t_min = times[0]
t_max = times[-1]
if times[0] == times[-1]:
return start_values.reshape((1, nr_pools))
num_rhss = tuple(
numerical_rhs(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_dict
)
for parameter_dict, func_dict in zip(parameter_dicts, func_dicts)
)
res = solve_ivp_pwc(
rhss=num_rhss,
t_span=(t_min, t_max),
y0=start_values,
t_eval=tuple(times),
disc_times=disc_times
)
# adapt to the old ode_int interface
# since our code at the moment expects it
values = np.rollaxis(res.y, -1, 0)
return (values, res.sol)
def arrange_subplots(n):
if n <= 3:
rows = 1
cols = n
if n == 4:
rows = 2
cols = 2
if n >= 5:
rows = n // 3
if n % 3 != 0:
rows += 1
cols = 3
return (rows, cols)
def melt(ndarr, identifiers=None):
shape = ndarr.shape
if identifiers is None:
identifiers = [range(shape[dim]) for dim in range(len(shape))]
def rek(struct, ids, melted_list, dim):
if type(struct) != np.ndarray:
melted_list.append(ids + [struct])
else:
shape = struct.shape
for k in range(shape[0]):
rek(struct[k], ids + [identifiers[dim][k]], melted_list, dim+1)
melted_list = []
rek(ndarr, [], melted_list, 0)
rows = len(melted_list)
cols = len(melted_list[0])
melted = np.array(melted_list).reshape((rows, cols))
return melted
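def _example_melt():
    # Illustrative sketch (added for clarity, not part of the original API):
    # melt a 2x2 array into long format with one row per entry,
    # [row_index, column_index, value].
    arr = np.arange(4).reshape(2, 2)
    melted = melt(arr)
    # melted == [[0, 0, 0], [0, 1, 1], [1, 0, 2], [1, 1, 3]]
    return melted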
# fixme: test
# compute inverse of CDF at u for quantiles or generation of random variables
#def generalized_inverse_CDF(CDF, u, start_dist=1e-4, tol=1e-8):
def generalized_inverse_CDF(CDF, u, x1=0.0, tol=1e-8):
y1 = -1
def f(a):
# print("HR 398", x1, y1, u)
return u-CDF(a)
x0 = 0.0
y1 = f(x1)
if (y1 <= 0):
if x1 == 0.0:
# print("schon fertig", "\n"*200)
return x1
else:
x1 = 0.0
y1 = f(x1)
if y1 <= 0:
return x1
# move x1 to the right until CDF(x1) > u, then bisect (brentq) in the
# interval [x0, x1]
while y1 >= 0:
x0 = x1
x1 = x1*2 + 0.1
y1 = f(x1)
if np.isnan(y1):
res = np.nan
else:
res, root_results = brentq(f, x0, x1, xtol=tol, full_output=True)
if not root_results.converged:
print("quantile convegence failed")
# if f(res) > tol: res = np.nan
# print('gi_res', res)
# print('finished', method_f.__name__, 'on [0,', x1, ']')
return res
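def _example_generalized_inverse_CDF():
    # Illustrative sketch (added for clarity, not part of the original API):
    # invert the standard normal CDF numerically; the 97.5% quantile is
    # approximately 1.96.
    return generalized_inverse_CDF(norm.cdf, 0.975)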
# draw a random variable with given CDF
def draw_rv(CDF):
return generalized_inverse_CDF(CDF, np.random.uniform())
# return function g, such that g(normally distributed sv) is distributed
# according to CDF
def stochastic_collocation_transform(M, CDF):
# collocation points for normal distribution,
# taken from Table 10 in Appendix 3 of Grzelak2015SSRN
cc_data = {
2: [1],
3: [0.0, 1.7321],
4: [0.7420, 2.3344],
5: [0.0, 1.3556, 2.8570],
6: [0.6167, 1.8892, 3.3243],
7: [0.0, 1.1544, 2.3668, 3.7504],
8: [0.5391, 1.6365, 2.8025, 4.1445],
9: [0.0, 1.0233, 2.0768, 3.2054, 4.5127],
10: [0.4849, 1.4660, 2.8463, 3.5818, 4.8595], # noqa: E131
11: [0.0, 0.9289, 1.8760, 2.8651, 3.9362, 5.1880] # noqa: E131
}
if M not in cc_data.keys():
return None
cc_points = [-x for x in reversed(cc_data[M]) if x != 0.0] + cc_data[M]
cc_points = np.array(cc_points)
# print('start computing collocation transform')
ys = np.array(
[generalized_inverse_CDF(CDF, norm.cdf(x)) for x in cc_points]
)
# print('ys', ys)
# print('finished computing collocation transform')
return lagrange(cc_points, ys)
# Metropolis-Hastings sampling for PDFs with nonnegative support
# no thinning, no burn-in period
def MH_sampling(N, PDF, start=1.0):
xvec = np.ndarray((N,))
x = start
PDF_x = PDF(x)
norm_cdf_x = norm.cdf(x)
for i in range(N):
xs = -1.0
while xs <= 0:
xs = x + np.random.normal()
PDF_xs = PDF(xs)
A1 = PDF_xs/PDF_x
norm_cdf_xs = norm.cdf(xs)
A2 = norm_cdf_x/norm_cdf_xs
A = A1 * A2
if np.random.uniform() < A:
x = xs
PDF_x = PDF_xs
norm_cdf_x = norm_cdf_xs
xvec[i] = x
return xvec
def save_csv(filename, melted, header):
np.savetxt(
filename,
melted,
header=header,
delimiter=',',
fmt="%10.8f",
comments=''
)
def load_csv(filename):
return np.loadtxt(filename, skiprows=1, delimiter=',')
def tup2str(tup):
# used for stoichiometric models
string = Template("${f}_${s}").substitute(f=tup[0], s=tup[1])
return(string)
# use only every (k_1,k_2,...,k_n)th element of the n-dimensional numpy array
# data,
# strides is a list of k_j of length n
# always include the first and last elements
def stride(data, strides):
if isinstance(strides, int):
strides = [strides]
index_list = []
for dim in range(data.ndim):
n = data.shape[dim]
stride = strides[dim]
ind = np.arange(0, n, stride).tolist()
if (n-1) % stride != 0:
ind.append(n-1)
index_list.append(ind)
return data[np.ix_(*index_list)]
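def _example_stride():
    # Illustrative sketch (added for clarity, not part of the original API):
    # keep every 2nd row and every 3rd column of a 3x4 array; the first and
    # last index of each axis are always included.
    data = np.arange(12).reshape(3, 4)
    strided = stride(data, [2, 3])
    # strided == [[0, 3], [8, 11]]
    return strided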
def is_compartmental(M):
gen = range(M.shape[0])
return all(
[
M.is_square,
all([M[j, j] <= 0 for j in gen]),
all([sum(M[:, j]) <= 0 for j in gen])
]
)
def make_cut_func_set(func_set):
def unify_index(expr):
# for the case Function('f'):f_numeric
if isinstance(expr, UndefinedFunction):
res = str(expr)
# for the case {f(x, y): f_numeric} f(x, y)
elif isinstance(expr, Symbol):
res = str(expr)
elif isinstance(expr, Function):
res = str(type(expr))
elif isinstance(expr, str):
expr = sympify(expr)
res = unify_index(expr)
else:
print(type(expr))
raise(TypeError(
"""
funcset indices should be indexed by instances of
sympy.core.functions.UndefinedFunction
"""
))
return res
cut_func_set = {unify_index(key): val for key, val in func_set.items()}
return cut_func_set
def f_of_t_maker(sol_funcs, ol):
def ot(t):
sv = [sol_funcs[i](t) for i in range(len(sol_funcs))]
tup = tuple(sv) + (t,)
res = ol(*tup)
return res
return ot
def const_of_t_maker(const):
def const_arr_fun(possible_vec_arg):
if isinstance(possible_vec_arg, Number):
return const # also a number
else:
return const*np.ones_like(possible_vec_arg)
return const_arr_fun
def x_phi_ode(
srm,
parameter_dicts,
func_dicts,
x_block_name='x',
phi_block_name='phi',
disc_times=()
):
nr_pools = srm.nr_pools
sol_rhss = []
for pd, fd in zip(parameter_dicts, func_dicts):
sol_rhs = numerical_rhs(
srm.state_vector,
srm.time_symbol,
srm.F,
pd,
fd
)
sol_rhss.append(sol_rhs)
B_sym = srm.compartmental_matrix
tup = (srm.time_symbol,) + tuple(srm.state_vector)
B_funcs_non_lin = []
for pd, fd in zip(parameter_dicts, func_dicts):
B_func_non_lin = numerical_function_from_expression(
B_sym,
tup,
pd,
fd
)
B_funcs_non_lin.append(B_func_non_lin)
def Phi_rhs_maker(B_func_non_lin):
def Phi_rhs(t, x, Phi_2d):
return np.matmul(B_func_non_lin(t, *x), Phi_2d)
return Phi_rhs
Phi_rhss = []
for B_func_non_lin in B_funcs_non_lin:
Phi_rhss.append(Phi_rhs_maker(B_func_non_lin))
functionss = []
for sol_rhs, Phi_rhs in zip(sol_rhss, Phi_rhss):
functions = [
(sol_rhs, [srm.time_symbol.name, x_block_name]),
(Phi_rhs, [srm.time_symbol.name, x_block_name, phi_block_name])
]
functionss.append(functions)
return BlockOde(
time_str=srm.time_symbol.name,
block_names_and_shapes=[
(x_block_name, (nr_pools,)),
(phi_block_name, (nr_pools, nr_pools,))
],
functionss=functionss,
disc_times=disc_times
)
def integrate_array_func_for_nested_boundaries(
f: Callable[[float], np.ndarray],
integrator: Callable[
[
Callable[[float], np.ndarray],
float,
float
],
np.ndarray
], # e.g. array_quad_result
tuples: Sequence[Tuple[float, float]]
) -> Sequence[float]:
# we assume that the first (a,b) tuple contains the second,
# the second the third and so on from outside to inside
def compute(f, tuples, results: Sequence[float]):
(a_out, b_out), *tail = tuples
if len(tail) == 0:
# number=quad(f, a_out, b_out)[0]
arr = integrator(f, a_out, b_out)
else:
(a_in, b_in) = tail[0]
results = compute(f, tail, results)
arr = (
integrator(f, a_out, a_in)
+ results[0]
+ integrator(f, b_in, b_out)
)
results = [arr] + results
return results
return compute(f, tuples, [])
def array_quad_result(
f: Callable[[float], np.ndarray],
a: float,
b: float,
epsrel=1e-3, # epsabs would be a dangerous default
*args,
**kwargs
) -> np.ndarray:
# integrates a vector-valued function of a single argument:
# we flatten the result array of the function into a one-dimensional
# vector, compute the integral for every component,
# and reshape the result to the shape of the integrand
test = f(a)
n = len(test.flatten())
vec = np.array(
[quad(
lambda t:f(t).reshape(n,)[i],
a,
b,
*args,
epsrel=epsrel,
**kwargs
)[0] for i in range(n)]
)
return vec.reshape(test.shape)
def array_integration_by_ode(
f: Callable[[float], np.ndarray],
a: float,
b: float,
*args,
**kwargs
) -> np.ndarray:
# another integrator like array_quad_result
test = f(a)
n = len(test.flatten())
def rhs(tau, X):
# although we do not need X we have to provide a
# right-hand side suitable for solve_ivp;
# avoid overshooting if the solver
# tries to look where the integrand might not be defined
if tau < a or tau > b:
return 0
else:
return f(tau).flatten()
ys = solve_ivp_pwc(
rhss=(rhs,),
y0=np.zeros(n),
t_span=(a, b)
).y
val = ys[:, -1].reshape(test.shape)
return val
def array_integration_by_values(
f: Callable[[float], np.ndarray],
taus: np.ndarray,
*args,
**kwargs,
) -> np.ndarray:
# only allow taus as vector
assert(len(taus.shape) == 1)
assert(len(taus) > 0)
test = f(taus[0])
# create a big 2 dimensional array suitable for trapz
integrand_vals = np.stack([f(tau).flatten() for tau in taus], 1)
vec = np.trapz(y=integrand_vals, x=taus)
return vec.reshape(test.shape)
def x_phi_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name):
x_s = np.array(x_s)
nr_pools = len(x_s)
start_Phi_2d = np.identity(nr_pools)
start_blocks = [
(x_block_name, x_s),
(phi_block_name, start_Phi_2d)
]
blivp = block_ode.blockIvp(start_blocks)
return blivp
def phi_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name):
blivp = x_phi_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name)
phi_func = blivp.block_solve_functions(t_span=(s, t_max))[phi_block_name]
return phi_func
@lru_cache()
def x_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name):
blivp = x_phi_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name)
x_func = blivp.block_solve_functions(t_span=(s, t_max))[x_block_name]
return x_func
_CacheStats = namedtuple(
'CacheStats',
['hitss', 'missess', 'currsizes', 'hitting_ratios']
)
def custom_lru_cache_wrapper(maxsize=None, typed=False, stats=False):
if stats:
hitss = []
missess = []
currsizes = []
hitting_ratios = []
def decorating_function(user_function):
func = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
def wrapper(*args, **kwargs):
nonlocal stats, hitss, missess, currsizes, hitting_ratios
result = func(*args, **kwargs)
if stats:
hitss.append(func.cache_info().hits)
missess.append(func.cache_info().misses)
currsizes.append(func.cache_info().currsize)
hitting_ratios.append(
round(hitss[-1]/(hitss[-1]+missess[-1])*100.0, 2)
)
return result
wrapper.cache_info = func.cache_info
if stats:
def cache_stats():
nonlocal hitss, missess, currsizes
return _CacheStats(hitss, missess, currsizes, hitting_ratios)
wrapper.cache_stats = cache_stats
def plot_hitss():
nonlocal hitss
plt.plot(range(len(hitss)), hitss)
plt.title('Hitss')
plt.show()
wrapper.plot_hitss = plot_hitss
def plot_hit_history():
nonlocal hitss
plt.scatter(
range(len(hitss)-1),
np.diff(hitss),
s=1,
alpha=0.2
)
plt.title('Hit history')
plt.show()
wrapper.plot_hit_history = plot_hit_history
def plot_hitting_ratios():
nonlocal hitss, hitting_ratios
plt.plot(
range(len(hitss)),
hitting_ratios
)
plt.title('Hitting ratios')
plt.show()
wrapper.plot_hitting_ratios = plot_hitting_ratios
def plot_currsizes():
nonlocal currsizes
plt.plot(
range(len(currsizes)),
currsizes
)
plt.title('Currsizes')
plt.show()
wrapper.plot_currsizes = plot_currsizes
def plot_hitting_ratios_over_currsizes():
nonlocal hitting_ratios, currsizes
plt.plot(
range(len(hitting_ratios)),
[hitting_ratios[i]/currsizes[i]
for i in range(len(hitting_ratios))]
)
plt.title('Hitting ratios over currsizes')
plt.show()
wrapper.plot_hitting_ratios_over_currsizes =\
plot_hitting_ratios_over_currsizes
def plot_hitting_ratios_vs_currsizes():
nonlocal hitting_ratios, currsizes
plt.plot(
currsizes,
hitting_ratios
)
plt.title('Hitting ratios vs currsizes')
plt.show()
wrapper.plot_hitting_ratios_vs_currsizes =\
plot_hitting_ratios_vs_currsizes
def cache_clear():
nonlocal hitss, missess, currsizes
hitss = []
missess = []
currsizes = []
func.cache_clear()
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
def print_quantile_error_statisctics(qs_ode, qs_pi):
print('ODE :', ['{: 7.2f}'.format(v) for v in qs_ode])
print('Expl. :', ['{: 7.2f}'.format(v) for v in qs_pi])
abs_err = np.abs(qs_ode-qs_pi)
print('abs. err. :', ['{: 7.2f}'.format(v) for v in abs_err])
rel_err = np.abs(qs_ode-qs_pi)/qs_pi * 100
print('rel. err. (%):', ['{: 7.2f}'.format(v) for v in rel_err])
print()
print('mean abs. err :', '{: 6.2f}'.format(abs_err.mean()))
print('mean rel. err (%):', '{: 6.2f}'.format(rel_err.mean()))
print('max. abs. err :', '{: 6.2f}'.format(np.max(abs_err)))
print('max. rel. err (%):', '{: 6.2f}'.format(np.max(rel_err)))
print()
def net_Fs_from_discrete_Bs_and_xs(Bs, xs):
nr_pools = xs.shape[1]
nt = len(Bs)
net_Fs = np.zeros((nt, nr_pools, nr_pools))
for k in range(nt):
for j in range(nr_pools):
for i in range(nr_pools):
if i != j:
net_Fs[k, i, j] = Bs[k, i, j] * xs[k, j]
return net_Fs
def net_Rs_from_discrete_Bs_and_xs(Bs, xs, decay_corr=None):
nr_pools = xs.shape[1]
nt = len(Bs)
if decay_corr is None:
decay_corr = np.ones((nt,))
net_Rs = np.zeros((nt, nr_pools))
for k in range(nt):
for j in range(nr_pools):
net_Rs[k, j] = (1-sum(Bs[k, :, j])*decay_corr[k]) * xs[k, j]
return net_Rs
def net_Us_from_discrete_Bs_and_xs(Bs, xs):
nr_pools = xs.shape[1]
nt = len(Bs)
net_Us = np.zeros((nt, nr_pools))
for k in range(nt):
net_Us[k] = xs[k+1] - Bs[k] @ xs[k]
return net_Us
def check_parameter_dict_complete(model, parameter_dict, func_set):
"""Check if the parameter set the function set are complete
to enable a model run.
Args:
model (:class:`~.smooth_reservoir_model.SmoothReservoirModel`):
The reservoir model on which the model run bases.
parameter_dict (dict): ``{x: y}`` with ``x`` being a SymPy symbol
and ``y`` being a numerical value.
func_set (dict): ``{f: func}`` with ``f`` being a SymPy symbol and
``func`` being a Python function. Defaults to ``dict()``.
Returns:
free_symbols (set): set of free symbols, parameter_dict is complete if
``free_symbols`` is the empty set
"""
free_symbols = model.F.subs(parameter_dict).free_symbols
# print('fs', free_symbols)
free_symbols -= {model.time_symbol}
# print(free_symbols)
free_symbols -= set(model.state_vector)
# print(free_symbols)
# remove function names, which are given as strings
free_names = set([symbol.name for symbol in free_symbols])
func_names = set([key for key in func_set.keys()])
free_names = free_names - func_names
return free_names
def F_Delta_14C(C12, C14, alpha=None):
if alpha is None:
alpha = ALPHA_14C
C12[C12 == 0] = np.nan
return (C14/C12/alpha - 1) * 1000
def densities_to_distributions(
densities: Callable[[float],np.ndarray],
nr_pools: int
)->Callable[[float],np.ndarray]:
def distributions(A: float) -> np.ndarray:
return np.array(
[
quad(
lambda a:densities(a)[i],
-np.inf,
A
)[0]
for i in range(nr_pools)
]
)
return distributions
def pool_wise_bin_densities_from_smooth_densities_and_index(
densities: Callable[[float],np.ndarray],
nr_pools: int,
dt: float,
)->Callable[[int],np.ndarray]:
def content(ai:int)->np.ndarray:
da = dt
return np.array(
[
quad(
lambda a:densities(a)[i],
ai*da,
(ai+1)*da
)[0] / da
for i in range(nr_pools)
]
)
return content
def pool_wise_bin_densities_from_smooth_densities_and_indices(
densities: Callable[[float],np.ndarray],
nr_pools: int,
dt: float,
)->Callable[[np.ndarray],np.ndarray]:
bin_density = pool_wise_bin_densities_from_smooth_densities_and_index(
densities,
nr_pools,
dt
)
# vectorize it
def bin_densities(age_bin_indices: np.ndarray)->np.ndarray:
return np.stack(
[
bin_density(ai)
for ai in age_bin_indices
],
axis=1
)
return bin_densities
def negative_indicies_to_zero(
f: Callable[[np.ndarray],np.ndarray]
)->Callable[[np.ndarray],np.ndarray]:
def wrapper(age_bin_indices):
arr_true = f(age_bin_indices)
nr_pools = arr_true.shape[0]
return np.stack(
[
np.where(
age_bin_indices >=0,
arr_true[ip,:],
0
)
for ip in range(nr_pools)
]
)
return wrapper
# make sure that the start age distribution
# yields zero for negative ages or indices
def p0_maker(
start_age_densities_of_bin: Callable[[int],np.ndarray],
):
def p0(ai):
res = start_age_densities_of_bin(ai)
if ai >= 0:
return res
else:
return np.zeros_like(res)
return p0
def discrete_time_dict(
cont_time: Symbol,
delta_t: Symbol,
iteration: Symbol
)->Dict:
return {cont_time: delta_t*iteration}
def euler_forward_B_sym(
B_sym_cont: Expr,
cont_time: Symbol,
delta_t: Symbol,
iteration: Symbol
)-> Expr:
B_sym_discrete = B_sym_cont.subs(
discrete_time_dict(
cont_time,
delta_t,
iteration
)
)
return (B_sym_discrete * delta_t)
def euler_forward_net_flux_sym(
flux_sym_cont: Expr,
cont_time: Symbol,
delta_t: Symbol,
iteration: Symbol
)-> Expr:
flux_sym_discrete = flux_sym_cont.subs(
discrete_time_dict(
cont_time,
delta_t,
iteration
)
)
return flux_sym_discrete * delta_t
# fixme mm 2-11-2022
# this function is identical to euler_forward_net_flux_sym and should
# be replaced wherever it is called
def euler_forward_net_u_sym(
u_sym_cont: Expr,
cont_time: Symbol,
delta_t: Symbol,
iteration: Symbol
)-> Expr:
u_sym_discrete = u_sym_cont.subs(
discrete_time_dict(
cont_time,
delta_t,
iteration
)
)
return u_sym_discrete * delta_t
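def _example_euler_forward_discretisation():
    # Illustrative sketch (added for clarity, not part of the original API):
    # discretise the time-dependent flux k*x*t with the forward Euler rule;
    # the continuous time t is replaced by delta_t*it and the result is scaled
    # by the step size delta_t. All symbols are arbitrary example names.
    from sympy import symbols
    x, k, t, delta_t, it = symbols('x k t delta_t it')
    flux_cont = k * x * t
    flux_disc = euler_forward_net_flux_sym(flux_cont, t, delta_t, it)
    # flux_disc == delta_t**2 * it * k * x
    return flux_disc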
|
|
from pybrain.datasets import ClassificationDataSet, UnsupervisedDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from tr_utils import append_to_arr
from train_files import TrainFiles
import numpy as np
import time
from SupervisedLearning import SKSupervisedLearning
from sklearn.metrics import log_loss
from sklearn.svm import SVC
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
from sklearn.decomposition.pca import PCA
def _createDataSet(X, Y, one_based):
labels = np.unique(Y)
alldata = ClassificationDataSet(X.shape[1], nb_classes = labels.shape[0], class_labels = labels)
shift = 1 if one_based else 0
for i in range(X.shape[0]):
alldata.addSample(X[i], Y[i] - shift)
alldata._convertToOneOfMany()
return alldata
def _createUnsupervisedDataSet(X):
alldata = UnsupervisedDataSet(X.shape[1])
for i in X:
alldata.addSample(i)
return alldata
def createDataSets(X_train, Y_train, X_test, Y_test, one_based = True):
"""
Creates the data set. Handles one-based classifications (PyBrain uses zero-based ones).
"""
trndata = _createDataSet(X_train, Y_train, one_based)
tstdata = _createDataSet(X_test, Y_test, one_based)
return trndata, tstdata
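def _example_datasets():
    # Illustrative sketch (added for clarity, not part of the original script;
    # running it assumes the PyBrain / Python 2 environment this file targets):
    # build train/test ClassificationDataSets from small random feature arrays
    # with one-based labels.
    X = np.random.rand(10, 4)
    Y = np.array([1, 2] * 5)  # one-based labels, both classes in each split
    return createDataSets(X[:8], Y[:8], X[8:], Y[8:])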
def nn_log_loss(fnn, data):
proba = fnn.activateOnDataset(data)
return log_loss(data['target'], proba)
def train(trndata, tstdata, epochs = 100, test_error = 0.2, weight_decay = 0.0001, momentum = 0.5):
"""
FF neural net
"""
fnn = buildNetwork(trndata.indim, trndata.indim / 4, trndata.outdim, outclass = SoftmaxLayer)
trainer = BackpropTrainer(fnn, trndata, momentum = momentum, weightdecay = weight_decay)
epoch_delta = 1
stop = False
trnResults = np.array([])
tstResults = np.array([])
totEpochs = np.array([])
trnLogLoss = np.array([])
tstLogLoss = np.array([])
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,8))
#hold(True) # overplot on
#plt.ion()
while not stop:
trainer.trainEpochs(epoch_delta)
trnresult = percentError( trainer.testOnClassData(),
trndata['class'] )
tstresult = percentError( trainer.testOnClassData(
dataset=tstdata ), tstdata['class'] )
tstLogLoss = append_to_arr(tstLogLoss, nn_log_loss(fnn, tstdata))
trnLogLoss = append_to_arr(trnLogLoss, nn_log_loss(fnn, trndata))
print "epoch: %4d" % trainer.totalepochs, \
" train error: %5.2f%%" % trnresult, \
" test error: %5.2f%%" % tstresult, \
" test logloss: %2.4f" % tstLogLoss[-1], \
" train logloss: %2.4f" % trnLogLoss[-1]
trnResults = append_to_arr(trnResults, trnresult)
tstResults = append_to_arr(tstResults, tstresult)
totEpochs = append_to_arr(totEpochs, trainer.totalepochs)
plt.sca(ax1)
plt.cla()
ax1.plot(totEpochs, trnResults, label = 'Train')
ax1.plot(totEpochs, tstResults, label = 'Test')
plt.sca(ax2)
plt.cla()
ax2.plot(totEpochs, trnLogLoss, label = 'Train')
ax2.plot(totEpochs, tstLogLoss, label = 'Test')
ax1.legend()
ax2.legend()
plt.draw()
time.sleep(0.1)
plt.pause(0.0001)
stop = (tstLogLoss[-1] <= test_error or trainer.totalepochs >= epochs)
return fnn
def predict_nn(trndata, epochs = 300, test_error = 0.0147, weight_decay = 0.0001, momentum = 0.15):
"""
FF neural net
"""
fnn = buildNetwork(trndata.indim, trndata.indim / 4, trndata.outdim, outclass = SoftmaxLayer)
trainer = BackpropTrainer(fnn, trndata, momentum = momentum, weightdecay = weight_decay)
epoch_delta = 1
stop = False
totEpochs = np.array([])
trnResults = np.array([])
trnLogLoss = np.array([])
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,8))
while not stop:
trainer.trainEpochs(epoch_delta)
trnresult = percentError( trainer.testOnClassData(),
trndata['class'] )
trnLogLoss = append_to_arr(trnLogLoss, nn_log_loss(fnn, trndata))
print "epoch: %4d" % trainer.totalepochs, \
" train error: %5.2f%%" % trnresult, \
" train logloss: %2.4f" % trnLogLoss[-1]
trnResults = append_to_arr(trnResults, trnresult)
totEpochs = append_to_arr(totEpochs, trainer.totalepochs)
plt.sca(ax1)
plt.cla()
ax1.plot(totEpochs, trnResults, label = 'Train')
plt.sca(ax2)
plt.cla()
ax2.plot(totEpochs, trnLogLoss, label = 'Train')
ax1.legend()
ax2.legend()
plt.draw()
time.sleep(0.1)
plt.pause(0.0001)
stop = (trnLogLoss[-1] <= test_error or trainer.totalepochs >= epochs)
return fnn
train_path_mix = "/kaggle/malware/train/mix_lbp"
train_path_freq = "/kaggle/malware/train/instr_freq"
labels_file = "/kaggle/malware/trainLabels.csv"
csv_file = "/kaggle/malware/mix_lbp.csv"
def do_train():
X, Y, Xt, Yt = TrainFiles.from_csv(csv_file)
sl = SKSupervisedLearning(SVC, X, Y, Xt, Yt)
sl.fit_standard_scaler()
#pca = PCA(250)
#pca.fit(np.r_[sl.X_train_scaled, sl.X_test_scaled])
#X_pca = pca.transform(sl.X_train_scaled)
#X_pca_test = pca.transform(sl.X_test_scaled)
##construct a dataset for RBM
#X_rbm = X[:, 257:]
#Xt_rbm = X[:, 257:]
#rng = np.random.RandomState(123)
#rbm = RBM(X_rbm, n_visible=X_rbm.shape[1], n_hidden=X_rbm.shape[1]/4, numpy_rng=rng)
#pretrain_lr = 0.1
#k = 2
#pretraining_epochs = 200
#for epoch in xrange(pretraining_epochs):
# rbm.contrastive_divergence(lr=pretrain_lr, k=k)
# cost = rbm.get_reconstruction_cross_entropy()
# print >> sys.stderr, 'Training epoch %d, cost is ' % epoch, cost
trndata, tstdata = createDataSets(X, Y, Xt, Yt)
fnn = train(trndata, tstdata, epochs = 1000, test_error = 0.025, momentum = 0.15, weight_decay = 0.0001)
def do_predict(X_train, Y_train, X_test):
trndata = _createDataSet(X_train, Y_train, one_based = True)
tstdata = _createUnsupervisedDataSet(X_test)
fnn = predict_nn(trndata)
proba = fnn.activateOnDataset(tstdata)
def do_train_with_freq():
tf_mix = TrainFiles(train_path = train_path_mix, labels_file = labels_file, test_size = 0.)
tf_freq = TrainFiles(train_path = train_path_freq, labels_file = labels_file, test_size = 0.)
X_m, Y_m, _, _ = tf_mix.prepare_inputs()
X_f, Y_f, _, _ = tf_freq.prepare_inputs()
X = np.c_[X_m, X_f]
Y = Y_f
X, Xt, Y, Yt = train_test_split(X, Y, test_size = 0.1)
sl = SKSupervisedLearning(SVC, X, Y, Xt, Yt)
sl.fit_standard_scaler()
pca = PCA(250)
pca.fit(np.r_[sl.X_train_scaled, sl.X_test_scaled])
X_pca = pca.transform(sl.X_train_scaled)
X_pca_test = pca.transform(sl.X_test_scaled)
#sl.train_params = {'C': 100, 'gamma': 0.0001, 'probability' : True}
#print "Start SVM: ", time_now_str()
#sl_ll_trn, sl_ll_tst = sl.fit_and_validate()
#print "Finish Svm: ", time_now_str()
##construct a dataset for RBM
#X_rbm = X[:, 257:]
#Xt_rbm = X[:, 257:]
#rng = np.random.RandomState(123)
#rbm = RBM(X_rbm, n_visible=X_rbm.shape[1], n_hidden=X_rbm.shape[1]/4, numpy_rng=rng)
#pretrain_lr = 0.1
#k = 2
#pretraining_epochs = 200
#for epoch in xrange(pretraining_epochs):
# rbm.contrastive_divergence(lr=pretrain_lr, k=k)
# cost = rbm.get_reconstruction_cross_entropy()
# print >> sys.stderr, 'Training epoch %d, cost is ' % epoch, cost
trndata, tstdata = createDataSets(X_pca, Y, X_pca_test, Yt)
fnn = train(trndata, tstdata, epochs = 1000, test_error = 0.025, momentum = 0.2, weight_decay = 0.0001)
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
import six
from orquesta import statuses as wf_statuses
import st2tests
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
from tests.unit import base
from local_runner import local_shell_command_runner
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as ac_const
from st2common.models.db import liveaction as lv_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import liveaction as lv_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.runners import utils as runners_utils
from st2common.services import action as ac_svc
from st2common.services import workflows as wf_svc
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import workflow as wf_ex_xport
from st2common.transport import publishers
from st2tests.mocks import liveaction as mock_lv_ac_xport
from st2tests.mocks import workflow as mock_wf_ex_xport
from st2common.models.db.workflow import WorkflowExecutionDB
from st2common.models.db.workflow import TaskExecutionDB
from st2common.models.db.execution_queue import ActionExecutionSchedulingQueueItemDB
TEST_PACK = "orquesta_tests"
TEST_PACK_PATH = (
st2tests.fixturesloader.get_fixtures_packs_base_path() + "/" + TEST_PACK
)
PACKS = [
TEST_PACK_PATH,
st2tests.fixturesloader.get_fixtures_packs_base_path() + "/core",
]
RUNNER_RESULT_FAILED = (
ac_const.LIVEACTION_STATUS_FAILED,
{"127.0.0.1": {"hostname": "foobar"}},
{},
)
@mock.patch.object(
publishers.CUDPublisher, "publish_update", mock.MagicMock(return_value=None)
)
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
"publish_create",
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create),
)
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
"publish_state",
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state),
)
@mock.patch.object(
wf_ex_xport.WorkflowExecutionPublisher,
"publish_create",
mock.MagicMock(
side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_create
),
)
@mock.patch.object(
wf_ex_xport.WorkflowExecutionPublisher,
"publish_state",
mock.MagicMock(
side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_state
),
)
class OrquestaErrorHandlingTest(st2tests.WorkflowTestCase):
ensure_indexes = True
ensure_indexes_models = [
WorkflowExecutionDB,
TaskExecutionDB,
ActionExecutionSchedulingQueueItemDB,
]
@classmethod
def setUpClass(cls):
super(OrquestaErrorHandlingTest, cls).setUpClass()
# Register runners.
runnersregistrar.register_runners()
# Register test pack(s).
actions_registrar = actionsregistrar.ActionsRegistrar(
use_pack_cache=False, fail_on_failure=True
)
for pack in PACKS:
actions_registrar.register_from_pack(pack)
def test_fail_inspection(self):
expected_errors = [
{
"type": "content",
"message": 'The action "std.noop" is not registered in the database.',
"schema_path": r"properties.tasks.patternProperties.^\w+$.properties.action",
"spec_path": "tasks.task3.action",
},
{
"type": "context",
"language": "yaql",
"expression": "<% ctx().foobar %>",
"message": 'Variable "foobar" is referenced before assignment.',
"schema_path": r"properties.tasks.patternProperties.^\w+$.properties.input",
"spec_path": "tasks.task1.input",
},
{
"type": "expression",
"language": "yaql",
"expression": "<% <% succeeded() %>",
"message": (
"Parse error: unexpected '<' at "
"position 0 of expression '<% succeeded()'"
),
"schema_path": (
r"properties.tasks.patternProperties.^\w+$."
"properties.next.items.properties.when"
),
"spec_path": "tasks.task2.next[0].when",
},
{
"type": "syntax",
"message": (
"[{'cmd': 'echo <% ctx().macro %>'}] is "
"not valid under any of the given schemas"
),
"schema_path": r"properties.tasks.patternProperties.^\w+$.properties.input.oneOf",
"spec_path": "tasks.task2.input",
},
]
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "fail-inspection.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertIn("errors", lv_ac_db.result)
self.assertListEqual(lv_ac_db.result["errors"], expected_errors)
def test_fail_input_rendering(self):
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to evaluate expression "
"'<% abs(4).value %>'. NoFunctionRegisteredException: "
'Unknown function "#property#value"'
),
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-input-rendering.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert action execution for task is not started and workflow failed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk_ex_dbs = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)
self.assertEqual(len(tk_ex_dbs), 0)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_vars_rendering(self):
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to evaluate expression "
"'<% abs(4).value %>'. NoFunctionRegisteredException: "
'Unknown function "#property#value"'
),
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-vars-rendering.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert action execution for task is not started and workflow failed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk_ex_dbs = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)
self.assertEqual(len(tk_ex_dbs), 0)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_start_task_action(self):
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to evaluate expression "
"'<% ctx().func.value %>'. NoFunctionRegisteredException: "
'Unknown function "#property#value"'
),
"task_id": "task1",
"route": 0,
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-start-task-action.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert action execution for task is not started and workflow failed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk_ex_dbs = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)
self.assertEqual(len(tk_ex_dbs), 0)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_start_task_input_expr_eval(self):
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to evaluate expression "
"'<% ctx().msg1.value %>'. NoFunctionRegisteredException: "
'Unknown function "#property#value"'
),
"task_id": "task1",
"route": 0,
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_file = "fail-start-task-input-expr-eval.yaml"
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_file)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert action execution for task is not started and workflow failed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk_ex_dbs = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)
self.assertEqual(len(tk_ex_dbs), 0)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_start_task_input_value_type(self):
if six.PY3:
msg = "Value \"{'x': 'foobar'}\" must either be a string or None. Got \"dict\"."
else:
msg = "Value \"{u'x': u'foobar'}\" must either be a string or None. Got \"dict\"."
msg = "ValueError: " + msg
expected_errors = [
{"type": "error", "message": msg, "task_id": "task1", "route": 0}
]
expected_result = {"output": None, "errors": expected_errors}
wf_file = "fail-start-task-input-value-type.yaml"
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_file)
wf_input = {"var1": {"x": "foobar"}}
lv_ac_db = lv_db_models.LiveActionDB(
action=wf_meta["name"], parameters=wf_input
)
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert workflow and task executions failed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
tk_ex_db = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)[0]
self.assertEqual(tk_ex_db.status, wf_statuses.FAILED)
self.assertDictEqual(tk_ex_db.result, {"errors": expected_errors})
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_next_task_action(self):
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to evaluate expression "
"'<% ctx().func.value %>'. NoFunctionRegisteredException: "
'Unknown function "#property#value"'
),
"task_id": "task2",
"route": 0,
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "fail-task-action.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert task1 is already completed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk_ex_db = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)[0]
tk_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk_ex_db.id)
)[0]
tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_db.liveaction["id"])
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
        # Manually handle action execution completion for task1. The failure surfaces when task2's action is rendered.
wf_svc.handle_action_execution_completion(tk_ac_ex_db)
# Assert task1 succeeded but workflow failed.
tk_ex_db = wf_db_access.TaskExecution.get_by_id(tk_ex_db.id)
self.assertEqual(tk_ex_db.status, wf_statuses.SUCCEEDED)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_next_task_input_expr_eval(self):
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to evaluate expression "
"'<% ctx().msg2.value %>'. NoFunctionRegisteredException: "
'Unknown function "#property#value"'
),
"task_id": "task2",
"route": 0,
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-task-input-expr-eval.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert task1 is already completed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk_ex_db = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)[0]
tk_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk_ex_db.id)
)[0]
tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_db.liveaction["id"])
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
        # Manually handle action execution completion for task1. The failure surfaces when task2's input is rendered.
wf_svc.handle_action_execution_completion(tk_ac_ex_db)
# Assert task1 succeeded but workflow failed.
tk_ex_db = wf_db_access.TaskExecution.get_by_id(tk_ex_db.id)
self.assertEqual(tk_ex_db.status, wf_statuses.SUCCEEDED)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_next_task_input_value_type(self):
if six.PY3:
msg = "Value \"{'x': 'foobar'}\" must either be a string or None. Got \"dict\"."
else:
msg = "Value \"{u'x': u'foobar'}\" must either be a string or None. Got \"dict\"."
msg = "ValueError: " + msg
expected_errors = [
{"type": "error", "message": msg, "task_id": "task2", "route": 0}
]
expected_result = {"output": None, "errors": expected_errors}
wf_file = "fail-task-input-value-type.yaml"
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_file)
wf_input = {"var1": {"x": "foobar"}}
lv_ac_db = lv_db_models.LiveActionDB(
action=wf_meta["name"], parameters=wf_input
)
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert task1 is already completed and workflow execution is still running.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk1_ex_db = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)[0]
tk1_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk1_ex_db.id)
)[0]
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction["id"])
self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
        # Manually handle action execution completion for task1. The failure surfaces when task2's input is rendered.
wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
# Assert workflow execution and task2 execution failed.
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(wf_ex_db.id))
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
tk2_ex_db = wf_db_access.TaskExecution.query(task_id="task2")[0]
self.assertEqual(tk2_ex_db.status, wf_statuses.FAILED)
self.assertDictEqual(tk2_ex_db.result, {"errors": expected_errors})
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_task_execution(self):
expected_errors = [
{
"type": "error",
"message": "Execution failed. See result for details.",
"task_id": "task1",
"result": {
"stdout": "",
"stderr": "boom!",
"return_code": 1,
"failed": True,
"succeeded": False,
},
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-task-execution.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Process task1.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk1_ex_db = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)[0]
tk1_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk1_ex_db.id)
)[0]
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction["id"])
self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
# Assert workflow state and result.
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(wf_ex_db.id))
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_task_transition(self):
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to resolve key 'foobar' in expression "
"'<% succeeded() and result().foobar %>' from context."
),
"task_transition_id": "task2__t0",
"task_id": "task1",
"route": 0,
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-task-transition.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert task1 is already completed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk_ex_db = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)[0]
tk_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk_ex_db.id)
)[0]
tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_db.liveaction["id"])
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
        # Manually handle action execution completion for task1, which has an error in the task transition.
wf_svc.handle_action_execution_completion(tk_ac_ex_db)
# Assert task1 succeeded but workflow failed.
tk_ex_db = wf_db_access.TaskExecution.get_by_id(tk_ex_db.id)
self.assertEqual(tk_ex_db.status, wf_statuses.SUCCEEDED)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_task_publish(self):
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to evaluate expression "
"'<% foobar() %>'. NoFunctionRegisteredException: "
'Unknown function "foobar"'
),
"task_transition_id": "task2__t0",
"task_id": "task1",
"route": 0,
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-task-publish.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert task1 is already completed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk_ex_db = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)[0]
tk_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk_ex_db.id)
)[0]
tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_db.liveaction["id"])
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
# Manually handle action execution completion for task1 which has an error in publish.
wf_svc.handle_action_execution_completion(tk_ac_ex_db)
# Assert task1 succeeded but workflow failed.
tk_ex_db = wf_db_access.TaskExecution.get_by_id(tk_ex_db.id)
self.assertEqual(tk_ex_db.status, wf_statuses.SUCCEEDED)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_output_rendering(self):
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to evaluate expression "
"'<% abs(4).value %>'. NoFunctionRegisteredException: "
'Unknown function "#property#value"'
),
}
]
expected_result = {"output": None, "errors": expected_errors}
wf_meta = base.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-output-rendering.yaml"
)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert task1 is already completed.
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
tk_ex_db = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)[0]
tk_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk_ex_db.id)
)[0]
tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_db.liveaction["id"])
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
        # Manually handle action execution completion for task1. The failure surfaces when the workflow output is rendered.
wf_svc.handle_action_execution_completion(tk_ac_ex_db)
# Assert task1 succeeded but workflow failed.
tk_ex_db = wf_db_access.TaskExecution.get_by_id(tk_ex_db.id)
self.assertEqual(tk_ex_db.status, wf_statuses.SUCCEEDED)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_output_on_error(self):
expected_output = {"progress": 25}
expected_errors = [
{
"type": "error",
"task_id": "task2",
"message": "Execution failed. See result for details.",
"result": {
"failed": True,
"return_code": 1,
"stderr": "",
"stdout": "",
"succeeded": False,
},
}
]
expected_result = {"errors": expected_errors, "output": expected_output}
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "output-on-error.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
# Assert task1 is already completed and workflow execution is still running.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task1"}
tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk1_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk1_ex_db.id)
)[0]
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction["id"])
self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
# Assert task2 is already completed and workflow execution has failed.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task2"}
tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk2_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk2_ex_db.id)
)[0]
tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_db.liveaction["id"])
self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
wf_svc.handle_action_execution_completion(tk2_ac_ex_db)
# Check output and result for expected value(s).
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
self.assertDictEqual(wf_ex_db.output, expected_output)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(lv_ac_db.result, expected_result)
ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ac_ex_db.result, expected_result)
def test_fail_manually(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "fail-manually.yaml")
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
# Assert task1 and workflow execution failed due to fail in the task transition.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task1"}
tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk1_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk1_ex_db.id)
)[0]
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction["id"])
self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
# Assert log task is scheduled even though the workflow execution failed manually.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "log"}
tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk2_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk2_ex_db.id)
)[0]
tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_db.liveaction["id"])
self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
wf_svc.handle_action_execution_completion(tk2_ac_ex_db)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
# Check errors and output.
expected_errors = [
{
"task_id": "fail",
"type": "error",
"message": "Execution failed. See result for details.",
},
{
"task_id": "task1",
"type": "error",
"message": "Execution failed. See result for details.",
"result": {
"failed": True,
"return_code": 1,
"stderr": "",
"stdout": "",
"succeeded": False,
},
},
]
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
def test_fail_manually_with_recovery_failure(self):
wf_file = "fail-manually-with-recovery-failure.yaml"
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_file)
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
wf_ex_db = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)[0]
# Assert task1 and workflow execution failed due to fail in the task transition.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task1"}
tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk1_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk1_ex_db.id)
)[0]
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction["id"])
self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
# Assert recover task is scheduled even though the workflow execution failed manually.
# The recover task in the workflow is setup to fail.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "recover"}
tk2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk2_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk2_ex_db.id)
)[0]
tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_db.liveaction["id"])
self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
wf_svc.handle_action_execution_completion(tk2_ac_ex_db)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
# Check errors and output.
expected_errors = [
{
"task_id": "fail",
"type": "error",
"message": "Execution failed. See result for details.",
},
{
"task_id": "recover",
"type": "error",
"message": "Execution failed. See result for details.",
"result": {
"failed": True,
"return_code": 1,
"stderr": "",
"stdout": "",
"succeeded": False,
},
},
{
"task_id": "task1",
"type": "error",
"message": "Execution failed. See result for details.",
"result": {
"failed": True,
"return_code": 1,
"stderr": "",
"stdout": "",
"succeeded": False,
},
},
]
self.assertListEqual(
self.sort_workflow_errors(wf_ex_db.errors), expected_errors
)
@mock.patch.object(
runners_utils, "invoke_post_run", mock.MagicMock(return_value=None)
)
@mock.patch.object(
local_shell_command_runner.LocalShellCommandRunner,
"run",
mock.MagicMock(side_effect=[RUNNER_RESULT_FAILED]),
)
def test_include_result_to_error_log(self):
username = "stanley"
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
wf_input = {"who": "Thanos"}
lv_ac_db = lv_db_models.LiveActionDB(
action=wf_meta["name"], parameters=wf_input
)
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
# Assert action execution is running.
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(
lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result
)
wf_ex_dbs = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)
wf_ex_db = wf_ex_dbs[0]
# Assert task1 is already completed.
query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": "task1"}
tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
tk1_ac_ex_db = ex_db_access.ActionExecution.query(
task_execution=str(tk1_ex_db.id)
)[0]
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction["id"])
self.assertEqual(tk1_lv_ac_db.context.get("user"), username)
self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
# Action execution result can contain dotted notation so ensure this is tested.
result = {"127.0.0.1": {"hostname": "foobar"}}
self.assertDictEqual(tk1_lv_ac_db.result, result)
# Manually handle action execution completion.
wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
# Assert task and workflow failed.
tk1_ex_db = wf_db_access.TaskExecution.get_by_id(tk1_ex_db.id)
self.assertEqual(tk1_ex_db.status, wf_statuses.FAILED)
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
# Assert result is included in the error log.
expected_errors = [
{
"message": "Execution failed. See result for details.",
"type": "error",
"task_id": "task1",
"result": {"127.0.0.1": {"hostname": "foobar"}},
}
]
self.assertListEqual(wf_ex_db.errors, expected_errors)
|
|
import sys
import pytz
import time
import calendar
import configparser
import cmd
from datetime import date, datetime, timedelta
from strategy import strategy
from exchange import cb_exchange_sim, cb_exchange
class runner(cmd.Cmd):
def __init__(self):
"""Create a new runner with provided CLI commands. Default commands are:
\n1. exit: quit autotrader
\n2. help: display all commands
\n3. price: display the most recent bitcoin price
\n4. run: start trading on live data
\n5. backtest: run a backtest on historic data
\n6. load: load a new strategy
"""
print(" __ _ __ _ __ ")
print(" / /_ (_) /__________ _(_)___/ /__ _____")
print(" / __ \/ / __/ ___/ __ `/ / __ / _ \/ ___/")
print(" / /_/ / / /_/ / / /_/ / / /_/ / __/ / ")
print("/_.___/_/\__/_/ \__,_/_/\__,_/\___/_/ ")
print("")
print("Welcome to bitraider v0.0.4, an algorithmic Bitcoin trader!")
cmd.Cmd.__init__(self)
self.prompt = '> '
self.intro = "Type a command to get started or type \'help\'"
# Init config
self.config_path = "settings.ini"
self.config = configparser.ConfigParser()
try:
self.config.read(self.config_path)
except Exception as err:
print(str(err))
# Set up strategy
self.strategies = {}
"""The currently loaded strategies"""
# Try to load a default strategy, if one exists
try:
default_strategy_module = self.config.get("default_strategy", "module")
default_strategy_class = self.config.get("default_strategy", "class")
self.load_strategy(default_strategy_module, default_strategy_class)
except Exception as err:
#print(str(err))
print("No default strategy configured. Run "
"\'config default\' to set one")
try:
self.config.add_section("default_strategy")
except:
pass
self.exchange = cb_exchange_sim(1000, 1)
self.accounts = None
# Get auth credentials from settings.ini, if they exist, authorize
try:
self.auth_key = self.config.get("auth", "key")
self.auth_secret = self.config.get("auth", "secret")
self.auth_password = self.config.get("auth", "password")
self.authenticate()
except Exception as err:
#print(str(err))
print("No authentication configured. Run "
"\'config auth\' to set it")
try:
self.config.add_section("auth")
except:
pass
if self.accounts is not None:
print(str(len(self.accounts))+" accounts were found.")
for i in range(0, len(self.accounts)):
try:
print("Account ID: "+str(self.accounts[i]['id'])+" Available Funds: "+str(self.accounts[i]['available'])+" "+str(self.accounts[i]['currency'])+"")
except Exception as err:
print("Something went wrong while trying to authenticate with the provided credentials. Try running config>auth again.")
def do_exit(self, line):
sys.exit()
def do_price(self, line):
self.print_curr_price()
def do_run(self, line):
self.set_ticker_on()
def do_list(self, line):
self.list_strategies()
def do_config(self, option):
"""usage: \'config \' [option]"""
        if not option:
            print("error: no configuration option specified")
else:
if option == "auth":
if self.accounts is not None:
print("Are you sure? Reconfiguring auth will wipe your current auth settings. [y/n]")
raw_input = input("> ")
if raw_input == "y":
                        self.authenticate_exchange()
elif raw_input == "n":
print("Exiting to main menu")
pass
else:
self.authenticate_exchange()
elif option == "default":
print("Type the filename (without .py) containing the class which inherits from bitraider.strategy:")
option = input("> ")
filename = str(option)
self.config.set("default_strategy", "module", filename)
print("Type the name of the class within "+str(option)+" representing the strategy to load:")
option = input("> ")
loaded_strategy = str(option)
if self.strategies is not None:
if loaded_strategy in self.strategies.keys():
print("Error: "+loaded_strategy+" is already loaded")
option = input("> ")
loaded_strategy = str(option)
self.config.set("default_strategy", "class", loaded_strategy)
                with open(self.config_path, "w") as config_file:
self.config.write(config_file)
self.load_strategy(filename, loaded_strategy)
def do_load(self, option):
print("Type the filename (without .py) containing the class which inherits from bitraider.strategy:")
raw_input = input("> ")
filename = str(raw_input)
print("Type the name of the class within "+str(raw_input)+" representing the strategy to load:")
raw_input = input("> ")
loaded_strategy = str(raw_input)
self.load_strategy(filename, loaded_strategy)
def do_backtest(self, option):
strategy_to_backtest = ""
print("Enter the class name of the strategy to backtest, or press enter to\n"
"backtest on the default strategy.")
raw_input = input("> ")
if raw_input == "":
print("Performing backest on default strategy: "+str(self.config.get("default_strategy" ,"class")))
strategy_to_backtest = str(self.config.get("default_strategy", "class"))
else:
strategy_to_backtest = str(raw_input)
usd = 1000
btc = 1
days_back_in_time = 7
print("Enter the number of days back in time to backtest on: ")
raw_input = input("> ")
if raw_input == "":
print("Performing backtest on default of 7 days.")
else:
days_back_in_time = float(raw_input)
print("Performing backtest on last "+str(days_back_in_time)+" days.")
curr_time = datetime.now(tz=self.curr_timezone)
start_time = curr_time - timedelta(seconds=86400*days_back_in_time)
start_time = start_time.isoformat(' ')
end_time = curr_time.isoformat(' ')
print("Enter the initial USD amount:")
raw_input = input("> ")
if raw_input == "":
print("Using default starting USD amount of $1,000")
else:
usd = float(raw_input)
print("Using starting USD amount of $"+str(usd))
print("Enter the initial BTC amount:")
raw_input = input("> ")
if raw_input == "":
print("Using default starting BTC amount of 1")
else:
btc = float(raw_input)
print("Using starting BTC amount of "+str(btc))
        if strategy_to_backtest != "":
self.strategies[strategy_to_backtest].exchange = cb_exchange_sim(start_usd=usd, start_btc=btc)
historic_data = self.strategies[strategy_to_backtest].exchange.get_historic_rates(start_time=start_time, end_time=end_time, granularity=self.strategies[strategy_to_backtest].interval)
if type(historic_data) is not list:
print("API error: "+str(historic_data.get("message", "")))
print("Unable to backtest")
pass
else:
print("Backtesting from "+str(start_time)+" to "+str(end_time))
print("with "+str(len(historic_data))+" timeslices of length "+str(self.strategies[strategy_to_backtest].interval)+" seconds each")
self.strategies[strategy_to_backtest].backtest_strategy(historic_data)
def do_optimize(self, line):
usd = 1000
btc = 1
days_back_in_time = 7
print("Enter the class name of the strategy to be optimized:")
raw_input = input("> ")
print(self.strategies.keys())
if raw_input not in self.strategies.keys():
print("Error: not found")
            return
strategy_to_optimize = raw_input
print("Enter the timeframe to optimize for i.e. the time to simulate over:")
days_back_in_time = 7
raw_input = input("> ")
if raw_input == "":
print("Performing optimization for default of last 7 days.")
else:
days_back_in_time = float(raw_input)
print("Performing optimization based on last "+str(days_back_in_time)+" days.")
curr_time = datetime.now(tz=self.curr_timezone)
start_time = curr_time - timedelta(seconds=86400*days_back_in_time)
start_time = start_time.isoformat(' ')
end_time = curr_time.isoformat(' ')
print("Enter the initial USD amount:")
raw_input = input("> ")
if raw_input == "":
print("Using default starting USD amount of $1,000")
else:
usd = float(raw_input)
print("Using starting USD amount of $"+str(usd))
print("Enter the initial BTC amount:")
raw_input = input("> ")
if raw_input == "":
print("Using default starting BTC amount of 1")
else:
btc = float(raw_input)
print("Using starting BTC amount of "+str(btc))
strategy = strategy_to_optimize
strategy_attributes = dir(self.strategies[strategy])
bounds_by_attribute = {}
print("Note: strategy interval cannot be optimized due to API restraints")
self.strategies[strategy].exchange = cb_exchange_sim(start_usd=usd, start_btc=btc)
historic_data = self.strategies[strategy].exchange.get_historic_rates(start_time=start_time, end_time=end_time, granularity=self.strategies[strategy].interval)
if type(historic_data) is not list:
print("API error: "+str(historic_data.get("message", "")))
print("Unable to optimize. Try changing strategy's interval")
pass
else:
print("Optimizing based on time frame of "+str(start_time)+" to "+str(end_time))
print("with "+str(len(historic_data))+" timeslices of length "+str(self.strategies[strategy].interval)+" seconds each")
for attribute in strategy_attributes:
if "_" not in str(attribute) and str(attribute) != "interval":
# Optimizing for interval would poll API too frequently
print("Enter the lower bound for attribute: "+str(attribute)+", or press enter to skip:")
raw_input = input("> ")
if raw_input == "":
pass
else:
lower_bound = float(raw_input)
print("Enter the upper bound for attribute: "+str(attribute)+":")
raw_input = input("> ")
upper_bound = float(raw_input)
print("Enter the granularity of this attribute i.e. how many different values to try:")
raw_input = input("> ")
granularity = float(raw_input)
if upper_bound is not None and lower_bound is not None:
bounds_by_attribute[str(attribute)] = {"lower":lower_bound, "upper":upper_bound, "granularity":granularity}
#self.strategies[strategy][attribute] = float(lower_bound)
attribute_vals_by_id = {}
config_id = 0
# Initialize attribute_vals_by id
for attribute in bounds_by_attribute.keys():
num_shades_of_attr = int(bounds_by_attribute[attribute].get("granularity"))
                increment = (float(bounds_by_attribute[attribute]["upper"]) - float(bounds_by_attribute[attribute]["lower"]))/num_shades_of_attr
                attr_val = float(bounds_by_attribute[attribute]["lower"])
for shade in range(num_shades_of_attr):
attribute_vals_by_id[str(config_id)] = {}
attribute_vals_by_id[str(config_id)][attribute] = attr_val
config_id += 1
# Fill in all possible values for the attributes
config_id = 0
for attribute in bounds_by_attribute.keys():
num_shades_of_attr = int(bounds_by_attribute[attribute].get("granularity"))
                increment = (float(bounds_by_attribute[attribute]["upper"]) - float(bounds_by_attribute[attribute]["lower"]))/num_shades_of_attr
step = 0
                for shade in range(num_shades_of_attr):
                    attr_val = float(bounds_by_attribute[attribute]["lower"]) + (increment*step)
                    attribute_vals_by_id[str(config_id)][attribute] = attr_val
config_id += 1
step += 1
performance_by_id = {}
performance_vs_mkt = 0
strategy_performance = 0
mkt_performance = 0
# Change the attribute values for this strategy, updating when the performance is highest
for configuration in attribute_vals_by_id.keys():
for attribute in attribute_vals_by_id[configuration]:
setattr(self.strategies[strategy], attribute, attribute_vals_by_id[configuration][attribute])
performance_vs_mkt, strategy_performance, mkt_performance = self.strategies[strategy].backtest_strategy(historic_data)
performance_by_id[str(configuration)] = performance_vs_mkt
best_config = "0"
for configuration in performance_by_id.keys():
if performance_by_id[configuration] > performance_by_id[best_config]:
best_config = configuration
print("The best performing strategy configuration is: "+str(attribute_vals_by_id[best_config]))
print("With a performance vs market of: "+str(performance_by_id[best_config]))
    # End python cmd functions
def authenticate_exchange(self):
print("Paste in your CoinbaseExchange API key:")
raw_input = input("> ")
self.auth_key = raw_input
print("Paste in your CoinbaseExchange API secret:")
raw_input = input("> ")
self.auth_secret = raw_input
print("Paste in your CoinbaseExchange API passphrase:")
raw_input = input("> ")
        if raw_input != "":
self.auth_password = raw_input
self.config.set("auth", "key", self.auth_key)
self.config.set("auth", "secret", self.auth_secret)
self.config.set("auth", "password", self.auth_password)
with open(self.config_path, "w", encoding='utf-8') as config_file:
self.config.write(config_file)
self.authenticate()
def authenticate(self):
#try:
self.exchange = cb_exchange(self.auth_key, self.auth_secret, self.auth_password)
self.accounts = self.exchange.list_accounts()
#except Exception as err:
# print("Error! Only unauthorized endpoints are available.")
# print("error: "+str(err))
# print("If you would like bitraider to walk you through authentication, enter the commands: \'config\' > \'auth\'")
def set_ticker_on(self):
        strategy = next(iter(self.strategies.values()))  # self.strategies is a dict keyed by class name
start_time = time.time()
lower_bound = start_time
upper_bound = start_time + strategy.interval
elapsed_time = 0.0
last_intervals_trades = []
while True:
curr_time = time.time()
elapsed_time = curr_time - start_time
            if curr_time >= upper_bound:
# if we've reached a new interval, calculate data for the last interval and pass
# it onto the strategy
latest_trades = self.exchange.get_trades('BTC-USD')
interval_data = []
last_intervals_low = 999999999
last_intervals_high = 0.0
                last_intervals_open = 0.0
                last_intervals_close = 0.0
last_intervals_volume = 0.0
for trade in latest_trades:
# starting with the most recent trade, get trades for the last interval
datestring = str(trade.get("time"))[:-3]
trade_time = float(calendar.timegm(datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S.%f").timetuple()))
if trade_time >= lower_bound and trade_time <= upper_bound:
last_intervals_trades.append(trade)
if float(trade.get('price')) > last_intervals_high:
last_intervals_high = float(trade.get('price'))
if float(trade.get('price')) < last_intervals_low:
last_intervals_low = float(trade.get('price'))
last_intervals_volume += float(trade.get('size'))
if len(last_intervals_trades) > 0:
last_intervals_close = float(last_intervals_trades[0].get('price'))
last_intervals_open = float(last_intervals_trades[-1].get('price'))
interval_start_time = curr_time - strategy.interval
interval_data.extend([interval_start_time, last_intervals_low, last_intervals_high,
last_intervals_open, last_intervals_close, last_intervals_volume])
print("last_intervals_trades: "+str(last_intervals_trades))
print("elapsed: "+str(elapsed_time))
last_intervals_trades = []
lower_bound += strategy.interval
upper_bound += strategy.interval
# Here's where the magic happens:
#strategy.trade(interval_data)
def run(self):
# Time Configuration
self.curr_time = time.time() # Seconds since Jan 1st, 1970
self.curr_timezone = pytz.timezone("US/Central")
self.cmdloop()
def print_curr_price(self):
"""Print the most recent price."""
print(self.exchange.get_last_trade('BTC-USD')['price'])
def load_strategy(self, module, cls):
"""Load a user-defined strategy from a file.
\n`module`: the filename in the current directory containing the strategy class which
inherits from bitraider.strategy (does not include .py)
\n`cls`: the classname within the file to load
"""
import_string = module+"."+cls
classname = str(cls)
_temp = __import__(module)
loaded_strategy_ = getattr(_temp, cls)
instance_of_loaded_strategy = loaded_strategy_()
self.strategies[classname] = instance_of_loaded_strategy
print("Loaded strategy: "+str(cls)+" from file: "+str(module)+".py")
def run():
my_runner = runner()
my_runner.run()
if __name__=="__main__":
my_runner = runner()
my_runner.run()
|
|
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style, no_style
from django.utils.encoding import force_str
from django.utils.six import StringIO
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
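# Example usage (illustrative only): inside a command's handle() one might do
#     raise CommandError("Unknown application: %s" % app_label)
# run_from_argv() then prints "CommandError: Unknown application: ..." to
# stderr and exits with status 1 instead of dumping a traceback.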
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
"""
Wrapper around stdout/stderr
"""
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
if hasattr(out, 'isatty') and out.isatty():
self.style_func = style_func
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = [f for f in (style_func, self.style_func, lambda x:x)
if f is not None][0]
self._out.write(force_str(style_func(msg)))
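# Example (illustrative): OutputWrapper(sys.stderr, color_style().ERROR) applies
# the style function only when the wrapped stream is a tty, and write() appends
# self.ending ('\n' by default) when the message does not already end with it.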
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
    specialized methods as needed (a minimal illustrative sketch follows this
    class definition).
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of being
forcibly set to 'en-us'.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command if it creates database content
that is locale-sensitive and such content shouldn't contain any
    translations (as happens e.g. with django.contrib.auth
permissions) as making the locale differ from the de facto default
'en-us' might cause unintended effects.
This option can't be False when the can_import_settings option is set
to False too because attempting to set the locale needs access to
settings. This condition will generate a CommandError.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Raise on exception'),
make_option('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output."),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
try:
self.execute(*args, **options.__dict__)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# self.stderr is not guaranteed to be set here
stderr = getattr(self, 'stderr', OutputWrapper(sys.stderr, self.style.ERROR))
stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``, except if force-skipped).
"""
self.stdout = OutputWrapper(options.get('stdout', sys.stdout))
if options.get('no_color'):
self.style = no_style()
self.stderr = OutputWrapper(options.get('stderr', sys.stderr))
else:
self.stderr = OutputWrapper(options.get('stderr', sys.stderr), self.style.ERROR)
if self.can_import_settings:
from django.conf import settings
saved_locale = None
if not self.leave_locale_alone:
# Only mess with locales if we can assume we have a working
# settings file, because django.utils.translation requires settings
            # (The final say about whether the i18n machinery is active will be
# found in the value of the USE_I18N setting)
if not self.can_import_settings:
raise CommandError("Incompatible values of 'leave_locale_alone' "
"(%s) and 'can_import_settings' (%s) command "
"options." % (self.leave_locale_alone,
self.can_import_settings))
# Switch to US English, because django-admin.py creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.activate('en-us')
try:
if self.requires_model_validation and not options.get('skip_validation'):
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;"))
finally:
if saved_locale is not None:
translation.activate(saved_locale)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found" % (num_errors, '' if num_errors == 1 else 's'))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
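# Illustrative sketch (not part of Django): a minimal subclass wired up as
# described in the BaseCommand docstring above. In a project it would live in
# e.g. myapp/management/commands/greet.py and be named ``Command``:
#
#     class Command(BaseCommand):
#         args = '<name name ...>'
#         help = 'Prints a greeting for each name given on the command line.'
#
#         def handle(self, *args, **options):
#             if not args:
#                 raise CommandError('Enter at least one name.')
#             return '\n'.join('Hello, %s!' % name for name in args)
#
# Whatever handle() returns is written to stdout by execute(); raising
# CommandError is reported on stderr as described above.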
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError()
|
|
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import os
import sys
import csv
import re
import logging
from ctypes import c_int32
from collections import defaultdict
import argparse
from wlauto.utils.trace_cmd import TraceCmdTrace, TRACE_MARKER_START, TRACE_MARKER_STOP
logger = logging.getLogger('power')
UNKNOWN_FREQUENCY = -1
INIT_CPU_FREQ_REGEX = re.compile(r'CPU (?P<cpu>\d+) FREQUENCY: (?P<freq>\d+) kHZ')
class CorePowerTransitionEvent(object):
kind = 'transition'
__slots__ = ['timestamp', 'cpu_id', 'frequency', 'idle_state']
def __init__(self, timestamp, cpu_id, frequency=None, idle_state=None):
if (frequency is None) == (idle_state is None):
raise ValueError('Power transition must specify a frequency or an idle_state, but not both.')
self.timestamp = timestamp
self.cpu_id = cpu_id
self.frequency = frequency
self.idle_state = idle_state
def __str__(self):
return 'cpu {} @ {} -> freq: {} idle: {}'.format(self.cpu_id, self.timestamp,
self.frequency, self.idle_state)
def __repr__(self):
return 'CPTE(c:{} t:{} f:{} i:{})'.format(self.cpu_id, self.timestamp,
self.frequency, self.idle_state)
class CorePowerDroppedEvents(object):
kind = 'dropped_events'
__slots__ = ['cpu_id']
def __init__(self, cpu_id):
self.cpu_id = cpu_id
def __str__(self):
return 'DROPPED EVENTS on CPU{}'.format(self.cpu_id)
__repr__ = __str__
class TraceMarkerEvent(object):
kind = 'marker'
__slots__ = ['name']
def __init__(self, name):
self.name = name
def __str__(self):
return 'MARKER: {}'.format(self.name)
class CpuPowerState(object):
__slots__ = ['frequency', 'idle_state']
@property
def is_idling(self):
return self.idle_state is not None and self.idle_state >= 0
@property
def is_active(self):
return self.idle_state == -1
def __init__(self, frequency=None, idle_state=None):
self.frequency = frequency
self.idle_state = idle_state
def __str__(self):
return 'CP(f:{} i:{})'.format(self.frequency, self.idle_state)
__repr__ = __str__
class SystemPowerState(object):
__slots__ = ['timestamp', 'cpus']
@property
def num_cores(self):
return len(self.cpus)
def __init__(self, num_cores):
self.timestamp = None
self.cpus = []
for _ in xrange(num_cores):
self.cpus.append(CpuPowerState())
def copy(self):
new = SystemPowerState(self.num_cores)
new.timestamp = self.timestamp
for i, c in enumerate(self.cpus):
new.cpus[i].frequency = c.frequency
new.cpus[i].idle_state = c.idle_state
return new
def __str__(self):
return 'SP(t:{} Cs:{})'.format(self.timestamp, self.cpus)
__repr__ = __str__
class PowerStateProcessor(object):
"""
This takes a stream of power transition events and yields a timeline stream
    of system power states (an illustrative usage sketch follows this class).
"""
@property
def cpu_states(self):
return self.power_state.cpus
@property
def current_time(self):
return self.power_state.timestamp
@current_time.setter
def current_time(self, value):
self.power_state.timestamp = value
def __init__(self, core_clusters, num_idle_states,
first_cluster_state=sys.maxint, first_system_state=sys.maxint,
wait_for_start_marker=False):
self.power_state = SystemPowerState(len(core_clusters))
        self.requested_states = defaultdict(lambda: -1)  # cpu_id -> requested state
self.wait_for_start_marker = wait_for_start_marker
self._saw_start_marker = False
idle_state_domains = build_idle_domains(core_clusters,
num_states=num_idle_states,
first_cluster_state=first_cluster_state,
first_system_state=first_system_state)
# This tells us what other cpus we need to update when we see an idle
# state transition event
self.idle_related_cpus = defaultdict(list) # (cpu, idle_state) --> relate_cpus_list
for state_id, idle_state_domain in enumerate(idle_state_domains):
for cpu_group in idle_state_domain:
for cpu in cpu_group:
related = set(cpu_group) - set([cpu])
self.idle_related_cpus[(cpu, state_id)] = related
def process(self, event_stream):
for event in event_stream:
next_state = self.update_power_state(event)
if self._saw_start_marker or not self.wait_for_start_marker:
yield next_state
def update_power_state(self, event):
"""
Update the tracked power state based on the specified event and
return updated power state.
"""
if event.kind == 'transition':
self._process_transition(event)
elif event.kind == 'dropped_events':
self._process_dropped_events(event)
elif event.kind == 'marker':
if event.name == 'START':
self._saw_start_marker = True
elif event.name == 'STOP':
self._saw_start_marker = False
else:
raise ValueError('Unexpected event type: {}'.format(event.kind))
return self.power_state.copy()
def _process_transition(self, event):
self.current_time = event.timestamp
if event.idle_state is None:
self.cpu_states[event.cpu_id].frequency = event.frequency
else:
if event.idle_state == -1:
self._process_idle_exit(event)
else:
self._process_idle_entry(event)
def _process_dropped_events(self, event):
self.cpu_states[event.cpu_id].frequency = None
old_idle_state = self.cpu_states[event.cpu_id].idle_state
self.cpu_states[event.cpu_id].idle_state = None
related_ids = self.idle_related_cpus[(event.cpu_id, old_idle_state)]
for rid in related_ids:
self.cpu_states[rid].idle_state = None
def _process_idle_entry(self, event):
if self.cpu_states[event.cpu_id].is_idling:
raise ValueError('Got idle state entry event for an idling core: {}'.format(event))
self._try_transition_to_idle_state(event.cpu_id, event.idle_state)
def _process_idle_exit(self, event):
if self.cpu_states[event.cpu_id].is_active:
raise ValueError('Got idle state exit event for an active core: {}'.format(event))
self.requested_states.pop(event.cpu_id, None) # remove outstanding request if there is one
old_state = self.cpu_states[event.cpu_id].idle_state
self.cpu_states[event.cpu_id].idle_state = -1
if self.cpu_states[event.cpu_id].frequency is None:
self.cpu_states[event.cpu_id].frequency = UNKNOWN_FREQUENCY
related_ids = self.idle_related_cpus[(event.cpu_id, old_state)]
if old_state is not None:
new_state = old_state - 1
for rid in related_ids:
if self.cpu_states[rid].idle_state > new_state:
self._try_transition_to_idle_state(rid, new_state)
def _try_transition_to_idle_state(self, cpu_id, idle_state):
related_ids = self.idle_related_cpus[(cpu_id, idle_state)]
# Tristate: True - can transition, False - can't transition,
# None - unknown idle state on at least one related cpu
transition_check = self._can_enter_state(related_ids, idle_state)
if not transition_check:
# If we can't enter an idle state right now, record that we've
# requested it, so that we may enter it later (once all related
# cpus also want a state at least as deep).
self.requested_states[cpu_id] = idle_state
if transition_check is None:
# Unknown state on a related cpu means we're not sure whether we're
# entering requested state or a shallower one
self.cpu_states[cpu_id].idle_state = None
return
# Keep trying shallower states until all related cpus can enter one
while not self._can_enter_state(related_ids, idle_state):
idle_state -= 1
related_ids = self.idle_related_cpus[(cpu_id, idle_state)]
self.cpu_states[cpu_id].idle_state = idle_state
for rid in related_ids:
self.cpu_states[rid].idle_state = idle_state
if self.requested_states[rid] == idle_state:
del self.requested_states[rid] # request satisfied, so remove
def _can_enter_state(self, related_ids, state):
"""
This is a tri-state check. Returns ``True`` if related cpu states allow transition
into this state, ``False`` if related cpu states don't allow transition into this
state, and ``None`` if at least one of the related cpus is in an unknown state
(so the decision of whether a transition is possible cannot be made).
"""
for rid in related_ids:
rid_requested_state = self.requested_states[rid]
rid_current_state = self.cpu_states[rid].idle_state
if rid_current_state is None:
return None
if rid_current_state < state and rid_requested_state < state:
return False
return True
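# Illustrative example: for a hypothetical system with core_clusters=[0, 0, 1, 1]
# and a cluster-level idle state 1, idle_related_cpus[(0, 1)] == {1}, i.e. CPU0
# can only actually enter state 1 once CPU1 has also requested a state at least
# that deep. _can_enter_state() returns True when all related cpus allow the
# transition, False when some related cpu is shallower and has no deep-enough
# outstanding request, and None when a related cpu's state is unknown (e.g.
# after dropped events).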
def stream_cpu_power_transitions(events):
for event in events:
if event.name == 'cpu_idle':
state = c_int32(event.state).value
yield CorePowerTransitionEvent(event.timestamp, event.cpu_id, idle_state=state)
elif event.name == 'cpu_frequency':
yield CorePowerTransitionEvent(event.timestamp, event.cpu_id, frequency=event.state)
elif event.name == 'DROPPED EVENTS DETECTED':
yield CorePowerDroppedEvents(event.cpu_id)
elif event.name == 'print':
if TRACE_MARKER_START in event.text:
yield TraceMarkerEvent('START')
elif TRACE_MARKER_STOP in event.text:
yield TraceMarkerEvent('STOP')
else:
match = INIT_CPU_FREQ_REGEX.search(event.text)
if match:
yield CorePowerTransitionEvent(event.timestamp,
int(match.group('cpu')),
frequency=int(match.group('freq')))
def gather_core_states(system_state_stream, freq_dependent_idle_states=None): # NOQA
if freq_dependent_idle_states is None:
freq_dependent_idle_states = [0]
for system_state in system_state_stream:
core_states = []
for cpu in system_state.cpus:
if cpu.idle_state == -1:
core_states.append((-1, cpu.frequency))
elif cpu.idle_state in freq_dependent_idle_states:
if cpu.frequency is not None:
core_states.append((cpu.idle_state, cpu.frequency))
else:
core_states.append((None, None))
else:
core_states.append((cpu.idle_state, None))
yield (system_state.timestamp, core_states)
class PowerStateTimeline(object):
def __init__(self, filepath, core_names, idle_state_names):
self.filepath = filepath
self.idle_state_names = idle_state_names
self._wfh = open(filepath, 'w')
self.writer = csv.writer(self._wfh)
if core_names:
headers = ['ts'] + ['{} CPU{}'.format(c, i)
for i, c in enumerate(core_names)]
self.writer.writerow(headers)
def update(self, timestamp, core_states): # NOQA
row = [timestamp]
for idle_state, frequency in core_states:
if frequency is None:
if idle_state is None or idle_state == -1:
row.append(None)
else:
row.append(self.idle_state_names[idle_state])
else: # frequency is not None
if idle_state == -1:
if frequency == UNKNOWN_FREQUENCY:
frequency = 'Running (Unknown Hz)'
row.append(frequency)
elif idle_state is None:
row.append(None)
else:
if frequency == UNKNOWN_FREQUENCY:
frequency = 'Unknown Hz'
row.append('{} ({})'.format(self.idle_state_names[idle_state],
frequency))
self.writer.writerow(row)
def report(self):
self._wfh.close()
class ParallelStats(object):
def __init__(self, core_clusters, use_ratios=False):
self.clusters = defaultdict(set)
self.use_ratios = use_ratios
for i, clust in enumerate(core_clusters):
self.clusters[clust].add(i)
self.clusters['all'] = set(range(len(core_clusters)))
self.first_timestamp = None
self.last_timestamp = None
self.previous_states = None
self.parallel_times = defaultdict(lambda: defaultdict(int))
self.running_times = defaultdict(int)
def update(self, timestamp, core_states):
if self.last_timestamp is not None:
delta = timestamp - self.last_timestamp
active_cores = [i for i, c in enumerate(self.previous_states)
if c and c[0] == -1]
for cluster, cluster_cores in self.clusters.iteritems():
clust_active_cores = len(cluster_cores.intersection(active_cores))
self.parallel_times[cluster][clust_active_cores] += delta
if clust_active_cores:
self.running_times[cluster] += delta
else: # initial update
self.first_timestamp = timestamp
self.last_timestamp = timestamp
self.previous_states = core_states
def report(self): # NOQA
if self.last_timestamp is None:
return None
report = ParallelReport()
total_time = self.last_timestamp - self.first_timestamp
for cluster in sorted(self.parallel_times):
running_time = self.running_times[cluster]
for n in xrange(len(self.clusters[cluster]) + 1):
time = self.parallel_times[cluster][n]
time_pc = time / total_time
if not self.use_ratios:
time_pc *= 100
if n:
if running_time:
running_time_pc = time / running_time
else:
running_time_pc = 0
if not self.use_ratios:
running_time_pc *= 100
else:
running_time_pc = 0
precision = self.use_ratios and 3 or 1
fmt = '{{:.{}f}}'.format(precision)
report.add([cluster, n,
fmt.format(time),
fmt.format(time_pc),
fmt.format(running_time_pc),
])
return report
class ParallelReport(object):
def __init__(self):
self.values = []
def add(self, value):
self.values.append(value)
def write(self, filepath):
with open(filepath, 'w') as wfh:
writer = csv.writer(wfh)
writer.writerow(['cluster', 'number_of_cores', 'total_time', '%time', '%running_time'])
writer.writerows(self.values)
class PowerStateStats(object):
def __init__(self, core_names, idle_state_names=None, use_ratios=False):
self.core_names = core_names
self.idle_state_names = idle_state_names
self.use_ratios = use_ratios
self.first_timestamp = None
self.last_timestamp = None
self.previous_states = None
self.cpu_states = defaultdict(lambda: defaultdict(int))
def update(self, timestamp, core_states): # NOQA
if self.last_timestamp is not None:
delta = timestamp - self.last_timestamp
for cpu, (idle, freq) in enumerate(self.previous_states):
if idle == -1 and freq is not None:
state = '{:07}KHz'.format(freq)
elif freq:
if self.idle_state_names:
state = '{}-{:07}KHz'.format(self.idle_state_names[idle], freq)
else:
state = 'idle{}-{:07}KHz'.format(idle, freq)
elif idle not in (None, -1):
if self.idle_state_names:
state = self.idle_state_names[idle]
else:
state = 'idle{}'.format(idle)
else:
state = 'unknown'
self.cpu_states[cpu][state] += delta
else: # initial update
self.first_timestamp = timestamp
self.last_timestamp = timestamp
self.previous_states = core_states
def report(self):
if self.last_timestamp is None:
return None
total_time = self.last_timestamp - self.first_timestamp
state_stats = defaultdict(lambda: [None] * len(self.core_names))
for cpu, states in self.cpu_states.iteritems():
for state in states:
time = states[state]
time_pc = time / total_time
if not self.use_ratios:
time_pc *= 100
state_stats[state][cpu] = time_pc
precision = self.use_ratios and 3 or 1
return PowerStateStatsReport(state_stats, self.core_names, precision)
class PowerStateStatsReport(object):
def __init__(self, state_stats, core_names, precision=2):
self.state_stats = state_stats
self.core_names = core_names
self.precision = precision
def write(self, filepath):
with open(filepath, 'w') as wfh:
writer = csv.writer(wfh)
headers = ['state'] + ['{} CPU{}'.format(c, i)
for i, c in enumerate(self.core_names)]
writer.writerow(headers)
for state in sorted(self.state_stats):
stats = self.state_stats[state]
fmt = '{{:.{}f}}'.format(self.precision)
writer.writerow([state] + [fmt.format(s if s is not None else 0)
for s in stats])
class CpuUtilisationTimeline(object):
def __init__(self, filepath, core_names, max_freq_list):
self.filepath = filepath
self._wfh = open(filepath, 'w')
self.writer = csv.writer(self._wfh)
if core_names:
headers = ['ts'] + ['{} CPU{}'.format(c, i)
for i, c in enumerate(core_names)]
self.writer.writerow(headers)
self._max_freq_list = max_freq_list
def update(self, timestamp, core_states): # NOQA
row = [timestamp]
for core, [idle_state, frequency] in enumerate(core_states):
if idle_state == -1:
if frequency == UNKNOWN_FREQUENCY:
frequency = 0
elif idle_state is None:
frequency = 0
else:
frequency = 0
if core < len(self._max_freq_list):
frequency /= float(self._max_freq_list[core])
row.append(frequency)
else:
logger.warning('Unable to detect max frequency for this core. Cannot log utilisation value')
self.writer.writerow(row)
def report(self):
self._wfh.close()
def build_idle_domains(core_clusters, # NOQA
num_states,
first_cluster_state=None,
first_system_state=None):
"""
Returns a list of idle domain groups (one for each idle state). Each group is a
list of domains, and a domain is a list of cpu ids for which that idle state is
common. E.g.
[[[0], [1], [2]], [[0, 1], [2]], [[0, 1, 2]]]
This defines three idle states for a machine with three cores. The first idle state
has three domains with one core in each domain; the second state has two domains,
with cores 0 and 1 sharing one domain; the final state has only one domain shared
by all cores.
This mapping is created based on the following assumptions:
- The device is an SMP or a big.LITTLE-like system with cores in one or
more clusters (for SMP systems, all cores are considered to be in a "cluster").
- Idle domains correspond to either individual cores, individual clusters, or
the compute subsystem as a whole.
- Cluster states are always deeper (higher index) than core states, and
system states are always deeper than cluster states.
parameters:
:core_clusters: a list indicating the cluster "ID" of the corresponding core, e.g.
``[0, 0, 1]`` represents a three-core machine with cores 0
and 1 on cluster 0, and core 2 on cluster 1.
:num_states: total number of idle states on a device.
:first_cluster_state: the ID of the first idle state shared by all cores in a
cluster
:first_system_state: the ID of the first idle state shared by all cores.
"""
if first_cluster_state is None:
first_cluster_state = sys.maxint
if first_system_state is None:
first_system_state = sys.maxint
all_cpus = range(len(core_clusters))
cluster_cpus = defaultdict(list)
for cpu, cluster in enumerate(core_clusters):
cluster_cpus[cluster].append(cpu)
cluster_domains = [cluster_cpus[c] for c in sorted(cluster_cpus)]
core_domains = [[c] for c in all_cpus]
idle_state_domains = []
for state_id in xrange(num_states):
if state_id >= first_system_state:
idle_state_domains.append([all_cpus])
elif state_id >= first_cluster_state:
idle_state_domains.append(cluster_domains)
else:
idle_state_domains.append(core_domains)
return idle_state_domains
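# Illustrative example (matching the docstring above):
#   build_idle_domains([0, 0, 1], num_states=3,
#                      first_cluster_state=1, first_system_state=2)
# returns
#   [[[0], [1], [2]], [[0, 1], [2]], [[0, 1, 2]]]
# i.e. per-core domains for state 0, per-cluster domains for state 1, and a
# single system-wide domain for state 2.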
def report_power_stats(trace_file, idle_state_names, core_names, core_clusters,
num_idle_states, first_cluster_state=sys.maxint,
first_system_state=sys.maxint, use_ratios=False,
timeline_csv_file=None, filter_trace=False,
cpu_utilisation=None, max_freq_list=None):
# pylint: disable=too-many-locals
trace = TraceCmdTrace(filter_markers=filter_trace)
ps_processor = PowerStateProcessor(core_clusters,
num_idle_states=num_idle_states,
first_cluster_state=first_cluster_state,
first_system_state=first_system_state,
wait_for_start_marker=not filter_trace)
reporters = [
ParallelStats(core_clusters, use_ratios),
PowerStateStats(core_names, idle_state_names, use_ratios)
]
if timeline_csv_file:
reporters.append(PowerStateTimeline(timeline_csv_file,
core_names, idle_state_names))
if cpu_utilisation:
if max_freq_list:
reporters.append(CpuUtilisationTimeline(cpu_utilisation, core_names, max_freq_list))
else:
logger.warning('Maximum frequencies not found. Cannot normalise. Skipping CPU Utilisation Timeline')
event_stream = trace.parse(trace_file, names=['cpu_idle', 'cpu_frequency', 'print'])
transition_stream = stream_cpu_power_transitions(event_stream)
power_state_stream = ps_processor.process(transition_stream)
core_state_stream = gather_core_states(power_state_stream)
for timestamp, states in core_state_stream:
for reporter in reporters:
reporter.update(timestamp, states)
reports = []
for reporter in reporters:
report = reporter.report()
if report:
reports.append(report)
return reports
def main():
# pylint: disable=unbalanced-tuple-unpacking
args = parse_arguments()
parallel_report, powerstate_report = report_power_stats(
trace_file=args.infile,
idle_state_names=args.idle_state_names,
core_names=args.core_names,
core_clusters=args.core_clusters,
num_idle_states=args.num_idle_states,
first_cluster_state=args.first_cluster_state,
first_system_state=args.first_system_state,
use_ratios=args.ratios,
timeline_csv_file=args.timeline_file,
filter_trace=(not args.no_trace_filter),
cpu_utilisation=args.cpu_utilisation,
max_freq_list=args.max_freq_list,
)
parallel_report.write(os.path.join(args.output_directory, 'parallel.csv'))
powerstate_report.write(os.path.join(args.output_directory, 'cpustate.csv'))
class SplitListAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError('nargs not allowed')
super(SplitListAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, [v.strip() for v in values.split(',')])
def parse_arguments(): # NOQA
parser = argparse.ArgumentParser(description="""
Produce CPU power activity statistics reports from
power trace.
""")
parser.add_argument('infile', metavar='TRACEFILE', help='''
Path to the trace file to parse. This must be in the format generated
by "trace-cmd report" command.
''')
parser.add_argument('-d', '--output-directory', default='.',
help='''
Output directory where reports will be placed.
''')
parser.add_argument('-F', '--no-trace-filter', action='store_true', default=False,
help='''
Normally, only the trace between begin and end marker is used. This disables
the filtering so the entire trace file is considered.
''')
parser.add_argument('-c', '--core-names', action=SplitListAction,
help='''
Comma-separated list of core names for the device on which the trace
was collected.
''')
parser.add_argument('-C', '--core-clusters', action=SplitListAction, default=[],
help='''
Comma-separated list of core cluster IDs for the device on which the
trace was collected. If not specified, this will be generated from
core names on the assumption that all cores with the same name are on the
same cluster.
''')
parser.add_argument('-i', '--idle-state-names', action=SplitListAction,
help='''
Comma-separated list of idle state names. The number of names must match
--num-idle-states if that was explicitly specified.
''')
parser.add_argument('-n', '--num-idle-states', type=int,
help='''
number of idle states on the device
''')
parser.add_argument('-q', '--first-cluster-state', type=int,
help='''
ID of the first cluster state. Must be < --num-idle-states.
''')
parser.add_argument('-s', '--first-system-state', type=int,
help='''
ID of the first system state. Must be < --num-idle-states, and
> --first-cluster-state.
''')
parser.add_argument('-R', '--ratios', action='store_true',
help='''
By default proportional values will be reported as percentages, if this
flag is enabled, they will be reported as ratios instead.
''')
parser.add_argument('-t', '--timeline-file', metavar='FILE',
help='''
A timeline of core power states will be written to the specified file in
CSV format.
''')
parser.add_argument('-u', '--cpu-utilisation', metavar='FILE',
help='''
A timeline of cpu(s) utilisation will be written to the specified file in
CSV format.
''')
parser.add_argument('-m', '--max-freq-list', action=SplitListAction, default=[],
help='''
Comma-separated list of core maximum frequencies for the device on which
the trace was collected.
Only required if --cpu-utilisation is set.
This is used to normalise the frequencies to obtain percentage utilisation.
''')
args = parser.parse_args()
if not args.core_names:
raise ValueError('core names must be specified using -c or --core-names')
if not args.core_clusters:
logger.debug('core clusters not specified, inferring from core names')
core_cluster_map = {}
core_clusters = []
current_cluster = 0
for cn in args.core_names:
if cn not in core_cluster_map:
core_cluster_map[cn] = current_cluster
current_cluster += 1
core_clusters.append(core_cluster_map[cn])
args.core_clusters = core_clusters
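# Illustrative example: --core-names A53,A53,A57,A57 with no --core-clusters
# yields core_clusters == [0, 0, 1, 1], since cluster IDs are assigned in order
# of first appearance of each distinct core name.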
if not args.num_idle_states and args.idle_state_names:
args.num_idle_states = len(args.idle_state_names)
elif args.num_idle_states and not args.idle_state_names:
args.idle_state_names = ['idle{}'.format(i) for i in xrange(args.num_idle_states)]
elif args.num_idle_states and args.idle_state_names:
if len(args.idle_state_names) != args.num_idle_states:
raise ValueError('Number of idle state names does not match --num-idle-states')
else:
raise ValueError('Either --num-idle-states or --idle-state-names must be specified')
if not args.first_cluster_state and len(set(args.core_clusters)) > 1:
if args.first_system_state:
logger.debug('First cluster idle state not specified; assuming the state previous to the first system state')
args.first_cluster_state = args.first_system_state - 1
else:
logger.debug('First cluster idle state not specified; assuming last available state')
args.first_cluster_state = args.num_idle_states - 1
return args
if __name__ == '__main__':
main()
|
|
"""Utilities for working with fastq files.
"""
from itertools import izip, product
import os
import random
import gzip
from Bio import SeqIO
from bcbio.distributed import objectstore
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio import utils
from bcbio.utils import open_possible_gzip
from bcbio.pipeline import config_utils
from bcbio.provenance import do
@utils.memoize_outfile(stem=".groom")
def groom(in_file, data, in_qual="illumina", out_dir=None, out_file=None):
"""
Grooms a FASTQ file from Illumina 1.3/1.5 quality scores into
sanger format, if it is not already in that format.
"""
seqtk = config_utils.get_program("seqtk", data["config"])
if in_qual == "fastq-sanger":
logger.info("%s is already in Sanger format." % in_file)
return out_file
with file_transaction(out_file) as tmp_out_file:
cmd = "{seqtk} seq -Q64 {in_file} | gzip > {tmp_out_file}".format(**locals())
do.run(cmd, "Converting %s to Sanger format." % in_file)
return out_file
@utils.memoize_outfile(stem=".fixed")
def filter_single_reads_by_length(in_file, quality_format, min_length=20,
out_file=None):
"""
removes reads from a fastq file which are shorter than a minimum
length
"""
logger.info("Removing reads in %s thare are less than %d bases."
% (in_file, min_length))
in_iterator = SeqIO.parse(in_file, quality_format)
out_iterator = (record for record in in_iterator if
len(record.seq) > min_length)
with file_transaction(out_file) as tmp_out_file:
with open(tmp_out_file, "w") as out_handle:
SeqIO.write(out_iterator, out_handle, quality_format)
return out_file
def filter_reads_by_length(fq1, fq2, quality_format, min_length=20):
"""
removes reads from a pair of fastq files that are shorter than
a minimum length. removes both ends of a read if one end falls
below the threshold while maintaining the order of the reads
"""
logger.info("Removing reads in %s and %s that "
"are less than %d bases." % (fq1, fq2, min_length))
fq1_out = utils.append_stem(fq1, ".fixed")
fq2_out = utils.append_stem(fq2, ".fixed")
fq1_single = utils.append_stem(fq1, ".singles")
fq2_single = utils.append_stem(fq2, ".singles")
if all(map(utils.file_exists, [fq1_out, fq2_out, fq1_single, fq2_single])):
return [fq1_out, fq2_out]
fq1_in = SeqIO.parse(fq1, quality_format)
fq2_in = SeqIO.parse(fq2, quality_format)
out_files = [fq1_out, fq2_out, fq1_single, fq2_single]
with file_transaction(out_files) as tmp_out_files:
fq1_out_handle = open(tmp_out_files[0], "w")
fq2_out_handle = open(tmp_out_files[1], "w")
fq1_single_handle = open(tmp_out_files[2], "w")
fq2_single_handle = open(tmp_out_files[3], "w")
for fq1_record, fq2_record in izip(fq1_in, fq2_in):
if len(fq1_record.seq) >= min_length and len(fq2_record.seq) >= min_length:
fq1_out_handle.write(fq1_record.format(quality_format))
fq2_out_handle.write(fq2_record.format(quality_format))
else:
if len(fq1_record.seq) > min_length:
fq1_single_handle.write(fq1_record.format(quality_format))
if len(fq2_record.seq) > min_length:
fq2_single_handle.write(fq2_record.format(quality_format))
fq1_out_handle.close()
fq2_out_handle.close()
fq1_single_handle.close()
fq2_single_handle.close()
return [fq1_out, fq2_out]
def rstrip_extra(fname):
"""Strip extraneous, non-discriminative filename info from the end of a file.
"""
to_strip = ("_R", "_", "fastq", ".", "-")
while fname.endswith(to_strip):
for x in to_strip:
if fname.endswith(x):
fname = fname[:len(fname) - len(x)]
break
return fname
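# Illustrative example: rstrip_extra('sample_1_fastq') == 'sample_1' -- the
# trailing 'fastq' and '_' are stripped while the discriminative '_1' used for
# pairing below is preserved.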
def combine_pairs(input_files):
""" calls files pairs if they are completely the same except
for one has _1 and the other has _2 returns a list of tuples
of pairs or singles.
From bipy.utils (https://github.com/roryk/bipy/blob/master/bipy/utils.py)
Adjusted to allow different input paths or extensions for matching files.
"""
PAIR_FILE_IDENTIFIERS = set(["1", "2"])
pairs = []
used = set([])
for in_file in input_files:
if in_file in used:
continue
for comp_file in input_files:
if comp_file in used or comp_file == in_file:
continue
a = rstrip_extra(utils.splitext_plus(os.path.basename(in_file))[0])
b = rstrip_extra(utils.splitext_plus(os.path.basename(comp_file))[0])
if len(a) != len(b):
continue
s = dif(a,b)
if len(s) > 1:
continue  # we only pair names that differ in exactly one position
if (a[s[0]] in PAIR_FILE_IDENTIFIERS and
b[s[0]] in PAIR_FILE_IDENTIFIERS):
# if the 1/2 isn't the last digit before a separator, skip
# this skips stuff like 2P 2A, often denoting replicates, not
# read pairings
if len(b) > (s[0] + 1):
if (b[s[0]+1] not in ("_", "-", ".")):
continue
# only treat the files as a pair if the 1/2 is prefaced
# by R or a separator
if b[s[0] - 1] in ("R", "_", "-", "."):
used.add(in_file)
used.add(comp_file)
if b[s[0]] == "2":
pairs.append([in_file, comp_file])
else:
pairs.append([comp_file, in_file])
break
if in_file not in used:
pairs.append([in_file])
used.add(in_file)
return pairs
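# Illustrative example:
#   combine_pairs(['a_1.fastq', 'a_2.fastq', 'b.fastq'])
# returns
#   [['a_1.fastq', 'a_2.fastq'], ['b.fastq']]
# since the first two names differ only at the read-pair digit (prefaced by a
# separator), while 'b.fastq' has no mate and comes back as a single.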
def dif(a, b):
""" copy from http://stackoverflow.com/a/8545526 """
return [i for i in range(len(a)) if a[i] != b[i]]
def is_fastq(in_file, bzip=True):
fastq_ends = [".txt", ".fq", ".fastq"]
zip_ends = [".gzip", ".gz"]
if bzip:
zip_ends += [".bz2", ".bzip2"]
base, first_ext = os.path.splitext(in_file)
second_ext = os.path.splitext(base)[1]
if first_ext in fastq_ends:
return True
elif (second_ext, first_ext) in product(fastq_ends, zip_ends):
return True
else:
return False
def downsample(f1, f2, data, N, quick=False):
""" get N random headers from a fastq file without reading the
whole thing into memory
modified from: http://www.biostars.org/p/6544/
quick=True will just grab the first N reads rather than do a true
downsampling
"""
if quick:
rand_records = range(N)
else:
records = sum(1 for _ in open(f1)) / 4
N = records if N > records else N
rand_records = sorted(random.sample(xrange(records), N))
fh1 = open_possible_gzip(f1)
fh2 = open_possible_gzip(f2) if f2 else None
outf1 = os.path.splitext(f1)[0] + ".subset" + os.path.splitext(f1)[1]
outf2 = os.path.splitext(f2)[0] + ".subset" + os.path.splitext(f2)[1] if f2 else None
if utils.file_exists(outf1):
if not outf2:
return outf1, outf2
elif utils.file_exists(outf2):
return outf1, outf2
out_files = (outf1, outf2) if outf2 else (outf1)
with file_transaction(out_files) as tx_out_files:
if isinstance(tx_out_files, basestring):
tx_out_f1 = tx_out_files
else:
tx_out_f1, tx_out_f2 = tx_out_files
sub1 = open_possible_gzip(tx_out_f1, "w")
sub2 = open_possible_gzip(tx_out_f2, "w") if outf2 else None
rec_no = - 1
for rr in rand_records:
while rec_no < rr:
rec_no += 1
for i in range(4): fh1.readline()
if fh2:
for i in range(4): fh2.readline()
for i in range(4):
sub1.write(fh1.readline())
if sub2:
sub2.write(fh2.readline())
rec_no += 1
fh1.close()
sub1.close()
if f2:
fh2.close()
sub2.close()
return outf1, outf2
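# Minimal usage sketch (hypothetical file names; `data` is passed through
# unused here, so any sample dictionary will do):
#   sub1, sub2 = downsample("sample_1.fastq", "sample_2.fastq", data, 1000)
# writes sample_1.subset.fastq / sample_2.subset.fastq with 1000 randomly
# chosen read pairs; quick=True instead takes the first 1000 reads.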
def estimate_read_length(fastq_file, quality_format="fastq-sanger", nreads=1000):
"""
estimate average read length of a fastq file
"""
in_handle = SeqIO.parse(open_fastq(fastq_file), quality_format)
read = in_handle.next()
average = len(read.seq)
for _ in range(nreads):
try:
average = (average + len(in_handle.next().seq)) / 2
except StopIteration:
break
in_handle.close()
return average
def estimate_maximum_read_length(fastq_file, quality_format="fastq-sanger",
nreads=1000):
"""
estimate maximum read length of a fastq file
"""
in_handle = SeqIO.parse(open_fastq(fastq_file), quality_format)
lengths = []
for _ in range(nreads):
try:
lengths.append(len(in_handle.next().seq))
except StopIteration:
break
in_handle.close()
return max(lengths)
def open_fastq(in_file):
""" open a fastq file, using gzip if it is gzipped
"""
if objectstore.is_remote(in_file):
return objectstore.open(in_file)
_, ext = os.path.splitext(in_file)
if ext == ".gz":
return gzip.open(in_file, 'rb')
if ext in [".fastq", ".fq"]:
return open(in_file, 'r')
# default to just opening it
return open(in_file, "r")
|
|
# from globibot.lib.helpers import formatting as f
from tornado.httpclient import AsyncHTTPClient
from tornado.platform.asyncio import to_asyncio_future
from asyncio import sleep, wait
from time import time as now
import os.path
import random
import json
class Utils:
async def fetch(url):
client = AsyncHTTPClient()
tornado_future = client.fetch(url)
future = to_asyncio_future(tornado_future)
response = await future
return response.body
class Fetch:
DATA_FILE = lambda name: os.path.join('./plugins/trivia/data', name)
def read_json(file_name):
def reader():
with open(Fetch.DATA_FILE(file_name), 'r') as f:
return json.load(f)
return reader
class Pick:
def random_collection(dataset):
random.shuffle(dataset)
index = 0
def pick():
nonlocal index
item = dataset[index]
index += 1
if index >= len(dataset):
index = 0
random.shuffle(dataset)
return item
return pick
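# Illustrative behaviour: random_collection() shuffles `dataset` in place and
# returns a closure that serves one item per call, reshuffling once the whole
# list has been consumed, so every item appears exactly once per cycle.
#   pick = Pick.random_collection(list(questions))  # `questions` is hypothetical
#   question = pick()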
def random_dict_item(dataset):
return Pick.random_collection(list(dataset.items()))
class Query:
def timed(delay):
# async def query(read_message):
# messages = []
# answered = set()
# start = now()
# while True:
# timeout = delay - (now() - start)
# if timeout <= 0:
# return messages
# message_set, _ = await wait((read_message(), ), timeout=timeout)
# try:
# message_task = next(iter(message_set))
# message = message_task.result()
# except StopIteration:
# pass
# else:
# if message.author.id not in answered:
# messages.append(message)
# answered.add(message.author.id)
async def query():
await sleep(delay)
return query
def readable_concat(collection):
comma_string = ', '.join(collection[:-1])
return ' and '.join(e for e in (comma_string, collection[-1]) if e)
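# e.g. readable_concat(['rock', 'paper', 'scissors']) == 'rock, paper and scissors'
# and readable_concat(['rock']) == 'rock' (the empty comma part is filtered out)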
def time_diff(delta):
return delta.seconds + (delta.microseconds / 1000000)
class Resolve:
def fastest(answers, to_find, skill):
winning_answers = [
answer for answer in answers
if answer.clean_content.strip().lower() == to_find
]
if not winning_answers:
return None, 'Nobody found in time, the answer was: `{}`'.format(to_find)
else:
fastest_answer, *other_answers = winning_answers
winners_mention = readable_concat([
answer.author.mention
for answer in winning_answers
])
message = (
'{} found the correct answer! It was: `{}`'
.format(winners_mention, to_find)
)
if other_answers:
other_times = ' - '.join(
'{} `+{:.3f}`'.format(
answer.author.mention,
time_diff(answer.timestamp - fastest_answer.timestamp)
)
for answer in other_answers
)
detail = (
'{} was the fastest to answer though\n{}'
.format(fastest_answer.author.mention, other_times)
)
message += '\n{}'.format(detail)
conclusion = (
'{} killed everyone else with superior {} skills'
.format(fastest_answer.author.mention, skill)
)
return fastest_answer.author, '{}\n{}'.format(message, conclusion)
def closest_int(answers, to_find, within, skill):
def is_integral(s):
try:
int(s)
except:
return False
else:
return True
valid_answers = [
answer for answer in answers
if is_integral(answer.clean_content.strip())
]
try:
closest_diff = min(
abs(to_find - int(answer.clean_content.strip()))
for answer in valid_answers
)
closest_diff = min(within, closest_diff)
except ValueError:
return None, 'Nobody found in time, the answer was: `{}`'.format(to_find)
winning_answers = [
answer for answer in valid_answers
if abs(to_find - int(answer.clean_content.strip())) == closest_diff
]
if not winning_answers:
return None, 'Nobody found in time, the answer was: `{}`'.format(to_find)
else:
fastest_answer, *other_answers = winning_answers
winners_mention = readable_concat([
answer.author.mention
for answer in winning_answers
])
detail = (
'found the correct answer'
if closest_diff == 0
else '{} close'.format('were' if other_answers else 'was')
)
message = (
'{} {}! It was: `{}`'
.format(winners_mention, detail, to_find)
)
if other_answers:
other_times = ' - '.join(
'{} `+{:.3f}`'.format(
answer.author.mention,
time_diff(answer.timestamp - fastest_answer.timestamp)
)
for answer in other_answers
)
detail = (
'{} was the fastest to answer though\n{}'
.format(fastest_answer.author.mention, other_times)
)
message += '\n{}'.format(detail)
conclusion = (
'{} killed everyone else with superior {} skills'
.format(fastest_answer.author.mention, skill)
)
return fastest_answer.author, '{}\n{}'.format(message, conclusion)
|
|
"""Multiprocessing ready entry points for sample analysis.
"""
from bcbio import heterogeneity, structural, utils, chipseq, upload
from bcbio.bam import callable
from bcbio.cwl import create as cwl_create
from bcbio.rnaseq import (sailfish)
from bcbio.ngsalign import alignprep
from bcbio.pipeline import (archive, disambiguate, qcsummary, sample,
main, shared, variation, run_info, rnaseq)
from bcbio.variation import (bamprep, bedutils, coverage, genotype, ensemble,
joint, multi, population, recalibrate, validate,
vcfutils)
@utils.map_wrap
def run_sailfish(*args):
return sailfish.run_sailfish(*args)
@utils.map_wrap
def prepare_sample(*args):
return sample.prepare_sample(*args)
@utils.map_wrap
def prepare_bcbio_samples(*args):
return sample.prepare_bcbio_samples(*args)
@utils.map_wrap
def trim_sample(*args):
return sample.trim_sample(*args)
@utils.map_wrap
def process_alignment(*args):
return sample.process_alignment(*args)
@utils.map_wrap
def postprocess_alignment(*args):
return sample.postprocess_alignment(*args)
@utils.map_wrap
def prep_samples(*args):
return sample.prep_samples(*args)
@utils.map_wrap
def prep_align_inputs(*args):
return alignprep.create_inputs(*args)
@utils.map_wrap
def merge_sample(*args):
return sample.merge_sample(*args)
@utils.map_wrap
def delayed_bam_merge(*args):
return sample.delayed_bam_merge(*args)
@utils.map_wrap
def piped_bamprep(*args):
return bamprep.piped_bamprep(*args)
@utils.map_wrap
def prep_recal(*args):
return recalibrate.prep_recal(*args)
@utils.map_wrap
def split_variants_by_sample(*args):
return multi.split_variants_by_sample(*args)
@utils.map_wrap
def postprocess_variants(*args):
return variation.postprocess_variants(*args)
@utils.map_wrap
def pipeline_summary(*args):
return qcsummary.pipeline_summary(*args)
@utils.map_wrap
def qsignature_summary(*args):
return qcsummary.qsignature_summary(*args)
@utils.map_wrap
def generate_transcript_counts(*args):
return rnaseq.generate_transcript_counts(*args)
@utils.map_wrap
def run_cufflinks(*args):
return rnaseq.run_cufflinks(*args)
@utils.map_wrap
def run_stringtie_expression(*args):
return rnaseq.run_stringtie_expression(*args)
@utils.map_wrap
def run_express(*args):
return rnaseq.run_express(*args)
@utils.map_wrap
def run_dexseq(*args):
return rnaseq.run_dexseq(*args)
@utils.map_wrap
def run_rnaseq_variant_calling(*args):
return rnaseq.run_rnaseq_variant_calling(*args)
@utils.map_wrap
def run_rnaseq_joint_genotyping(*args):
return rnaseq.run_rnaseq_joint_genotyping(*args)
@utils.map_wrap
def combine_bam(*args):
return shared.combine_bam(*args)
@utils.map_wrap
def variantcall_sample(*args):
return genotype.variantcall_sample(*args)
@utils.map_wrap
def combine_variant_files(*args):
return vcfutils.combine_variant_files(*args)
@utils.map_wrap
def concat_variant_files(*args):
return vcfutils.concat_variant_files(*args)
@utils.map_wrap
def merge_variant_files(*args):
return vcfutils.merge_variant_files(*args)
@utils.map_wrap
def detect_sv(*args):
return structural.detect_sv(*args)
@utils.map_wrap
def heterogeneity_estimate(*args):
return heterogeneity.estimate(*args)
@utils.map_wrap
def finalize_sv(*args):
return structural.finalize_sv(*args)
@utils.map_wrap
def combine_calls(*args):
return ensemble.combine_calls(*args)
@utils.map_wrap
def prep_gemini_db(*args):
return population.prep_gemini_db(*args)
@utils.map_wrap
def combine_bed(*args):
return bedutils.combine(*args)
@utils.map_wrap
def calc_callable_loci(*args):
return callable.calc_callable_loci(*args)
@utils.map_wrap
def combine_sample_regions(*args):
return callable.combine_sample_regions(*args)
@utils.map_wrap
def compare_to_rm(*args):
return validate.compare_to_rm(*args)
@utils.map_wrap
def coverage_summary(*args):
return coverage.summary(*args)
@utils.map_wrap
def run_disambiguate(*args):
return disambiguate.run(*args)
@utils.map_wrap
def disambiguate_split(*args):
return disambiguate.split(*args)
@utils.map_wrap
def disambiguate_merge_extras(*args):
return disambiguate.merge_extras(*args)
@utils.map_wrap
def clean_chipseq_alignment(*args):
return chipseq.clean_chipseq_alignment(*args)
@utils.map_wrap
def archive_to_cram(*args):
return archive.to_cram(*args)
@utils.map_wrap
def square_batch_region(*args):
return joint.square_batch_region(*args)
@utils.map_wrap
def cufflinks_assemble(*args):
return rnaseq.cufflinks_assemble(*args)
@utils.map_wrap
def cufflinks_merge(*args):
return rnaseq.cufflinks_merge(*args)
@utils.map_wrap
def organize_samples(*args):
return run_info.organize(*args)
@utils.map_wrap
def prep_system(*args):
return run_info.prep_system(*args)
@utils.map_wrap
def upload_samples(*args):
return upload.from_sample(*args)
@utils.map_wrap
def upload_samples_project(*args):
return upload.project_from_sample(*args)
@utils.map_wrap
def create_cwl(*args):
return cwl_create.from_world(*args)
@utils.map_wrap
def run_main(*args):
work_dir, ready_config_file, systemconfig, fcdir, parallel, samples = args
return main.run_main(work_dir, run_info_yaml=ready_config_file,
config_file=systemconfig, fc_dir=fcdir,
parallel=parallel, samples=samples)
|
|
# coding: utf-8
from pyramid.view import view_config
from dogpile.cache import make_region
from analytics.control_manager import base_data_manager
from citedby.custom_query import journal_titles
cache_region = make_region(name='views_ajax_cache')
@view_config(route_name='bibliometrics_document_received_citations', request_method='GET', renderer='jsonp')
@base_data_manager
def bibliometrics_document_received_citations(request):
data = request.data_manager
code = request.GET.get('code', '')
data = request.stats.bibliometrics.document_received_citations(code)
return data
@view_config(route_name='bibliometrics_journal_jcr_eigen_factor_chart', request_method='GET', renderer='jsonp')
@base_data_manager
def bibliometrics_journal_jcr_eigen_factor_chart(request):
data = request.data_manager
data = request.stats.bibliometrics.jcr_eigen_factor(data['selected_journal_code'])
return request.chartsconfig.bibliometrics_jcr_eigen_factor(data)
@view_config(route_name='bibliometrics_journal_jcr_received_citations_chart', request_method='GET', renderer='jsonp')
@base_data_manager
def bibliometrics_journal_jcr_received_citations_chart(request):
data = request.data_manager
data = request.stats.bibliometrics.jcr_received_citations(data['selected_journal_code'])
return request.chartsconfig.bibliometrics_jcr_received_citations(data)
@view_config(route_name='bibliometrics_journal_jcr_average_impact_factor_percentile_chart', request_method='GET', renderer='jsonp')
@base_data_manager
def bibliometrics_journal_jcr_average_impact_factor_percentile_chart(request):
data = request.data_manager
data = request.stats.bibliometrics.jcr_average_impact_factor_percentile(data['selected_journal_code'])
return request.chartsconfig.bibliometrics_jcr_average_impact_factor_percentile(data)
@view_config(route_name='bibliometrics_journal_jcr_impact_factor_chart', request_method='GET', renderer='jsonp')
@base_data_manager
def bibliometrics_journal_jcr_impact_factor_chart(request):
data = request.data_manager
data = request.stats.bibliometrics.jcr_impact_factor(data['selected_journal_code'])
return request.chartsconfig.bibliometrics_jcr_impact_factor(data)
@view_config(route_name='bibliometrics_journal_google_h5m5_chart', request_method='GET', renderer='jsonp')
@base_data_manager
def bibliometrics_journal_google_h5m5_chart(request):
data = request.data_manager
data = request.stats.bibliometrics.google_h5m5(data['selected_journal_code'])
return request.chartsconfig.bibliometrics_google_h5m5(data)
@view_config(route_name='usage_report_chart', request_method='GET', renderer='jsonp')
@base_data_manager
def usage_report_chart(request):
data = request.data_manager
api_version = request.GET.get('api_version', 'v2')
range_start = request.GET.get('range_start', None)
range_end = request.GET.get('range_end', None)
report_code = request.GET.get('report_code', 'tr_j1')
selected_code = data['selected_code']
selected_collection_code = data['selected_collection_code']
data_chart = request.stats.usage.get_usage_report(
issn = selected_code,
collection = selected_collection_code,
begin_date = range_start,
end_date = range_end,
report_code = report_code,
api_version = api_version,
)
return request.chartsconfig.usage_report(data_chart)
@view_config(route_name='bibliometrics_journal_cited_and_citing_years_heat', request_method='GET', renderer='jsonp')
@base_data_manager
def bibliometrics_journal_cited_and_citing_years_heat(request):
data = request.data_manager
titles = request.GET.get('titles', None)
titles = titles.split('||') if titles else []
if data['selected_journal_code']:
journal = request.stats.articlemeta.journal(code=data['selected_journal_code'])
titles.append(journal.title)
titles.append(journal.abbreviated_title)
titles.extend(x['title'] for x in journal_titles.load(data['selected_journal_code']).get('should', []) if x['title'] not in titles)
data = request.stats.bibliometrics.cited_and_citing_years_heat(
data['selected_journal_code'],
titles
)
return request.chartsconfig.bibliometrics_cited_and_citing_years_heat(data)
@view_config(route_name='bibliometrics_journal_impact_factor_chart', request_method='GET', renderer='jsonp')
@base_data_manager
def bibliometrics_journal_impact_factor_chart(request):
data = request.data_manager
titles = request.GET.get('titles', None)
titles = titles.split('||') if titles else []
if data['selected_journal_code']:
journal = request.stats.articlemeta.journal(code=data['selected_journal_code'])
titles.append(journal.title)
titles.append(journal.abbreviated_title)
titles.extend(x['title'] for x in journal_titles.load(data['selected_journal_code']).get('should', []) if x['title'] not in titles)
data = request.stats.impact_factor_chart(data['selected_journal_code'], data['selected_collection_code'], titles, py_range=data['py_range'])
return request.chartsconfig.bibliometrics_impact_factor(data)
@view_config(route_name='bibliometrics_journal_received_self_and_granted_citation_chart', request_method='GET', renderer='jsonp')
@base_data_manager
def bibliometrics_journal_received_self_and_granted_citation_chart(request):
data = request.data_manager
titles = request.GET.get('titles', None)
titles = titles.split('||') if titles else []
if data['selected_journal_code']:
journal = request.stats.articlemeta.journal(code=data['selected_journal_code'])
titles.append(journal.title)
titles.append(journal.abbreviated_title)
titles.extend(x['title'] for x in journal_titles.load(data['selected_journal_code']).get('should', []) if x['title'] not in titles)
data = request.stats.received_self_and_granted_citation_chart(data['selected_journal_code'], data['selected_collection_code'], titles, py_range=data['py_range'])
return request.chartsconfig.bibliometrics_journal_received_self_and_granted_citation_chart(data)
@view_config(route_name='publication_article_references', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_references(request):
data = request.data_manager
chart_data = request.stats.publication.general('article', 'citations', data['selected_code'], data['selected_collection_code'], py_range=data['py_range'], sa_scope=data['sa_scope'], la_scope=data['la_scope'], size=40, sort_term='asc')
return request.chartsconfig.publication_article_references(chart_data)
@view_config(route_name='publication_article_authors', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_authors(request):
data = request.data_manager
chart_data = request.stats.publication.general('article', 'authors', data['selected_code'], data['selected_collection_code'], py_range=data['py_range'], sa_scope=data['sa_scope'], la_scope=data['la_scope'], size=0, sort_term='asc')
return request.chartsconfig.publication_article_authors(chart_data)
@view_config(route_name='publication_article_affiliations_map', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_affiliations_map(request):
data = request.data_manager
chart_data = request.stats.publication.general('article', 'aff_countries', data['selected_code'], data['selected_collection_code'], py_range=data['py_range'], sa_scope=data['sa_scope'], la_scope=data['la_scope'])
return request.chartsconfig.publication_article_affiliations_map(chart_data)
@view_config(route_name='publication_article_affiliations', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_affiliations(request):
data = request.data_manager
chart_data = request.stats.publication.general('article', 'aff_countries', data['selected_code'], data['selected_collection_code'], py_range=data['py_range'], sa_scope=data['sa_scope'], la_scope=data['la_scope'], size=20)
return request.chartsconfig.publication_article_affiliations(chart_data)
@view_config(route_name='publication_article_affiliations_publication_year', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_affiliations_publication_year(request):
data = request.data_manager
chart_data = request.stats.publication.affiliations_by_publication_year(data['selected_code'], data['selected_collection_code'], data['py_range'], data['sa_scope'], data['la_scope'])
return request.chartsconfig.publication_article_affiliations_by_publication_year(chart_data)
@view_config(route_name='publication_article_year', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_year(request):
data = request.data_manager
data_chart = request.stats.publication.general('article', 'publication_year', data['selected_code'], data['selected_collection_code'], py_range=data['py_range'], sa_scope=data['sa_scope'], la_scope=data['la_scope'], size=0, sort_term='desc')
return request.chartsconfig.publication_article_year(data_chart)
@view_config(route_name='publication_article_languages', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_languages(request):
data = request.data_manager
data_chart = request.stats.publication.general('article', 'languages', data['selected_code'], data['selected_collection_code'], py_range=data['py_range'], sa_scope=data['sa_scope'], la_scope=data['la_scope'])
return request.chartsconfig.publication_article_languages(data_chart)
@view_config(route_name='publication_article_languages_publication_year', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_languages_publication_year(request):
data = request.data_manager
data_chart = request.stats.publication.languages_by_publication_year(data['selected_code'], data['selected_collection_code'], data['py_range'], data['sa_scope'], la_scope=data['la_scope'])
return request.chartsconfig.publication_article_languages_by_publication_year(data_chart)
@view_config(route_name='publication_journal_status', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_journal_status(request):
data = request.data_manager
result = request.stats.publication.general('journal', 'status', data['selected_code'], data['selected_collection_code'], sa_scope=data['sa_scope'])
return request.chartsconfig.publication_journal_status(result)
@view_config(route_name='publication_journal_status_detailde', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_journal_status_detailde(request):
data = request.data_manager
return request.stats.publication.journals_status_detailde(data['selected_collection_code'])
@view_config(route_name='publication_journal_year', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_journal_year(request):
data = request.data_manager
data_chart = request.stats.publication.general('journal', 'included_at_year', data['selected_code'], data['selected_collection_code'], sa_scope=data['sa_scope'], size=0, sort_term='asc')
return request.chartsconfig.publication_journal_year(data_chart)
@view_config(route_name='publication_article_citable_documents', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_citable_documents(request):
data = request.data_manager
data_chart = request.stats.publication.citable_documents(data['selected_code'], data['selected_collection_code'], py_range=data['py_range'])
return request.chartsconfig.publication_article_citable_documents(data_chart)
@view_config(route_name='publication_article_subject_areas', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_subject_areas(request):
data = request.data_manager
data_chart = request.stats.publication.general('article', 'subject_areas', data['selected_code'], data['selected_collection_code'], py_range=data['py_range'], sa_scope=data['sa_scope'], la_scope=data['la_scope'])
return request.chartsconfig.publication_article_subject_areas(data_chart)
@view_config(route_name='publication_article_subject_areas_publication_year', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_subject_areas_publication_year(request):
data = request.data_manager
data_chart = request.stats.publication.subject_areas_by_publication_year(data['selected_code'], data['selected_collection_code'], data['py_range'], data['sa_scope'], la_scope=data['la_scope'])
return request.chartsconfig.publication_article_subject_areas_by_publication_year(data_chart)
@view_config(route_name='publication_article_document_type', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_document_type(request):
data = request.data_manager
data_chart = request.stats.publication.general('article', 'document_type', data['selected_code'], data['selected_collection_code'], py_range=data['py_range'], sa_scope=data['sa_scope'], la_scope=data['la_scope'])
return request.chartsconfig.publication_article_document_type(data_chart)
@view_config(route_name='publication_article_document_type_publication_year', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_document_type_publication_year(request):
data = request.data_manager
data_chart = request.stats.publication.document_type_by_publication_year(data['selected_code'], data['selected_collection_code'], data['py_range'], data['sa_scope'], la_scope=data['la_scope'])
return request.chartsconfig.publication_article_document_type_by_publication_year(data_chart)
@view_config(route_name='publication_article_licenses_publication_year', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_licenses_publication_year(request):
data = request.data_manager
data_chart = request.stats.publication.lincenses_by_publication_year(data['selected_code'], data['selected_collection_code'], data['py_range'], data['sa_scope'], data['la_scope'])
return request.chartsconfig.publication_article_licenses_by_publication_year(data_chart)
@view_config(route_name='publication_article_licenses', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_article_licenses(request):
data = request.data_manager
data_chart = request.stats.publication.general('article', 'license', data['selected_code'], data['selected_collection_code'], py_range=data['py_range'], sa_scope=data['sa_scope'])
return request.chartsconfig.publication_article_licenses(data_chart)
@view_config(route_name='publication_journal_subject_areas', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_journal_subject_areas(request):
data = request.data_manager
data_chart = request.stats.publication.general('journal', 'subject_areas', data['selected_code'], data['selected_collection_code'], sa_scope=data['sa_scope'])
return request.chartsconfig.publication_journal_subject_areas(data_chart)
@view_config(route_name='publication_journal_licenses', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_journal_licenses(request):
data = request.data_manager
data_chart = request.stats.publication.general('journal', 'license', data['selected_code'], data['selected_collection_code'], sa_scope=data['sa_scope'])
return request.chartsconfig.publication_journal_licenses(data_chart)
@view_config(route_name='publication_size', request_method='GET', renderer='jsonp')
@base_data_manager
def publication_size(request):
data = request.data_manager
field = request.GET.get('field', None)
data = request.stats.publication.collection_size(data['selected_code'], data['selected_collection_code'], field, data['py_range'], data['sa_scope'], data['la_scope'])
return data
@view_config(route_name='accesses_bymonthandyear', request_method='GET', renderer='jsonp')
@base_data_manager
def bymonthandyear(request):
data = request.data_manager
range_start = request.GET.get('range_start', None)
range_end = request.GET.get('range_end', None)
data_chart = request.stats.access.access_by_month_and_year(data['selected_code'], data['selected_collection_code'], data['py_range'], data['sa_scope'], data['la_scope'], range_start, range_end)
return request.chartsconfig.bymonthandyear(data_chart)
@view_config(route_name='accesses_bydocumenttype', request_method='GET', renderer='jsonp')
@base_data_manager
def documenttype(request):
data = request.data_manager
range_start = request.GET.get('range_start', None)
range_end = request.GET.get('range_end', None)
data_chart = request.stats.access.access_by_document_type(data['selected_code'], data['selected_collection_code'], data['py_range'], data['sa_scope'], data['la_scope'], range_start, range_end)
return request.chartsconfig.documenttype(data_chart)
@view_config(route_name='accesses_lifetime', request_method='GET', renderer='jsonp')
@base_data_manager
def lifetime(request):
data = request.data_manager
range_start = request.GET.get('range_start', None)
range_end = request.GET.get('range_end', None)
data_chart = request.stats.access.access_lifetime(data['selected_code'], data['selected_collection_code'], data['py_range'], data['sa_scope'], data['la_scope'], range_start, range_end)
return request.chartsconfig.lifetime(data_chart)
@view_config(route_name='accesses_heat', request_method='GET', renderer='jsonp')
@base_data_manager
def accesses_heat(request):
data = request.data_manager
range_start = request.GET.get('range_start', None)
range_end = request.GET.get('range_end', None)
data = request.stats.access.access_heat(data['selected_code'], data['selected_collection_code'], data['py_range'], data['sa_scope'], data['la_scope'], range_start, range_end)
return request.chartsconfig.access_heat(data)
|
|
'''
Implementation of Gaussian Mixture Models.
Author : Aleyna Kara(@karalleyna)
'''
import superimport
import jax.numpy as jnp
from jax import vmap, value_and_grad, jit
from jax.lax import scan
from jax.random import PRNGKey, uniform, split, permutation
from jax.nn import softmax
import distrax
from distrax._src.utils import jittable
import tensorflow_probability as tfp
from mixture_lib import MixtureSameFamily
import matplotlib.pyplot as plt
import itertools
from jax.experimental import optimizers
opt_init, opt_update, get_params = optimizers.adam(5e-2)
class GMM(jittable.Jittable):
def __init__(self, mixing_coeffs, means, covariances):
'''
Initializes Gaussian Mixture Model
Parameters
----------
mixing_coeffs : array
means : array
covariances : array
'''
self.model = (mixing_coeffs, means, covariances)
@property
def mixing_coeffs(self):
return self._model.mixture_distribution.probs
@property
def means(self):
return self._model.components_distribution.loc
@property
def covariances(self):
return self._model.components_distribution.covariance()
@property
def model(self):
return self._model
@model.setter
def model(self, value):
mixing_coeffs, means, covariances = value
components_distribution = distrax.as_distribution(
tfp.substrates.jax.distributions.MultivariateNormalFullCovariance(loc=means,
covariance_matrix=covariances,
validate_args=True))
self._model = MixtureSameFamily(mixture_distribution=distrax.Categorical(probs=mixing_coeffs),
components_distribution=components_distribution)
def expected_log_likelihood(self, observations):
'''
Calculates expected log likelihood
Parameters
----------
observations : array(N, seq_len)
Dataset
Returns
-------
* float
Expected log likelihood
'''
return jnp.sum(self._model.log_prob(observations))
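# Note: for a mixture model this is
#   sum_n log( sum_k pi_k * N(x_n | mu_k, Sigma_k) ),
# where log_prob() above computes the inner log-sum per observation and
# jnp.sum adds them over the dataset.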
def responsibility(self, observations, comp_dist_idx):
'''
Computes responsibilities, or posterior probability p(z_{comp_dist_idx}|x)
Parameters
----------
observations : array(N, seq_len)
Dataset
comp_dist_idx : int
Index which specifies the specific mixing distribution component
Returns
-------
* array
Responsibilities
'''
return self._model.posterior_marginal(observations).prob(comp_dist_idx)
def responsibilities(self, observations):
'''
Computes responsibilities, or posterior probability p(z|x)
Parameters
----------
observations : array(N, seq_len)
Dataset
Returns
-------
* array
Responsibilities
'''
return self.model.posterior_marginal(observations).probs
def _m_step(self, observations, S, eta):
'''
Maximization step
Parameters
----------
observations : array(N, seq_len)
Dataset
S : array
A prior p(theta) is defined over the parameters to find MAP solutions
eta : int
Returns
-------
* array
Mixing coefficients
* array
Means
* array
Covariances
'''
n_obs, n_comp = observations.shape
def m_step_per_gaussian(responsibility):
effective_prob = responsibility.sum()
mean = (responsibility[:, None] * observations).sum(axis=0) / effective_prob
centralized_observations = (observations - mean)
covariance = responsibility[:, None, None] * jnp.einsum("ij, ik->ikj",
centralized_observations,
centralized_observations)
covariance = covariance.sum(axis=0)
if eta is None:
covariance = covariance / effective_prob
else:
covariance = (S + covariance) / (eta + effective_prob + n_comp + 2)
mixing_coeff = effective_prob / n_obs
return (mixing_coeff, mean, covariance)
mixing_coeffs, means, covariances = vmap(m_step_per_gaussian, in_axes=(1))(self.responsibilities(observations))
return mixing_coeffs, means, covariances
def _add_final_values_to_history(self, history, observations):
'''
        Appends the final values of the log likelihood, mixing coefficients, means, covariances and responsibilities
        to the history
Parameters
----------
history : tuple
            Consists of the values of the log likelihood, mixing coefficients, means, covariances and responsibilities
            recorded per iteration
observations : array(N, seq_len)
Dataset
Returns
-------
* array
Mean loss values found per iteration
* array
Mixing coefficients found per iteration
* array
Means of Gaussian distribution found per iteration
* array
Covariances of Gaussian distribution found per iteration
* array
            Responsibilities found per iteration
'''
ll_hist, mix_dist_probs_hist, comp_dist_loc_hist, comp_dist_cov_hist, responsibility_hist = history
ll_hist = jnp.append(ll_hist, self.expected_log_likelihood(observations))
mix_dist_probs_hist = jnp.vstack([mix_dist_probs_hist, self.mixing_coeffs])
comp_dist_loc_hist = jnp.vstack([comp_dist_loc_hist, self.means[None, :]])
comp_dist_cov_hist = jnp.vstack([comp_dist_cov_hist, self.covariances[None, :]])
responsibility_hist = jnp.vstack([responsibility_hist, jnp.array([self.responsibility(observations, 0)])])
history = (ll_hist, mix_dist_probs_hist, comp_dist_loc_hist, comp_dist_cov_hist, responsibility_hist)
return history
def fit_em(self, observations, num_of_iters, S=None, eta=None):
'''
Fits the model using em algorithm.
Parameters
----------
observations : array(N, seq_len)
Dataset
        num_of_iters : int
            The number of EM iterations to run
        S : array
            Prior scatter matrix added to the weighted scatter of the data in the MAP update
            of the covariances
        eta : float
            Strength of the covariance prior; when None, the maximum likelihood update is
            used instead of the MAP update
Returns
-------
* array
Mean loss values found per iteration
* array
Mixing coefficients found per iteration
* array
Means of Gaussian distribution found per iteration
* array
Covariances of Gaussian distribution found per iteration
* array
            Responsibilities found per iteration
'''
initial_mixing_coeffs = self.mixing_coeffs
initial_means = self.means
initial_covariances = self.covariances
iterations = jnp.arange(num_of_iters)
def train_step(params, i):
self.model = params
log_likelihood = self.expected_log_likelihood(observations)
responsibility = self.responsibility(observations, 0)
mixing_coeffs, means, covariances = self._m_step(observations, S, eta)
return (mixing_coeffs, means, covariances), (log_likelihood, *params, responsibility)
initial_params = (initial_mixing_coeffs,
initial_means,
initial_covariances)
final_params, history = scan(train_step, initial_params, iterations)
self.model = final_params
history = self._add_final_values_to_history(history, observations)
return history
def _make_minibatches(self, observations, batch_size, rng_key):
'''
        Creates minibatches consisting of random permutations of the
        given observation sequences
Parameters
----------
observations : array(N, seq_len)
Dataset
batch_size : int
The number of observation sequences that will be included in
each minibatch
rng_key : array
Random key of shape (2,) and dtype uint32
Returns
-------
* array(num_batches, batch_size, max_len)
Minibatches
'''
num_train = len(observations)
perm = permutation(rng_key, num_train)
def create_mini_batch(batch_idx):
return observations[batch_idx]
        num_batches = num_train // batch_size
        # Drop any trailing observations that do not fill a complete batch; otherwise the
        # reshape fails whenever batch_size does not divide the number of observations.
        batch_indices = perm[:num_batches * batch_size].reshape((num_batches, -1))
minibatches = vmap(create_mini_batch)(batch_indices)
return minibatches
def _transform_to_covariance_matrix(self, sq_mat):
'''
        Takes the upper triangular part U of the given matrix and returns U^T U, which is a
        symmetric positive semi-definite matrix and therefore a valid covariance matrix
https://ericmjl.github.io/notes/stats-ml/estimating-a-multivariate-gaussians-parameters-by-gradient-descent/
Parameters
----------
sq_mat : array
Square matrix
Returns
-------
* array
'''
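        # Worked example (illustrative): for sq_mat = [[2., 1.], [5., 3.]], jnp.triu gives
        # U = [[2., 1.], [0., 3.]], so U^T U = [[4., 2.], [2., 10.]], which is symmetric
        # positive semi-definite and hence usable as a covariance matrix.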
U = jnp.triu(sq_mat)
U_T = jnp.transpose(U)
return jnp.dot(U_T, U)
def loss_fn(self, params, batch):
"""
        Calculates the mean negative log likelihood of the batch.
Parameters
----------
params : tuple
            Consists of the mixing coefficients' logits, the means, and the untransformed
            covariance factors of the Gaussian components, respectively.
batch : array
The subset of observations
Returns
-------
        * float
Negative log likelihood
"""
        mixing_coeffs, means, untransformed_cov = params
        cov_matrix = vmap(self._transform_to_covariance_matrix)(untransformed_cov)
self.model = (softmax(mixing_coeffs), means, cov_matrix)
return -self.expected_log_likelihood(batch) / len(batch)
def update(self, i, opt_state, batch):
'''
        Updates the optimizer state after computing the gradient of the loss
        Parameters
        ----------
        i : int
The current iteration
opt_state : jax.experimental.optimizers.OptimizerState
The current state of the parameters
batch : array
The subset of observations
Returns
-------
* jax.experimental.optimizers.OptimizerState
The updated state
        * float
Loss value calculated on the current batch
'''
params = get_params(opt_state)
loss, grads = value_and_grad(self.loss_fn)(params, batch)
return opt_update(i, grads, opt_state), loss
def fit_sgd(self, observations, batch_size, rng_key=None, optimizer=None, num_epochs=3):
'''
        Fits the parameters of the Gaussian Mixture Model using stochastic gradient descent
        with the given hyperparameters.
Parameters
----------
observations : array
            The observations which the Gaussian Mixture Model is trained on
batch_size : int
The size of the batch
rng_key : array
Random key of shape (2,) and dtype uint32
optimizer : jax.experimental.optimizers.Optimizer
Optimizer to be used
        num_epochs : int
            The number of epochs to train for
Returns
-------
* array
Mean loss values found per epoch
* array
Mixing coefficients found per epoch
* array
Means of Gaussian distribution found per epoch
* array
Covariances of Gaussian distribution found per epoch
* array
            Responsibilities found per epoch
'''
global opt_init, opt_update, get_params
if rng_key is None:
rng_key = PRNGKey(0)
if optimizer is not None:
opt_init, opt_update, get_params = optimizer
opt_state = opt_init((softmax(self.mixing_coeffs), self.means, self.covariances))
itercount = itertools.count()
def epoch_step(opt_state, key):
def train_step(opt_state, batch):
opt_state, loss = self.update(next(itercount), opt_state, batch)
return opt_state, loss
batches = self._make_minibatches(observations, batch_size, key)
opt_state, losses = scan(train_step, opt_state, batches)
params = get_params(opt_state)
            mixing_coeffs, means, untransformed_cov = params
            cov_matrix = vmap(self._transform_to_covariance_matrix)(untransformed_cov)
self.model = (softmax(mixing_coeffs), means, cov_matrix)
responsibilities = self.responsibilities(observations)
return opt_state, (losses.mean(), *params, responsibilities)
epochs = split(rng_key, num_epochs)
opt_state, history = scan(epoch_step, opt_state, epochs)
params = get_params(opt_state)
        mixing_coeffs, means, untransformed_cov = params
        cov_matrix = vmap(self._transform_to_covariance_matrix)(untransformed_cov)
self.model = (softmax(mixing_coeffs), means, cov_matrix)
return history
def plot(self, observations, means=None, covariances=None, responsibilities=None,
step=0.01, cmap="viridis", colors=None, ax=None):
'''
Plots Gaussian Mixture Model.
Parameters
----------
observations : array
Dataset
means : array
covariances : array
responsibilities : array
        step : float
            Step size of the grid for the density contour.
        cmap : str
            Name of the matplotlib colormap used for the scatter plot.
        ax : matplotlib.axes.Axes
            Axes to draw on; a new figure and axes are created when None.
'''
means = self.means if means is None else means
covariances = self.covariances if covariances is None else covariances
responsibilities = self.model.posterior_marginal(observations).probs if responsibilities is None \
else responsibilities
colors = uniform(PRNGKey(100), (means.shape[0], 3)) if colors is None else colors
ax = ax if ax is not None else plt.subplots()[1]
min_x, min_y = observations.min(axis=0)
max_x, max_y = observations.max(axis=0)
xs, ys = jnp.meshgrid(jnp.arange(min_x, max_x, step), jnp.arange(min_y, max_y, step))
grid = jnp.vstack([xs.ravel(), ys.ravel()]).T
def multivariate_normal(mean, cov):
'''
Initializes multivariate normal distribution with the given mean and covariance.
            Note that the pdf has the same precision as its parameters' dtype.
'''
return tfp.substrates.jax.distributions.MultivariateNormalFullCovariance(loc=mean,
covariance_matrix=cov)
        for (mean, cov), color in zip(zip(means, covariances), colors):
            normal_dist = multivariate_normal(mean, cov)
density = normal_dist.prob(grid).reshape(xs.shape)
ax.contour(xs, ys, density, levels=1, colors=color, linewidths=5)
ax.scatter(*observations.T, alpha=0.7, c=responsibilities, cmap=cmap, s=10)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
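# ----------------------------------------------------------------------------
# Usage sketch (illustrative only; the data, seeds, initial parameters and the
# iteration count below are assumptions added for demonstration, not values
# from the original author).
if __name__ == "__main__":
    key1, key2 = split(PRNGKey(0))
    # Two well-separated 2-D blobs as a toy dataset.
    blob_a = uniform(key1, (100, 2), minval=-1.0, maxval=0.0)
    blob_b = uniform(key2, (100, 2), minval=2.0, maxval=3.0)
    observations = jnp.vstack([blob_a, blob_b])
    # Equal mixing weights, rough initial means and identity covariances.
    gmm = GMM(jnp.array([0.5, 0.5]),
              jnp.array([[-0.5, -0.5], [2.5, 2.5]]),
              jnp.stack([jnp.eye(2), jnp.eye(2)]))
    ll_hist, *_ = gmm.fit_em(observations, num_of_iters=10)
    print("Expected log likelihood across EM iterations:", ll_hist)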
|
|
from __future__ import unicode_literals
from django_shares.constants import Status
from django_shares.models import Share
from django_testing.testcases.users import SingleUserTestCase
from django_testing.user_utils import create_user
from test_models.models import TestSharedObjectModel
from test_models.models import TestSharedObjectModel2
class ShareTests(SingleUserTestCase):
def setUp(self):
"""Run once per test."""
super(ShareTests, self).setUp()
self.shared_user = create_user()
def tearDown(self):
super(ShareTests, self).tearDown()
self.shared_user.delete()
def test_add_for_user(self):
"""Share a user object with a another user."""
share = Share.objects.create_for_user(created_user=self.user,
for_user=self.user,
shared_object=self.shared_user)
self.assertEqual(share.shared_object, self.shared_user)
def test_create_for_non_user(self):
"""Test for creating an object share with with an unknown user."""
first_name = 'Jimmy'
last_name = 'Buffet'
email = '[email protected]'
message = 'Share with me.'
status = Status.PENDING
share = Share.objects.create_for_non_user(created_user=self.user,
shared_object=self.shared_user,
first_name=first_name,
last_name=last_name,
email=email,
message=message,
status=status)
self.assertEqual(share.first_name, first_name)
self.assertEqual(share.last_name, last_name)
self.assertEqual(share.email, email)
self.assertEqual(share.status, status)
self.assertEqual(share.message, message)
def test_get_for_user(self):
"""Get shares for user."""
user = create_user()
share = Share.objects.create_for_user(created_user=self.user,
for_user=user,
shared_object=self.shared_user)
shares = Share.objects.get_for_user(user=user)
self.assertEqual(len(shares), 1)
self.assertEqual(shares[0], share)
def test_get_for_user_id(self):
"""Get shares for a user id."""
user = create_user()
share = Share.objects.create_for_user(created_user=self.user,
for_user=user,
shared_object=self.shared_user)
shares = Share.objects.get_for_user_id(user_id=user.id)
self.assertEqual(len(shares), 1)
self.assertEqual(shares[0], share)
def test_get_email(self):
"""Get shares by email."""
user = create_user()
share = Share.objects.create_for_user(created_user=self.user,
for_user=user,
shared_object=self.shared_user)
shares = Share.objects.get_by_email(email=user.email)
self.assertEqual(len(shares), 1)
self.assertEqual(shares[0], share)
def test_get_by_token(self):
"""Get a share by token."""
share = Share.objects.create_for_user(created_user=self.user,
for_user=self.user,
shared_object=self.shared_user)
share_db = Share.objects.get_by_token(token=share.token)
self.assertEqual(share, share_db)
def test_get_by_shared_object(self):
"""Get shares for a shared object."""
shared_object = create_user()
share = Share.objects.create_for_user(created_user=self.user,
for_user=self.user,
shared_object=shared_object)
shares = Share.objects.get_by_shared_object(obj=shared_object)
self.assertEqual(len(shares), 1)
self.assertEqual(shares[0], share)
def test_get_by_shared_objects(self):
"""Get shares for a shared objects."""
obj_1 = TestSharedObjectModel.objects.create()
obj_2 = TestSharedObjectModel2.objects.create()
user_2 = create_user()
share_user_1_obj_1 = Share.objects.create_for_user(
created_user=self.user,
for_user=self.user,
shared_object=obj_1
)
share_user_1_obj_2 = Share.objects.create_for_user(
created_user=self.user,
for_user=self.user,
shared_object=obj_2
)
share_user_2_obj_1 = Share.objects.create_for_user(
created_user=user_2,
for_user=user_2,
shared_object=obj_1
)
share_user_2_obj_2 = Share.objects.create_for_user(
created_user=user_2,
for_user=user_2,
shared_object=obj_2
)
shares = list(Share.objects.get_by_shared_objects(objs=[obj_1, obj_2]))
self.assertEqual(len(shares), 4)
self.assertTrue(share_user_1_obj_1 in shares)
self.assertTrue(share_user_1_obj_2 in shares)
self.assertTrue(share_user_2_obj_1 in shares)
self.assertTrue(share_user_2_obj_2 in shares)
shares = Share.objects.get_by_shared_objects(
objs=[obj_1, obj_2],
for_user=user_2
)
self.assertEqual(len(shares), 2)
self.assertTrue(share_user_2_obj_1 in shares)
self.assertTrue(share_user_2_obj_2 in shares)
def test_accept_share(self):
"""Test for accepting share."""
share = Share.objects.create_for_user(created_user=self.user,
for_user=self.user,
shared_object=self.shared_user)
self.assertEqual(share.status, Status.PENDING)
first_name = 'Test first name'
share.accept(first_name=first_name)
self.assertEqual(share.status, Status.ACCEPTED)
self.assertEqual(share.first_name, first_name)
def test_decline_share(self):
"""Test for accepting share."""
share = Share.objects.create_for_user(created_user=self.user,
for_user=self.user,
shared_object=self.shared_user)
share.decline()
self.assertEqual(share.status, Status.DECLINED)
def test_inactivate(self):
"""Test for inactivating a share."""
share = Share.objects.create_for_user(created_user=self.user,
for_user=self.user,
shared_object=self.shared_user)
share.inactivate()
self.assertEqual(share.status, Status.INACTIVE)
def test_is_accepted(self):
"""Test the is_accepted method."""
share = Share(status=Status.ACCEPTED)
self.assertTrue(share.is_accepted())
def test_is_pending(self):
"""Test the is_pending method."""
share = Share(status=Status.PENDING)
self.assertTrue(share.is_pending())
def test_is_declined(self):
"""Test the is_declined method."""
share = Share(status=Status.DECLINED)
self.assertTrue(share.is_declined())
def test_copy(self):
"""Test for inactivating a share."""
share = Share.objects.create_for_user(created_user=self.user,
for_user=self.user,
shared_object=self.shared_user)
share_copy = share.copy()
self.assertNotEqual(share.token, share_copy.token)
def test_get_full_name_for_user(self):
"""Test get full name for a share for existing user."""
first_name = 'John'
last_name = 'Doe'
user_2 = create_user(first_name=first_name, last_name=last_name)
share = Share.objects.create_for_user(created_user=user_2,
for_user=user_2,
shared_object=self.shared_user)
self.assertEqual(share.get_full_name(), '{0} {1}'.format(first_name,
last_name))
def test_get_full_name_for_non_user(self):
"""Test get full name for a share for non user."""
first_name = 'John'
last_name = 'Doe'
share = Share.objects.create_for_non_user(created_user=self.user,
email='[email protected]',
first_name=first_name,
last_name=last_name,
shared_object=self.shared_user)
self.assertEqual(share.get_full_name(), '{0} {1}'.format(first_name,
last_name))
def test_get_first_name(self):
"""Test get first name for a share."""
first_name = 'John'
share = Share(first_name=first_name)
self.assertEqual(share.get_first_name(), first_name)
def test_get_last_name(self):
"""Test get last name for a share."""
last_name = 'Doe'
share = Share(last_name=last_name)
self.assertEqual(share.get_last_name(), last_name)
def test_create_many(self):
"""Test for creating many objects at once. This is different from
bulk_create. See ``create_many`` doc.
"""
user = create_user()
obj_1 = TestSharedObjectModel.objects.create()
obj_2 = TestSharedObjectModel.objects.create()
obj_3 = TestSharedObjectModel.objects.create()
objs = [obj_1, obj_2, obj_3]
# There shouldn't be any shares here.
self.assertEqual(obj_1.shares.count(), 0)
self.assertEqual(obj_2.shares.count(), 0)
self.assertEqual(obj_3.shares.count(), 0)
ShareClass = TestSharedObjectModel.get_share_class()
shares = ShareClass.objects.create_many(objs=objs,
for_user=user,
created_user=user,
status=Status.ACCEPTED)
self.assertEqual(obj_1.shares.count(), 1)
self.assertEqual(obj_2.shares.count(), 1)
self.assertEqual(obj_3.shares.count(), 1)
def test_create_many_prevent_duplicate_share(self):
"""Test the ``create_many`` method that ensure no duplicate shares are
created for a single user.
"""
user = create_user()
obj_1 = TestSharedObjectModel.objects.create()
obj_1.shares.create_for_user(for_user=user,
created_user=user,
status=Status.ACCEPTED)
self.assertEqual(obj_1.shares.count(), 1)
obj_2 = TestSharedObjectModel.objects.create()
obj_3 = TestSharedObjectModel.objects.create()
objs = [obj_1, obj_2, obj_3]
ShareClass = TestSharedObjectModel.get_share_class()
shares = ShareClass.objects.create_many(objs=objs,
for_user=user,
created_user=user,
status=Status.ACCEPTED)
self.assertEqual(obj_1.shares.count(), 1)
self.assertEqual(obj_2.shares.count(), 1)
self.assertEqual(obj_3.shares.count(), 1)
|
|
import re
import sys
import uuid
import copy
import types
import datetime
import RefinerUtils
from pandajedi.jedicore import Interaction
from pandajedi.jedicore import JediException
from pandajedi.jedicore.JediTaskSpec import JediTaskSpec
from pandajedi.jedicore.JediDatasetSpec import JediDatasetSpec
from pandajedi.jedicore.JediFileSpec import JediFileSpec
from pandaserver.taskbuffer import EventServiceUtils
# base class for task refiners
class TaskRefinerBase (object):
# constructor
def __init__(self,taskBufferIF,ddmIF):
self.ddmIF = ddmIF
self.taskBufferIF = taskBufferIF
self.initializeRefiner(None)
self.refresh()
# refresh
def refresh(self):
self.siteMapper = self.taskBufferIF.getSiteMapper()
# initialize
def initializeRefiner(self,tmpLog):
self.taskSpec = None
self.inMasterDatasetSpec = []
self.inSecDatasetSpecList = []
self.outDatasetSpecList = []
self.outputTemplateMap = {}
self.jobParamsTemplate = None
self.cloudName = None
self.siteName = None
self.tmpLog = tmpLog
self.updatedTaskParams = None
self.unmergeMasterDatasetSpec = {}
self.unmergeDatasetSpecMap = {}
self.oldTaskStatus = None
self.unknownDatasetList = []
# set jobParamsTemplate
def setJobParamsTemplate(self,jobParamsTemplate):
self.jobParamsTemplate = jobParamsTemplate
# extract common parameters
def extractCommon(self,jediTaskID,taskParamMap,workQueueMapper,splitRule):
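        # Illustrative sketch (the values are invented, not taken from the original code):
        # the mandatory entries of taskParamMap that are read unconditionally below look
        # roughly like
        #   {'taskName': 'user.someuser.mytask', 'userName': 'someuser', 'vo': 'atlas',
        #    'prodSourceLabel': 'user', 'taskPriority': 1000, 'architecture': '',
        #    'transUses': 'Atlas-21.0.15', 'transHome': 'AnalysisTransforms',
        #    'transPath': 'http://example.org/trf', 'processingType': 'panda-client',
        #    'taskType': 'anal'}
        # while the many optional keys are handled one by one in the body of this method.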
# make task spec
taskSpec = JediTaskSpec()
taskSpec.jediTaskID = jediTaskID
taskSpec.taskName = taskParamMap['taskName']
taskSpec.userName = taskParamMap['userName']
taskSpec.vo = taskParamMap['vo']
taskSpec.prodSourceLabel = taskParamMap['prodSourceLabel']
taskSpec.taskPriority = taskParamMap['taskPriority']
taskSpec.currentPriority = taskSpec.taskPriority
taskSpec.architecture = taskParamMap['architecture']
taskSpec.transUses = taskParamMap['transUses']
taskSpec.transHome = taskParamMap['transHome']
taskSpec.transPath = taskParamMap['transPath']
taskSpec.processingType = taskParamMap['processingType']
taskSpec.taskType = taskParamMap['taskType']
taskSpec.splitRule = splitRule
taskSpec.startTime = datetime.datetime.utcnow()
if taskParamMap.has_key('workingGroup'):
taskSpec.workingGroup = taskParamMap['workingGroup']
if taskParamMap.has_key('countryGroup'):
taskSpec.countryGroup = taskParamMap['countryGroup']
if taskParamMap.has_key('ticketID'):
taskSpec.ticketID = taskParamMap['ticketID']
if taskParamMap.has_key('ticketSystemType'):
taskSpec.ticketSystemType = taskParamMap['ticketSystemType']
if taskParamMap.has_key('reqID'):
taskSpec.reqID = taskParamMap['reqID']
else:
taskSpec.reqID = jediTaskID
if taskParamMap.has_key('coreCount'):
taskSpec.coreCount = taskParamMap['coreCount']
else:
taskSpec.coreCount = 1
if taskParamMap.has_key('walltime'):
taskSpec.walltime = taskParamMap['walltime']
else:
taskSpec.walltime = 0
if not taskParamMap.has_key('walltimeUnit'):
# force to set NULL so that retried tasks get data from scouts again
taskSpec.forceUpdate('walltimeUnit')
if taskParamMap.has_key('outDiskCount'):
taskSpec.outDiskCount = taskParamMap['outDiskCount']
else:
taskSpec.outDiskCount = 0
if 'outDiskUnit' in taskParamMap:
taskSpec.outDiskUnit = taskParamMap['outDiskUnit']
if taskParamMap.has_key('workDiskCount'):
taskSpec.workDiskCount = taskParamMap['workDiskCount']
else:
taskSpec.workDiskCount = 0
if taskParamMap.has_key('workDiskUnit'):
taskSpec.workDiskUnit = taskParamMap['workDiskUnit']
if taskParamMap.has_key('ramCount'):
taskSpec.ramCount = taskParamMap['ramCount']
else:
taskSpec.ramCount = 0
if taskParamMap.has_key('ramUnit'):
taskSpec.ramUnit = taskParamMap['ramUnit']
if taskParamMap.has_key('baseRamCount'):
taskSpec.baseRamCount = taskParamMap['baseRamCount']
else:
taskSpec.baseRamCount = 0
# HS06 stuff
if 'cpuTimeUnit' in taskParamMap:
taskSpec.cpuTimeUnit = taskParamMap['cpuTimeUnit']
if 'cpuTime' in taskParamMap:
taskSpec.cpuTime = taskParamMap['cpuTime']
if 'cpuEfficiency' in taskParamMap:
taskSpec.cpuEfficiency = taskParamMap['cpuEfficiency']
else:
# 90% of cpu efficiency by default
taskSpec.cpuEfficiency = 90
if 'baseWalltime' in taskParamMap:
taskSpec.baseWalltime = taskParamMap['baseWalltime']
else:
# 10min of offset by default
taskSpec.baseWalltime = 10*60
# for merge
if 'mergeRamCount' in taskParamMap:
taskSpec.mergeRamCount = taskParamMap['mergeRamCount']
if 'mergeCoreCount' in taskParamMap:
taskSpec.mergeCoreCount = taskParamMap['mergeCoreCount']
# scout
if not taskParamMap.has_key('skipScout') and not taskSpec.isPostScout():
taskSpec.setUseScout(True)
# cloud
if taskParamMap.has_key('cloud'):
self.cloudName = taskParamMap['cloud']
taskSpec.cloud = self.cloudName
else:
# set dummy to force update
taskSpec.cloud = 'dummy'
taskSpec.cloud = None
# site
if taskParamMap.has_key('site'):
self.siteName = taskParamMap['site']
taskSpec.site = self.siteName
else:
# set dummy to force update
taskSpec.site = 'dummy'
taskSpec.site = None
# nucleus
if 'nucleus' in taskParamMap:
taskSpec.nucleus = taskParamMap['nucleus']
# preset some parameters for job cloning
if 'useJobCloning' in taskParamMap:
# set implicit parameters
if not 'nEventsPerWorker' in taskParamMap:
taskParamMap['nEventsPerWorker'] = 1
if not 'nSitesPerJob' in taskParamMap:
taskParamMap['nSitesPerJob'] = 2
if not 'nEsConsumers' in taskParamMap:
taskParamMap['nEsConsumers'] = taskParamMap['nSitesPerJob']
# event service flag
if 'useJobCloning' in taskParamMap:
taskSpec.eventService = 2
elif taskParamMap.has_key('nEventsPerWorker'):
taskSpec.eventService = 1
else:
taskSpec.eventService = 0
# ttcr: requested time to completion
if taskParamMap.has_key('ttcrTimestamp'):
try:
# get rid of the +00:00 timezone string and parse the timestamp
taskSpec.ttcRequested = datetime.datetime.strptime(taskParamMap['ttcrTimestamp'].split('+')[0], '%Y-%m-%d %H:%M:%S.%f')
except (IndexError, ValueError):
pass
# goal
if 'goal' in taskParamMap:
try:
taskSpec.goal = int(float(taskParamMap['goal'])*10)
if taskSpec.goal >= 1000:
taskSpec.goal = None
except:
pass
# campaign
if taskParamMap.has_key('campaign'):
taskSpec.campaign = taskParamMap['campaign']
# request type
if 'requestType' in taskParamMap:
taskSpec.requestType = taskParamMap['requestType']
self.taskSpec = taskSpec
# set split rule
if 'tgtNumEventsPerJob' in taskParamMap:
            # set nEventsPerJob to not respect file boundaries when nFilesPerJob is not used
if not 'nFilesPerJob' in taskParamMap:
self.setSplitRule(None,taskParamMap['tgtNumEventsPerJob'],JediTaskSpec.splitRuleToken['nEventsPerJob'])
self.setSplitRule(taskParamMap,'nFilesPerJob', JediTaskSpec.splitRuleToken['nFilesPerJob'])
self.setSplitRule(taskParamMap,'nEventsPerJob', JediTaskSpec.splitRuleToken['nEventsPerJob'])
self.setSplitRule(taskParamMap,'nGBPerJob', JediTaskSpec.splitRuleToken['nGBPerJob'])
self.setSplitRule(taskParamMap,'nMaxFilesPerJob', JediTaskSpec.splitRuleToken['nMaxFilesPerJob'])
self.setSplitRule(taskParamMap,'nEventsPerWorker', JediTaskSpec.splitRuleToken['nEventsPerWorker'])
self.setSplitRule(taskParamMap,'useLocalIO', JediTaskSpec.splitRuleToken['useLocalIO'])
self.setSplitRule(taskParamMap,'disableAutoRetry', JediTaskSpec.splitRuleToken['disableAutoRetry'])
self.setSplitRule(taskParamMap,'nEsConsumers', JediTaskSpec.splitRuleToken['nEsConsumers'])
self.setSplitRule(taskParamMap,'waitInput', JediTaskSpec.splitRuleToken['waitInput'])
self.setSplitRule(taskParamMap,'addNthFieldToLFN', JediTaskSpec.splitRuleToken['addNthFieldToLFN'])
self.setSplitRule(taskParamMap,'scoutSuccessRate', JediTaskSpec.splitRuleToken['scoutSuccessRate'])
self.setSplitRule(taskParamMap,'t1Weight', JediTaskSpec.splitRuleToken['t1Weight'])
self.setSplitRule(taskParamMap,'maxAttemptES', JediTaskSpec.splitRuleToken['maxAttemptES'])
self.setSplitRule(taskParamMap,'nSitesPerJob', JediTaskSpec.splitRuleToken['nSitesPerJob'])
self.setSplitRule(taskParamMap,'nJumboJobs', JediTaskSpec.splitRuleToken['nJumboJobs'])
self.setSplitRule(taskParamMap,'nEventsPerMergeJob', JediTaskSpec.splitRuleToken['nEventsPerMergeJob'])
self.setSplitRule(taskParamMap,'nFilesPerMergeJob', JediTaskSpec.splitRuleToken['nFilesPerMergeJob'])
self.setSplitRule(taskParamMap,'nGBPerMergeJob', JediTaskSpec.splitRuleToken['nGBPerMergeJob'])
self.setSplitRule(taskParamMap,'nMaxFilesPerMergeJob', JediTaskSpec.splitRuleToken['nMaxFilesPerMergeJob'])
if taskParamMap.has_key('loadXML'):
self.setSplitRule(None,3,JediTaskSpec.splitRuleToken['loadXML'])
self.setSplitRule(None,4,JediTaskSpec.splitRuleToken['groupBoundaryID'])
if taskParamMap.has_key('pfnList'):
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['pfnList'])
if taskParamMap.has_key('noWaitParent') and taskParamMap['noWaitParent'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['noWaitParent'])
if 'respectLB' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['respectLB'])
if taskParamMap.has_key('reuseSecOnDemand'):
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['reuseSecOnDemand'])
if 'ddmBackEnd' in taskParamMap:
self.taskSpec.setDdmBackEnd(taskParamMap['ddmBackEnd'])
if 'disableReassign' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['disableReassign'])
if 'allowPartialFinish' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['allowPartialFinish'])
if 'useExhausted' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['useExhausted'])
if 'useRealNumEvents' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['useRealNumEvents'])
if 'ipConnectivity' in taskParamMap:
self.taskSpec.setIpConnectivity(taskParamMap['ipConnectivity'])
if 'altStageOut' in taskParamMap:
self.taskSpec.setAltStageOut(taskParamMap['altStageOut'])
if 'allowInputLAN' in taskParamMap:
self.taskSpec.setAllowInputLAN(taskParamMap['allowInputLAN'])
if 'runUntilClosed' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['runUntilClosed'])
if 'stayOutputOnSite' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['stayOutputOnSite'])
if 'useJobCloning' in taskParamMap:
scValue = EventServiceUtils.getJobCloningValue(taskParamMap['useJobCloning'])
self.setSplitRule(None,scValue,JediTaskSpec.splitRuleToken['useJobCloning'])
if 'failWhenGoalUnreached' in taskParamMap and taskParamMap['failWhenGoalUnreached'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['failGoalUnreached'])
if 'switchEStoNormal' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['switchEStoNormal'])
if 'nEventsPerRange' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['dynamicNumEvents'])
if 'allowInputWAN' in taskParamMap and taskParamMap['allowInputWAN'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['allowInputWAN'])
if 'putLogToOS' in taskParamMap and taskParamMap['putLogToOS'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['putLogToOS'])
if 'mergeEsOnOS' in taskParamMap and taskParamMap['mergeEsOnOS'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['mergeEsOnOS'])
if 'writeInputToFile' in taskParamMap and taskParamMap['writeInputToFile'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['writeInputToFile'])
if 'useFileAsSourceLFN' in taskParamMap and taskParamMap['useFileAsSourceLFN'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['useFileAsSourceLFN'])
if 'ignoreMissingInDS' in taskParamMap and taskParamMap['ignoreMissingInDS'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['ignoreMissingInDS'])
# work queue
workQueue = None
if 'workQueueName' in taskParamMap:
# work queue is specified
workQueue = workQueueMapper.getQueueWithName(taskSpec.vo,taskSpec.prodSourceLabel,taskParamMap['workQueueName'])
if workQueue is None:
# get work queue based on task attributes
workQueue,tmpStr = workQueueMapper.getQueueWithSelParams(taskSpec.vo,
taskSpec.prodSourceLabel,
processingType=taskSpec.processingType,
workingGroup=taskSpec.workingGroup,
coreCount=taskSpec.coreCount,
site=taskSpec.site,
eventService=taskSpec.eventService,
splitRule=taskSpec.splitRule,
campaign=taskSpec.campaign)
if workQueue is None:
errStr = 'workqueue is undefined for vo={0} label={1} '.format(taskSpec.vo,taskSpec.prodSourceLabel)
errStr += 'processingType={0} workingGroup={1} coreCount={2} eventService={3} '.format(taskSpec.processingType,
taskSpec.workingGroup,
taskSpec.coreCount,
taskSpec.eventService)
errStr += 'splitRule={0} campaign={1}'.format(taskSpec.splitRule,taskSpec.campaign)
raise RuntimeError,errStr
self.taskSpec.workQueue_ID = workQueue.queue_id
# Initialize the global share
gshare = None
if 'gshare' in taskParamMap and self.taskBufferIF.is_valid_share(taskParamMap['gshare']):
            # global share is specified
gshare = taskParamMap['gshare']
else:
# get share based on definition
gshare = self.taskBufferIF.get_share_for_task(self.taskSpec)
if gshare is None:
gshare = 'No match'
# errStr = 'share is undefined for vo={0} label={1} '.format(taskSpec.vo,taskSpec.prodSourceLabel)
# errStr += 'workingGroup={0} campaign={1} '.format(taskSpec.workingGroup, taskSpec.campaign)
# raise RuntimeError,errStr
self.taskSpec.gshare = gshare
# return
return
# basic refinement procedure
def doBasicRefine(self,taskParamMap):
# get input/output/log dataset specs
nIn = 0
nOutMap = {}
if isinstance(taskParamMap['log'],dict):
itemList = taskParamMap['jobParameters'] + [taskParamMap['log']]
else:
itemList = taskParamMap['jobParameters'] + taskParamMap['log']
# pseudo input
if taskParamMap.has_key('noInput') and taskParamMap['noInput'] == True:
tmpItem = {}
tmpItem['type'] = 'template'
tmpItem['value'] = ''
tmpItem['dataset'] = 'pseudo_dataset'
tmpItem['param_type'] = 'pseudo_input'
itemList = [tmpItem] + itemList
# random seed
if RefinerUtils.useRandomSeed(taskParamMap):
tmpItem = {}
tmpItem['type'] = 'template'
tmpItem['value'] = ''
tmpItem['dataset'] = 'RNDMSEED'
tmpItem['param_type'] = 'random_seed'
itemList.append(tmpItem)
# loop over all items
allDsList = []
for tmpItem in itemList:
# look for datasets
if tmpItem['type'] == 'template' and tmpItem.has_key('dataset'):
# avoid duplication
if not tmpItem['dataset'] in allDsList:
allDsList.append(tmpItem['dataset'])
else:
continue
datasetSpec = JediDatasetSpec()
datasetSpec.datasetName = tmpItem['dataset']
datasetSpec.jediTaskID = self.taskSpec.jediTaskID
datasetSpec.type = tmpItem['param_type']
if tmpItem.has_key('container'):
datasetSpec.containerName = tmpItem['container']
if tmpItem.has_key('token'):
datasetSpec.storageToken = tmpItem['token']
if tmpItem.has_key('destination'):
datasetSpec.destination = tmpItem['destination']
if tmpItem.has_key('attributes'):
datasetSpec.setDatasetAttribute(tmpItem['attributes'])
if tmpItem.has_key('ratio'):
datasetSpec.setDatasetAttribute('ratio={0}'.format(tmpItem['ratio']))
if tmpItem.has_key('eventRatio'):
datasetSpec.setEventRatio(tmpItem['eventRatio'])
if tmpItem.has_key('check'):
datasetSpec.setDatasetAttribute('cc')
if tmpItem.has_key('usedup'):
datasetSpec.setDatasetAttribute('ud')
if tmpItem.has_key('random'):
datasetSpec.setDatasetAttribute('rd')
if tmpItem.has_key('reusable'):
datasetSpec.setDatasetAttribute('ru')
if tmpItem.has_key('offset'):
datasetSpec.setOffset(tmpItem['offset'])
if tmpItem.has_key('allowNoOutput'):
datasetSpec.allowNoOutput()
if tmpItem.has_key('nFilesPerJob'):
datasetSpec.setNumFilesPerJob(tmpItem['nFilesPerJob'])
if tmpItem.has_key('num_records'):
datasetSpec.setNumRecords(tmpItem['num_records'])
if 'transient' in tmpItem:
datasetSpec.setTransient(tmpItem['transient'])
datasetSpec.vo = self.taskSpec.vo
datasetSpec.nFiles = 0
datasetSpec.nFilesUsed = 0
datasetSpec.nFilesFinished = 0
datasetSpec.nFilesFailed = 0
datasetSpec.nFilesOnHold = 0
datasetSpec.nEvents = 0
datasetSpec.nEventsUsed = 0
datasetSpec.nEventsToBeUsed = 0
datasetSpec.status = 'defined'
if datasetSpec.type in JediDatasetSpec.getInputTypes() + ['random_seed']:
datasetSpec.streamName = RefinerUtils.extractStreamName(tmpItem['value'])
if not tmpItem.has_key('expandedList'):
tmpItem['expandedList'] = []
# dataset names could be comma-concatenated
datasetNameList = datasetSpec.datasetName.split(',')
# datasets could be added by incexec
incexecDS = 'dsFor{0}'.format(datasetSpec.streamName)
# remove /XYZ
incexecDS = incexecDS.split('/')[0]
if taskParamMap.has_key(incexecDS):
for tmpDatasetName in taskParamMap[incexecDS].split(','):
if not tmpDatasetName in datasetNameList:
datasetNameList.append(tmpDatasetName)
# loop over all dataset names
inDatasetSpecList = []
for datasetName in datasetNameList:
# skip empty
if datasetName == '':
continue
# expand
if datasetSpec.isPseudo() or datasetSpec.type in ['random_seed'] or datasetName == 'DBR_LATEST':
# pseudo input
tmpDatasetNameList = [datasetName]
elif tmpItem.has_key('expand') and tmpItem['expand'] == True:
# expand dataset container
tmpDatasetNameList = self.ddmIF.getInterface(self.taskSpec.vo).expandContainer(datasetName)
else:
# normal dataset name
tmpDatasetNameList = self.ddmIF.getInterface(self.taskSpec.vo).listDatasets(datasetName)
for elementDatasetName in tmpDatasetNameList:
if nIn > 0 or not elementDatasetName in tmpItem['expandedList']:
tmpItem['expandedList'].append(elementDatasetName)
inDatasetSpec = copy.copy(datasetSpec)
inDatasetSpec.datasetName = elementDatasetName
inDatasetSpec.containerName = datasetName
inDatasetSpecList.append(inDatasetSpec)
# empty input
if inDatasetSpecList == [] and self.oldTaskStatus != 'rerefine':
errStr = 'doBasicRefine : unknown input dataset "{0}"'.format(datasetSpec.datasetName)
self.taskSpec.setErrDiag(errStr)
if not datasetSpec.datasetName in self.unknownDatasetList:
self.unknownDatasetList.append(datasetSpec.datasetName)
raise JediException.UnknownDatasetError,errStr
# set master flag
for inDatasetSpec in inDatasetSpecList:
if nIn == 0:
# master
self.inMasterDatasetSpec.append(inDatasetSpec)
else:
# secondary
self.inSecDatasetSpecList.append(inDatasetSpec)
nIn += 1
continue
if datasetSpec.type in ['output','log']:
if not nOutMap.has_key(datasetSpec.type):
nOutMap[datasetSpec.type] = 0
# make stream name
datasetSpec.streamName = "{0}{1}".format(datasetSpec.type.upper(),nOutMap[datasetSpec.type])
nOutMap[datasetSpec.type] += 1
# set attribute for event service
if self.taskSpec.useEventService() and taskParamMap.has_key('objectStore') and datasetSpec.type in ['output']:
datasetSpec.setObjectStore(taskParamMap['objectStore'])
# extract output filename template and change the value field
outFileTemplate,tmpItem['value'] = RefinerUtils.extractReplaceOutFileTemplate(tmpItem['value'],
datasetSpec.streamName)
# make output template
if outFileTemplate != None:
if tmpItem.has_key('offset'):
offsetVal = 1 + tmpItem['offset']
else:
offsetVal = 1
outTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : offsetVal,
'streamName' : datasetSpec.streamName,
'filenameTemplate' : outFileTemplate,
'outtype' : datasetSpec.type,
}
if self.outputTemplateMap.has_key(datasetSpec.outputMapKey()):
# multiple files are associated to the same output datasets
self.outputTemplateMap[datasetSpec.outputMapKey()].append(outTemplateMap)
# don't insert the same output dataset
continue
self.outputTemplateMap[datasetSpec.outputMapKey()] = [outTemplateMap]
# append
self.outDatasetSpecList.append(datasetSpec)
# make unmerged dataset
if taskParamMap.has_key('mergeOutput') and taskParamMap['mergeOutput'] == True:
umDatasetSpec = JediDatasetSpec()
umDatasetSpec.datasetName = 'panda.um.' + datasetSpec.datasetName
umDatasetSpec.jediTaskID = self.taskSpec.jediTaskID
umDatasetSpec.storageToken = 'TOMERGE'
umDatasetSpec.vo = datasetSpec.vo
umDatasetSpec.type = "tmpl_trn_" + datasetSpec.type
umDatasetSpec.nFiles = 0
umDatasetSpec.nFilesUsed = 0
umDatasetSpec.nFilesToBeUsed = 0
umDatasetSpec.nFilesFinished = 0
umDatasetSpec.nFilesFailed = 0
umDatasetSpec.nFilesOnHold = 0
umDatasetSpec.status = 'defined'
umDatasetSpec.streamName = datasetSpec.streamName
if datasetSpec.isAllowedNoOutput():
umDatasetSpec.allowNoOutput()
# ratio
if datasetSpec.getRatioToMaster() > 1:
umDatasetSpec.setDatasetAttribute('ratio={0}'.format(datasetSpec.getRatioToMaster()))
# make unmerged output template
if outFileTemplate != None:
umOutTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : 1,
'streamName' : umDatasetSpec.streamName,
'outtype' : datasetSpec.type,
}
# append temporary name
if taskParamMap.has_key('umNameAtEnd') and taskParamMap['umNameAtEnd'] == True:
# append temporary name at the end
umOutTemplateMap['filenameTemplate'] = outFileTemplate + '.panda.um'
else:
umOutTemplateMap['filenameTemplate'] = 'panda.um.' + outFileTemplate
if self.outputTemplateMap.has_key(umDatasetSpec.outputMapKey()):
# multiple files are associated to the same output datasets
self.outputTemplateMap[umDatasetSpec.outputMapKey()].append(umOutTemplateMap)
# don't insert the same output dataset
continue
self.outputTemplateMap[umDatasetSpec.outputMapKey()] = [umOutTemplateMap]
# use log as master for merging
if datasetSpec.type == 'log':
self.unmergeMasterDatasetSpec[datasetSpec.outputMapKey()] = umDatasetSpec
else:
# append
self.unmergeDatasetSpecMap[datasetSpec.outputMapKey()] = umDatasetSpec
# set attributes for merging
if taskParamMap.has_key('mergeOutput') and taskParamMap['mergeOutput'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['mergeOutput'])
# make job parameters
rndmSeedOffset = None
firstEventOffset = None
jobParameters = ''
for tmpItem in taskParamMap['jobParameters']:
if tmpItem.has_key('value'):
# hidden parameter
if tmpItem.has_key('hidden') and tmpItem['hidden'] == True:
continue
# add tags for ES-only parameters
esOnly = False
if 'es_only' in tmpItem and tmpItem['es_only'] == True:
esOnly = True
if esOnly:
jobParameters += '<PANDA_ES_ONLY>'
jobParameters += '{0}'.format(tmpItem['value'])
if esOnly:
jobParameters += '</PANDA_ES_ONLY>'
# padding
if tmpItem.has_key('padding') and tmpItem['padding'] == False:
pass
else:
jobParameters += ' '
# get offset for random seed and first event
if tmpItem['type'] == 'template' and tmpItem['param_type'] == 'number':
if '${RNDMSEED}' in tmpItem['value']:
if tmpItem.has_key('offset'):
rndmSeedOffset = tmpItem['offset']
else:
rndmSeedOffset = 0
elif '${FIRSTEVENT}' in tmpItem['value']:
if tmpItem.has_key('offset'):
firstEventOffset = tmpItem['offset']
jobParameters = jobParameters[:-1]
# append parameters for event service merging if necessary
esmergeParams = self.getParamsForEventServiceMerging(taskParamMap)
if esmergeParams != None:
jobParameters += esmergeParams
self.setJobParamsTemplate(jobParameters)
# set random seed offset
if rndmSeedOffset != None:
self.setSplitRule(None,rndmSeedOffset,JediTaskSpec.splitRuleToken['randomSeed'])
if firstEventOffset != None:
self.setSplitRule(None,firstEventOffset,JediTaskSpec.splitRuleToken['firstEvent'])
# return
return
# replace placeholder with dict provided by prepro job
def replacePlaceHolders(self,paramItem,placeHolderName,newValue):
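        # e.g. (hypothetical values) replacePlaceHolders({'out': '%DS', 'sub': {'in': '%DS'}}, '%DS', 'data15.xyz')
        # rewrites the dict in place to {'out': 'data15.xyz', 'sub': {'in': 'data15.xyz'}},
        # descending recursively into nested dicts and lists.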
if isinstance(paramItem,types.DictType):
# loop over all dict params
for tmpParName,tmpParVal in paramItem.iteritems():
if tmpParVal == placeHolderName:
# replace placeholder
paramItem[tmpParName] = newValue
elif isinstance(tmpParVal,types.DictType) or \
isinstance(tmpParVal,types.ListType):
# recursive execution
self.replacePlaceHolders(tmpParVal,placeHolderName,newValue)
elif isinstance(paramItem,types.ListType):
# loop over all list items
for tmpItem in paramItem:
self.replacePlaceHolders(tmpItem,placeHolderName,newValue)
# refinement procedure for preprocessing
def doPreProRefine(self,taskParamMap):
# no preprocessing
if not taskParamMap.has_key('preproSpec'):
return None,taskParamMap
# already preprocessed
if self.taskSpec.checkPreProcessed():
# get replaced task params
tmpStat,tmpJsonStr = self.taskBufferIF.getPreprocessMetadata_JEDI(self.taskSpec.jediTaskID)
try:
# replace placeholders
replaceParams = RefinerUtils.decodeJSON(tmpJsonStr)
self.tmpLog.debug("replace placeholders with "+str(replaceParams))
for tmpKey,tmpVal in replaceParams.iteritems():
self.replacePlaceHolders(taskParamMap,tmpKey,tmpVal)
except:
errtype,errvalue = sys.exc_info()[:2]
self.tmpLog.error('{0} failed to get additional task params with {1}:{2}'.format(self.__class__.__name__,
errtype.__name__,errvalue))
return False,taskParamMap
# succeeded
self.updatedTaskParams = taskParamMap
return None,taskParamMap
# make dummy dataset to keep track of preprocessing
datasetSpec = JediDatasetSpec()
datasetSpec.datasetName = 'panda.pp.in.{0}.{1}'.format(uuid.uuid4(),self.taskSpec.jediTaskID)
datasetSpec.jediTaskID = self.taskSpec.jediTaskID
datasetSpec.type = 'pp_input'
datasetSpec.vo = self.taskSpec.vo
datasetSpec.nFiles = 1
datasetSpec.nFilesUsed = 0
datasetSpec.nFilesToBeUsed = 1
datasetSpec.nFilesFinished = 0
datasetSpec.nFilesFailed = 0
datasetSpec.nFilesOnHold = 0
datasetSpec.status = 'ready'
self.inMasterDatasetSpec.append(datasetSpec)
# make file
fileSpec = JediFileSpec()
fileSpec.jediTaskID = datasetSpec.jediTaskID
fileSpec.type = datasetSpec.type
fileSpec.status = 'ready'
fileSpec.lfn = 'pseudo_lfn'
fileSpec.attemptNr = 0
fileSpec.maxAttempt = 3
fileSpec.keepTrack = 1
datasetSpec.addFile(fileSpec)
# make log dataset
logDatasetSpec = JediDatasetSpec()
logDatasetSpec.datasetName = 'panda.pp.log.{0}.{1}'.format(uuid.uuid4(),self.taskSpec.jediTaskID)
logDatasetSpec.jediTaskID = self.taskSpec.jediTaskID
logDatasetSpec.type = 'tmpl_pp_log'
logDatasetSpec.streamName = 'PP_LOG'
logDatasetSpec.vo = self.taskSpec.vo
logDatasetSpec.nFiles = 0
logDatasetSpec.nFilesUsed = 0
logDatasetSpec.nFilesToBeUsed = 0
logDatasetSpec.nFilesFinished = 0
logDatasetSpec.nFilesFailed = 0
logDatasetSpec.nFilesOnHold = 0
logDatasetSpec.status = 'defined'
self.outDatasetSpecList.append(logDatasetSpec)
# make output template for log
outTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : 1,
'streamName' : logDatasetSpec.streamName,
'filenameTemplate' : "{0}._${{SN}}.log.tgz".format(logDatasetSpec.datasetName),
'outtype' : re.sub('^tmpl_','',logDatasetSpec.type),
}
self.outputTemplateMap[logDatasetSpec.outputMapKey()] = [outTemplateMap]
# set split rule to use preprocessing
self.taskSpec.setPrePro()
# set task status
self.taskSpec.status = 'topreprocess'
# return
return True,taskParamMap
# set split rule
def setSplitRule(self,taskParamMap,keyName,valName):
if taskParamMap != None:
if not taskParamMap.has_key(keyName):
return
tmpStr = '{0}={1}'.format(valName,taskParamMap[keyName])
else:
tmpStr = '{0}={1}'.format(valName,keyName)
if self.taskSpec.splitRule in [None,'']:
self.taskSpec.splitRule = tmpStr
else:
tmpMatch = re.search(valName+'=(-*\d+)',self.taskSpec.splitRule)
if tmpMatch == None:
# append
self.taskSpec.splitRule += ',{0}'.format(tmpStr)
else:
# replace
self.taskSpec.splitRule = re.sub(valName+'=(-*\d+)',
tmpStr,
self.taskSpec.splitRule)
return
# get parameters for event service merging
def getParamsForEventServiceMerging(self,taskParamMap):
# no event service
if not self.taskSpec.useEventService():
return None
# extract parameters
transPath = 'UnDefined'
jobParameters = 'UnDefined'
if taskParamMap.has_key('esmergeSpec'):
if taskParamMap['esmergeSpec'].has_key('transPath'):
transPath = taskParamMap['esmergeSpec']['transPath']
if taskParamMap['esmergeSpec'].has_key('jobParameters'):
jobParameters = taskParamMap['esmergeSpec']['jobParameters']
# return
return '<PANDA_ESMERGE_TRF>'+transPath+'</PANDA_ESMERGE_TRF>'+'<PANDA_ESMERGE_JOBP>'+jobParameters+'</PANDA_ESMERGE_JOBP>'
Interaction.installSC(TaskRefinerBase)
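# Illustrative sketch (not part of the original module): setSplitRule() above packs task
# options into taskSpec.splitRule as one comma-separated string of "<token>=<int>" pairs,
# appending a token the first time it is seen and replacing its value afterwards. A
# standalone toy version of that encoding, with made-up tokens, would behave like this:
#
#   def set_rule(split_rule, token, value):
#       pair = '{0}={1}'.format(token, value)
#       if split_rule in [None, '']:
#           return pair
#       if re.search(token + '=(-*\d+)', split_rule) is None:
#           return split_rule + ',' + pair                      # append a new token
#       return re.sub(token + '=(-*\d+)', pair, split_rule)     # replace existing value
#
#   rule = set_rule('', 'NF', 10)       # 'NF=10'
#   rule = set_rule(rule, 'NE', 500)    # 'NF=10,NE=500'
#   rule = set_rule(rule, 'NF', 20)     # 'NF=20,NE=500'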
|
|
""" rewrite of lambdify - This stuff is not stable at all.
It is for internal use in the new plotting module.
It may (will! see the Q'n'A in the source) be rewritten.
It's completely self contained. Especially it does not use lambdarepr.
It does not aim to replace the current lambdify. Most importantly it will never
ever support anything else than sympy expressions (no Matrices, dictionaries
and so on).
"""
from __future__ import print_function, division
import re
from sympy import Symbol, NumberSymbol, I, zoo, oo
from sympy.core.compatibility import exec_
from sympy.utilities.iterables import numbered_symbols
# We parse the expression string into a tree that identifies functions. Then
# we translate the names of the functions and we translate also some strings
# that are not names of functions (all this according to translation
# dictionaries).
# If the translation goes to another module (like numpy) the
# module is imported and 'func' is translated to 'module.func'.
# If a function can not be translated, the inner nodes of that part of the
# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not
# translated to np.sqrt and the Integral does not crash.
# A namespace for all this is generated by crawling the (func, args) tree of
# the expression. The creation of this namespace involves many ugly
# workarounds.
# The namespace consists of all the names needed for the sympy expression and
# all the name of modules used for translation. Those modules are imported only
# as a name (import numpy as np) in order to keep the namespace small and
# manageable.
# Please, if there is a bug, do not try to fix it here! Rewrite this by using
# the method proposed in the last Q'n'A below. That way the new function will
# work just as well, be just as simple, but it won't need any new workarounds.
# If you insist on fixing it here, look at the workarounds in the function
# sympy_expression_namespace and in lambdify.
# Q: Why are you not using python abstract syntax tree?
# A: Because it is more complicated and not much more powerful in this case.
# Q: What if I have Symbol('sin') or g=Function('f')?
# A: You will break the algorithm. We should use srepr to defend against this?
# The problem with Symbol('sin') is that it will be printed as 'sin'. The
# parser will distinguish it from the function 'sin' because functions are
# detected thanks to the opening parenthesis, but the lambda expression won't
# understand the difference if we have also the sin function.
# The solution (complicated) is to use srepr and maybe ast.
# The problem with the g=Function('f') is that it will be printed as 'f' but in
# the global namespace we have only 'g'. But as the same printer is used in the
# constructor of the namespace there will be no problem.
# Q: What if some of the printers are not printing as expected?
# A: The algorithm won't work. You must use srepr for those cases. But even
# srepr may not print well. All problems with printers should be considered
# bugs.
# Q: What about _imp_ functions?
# A: Those are taken care for by evalf. A special case treatment will work
# faster but it's not worth the code complexity.
# Q: Will ast fix all possible problems?
# A: No. You will always have to use some printer. Even srepr may not work in
# some cases. But if the printer does not work, that should be considered a
# bug.
# Q: Is there some way to fix all possible problems?
# A: Probably by constructing our strings ourself by traversing the (func,
# args) tree and creating the namespace at the same time. That actually sounds
# good.
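# As an illustration of the pipeline described above (an assumed example, not taken from
# the original comments): lambdifying sin(x) + pi with use_np=True renames the argument to
# x0, parses 'sin(x0) + pi' into a (head, ('sin(', args), tail) tree, maps 'sin(' to
# 'np.sin(' through dict_fun and the bare string 'pi' to 'np.pi' through dict_str, and
# finally execs roughly  lambda x0 : ( np.sin(x0) + np.pi )  in a small namespace holding
# 'np' plus the sympy names needed by the expression.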
from sympy.external import import_module
import warnings
# TODO: debugging output
class vectorized_lambdify(object):
""" Return a sufficiently smart, vectorized and lambdified function.
Returns only reals.
    This function uses experimental_lambdify to create a lambdified
expression ready to be used with numpy. Many of the functions in sympy
are not implemented in numpy so in some cases we resort to python cmath or
even to evalf.
The following translations are tried:
only numpy complex
- on errors raised by sympy trying to work with ndarray:
only python cmath and then vectorize complex128
When using python cmath there is no need for evalf or float/complex
because python cmath calls those.
This function never tries to mix numpy directly with evalf because numpy
does not understand sympy Float. If this is needed one can use the
float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or
better one can be explicit about the dtypes that numpy works with.
Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what
types of errors to expect.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_np=True)
self.vector_func = self.lambda_func
self.failure = False
def __call__(self, *args):
np = import_module('numpy')
np_old_err = np.seterr(invalid='raise')
try:
temp_args = (np.array(a, dtype=np.complex) for a in args)
results = self.vector_func(*temp_args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
except Exception as e:
#DEBUG: print 'Error', type(e), e
if ((isinstance(e, TypeError)
and 'unhashable type: \'numpy.ndarray\'' in str(e))
or
(isinstance(e, ValueError)
and ('Invalid limits given:' in str(e)
or 'negative dimensions are not allowed' in str(e) # XXX
or 'sequence too large; must be smaller than 32' in str(e)))): # XXX
# Almost all functions were translated to numpy, but some were
                # left as sympy functions. They received an ndarray as an
# argument and failed.
# sin(ndarray(...)) raises "unhashable type"
# Integral(x, (x, 0, ndarray(...))) raises "Invalid limits"
# other ugly exceptions that are not well understood (marked with XXX)
# TODO: Cleanup the ugly special cases marked with xxx above.
# Solution: use cmath and vectorize the final lambda.
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_python_cmath=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
else:
# Complete failure. One last try with no translations, only
# wrapping in complex((...).evalf()) and returning the real
# part.
if self.failure:
raise e
else:
self.failure = True
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_evalf=True,
complex_wrap_evalf=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
warnings.warn('The evaluation of the expression is'
' problematic. We are trying a failback method'
' that may still work. Please report this as a bug.')
finally:
np.seterr(**np_old_err)
return results
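# Illustrative usage of the class above (assumed, not from the original module):
#   import numpy as np
#   from sympy.abc import x
#   f = vectorized_lambdify([x], x**2 + 1)
#   f(np.linspace(0.0, 1.0, 5))   # -> real-valued masked array of length 5
# On the numpy-specific failures listed in __call__ the instance rebuilds itself with
# cmath plus np.vectorize; for other errors it falls back once to wrapping evalf in
# complex(...), and re-raises only if that also fails.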
class lambdify(object):
"""Returns the lambdified function.
This function uses experimental_lambdify to create a lambdified
expression. It uses cmath to lambdify the expression. If the function
is not implemented in python cmath, python cmath calls evalf on those
functions.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_evalf=True,
use_python_cmath=True)
self.failure = False
def __call__(self, args):
args = complex(args)
try:
#The result can be sympy.Float. Hence wrap it with complex type.
result = complex(self.lambda_func(args))
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
except Exception as e:
# The exceptions raised by sympy, cmath are not consistent and
# hence it is not possible to specify all the exceptions that
# are to be caught. Presently there are no cases for which the code
# reaches this block other than ZeroDivisionError and complex
            # comparison. Also the exception is caught only once. If the
# exception repeats itself,
# then it is not caught and the corresponding error is raised.
# XXX: Remove catching all exceptions once the plotting module
# is heavily tested.
if isinstance(e, ZeroDivisionError):
return None
elif isinstance(e, TypeError) and ('no ordering relation is'
' defined for complex numbers'
in str(e)):
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
use_python_math=True)
result = self.lambda_func(args.real)
return result
else:
if self.failure:
raise e
#Failure
#Try wrapping it with complex(..).evalf()
self.failure = True
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
complex_wrap_evalf=True)
result = self.lambda_func(args)
warnings.warn('The evaluation of the expression is'
' problematic. We are trying a failback method'
' that may still work. Please report this as a bug.')
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
def experimental_lambdify(*args, **kwargs):
l = Lambdifier(*args, **kwargs)
return l.lambda_func
class Lambdifier(object):
def __init__(self, args, expr, print_lambda=False, use_evalf=False,
float_wrap_evalf=False, complex_wrap_evalf=False,
use_np=False, use_python_math=False, use_python_cmath=False,
use_interval=False):
self.print_lambda = print_lambda
self.use_evalf = use_evalf
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
self.use_np = use_np
self.use_python_math = use_python_math
self.use_python_cmath = use_python_cmath
self.use_interval = use_interval
# Constructing the argument string
# - check
if not all([isinstance(a, Symbol) for a in args]):
raise ValueError('The arguments must be Symbols.')
# - use numbered symbols
syms = numbered_symbols(exclude=expr.free_symbols)
newargs = [next(syms) for i in args]
expr = expr.xreplace(dict(zip(args, newargs)))
argstr = ', '.join([str(a) for a in newargs])
del syms, newargs, args
# Constructing the translation dictionaries and making the translation
self.dict_str = self.get_dict_str()
self.dict_fun = self.get_dict_fun()
exprstr = str(expr)
newexpr = self.tree2str_translate(self.str2tree(exprstr))
# Constructing the namespaces
namespace = {}
namespace.update(self.sympy_atoms_namespace(expr))
namespace.update(self.sympy_expression_namespace(expr))
# XXX Workaround
# Ugly workaround because Pow(a,Half) prints as sqrt(a)
# and sympy_expression_namespace can not catch it.
from sympy import sqrt
namespace.update({'sqrt': sqrt})
# End workaround.
if use_python_math:
namespace.update({'math': __import__('math')})
if use_python_cmath:
namespace.update({'cmath': __import__('cmath')})
if use_np:
try:
namespace.update({'np': __import__('numpy')})
except ImportError:
raise ImportError(
'experimental_lambdify failed to import numpy.')
if use_interval:
namespace.update({'imath': __import__(
'sympy.plotting.intervalmath', fromlist=['intervalmath'])})
namespace.update({'math': __import__('math')})
# Construct the lambda
if self.print_lambda:
print(newexpr)
eval_str = 'lambda %s : ( %s )' % (argstr, newexpr)
exec_("from __future__ import division; MYNEWLAMBDA = %s" % eval_str, namespace)
self.lambda_func = namespace['MYNEWLAMBDA']
##############################################################################
# Dicts for translating from sympy to other modules
##############################################################################
###
# builtins
###
# Functions with different names in builtins
builtin_functions_different = {
'Min': 'min',
'Max': 'max',
'Abs': 'abs',
}
# Strings that should be translated
builtin_not_functions = {
'I': '1j',
'oo': '1e400',
}
###
# numpy
###
# Functions that are the same in numpy
numpy_functions_same = [
'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'exp', 'log',
'sqrt', 'floor', 'conjugate',
]
# Functions with different names in numpy
numpy_functions_different = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"im": "imag",
"ln": "log",
"Max": "amax",
"Min": "amin",
"re": "real",
"Abs": "abs",
}
# Strings that should be translated
numpy_not_functions = {
'pi': 'np.pi',
'oo': 'np.inf',
'E': 'np.e',
}
###
# python math
###
# Functions that are the same in math
math_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'erf', 'sqrt', 'floor', 'factorial', 'gamma',
]
# Functions with different names in math
math_functions_different = {
'ceiling': 'ceil',
'ln': 'log',
'loggamma': 'lgamma'
}
# Strings that should be translated
math_not_functions = {
'pi': 'math.pi',
'E': 'math.e',
}
###
# python cmath
###
# Functions that are the same in cmath
cmath_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'sqrt',
]
# Functions with different names in cmath
cmath_functions_different = {
'ln': 'log',
'arg': 'phase',
}
# Strings that should be translated
cmath_not_functions = {
'pi': 'cmath.pi',
'E': 'cmath.e',
}
###
# intervalmath
###
interval_not_functions = {
'pi': 'math.pi',
'E': 'math.e'
}
interval_functions_same = [
'sin', 'cos', 'exp', 'tan', 'atan', 'log',
'sqrt', 'cosh', 'sinh', 'tanh', 'floor',
'acos', 'asin', 'acosh', 'asinh', 'atanh',
'Abs', 'And', 'Or'
]
interval_functions_different = {
'Min': 'imin',
'Max': 'imax',
'ceiling': 'ceil',
}
###
# mpmath, etc
###
#TODO
###
# Create the final ordered tuples of dictionaries
###
# For strings
def get_dict_str(self):
dict_str = dict(self.builtin_not_functions)
if self.use_np:
dict_str.update(self.numpy_not_functions)
if self.use_python_math:
dict_str.update(self.math_not_functions)
if self.use_python_cmath:
dict_str.update(self.cmath_not_functions)
if self.use_interval:
dict_str.update(self.interval_not_functions)
return dict_str
# For functions
def get_dict_fun(self):
dict_fun = dict(self.builtin_functions_different)
if self.use_np:
for s in self.numpy_functions_same:
dict_fun[s] = 'np.' + s
for k, v in self.numpy_functions_different.items():
dict_fun[k] = 'np.' + v
if self.use_python_math:
for s in self.math_functions_same:
dict_fun[s] = 'math.' + s
for k, v in self.math_functions_different.items():
dict_fun[k] = 'math.' + v
if self.use_python_cmath:
for s in self.cmath_functions_same:
dict_fun[s] = 'cmath.' + s
for k, v in self.cmath_functions_different.items():
dict_fun[k] = 'cmath.' + v
if self.use_interval:
for s in self.interval_functions_same:
dict_fun[s] = 'imath.' + s
for k, v in self.interval_functions_different.items():
dict_fun[k] = 'imath.' + v
return dict_fun
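    # For example, with use_np=True the resulting dict_fun maps (among others)
    # 'sin' -> 'np.sin' and 'acos' -> 'np.arccos', and the builtin entries for
    # 'Min', 'Max' and 'Abs' are overridden by 'np.amin', 'np.amax' and 'np.abs'.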
##############################################################################
# The translator functions, tree parsers, etc.
##############################################################################
def str2tree(self, exprstr):
"""Converts an expression string to a tree.
Functions are represented by ('func_name(', tree_of_arguments).
Other expressions are (head_string, mid_tree, tail_str).
Expressions that do not contain functions are directly returned.
Examples:
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> str2tree(str(Integral(x, (x, 1, y))))
('', ('Integral(', 'x, (x, 1, y)'), ')')
>>> str2tree(str(x+y))
'x + y'
>>> str2tree(str(x+y*sin(z)+1))
('x + y*', ('sin(', 'z'), ') + 1')
>>> str2tree('sin(y*(y + 1.1) + (sin(y)))')
('', ('sin(', ('y*(y + 1.1) + (', ('sin(', 'y'), '))')), ')')
"""
#matches the first 'function_name('
first_par = re.search(r'(\w+\()', exprstr)
if first_par is None:
return exprstr
else:
start = first_par.start()
end = first_par.end()
head = exprstr[:start]
func = exprstr[start:end]
tail = exprstr[end:]
count = 0
for i, c in enumerate(tail):
if c == '(':
count += 1
elif c == ')':
count -= 1
if count == -1:
break
func_tail = self.str2tree(tail[:i])
tail = self.str2tree(tail[i:])
return (head, (func, func_tail), tail)
@classmethod
def tree2str(cls, tree):
"""Converts a tree to string without translations.
Examples:
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> tree2str = Lambdifier([x], x).tree2str
>>> tree2str(str2tree(str(x+y*sin(z)+1)))
'x + y*sin(z) + 1'
"""
if isinstance(tree, str):
return tree
else:
return ''.join(map(cls.tree2str, tree))
def tree2str_translate(self, tree):
"""Converts a tree to string with translations.
Function names are translated by translate_func.
Other strings are translated by translate_str.
"""
if isinstance(tree, str):
return self.translate_str(tree)
elif isinstance(tree, tuple) and len(tree) == 2:
return self.translate_func(tree[0][:-1], tree[1])
else:
return ''.join([self.tree2str_translate(t) for t in tree])
def translate_str(self, estr):
"""Translate substrings of estr using in order the dictionaries in
dict_tuple_str."""
for pattern, repl in self.dict_str.items():
estr = re.sub(pattern, repl, estr)
return estr
def translate_func(self, func_name, argtree):
"""Translate function names and the tree of arguments.
If the function name is not in the dictionaries of dict_tuple_fun then the
function is surrounded by a float((...).evalf()).
The use of float is necessary as np.<function>(sympy.Float(..)) raises an
error."""
if func_name in self.dict_fun:
new_name = self.dict_fun[func_name]
argstr = self.tree2str_translate(argtree)
return new_name + '(' + argstr
else:
template = '(%s(%s)).evalf(' if self.use_evalf else '%s(%s'
if self.float_wrap_evalf:
template = 'float(%s)' % template
elif self.complex_wrap_evalf:
template = 'complex(%s)' % template
return template % (func_name, self.tree2str(argtree))
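    # For instance, a known name such as 'sin' with use_np=True is rewritten to
    # 'np.sin(' followed by the translated arguments; the matching ')' comes from
    # the tail of the parse tree. Unknown function names fall through to the
    # evalf/float/complex wrapping above.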
##############################################################################
# The namespace constructors
##############################################################################
@classmethod
def sympy_expression_namespace(cls, expr):
"""Traverses the (func, args) tree of an expression and creates a sympy
namespace. All other modules are imported only as a module name. That way
the namespace is not poluted and rests quite small. It probably causes much
more variable lookups and so it takes more time, but there are no tests on
that for the moment."""
if expr is None:
return {}
else:
funcname = str(expr.func)
# XXX Workaround
# Here we add an ugly workaround because str(func(x))
# is not always the same as str(func). Eg
# >>> str(Integral(x))
# "Integral(x)"
# >>> str(Integral)
# "<class 'sympy.integrals.integrals.Integral'>"
# >>> str(sqrt(x))
# "sqrt(x)"
# >>> str(sqrt)
# "<function sqrt at 0x3d92de8>"
# >>> str(sin(x))
# "sin(x)"
# >>> str(sin)
# "sin"
# Either one of those can be used but not all at the same time.
# The code considers the sin example as the right one.
regexlist = [
r'<class \'sympy[\w.]*?.([\w]*)\'>$',
# the example Integral
r'<function ([\w]*) at 0x[\w]*>$', # the example sqrt
]
for r in regexlist:
m = re.match(r, funcname)
if m is not None:
funcname = m.groups()[0]
# End of the workaround
# XXX debug: print funcname
args_dict = {}
for a in expr.args:
if (isinstance(a, Symbol) or
isinstance(a, NumberSymbol) or
a in [I, zoo, oo]):
continue
else:
args_dict.update(cls.sympy_expression_namespace(a))
args_dict.update({funcname: expr.func})
return args_dict
@staticmethod
def sympy_atoms_namespace(expr):
"""For no real reason this function is separated from
sympy_expression_namespace. It can be moved to it."""
atoms = expr.atoms(Symbol, NumberSymbol, I, zoo, oo)
d = {}
for a in atoms:
# XXX debug: print 'atom:' + str(a)
d[str(a)] = a
return d
|
|
# Authors: Chris Holdgraf <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import pytest
import numpy as np
from numpy import einsum
from numpy.fft import rfft, irfft
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from mne.utils import requires_sklearn, run_tests_if_main
from mne.decoding import ReceptiveField, TimeDelayingRidge
from mne.decoding.receptive_field import (_delay_time_series, _SCORERS,
_times_to_delays, _delays_to_slice)
from mne.decoding.time_delaying_ridge import (_compute_reg_neighbors,
_compute_corrs)
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.1, 0.5
event_id = dict(aud_l=1, vis_l=3)
# n_jobs values to parametrize over (CPU and CUDA backends)
n_jobs_test = (1, 'cuda')
def test_compute_reg_neighbors():
"""Test fast calculation of laplacian regularizer."""
for reg_type in (
('ridge', 'ridge'),
('ridge', 'laplacian'),
('laplacian', 'ridge'),
('laplacian', 'laplacian')):
for n_ch_x, n_delays in (
(1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (4, 1),
(2, 2), (2, 3), (3, 2), (3, 3),
(2, 4), (4, 2), (3, 4), (4, 3), (4, 4),
(5, 4), (4, 5), (5, 5),
(20, 9), (9, 20)):
for normed in (True, False):
reg_direct = _compute_reg_neighbors(
n_ch_x, n_delays, reg_type, 'direct', normed=normed)
reg_csgraph = _compute_reg_neighbors(
n_ch_x, n_delays, reg_type, 'csgraph', normed=normed)
assert_allclose(
reg_direct, reg_csgraph, atol=1e-7,
err_msg='%s: %s' % (reg_type, (n_ch_x, n_delays)))
@requires_sklearn
def test_rank_deficiency():
"""Test signals that are rank deficient."""
# See GH#4253
from sklearn.linear_model import Ridge
N = 256
fs = 1.
tmin, tmax = -50, 100
reg = 0.1
rng = np.random.RandomState(0)
eeg = rng.randn(N, 1)
eeg *= 100
eeg = rfft(eeg, axis=0)
eeg[N // 4:] = 0 # rank-deficient lowpass
eeg = irfft(eeg, axis=0)
win = np.hanning(N // 8)
win /= win.mean()
y = np.apply_along_axis(np.convolve, 0, eeg, win, mode='same')
y += rng.randn(*y.shape) * 100
for est in (Ridge(reg), reg):
rf = ReceptiveField(tmin, tmax, fs, estimator=est, patterns=True)
rf.fit(eeg, y)
pred = rf.predict(eeg)
assert_equal(y.shape, pred.shape)
corr = np.corrcoef(y.ravel(), pred.ravel())[0, 1]
assert corr > 0.995
def test_time_delay():
"""Test that time-delaying w/ times and samples works properly."""
# Explicit delays + sfreq
X = np.random.RandomState(0).randn(1000, 2)
assert (X == 0).sum() == 0 # need this for later
test_tlims = [
((1, 2), 1),
((1, 1), 1),
((0, 2), 1),
((0, 1), 1),
((0, 0), 1),
((-1, 2), 1),
((-1, 1), 1),
((-1, 0), 1),
((-1, -1), 1),
((-2, 2), 1),
((-2, 1), 1),
((-2, 0), 1),
((-2, -1), 1),
((-2, -1), 1),
((0, .2), 10),
((-.1, .1), 10)]
for (tmin, tmax), isfreq in test_tlims:
# sfreq must be int/float
with pytest.raises(TypeError, match='`sfreq` must be an instance of'):
_delay_time_series(X, tmin, tmax, sfreq=[1])
# Delays must be int/float
with pytest.raises(TypeError, match='.*complex.*'):
_delay_time_series(X, np.complex128(tmin), tmax, 1)
# Make sure swapaxes works
start, stop = int(round(tmin * isfreq)), int(round(tmax * isfreq)) + 1
n_delays = stop - start
X_delayed = _delay_time_series(X, tmin, tmax, isfreq)
assert_equal(X_delayed.shape, (1000, 2, n_delays))
# Make sure delay slice is correct
delays = _times_to_delays(tmin, tmax, isfreq)
assert_array_equal(delays, np.arange(start, stop))
keep = _delays_to_slice(delays)
expected = np.where((X_delayed != 0).all(-1).all(-1))[0]
got = np.arange(len(X_delayed))[keep]
assert_array_equal(got, expected)
assert X_delayed[keep].shape[-1] > 0
assert (X_delayed[keep] == 0).sum() == 0
del_zero = int(round(-tmin * isfreq))
for ii in range(-2, 3):
idx = del_zero + ii
err_msg = '[%s,%s] (%s): %s %s' % (tmin, tmax, isfreq, ii, idx)
if 0 <= idx < X_delayed.shape[-1]:
if ii == 0:
assert_array_equal(X_delayed[:, :, idx], X,
err_msg=err_msg)
elif ii < 0: # negative delay
assert_array_equal(X_delayed[:ii, :, idx], X[-ii:, :],
err_msg=err_msg)
assert_array_equal(X_delayed[ii:, :, idx], 0.)
else:
assert_array_equal(X_delayed[ii:, :, idx], X[:-ii, :],
err_msg=err_msg)
assert_array_equal(X_delayed[:ii, :, idx], 0.)
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_basic(n_jobs):
"""Test model prep and fitting."""
from sklearn.linear_model import Ridge
# Make sure estimator pulling works
mod = Ridge()
rng = np.random.RandomState(1337)
# Test the receptive field model
# Define parameters for the model and simulate inputs + weights
tmin, tmax = -10., 0
n_feats = 3
rng = np.random.RandomState(0)
X = rng.randn(10000, n_feats)
w = rng.randn(int((tmax - tmin) + 1) * n_feats)
# Delay inputs and cut off first 4 values since they'll be cut in the fit
X_del = np.concatenate(
_delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1)
y = np.dot(X_del, w)
# Fit the model and test values
feature_names = ['feature_%i' % ii for ii in [0, 1, 2]]
rf = ReceptiveField(tmin, tmax, 1, feature_names, estimator=mod,
patterns=True)
rf.fit(X, y)
assert_array_equal(rf.delays_, np.arange(tmin, tmax + 1))
y_pred = rf.predict(X)
assert_allclose(y[rf.valid_samples_], y_pred[rf.valid_samples_], atol=1e-2)
scores = rf.score(X, y)
assert scores > .99
assert_allclose(rf.coef_.T.ravel(), w, atol=1e-3)
# Make sure different input shapes work
rf.fit(X[:, np.newaxis:], y[:, np.newaxis])
rf.fit(X, y[:, np.newaxis])
with pytest.raises(ValueError, match='If X has 3 .* y must have 2 or 3'):
rf.fit(X[..., np.newaxis], y)
with pytest.raises(ValueError, match='X must be shape'):
rf.fit(X[:, 0], y)
with pytest.raises(ValueError, match='X and y do not have the same n_epo'):
rf.fit(X[:, np.newaxis], np.tile(y[:, np.newaxis, np.newaxis],
[1, 2, 1]))
with pytest.raises(ValueError, match='X and y do not have the same n_tim'):
rf.fit(X, y[:-2])
with pytest.raises(ValueError, match='n_features in X does not match'):
rf.fit(X[:, :1], y)
# auto-naming features
feature_names = ['feature_%s' % ii for ii in [0, 1, 2]]
rf = ReceptiveField(tmin, tmax, 1, estimator=mod,
feature_names=feature_names)
assert_equal(rf.feature_names, feature_names)
rf = ReceptiveField(tmin, tmax, 1, estimator=mod)
rf.fit(X, y)
assert_equal(rf.feature_names, None)
# Float becomes ridge
rf = ReceptiveField(tmin, tmax, 1, ['one', 'two', 'three'], estimator=0)
str(rf) # repr works before fit
rf.fit(X, y)
assert isinstance(rf.estimator_, TimeDelayingRidge)
str(rf) # repr works after fit
rf = ReceptiveField(tmin, tmax, 1, ['one'], estimator=0)
rf.fit(X[:, [0]], y)
str(rf) # repr with one feature
# Should only accept estimators or floats
with pytest.raises(ValueError, match='`estimator` must be a float or'):
ReceptiveField(tmin, tmax, 1, estimator='foo').fit(X, y)
with pytest.raises(ValueError, match='`estimator` must be a float or'):
ReceptiveField(tmin, tmax, 1, estimator=np.array([1, 2, 3])).fit(X, y)
with pytest.raises(ValueError, match='tmin .* must be at most tmax'):
ReceptiveField(5, 4, 1).fit(X, y)
# scorers
for key, val in _SCORERS.items():
rf = ReceptiveField(tmin, tmax, 1, ['one'],
estimator=0, scoring=key, patterns=True)
rf.fit(X[:, [0]], y)
y_pred = rf.predict(X[:, [0]]).T.ravel()[:, np.newaxis]
assert_allclose(val(y[:, np.newaxis], y_pred,
multioutput='raw_values'),
rf.score(X[:, [0]], y), rtol=1e-2)
with pytest.raises(ValueError, match='inputs must be shape'):
_SCORERS['corrcoef'](y.ravel(), y_pred, multioutput='raw_values')
# Need correct scorers
with pytest.raises(ValueError, match='scoring must be one of'):
ReceptiveField(tmin, tmax, 1., scoring='foo').fit(X, y)
@pytest.mark.parametrize('n_jobs', n_jobs_test)
def test_time_delaying_fast_calc(n_jobs):
"""Test time delaying and fast calculations."""
X = np.array([[1, 2, 3], [5, 7, 11]]).T
# all negative
smin, smax = 1, 2
X_del = _delay_time_series(X, smin, smax, 1.)
# (n_times, n_features, n_delays) -> (n_times, n_features * n_delays)
X_del.shape = (X.shape[0], -1)
expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = [[5, 2, 19, 10], [2, 1, 7, 5], [19, 7, 74, 35], [10, 5, 35, 25]]
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# all positive
smin, smax = -2, -1
X_del = _delay_time_series(X, smin, smax, 1.)
X_del.shape = (X.shape[0], -1)
expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = [[9, 6, 33, 21], [6, 13, 22, 47],
[33, 22, 121, 77], [21, 47, 77, 170]]
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# both sides
smin, smax = -1, 1
X_del = _delay_time_series(X, smin, smax, 1.)
X_del.shape = (X.shape[0], -1)
expected = np.array([[2, 3, 0], [1, 2, 3], [0, 1, 2],
[7, 11, 0], [5, 7, 11], [0, 5, 7]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = [[13, 8, 3, 47, 31, 15],
[8, 14, 8, 29, 52, 31],
[3, 8, 5, 11, 29, 19],
[47, 29, 11, 170, 112, 55],
[31, 52, 29, 112, 195, 112],
[15, 31, 19, 55, 112, 74]]
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# slightly harder to get the non-Toeplitz correction correct
X = np.array([[1, 2, 3, 5]]).T
smin, smax = 0, 3
X_del = _delay_time_series(X, smin, smax, 1.)
X_del.shape = (X.shape[0], -1)
expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3],
[0, 0, 1, 2], [0, 0, 0, 1]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = [[39, 23, 13, 5], [23, 14, 8, 3], [13, 8, 5, 2], [5, 3, 2, 1]]
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# even worse
X = np.array([[1, 2, 3], [5, 7, 11]]).T
smin, smax = 0, 2
X_del = _delay_time_series(X, smin, smax, 1.)
X_del.shape = (X.shape[0], -1)
expected = np.array([[1, 2, 3], [0, 1, 2], [0, 0, 1],
[5, 7, 11], [0, 5, 7], [0, 0, 5]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = np.array([[14, 8, 3, 52, 31, 15],
[8, 5, 2, 29, 19, 10],
[3, 2, 1, 11, 7, 5],
[52, 29, 11, 195, 112, 55],
[31, 19, 7, 112, 74, 35],
[15, 10, 5, 55, 35, 25]])
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# And a bunch of random ones for good measure
rng = np.random.RandomState(0)
X = rng.randn(25, 3)
y = np.empty((25, 2))
vals = (0, -1, 1, -2, 2, -11, 11)
for smax in vals:
for smin in vals:
if smin > smax:
continue
for ii in range(X.shape[1]):
kernel = rng.randn(smax - smin + 1)
kernel -= np.mean(kernel)
y[:, ii % y.shape[-1]] = np.convolve(X[:, ii], kernel, 'same')
x_xt, x_yt, n_ch_x, _, _ = _compute_corrs(X, y, smin, smax + 1)
X_del = _delay_time_series(X, smin, smax, 1., fill_mean=False)
x_yt_true = einsum('tfd,to->ofd', X_del, y)
x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T
assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax))
X_del.shape = (X.shape[0], -1)
x_xt_true = np.dot(X_del.T, X_del).T
assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax))
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_1d(n_jobs):
"""Test that the fast solving works like Ridge."""
from sklearn.linear_model import Ridge
rng = np.random.RandomState(0)
x = rng.randn(500, 1)
for delay in range(-2, 3):
y = np.zeros(500)
slims = [(-2, 4)]
if delay == 0:
y[:] = x[:, 0]
elif delay < 0:
y[:delay] = x[-delay:, 0]
slims += [(-4, -1)]
else:
y[delay:] = x[:-delay, 0]
slims += [(1, 2)]
for ndim in (1, 2):
y.shape = (y.shape[0],) + (1,) * (ndim - 1)
for slim in slims:
smin, smax = slim
lap = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian',
fit_intercept=False, n_jobs=n_jobs)
for estimator in (Ridge(alpha=0.), Ridge(alpha=0.1), 0., 0.1,
lap):
for offset in (-100, 0, 100):
model = ReceptiveField(smin, smax, 1.,
estimator=estimator,
n_jobs=n_jobs)
use_x = x + offset
model.fit(use_x, y)
if estimator is lap:
continue # these checks are too stringent
assert_allclose(model.estimator_.intercept_, -offset,
atol=1e-1)
assert_array_equal(model.delays_,
np.arange(smin, smax + 1))
expected = (model.delays_ == delay).astype(float)
expected = expected[np.newaxis] # features
if y.ndim == 2:
expected = expected[np.newaxis] # outputs
assert_equal(model.coef_.ndim, ndim + 1)
assert_allclose(model.coef_, expected, atol=1e-3)
start = model.valid_samples_.start or 0
stop = len(use_x) - (model.valid_samples_.stop or 0)
assert stop - start >= 495
assert_allclose(
model.predict(use_x)[model.valid_samples_],
y[model.valid_samples_], atol=1e-2)
score = np.mean(model.score(use_x, y))
assert score > 0.9999
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_nd(n_jobs):
"""Test multidimensional support."""
from sklearn.linear_model import Ridge
# multidimensional
rng = np.random.RandomState(3)
x = rng.randn(1000, 3)
y = np.zeros((1000, 2))
smin, smax = 0, 5
# This is a weird assignment, but it's just a way to distribute some
# unique values at various delays, and "expected" explains how they
# should appear in the resulting RF
for ii in range(1, 5):
y[ii:, ii % 2] += (-1) ** ii * ii * x[:-ii, ii % 3]
y -= np.mean(y, axis=0)
x -= np.mean(x, axis=0)
x_off = x + 1e3
expected = [
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4, 0],
[0, 0, 2, 0, 0, 0]],
[[0, 0, 0, -3, 0, 0],
[0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
]
tdr_l = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian', n_jobs=n_jobs)
tdr_nc = TimeDelayingRidge(smin, smax, 1., 0.1, n_jobs=n_jobs,
edge_correction=False)
for estimator, atol in zip((Ridge(alpha=0.), 0., 0.01, tdr_l, tdr_nc),
(1e-3, 1e-3, 1e-3, 5e-3, 5e-2)):
model = ReceptiveField(smin, smax, 1.,
estimator=estimator)
model.fit(x, y)
assert_array_equal(model.delays_,
np.arange(smin, smax + 1))
assert_allclose(model.coef_, expected, atol=atol)
tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type='foo',
n_jobs=n_jobs)
model = ReceptiveField(smin, smax, 1., estimator=tdr)
with pytest.raises(ValueError, match='reg_type entries must be one of'):
model.fit(x, y)
tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type=['laplacian'],
n_jobs=n_jobs)
model = ReceptiveField(smin, smax, 1., estimator=tdr)
with pytest.raises(ValueError, match='reg_type must have two elements'):
model.fit(x, y)
model = ReceptiveField(smin, smax, 1, estimator=tdr, fit_intercept=False)
with pytest.raises(ValueError, match='fit_intercept'):
model.fit(x, y)
# Now check the intercept_
tdr = TimeDelayingRidge(smin, smax, 1., 0., n_jobs=n_jobs)
tdr_no = TimeDelayingRidge(smin, smax, 1., 0., fit_intercept=False,
n_jobs=n_jobs)
for estimator in (Ridge(alpha=0.), tdr,
Ridge(alpha=0., fit_intercept=False), tdr_no):
# first with no intercept in the data
model = ReceptiveField(smin, smax, 1., estimator=estimator)
model.fit(x, y)
assert_allclose(model.estimator_.intercept_, 0., atol=1e-7,
err_msg=repr(estimator))
assert_allclose(model.coef_, expected, atol=1e-3,
err_msg=repr(estimator))
y_pred = model.predict(x)
assert_allclose(y_pred[model.valid_samples_],
y[model.valid_samples_],
atol=1e-2, err_msg=repr(estimator))
score = np.mean(model.score(x, y))
assert score > 0.9999
# now with an intercept in the data
model.fit(x_off, y)
if estimator.fit_intercept:
val = [-6000, 4000]
itol = 0.5
ctol = 5e-4
else:
val = itol = 0.
ctol = 2.
assert_allclose(model.estimator_.intercept_, val, atol=itol,
err_msg=repr(estimator))
assert_allclose(model.coef_, expected, atol=ctol, rtol=ctol,
err_msg=repr(estimator))
if estimator.fit_intercept:
ptol = 1e-2
stol = 0.999999
else:
ptol = 10
stol = 0.6
y_pred = model.predict(x_off)[model.valid_samples_]
assert_allclose(y_pred, y[model.valid_samples_],
atol=ptol, err_msg=repr(estimator))
score = np.mean(model.score(x_off, y))
assert score > stol, estimator
model = ReceptiveField(smin, smax, 1., fit_intercept=False)
model.fit(x_off, y)
assert_allclose(model.estimator_.intercept_, 0., atol=1e-7)
score = np.mean(model.score(x_off, y))
assert score > 0.6
def _make_data(n_feats, n_targets, n_samples, tmin, tmax):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_feats)
w = rng.randn(int((tmax - tmin) + 1) * n_feats, n_targets)
# Delay inputs
X_del = np.concatenate(
_delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1)
y = np.dot(X_del, w)
return X, y
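# Shape note for _make_data: X is (n_samples, n_feats), the delayed design matrix
# X_del is (n_samples, n_delays * n_feats) with n_delays = int(tmax - tmin) + 1,
# and y = X_del.dot(w) has shape (n_samples, n_targets).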
@requires_sklearn
def test_inverse_coef():
"""Test inverse coefficients computation."""
from sklearn.linear_model import Ridge
tmin, tmax = 0., 10.
n_feats, n_targets, n_samples = 3, 2, 1000
n_delays = int((tmax - tmin) + 1)
# Check coefficient dims, for all estimator types
X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax)
tdr = TimeDelayingRidge(tmin, tmax, 1., 0.1, 'laplacian')
for estimator in (0., 0.01, Ridge(alpha=0.), tdr):
rf = ReceptiveField(tmin, tmax, 1., estimator=estimator,
patterns=True)
rf.fit(X, y)
inv_rf = ReceptiveField(tmin, tmax, 1., estimator=estimator,
patterns=True)
inv_rf.fit(y, X)
assert_array_equal(rf.coef_.shape, rf.patterns_.shape,
(n_targets, n_feats, n_delays))
assert_array_equal(inv_rf.coef_.shape, inv_rf.patterns_.shape,
(n_feats, n_targets, n_delays))
# we should have np.dot(patterns.T,coef) ~ np.eye(n)
c0 = rf.coef_.reshape(n_targets, n_feats * n_delays)
c1 = rf.patterns_.reshape(n_targets, n_feats * n_delays)
assert_allclose(np.dot(c0, c1.T), np.eye(c0.shape[0]), atol=0.2)
@requires_sklearn
def test_linalg_warning():
"""Test that warnings are issued when no regularization is applied."""
from sklearn.linear_model import Ridge
n_feats, n_targets, n_samples = 5, 60, 50
X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax)
for estimator in (0., Ridge(alpha=0.)):
rf = ReceptiveField(tmin, tmax, 1., estimator=estimator)
with pytest.warns((RuntimeWarning, UserWarning),
match='[Singular|scipy.linalg.solve]'):
rf.fit(y, X)
run_tests_if_main()
|
|
"""
Equivalent layer processing.
Use the classes here to estimate an equivalent layer from potential field data.
Then you can use the estimated layer to perform transformations (gridding,
continuation, derivation, reduction to the pole, etc.) by forward modeling
the layer. Use :mod:`fatiando.gravmag.sphere` for forward modeling.
**Algorithms**
* :class:`~fatiando.gravmag.eqlayer.EQLGravity` and
:class:`~fatiando.gravmag.eqlayer.EQLTotalField`: The classic (space domain)
equivalent layer as formulated in Li and Oldenburg (2010) or
Oliveira Jr. et al (2012).
Doesn't have wavelet compression or other tweaks.
* :class:`~fatiando.gravmag.eqlayer.PELGravity` and
:class:`~fatiando.gravmag.eqlayer.PELTotalField`: The polynomial equivalent
layer of Oliveira Jr. et al (2012). A fast and memory efficient algorithm.
Both of these require special regularization
(:class:`~fatiando.gravmag.eqlayer.PELSmoothness`).
**References**
Li, Y., and D. W. Oldenburg (2010), Rapid construction of equivalent sources
using wavelets, Geophysics, 75(3), L51-L59, doi:10.1190/1.3378764.
Oliveira Jr., V. C., V. C. F. Barbosa, and L. Uieda (2012), Polynomial
equivalent layer, Geophysics, 78(1), G1-G13, doi:10.1190/geo2012-0196.1.
----
"""
from __future__ import division
from future.builtins import super, range
import numpy
import scipy.sparse
from . import sphere as kernel
from ..utils import dircos, safe_dot
from ..inversion import Misfit, Smoothness
class EQLBase(Misfit):
"""
Base class for the classic equivalent layer.
"""
def __init__(self, x, y, z, data, grid):
super().__init__(data=data, nparams=len(grid), islinear=True)
self.x = x
self.y = y
self.z = z
self.grid = grid
def predicted(self, p):
"""
Calculate the data predicted by a given parameter vector.
Parameters:
* p : 1d-array (optional)
The parameter vector with the estimated physical properties of the
layer. If not given, will use the value calculated by ``.fit()``.
Returns:
* result : 1d-array
The predicted data vector.
"""
return safe_dot(self.jacobian(p), p)
class EQLGravity(EQLBase):
"""
Estimate an equivalent layer from gravity data.
.. note:: Assumes x = North, y = East, z = Down.
Parameters:
* x, y, z : 1d-arrays
The x, y, z coordinates of each data point.
* data : 1d-array
The gravity data at each point.
* grid : :class:`~fatiando.mesher.PointGrid`
The sources in the equivalent layer. Will invert for the density of
each point in the grid.
* field : string
Which gravitational field is the data. Options are: ``'gz'`` (gravity
anomaly), ``'gxx'``, ``'gxy'``, ..., ``'gzz'`` (gravity gradient
tensor). Defaults to ``'gz'``.
"""
def __init__(self, x, y, z, data, grid, field='gz'):
super().__init__(x, y, z, data, grid)
self.field = field
def jacobian(self, p):
"""
Calculate the Jacobian matrix for a given parameter vector.
"""
x = self.x
y = self.y
z = self.z
func = getattr(kernel, self.field)
        jac = numpy.empty((self.ndata, self.nparams), dtype=float)
for i, c in enumerate(self.grid):
jac[:, i] = func(x, y, z, [c], dens=1.)
return jac
class EQLTotalField(EQLBase):
"""
Estimate an equivalent layer from total field magnetic anomaly data.
.. note:: Assumes x = North, y = East, z = Down.
Parameters:
* x, y, z : 1d-arrays
The x, y, z coordinates of each data point.
* data : 1d-array
The total field anomaly data at each point.
* inc, dec : floats
The inclination and declination of the inducing field
* grid : :class:`~fatiando.mesher.PointGrid`
The sources in the equivalent layer. Will invert for the magnetization
intensity of each point in the grid.
* sinc, sdec : None or floats
The inclination and declination of the equivalent layer. Use these if
there is remanent magnetization and the total magnetization of the
        layer is different from the induced magnetization.
If there is only induced magnetization, use None
"""
def __init__(self, x, y, z, data, inc, dec, grid, sinc=None, sdec=None):
super().__init__(x, y, z, data, grid)
self.inc, self.dec = inc, dec
self.sinc = sinc if sinc is not None else inc
self.sdec = sdec if sdec is not None else dec
def jacobian(self, p):
"""
Calculate the Jacobian matrix for a given parameter vector.
"""
x = self.x
y = self.y
z = self.z
inc, dec = self.inc, self.dec
mag = dircos(self.sinc, self.sdec)
jac = numpy.empty((self.ndata, self.nparams), dtype=float)
for i, c in enumerate(self.grid):
jac[:, i] = kernel.tf(x, y, z, [c], inc, dec, pmag=mag)
return jac
class PELBase(EQLBase):
"""
Base class for the Polynomial Equivalent Layer.
.. note::
Overloads *fit* to convert the estimated coefficients to physical
properties. The coefficients are stored in the ``coeffs_`` attribute.
"""
def __init__(self, x, y, z, data, grid, windows, degree):
super().__init__(x, y, z, data, grid)
self.nparams = windows[0]*windows[1]*ncoeffs(degree)
self.windows = windows
self.degree = degree
def fmt_estimate(self, coefs):
"""
Convert the estimated polynomial coefficients to physical property
values along the layer.
Parameters:
* coefs : 1d-array
The estimated parameter vector with the polynomial coefficients
Returns:
* estimate : 1d-array
The converted physical property values along the layer.
"""
ny, nx = self.windows
pergrid = ncoeffs(self.degree)
estimate = numpy.empty(self.grid.shape, dtype=float)
grids = self.grid.split(self.windows)
k = 0
ystart = 0
gny, gnx = grids[0].shape
for i in range(ny):
yend = ystart + gny
xstart = 0
for j in range(nx):
xend = xstart + gnx
g = grids[k]
bk = _bkmatrix(g, self.degree)
window_coefs = coefs[k * pergrid:(k + 1) * pergrid]
window_props = safe_dot(bk, window_coefs).reshape(g.shape)
estimate[ystart:yend, xstart:xend] = window_props
xstart = xend
k += 1
ystart = yend
self.coeffs_ = coefs
return estimate.ravel()
def _bkmatrix(grid, degree):
"""
Make the Bk polynomial coefficient matrix for a given PointGrid.
This matrix converts the coefficients into physical property values.
Parameters:
* grid : :class:`~fatiando.mesher.PointGrid`
The sources in the equivalent layer
* degree : int
The degree of the bivariate polynomial
Returns:
* bk : 2d-array
The matrix
Examples:
>>> from fatiando.mesher import PointGrid
>>> grid = PointGrid((0, 1, 0, 2), 10, (2, 2))
    >>> print(_bkmatrix(grid, 2))
[[ 1. 0. 0. 0. 0. 0.]
[ 1. 2. 0. 4. 0. 0.]
[ 1. 0. 1. 0. 0. 1.]
[ 1. 2. 1. 4. 2. 1.]]
    >>> print(_bkmatrix(grid, 1))
[[ 1. 0. 0.]
[ 1. 2. 0.]
[ 1. 0. 1.]
[ 1. 2. 1.]]
    >>> print(_bkmatrix(grid, 3))
[[ 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[ 1. 2. 0. 4. 0. 0. 8. 0. 0. 0.]
[ 1. 0. 1. 0. 0. 1. 0. 0. 0. 1.]
[ 1. 2. 1. 4. 2. 1. 8. 4. 2. 1.]]
"""
bmatrix = numpy.transpose(
[(grid.x**i)*(grid.y**j)
for l in range(1, degree + 2)
for i, j in zip(range(l), range(l - 1, -1, -1))])
return bmatrix
def ncoeffs(degree):
"""
    Calculate the number of coefficients in a bivariate polynomial.
Parameters:
* degree : int
The degree of the polynomial
Returns:
* n : int
The number of coefficients
Examples:
>>> ncoeffs(1)
3
>>> ncoeffs(2)
6
>>> ncoeffs(3)
10
>>> ncoeffs(4)
15
"""
return sum(range(1, degree + 2))
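# Note: ncoeffs(degree) equals the triangular number (degree + 1)*(degree + 2)//2,
# i.e. the number of monomials x**i * y**j with i + j <= degree.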
class PELGravity(PELBase):
"""
Estimate a polynomial equivalent layer from gravity data.
.. note:: Assumes x = North, y = East, z = Down.
Parameters:
* x, y, z : 1d-arrays
The x, y, z coordinates of each data point.
* data : 1d-array
The gravity data at each point.
* grid : :class:`~fatiando.mesher.PointGrid`
The sources in the equivalent layer. Will invert for the density of
each point in the grid.
* windows : tuple = (ny, nx)
The number of windows that the layer will be divided in the y and x
directions, respectively
* degree : int
The degree of the bivariate polynomials used in each window of the PEL
* field : string
Which gravitational field is the data. Options are: ``'gz'`` (gravity
anomaly), ``'gxx'``, ``'gxy'``, ..., ``'gzz'`` (gravity gradient
tensor). Defaults to ``'gz'``.
"""
def __init__(self, x, y, z, data, grid, windows, degree, field='gz'):
super().__init__(x, y, z, data, grid, windows, degree)
self.field = field
def jacobian(self, p):
"""
Calculate the Jacobian matrix for a given parameter vector.
"""
x = self.x
y = self.y
z = self.z
func = getattr(kernel, self.field)
grids = self.grid.split(self.windows)
pergrid = ncoeffs(self.degree)
jac = numpy.empty((self.ndata, self.nparams), dtype=float)
gk = numpy.empty((self.ndata, grids[0].size), dtype=float)
for i, grid in enumerate(grids):
bk = _bkmatrix(grid, self.degree)
for k, c in enumerate(grid):
gk[:, k] = func(x, y, z, [c], dens=1.)
jac[:, i*pergrid:(i + 1)*pergrid] = safe_dot(gk, bk)
return jac
class PELTotalField(PELBase):
"""
Estimate a polynomial equivalent layer from magnetic total field anomaly.
.. note:: Assumes x = North, y = East, z = Down.
Parameters:
* x, y, z : 1d-arrays
The x, y, z coordinates of each data point.
* data : 1d-array
The total field magnetic anomaly data at each point.
* inc, dec : floats
The inclination and declination of the inducing field
* grid : :class:`~fatiando.mesher.PointGrid`
The sources in the equivalent layer. Will invert for the magnetization
intensity of each point in the grid.
* windows : tuple = (ny, nx)
The number of windows that the layer will be divided in the y and x
directions, respectively
* degree : int
The degree of the bivariate polynomials used in each window of the PEL
* sinc, sdec : None or floats
The inclination and declination of the equivalent layer. Use these if
there is remanent magnetization and the total magnetization of the
        layer is different from the induced magnetization.
If there is only induced magnetization, use None
"""
def __init__(self, x, y, z, data, inc, dec, grid, windows, degree,
sinc=None, sdec=None):
super().__init__(x, y, z, data, grid, windows, degree)
self.inc, self.dec = inc, dec
self.sinc = sinc if sinc is not None else inc
self.sdec = sdec if sdec is not None else dec
def jacobian(self, p):
"""
Calculate the Jacobian matrix for a given parameter vector.
"""
x = self.x
y = self.y
z = self.z
inc, dec = self.inc, self.dec
mag = dircos(self.sinc, self.sdec)
grids = self.grid.split(self.windows)
pergrid = ncoeffs(self.degree)
jac = numpy.empty((self.ndata, self.nparams), dtype=float)
gk = numpy.empty((self.ndata, grids[0].size), dtype=float)
for i, grid in enumerate(grids):
bk = _bkmatrix(grid, self.degree)
for k, c in enumerate(grid):
gk[:, k] = kernel.tf(x, y, z, [c], inc, dec, pmag=mag)
jac[:, i*pergrid:(i + 1)*pergrid] = safe_dot(gk, bk)
return jac
class PELSmoothness(Smoothness):
"""
Regularization to "join" neighboring windows in the PEL.
Use this with :class:`~fatiando.gravmag.eqlayer.PELGravity` and
:class:`~fatiando.gravmag.eqlayer.PELTotalField`.
Parameters passed to PELSmoothness must be the same as passed to the PEL
solvers.
Parameters:
* grid : :class:`~fatiando.mesher.PointGrid`
The sources in the equivalent layer.
* windows : tuple = (ny, nx)
The number of windows that the layer will be divided in the y and x
directions, respectively.
* degree : int
The degree of the bivariate polynomials used in each window of the PEL
See the docstring of :class:`~fatiando.gravmag.eqlayer.PELGravity` for an
example usage.
"""
def __init__(self, grid, windows, degree):
super().__init__(_pel_fdmatrix(windows, grid, degree))
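# Sketch of how PELSmoothness is meant to be combined with the PEL solvers
# (illustrative; the regularization weight and variable names are assumptions):
#
#     pel = (PELGravity(x, y, z, gz_data, layer, windows, degree) +
#            1e-21 * PELSmoothness(layer, windows, degree))
#     pel.fit()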
def _pel_fdmatrix(windows, grid, degree):
"""
Makes the finite difference matrix for PEL smoothness.
"""
ny, nx = windows
grids = grid.split(windows)
gsize = grids[0].size
gny, gnx = grids[0].shape
nderivs = (nx - 1) * grid.shape[0] + (ny - 1) * grid.shape[1]
rmatrix = scipy.sparse.lil_matrix((nderivs, grid.size))
deriv = 0
# derivatives in x
for k in range(0, len(grids) - ny):
bottom = k * gsize + gny * (gnx - 1)
top = (k + ny) * gsize
for i in range(gny):
rmatrix[deriv, bottom + i] = -1.
            rmatrix[deriv, top + i] = 1.
deriv += 1
# derivatives in y
for k in range(0, len(grids)):
if (k + 1) % ny == 0:
continue
right = k * gsize + gny - 1
left = (k + 1) * gsize
for i in range(gnx):
rmatrix[deriv, right + i * gny] = -1.
rmatrix[deriv, left + i * gny] = 1.
deriv += 1
rmatrix = rmatrix.tocsr()
# Make the RB matrix because R is for the sources, B converts it to
# coefficients.
pergrid = ncoeffs(degree)
ncoefs = len(grids) * pergrid
fdmatrix = numpy.empty((nderivs, ncoefs), dtype=float)
st = 0
for i, g in enumerate(grids):
bk = _bkmatrix(g, degree)
en = st + g.size
fdmatrix[:, i*pergrid:(i + 1)*pergrid] = safe_dot(rmatrix[:, st:en],
bk)
st = en
return fdmatrix
|
|
from activities.models import Message
from categories.models import Category, Keyword
from constance import config
from employees.models import Employee, Location, Position, Role
from events.models import Event, EventActivity
from stars.models import Badge
from django.conf import settings
from django.contrib.sites.models import Site
from django.db.models import Q
from django.shortcuts import get_list_or_404, get_object_or_404
from rest_framework.exceptions import APIException
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from .serializers import CategorySerializer, KeywordSerializer, BadgeSerializer
from .serializers import EmployeeSerializer, EmployeeTopSerializer
from .serializers import LocationSerializer, PositionSerializer, RoleSerializer
from .serializers import EventSerializer, EventActivitySerializer
from .serializers import MessageSerializer, SiteInfoSerializer
from .pagination import AdministratorPagination
class BadgeList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, format=None):
"""
List all badges
---
serializer: administrator.serializers.BadgeSerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
badges = get_list_or_404(Badge)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(badges, request)
serializer = BadgeSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = BadgeSerializer(badges, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
"""
Create new badge
---
serializer: administrator.serializers.BadgeSerializer
"""
serializer = BadgeSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
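# Example requests for the list endpoints in this module (the URL prefix is
# hypothetical; only the 'pagination' query parameter is defined by these views):
#
#     GET /administrator/badges/?pagination=true   -> paginated BadgeSerializer list
#     GET /administrator/badges/                   -> full, unpaginated list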
class BadgeDetail(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, badge_id, format=None):
"""
Get badge details
"""
badge = get_object_or_404(Badge, pk=badge_id)
serializer = BadgeSerializer(badge)
return Response(serializer.data)
def put(self, request, badge_id, format=None):
"""
Edit badge
---
serializer: administrator.serializers.BadgeSerializer
"""
badge = get_object_or_404(Badge, pk=badge_id)
serializer = BadgeSerializer(badge, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, badge_id, format=None):
"""
        Delete badge (marks the badge inactive; edit the is_active attribute to revert this change)
---
serializer: administrator.serializers.BadgeSerializer
"""
badge = get_object_or_404(Badge, pk=badge_id)
badge.is_active = False
badge.save()
serializer = BadgeSerializer(badge)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
class CategoryList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, format=None):
"""
List all categories
---
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
categories = get_list_or_404(Category)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(categories, request)
serializer = CategorySerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = CategorySerializer(categories, many=True)
            return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
"""
Create new category
---
serializer: administrator.serializers.CategorySerializer
"""
serializer = CategorySerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CategoryDetail(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, category_id, format=None):
"""
Get category details
"""
category = get_object_or_404(Category, pk=category_id)
serializer = CategorySerializer(category)
return Response(serializer.data)
def put(self, request, category_id, format=None):
"""
Edit category
---
serializer: administrator.serializers.CategorySerializer
"""
category = get_object_or_404(Category, pk=category_id)
serializer = CategorySerializer(category, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, category_id, format=None):
"""
        Delete category (marks the category inactive; edit the is_active attribute to revert this change)
---
serializer: administrator.serializers.CategorySerializer
"""
category = get_object_or_404(Category, pk=category_id)
category.is_active = False
category.save()
serializer = CategorySerializer(category)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
class EmployeeList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, format=None):
"""
List all employees
---
serializer: administrator.serializers.EmployeeSerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
employees = get_list_or_404(Employee)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(employees, request)
serializer = EmployeeSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = EmployeeSerializer(employees, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class EmployeeTopList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, kind, format=None):
"""
        List top employees ordered by the given kind (descending)
        ---
        serializer: administrator.serializers.EmployeeTopSerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
- name: quantity
required: false
type: string
paramType: query
"""
employee_list = Employee.objects.filter(is_active=True, is_base_profile_complete=True).order_by('-' + kind)
if request.GET.get('quantity'):
try:
                quantity = int(request.GET.get('quantity'))
employee_list = employee_list[:quantity]
except Exception as e:
raise APIException(e)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(employee_list, request)
serializer = EmployeeTopSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = EmployeeTopSerializer(employee_list, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
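# Example request for the ranking endpoint above (URL prefix and the 'kind' value
# are hypothetical; 'kind' must be a valid Employee ordering field, since the
# queryset is ordered by '-' + kind):
#
#     GET /administrator/employees/top/total_score/?quantity=10&pagination=true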
class EventList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, format=None):
"""
List all events
---
serializer: administrator.serializers.EventSerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
events = get_list_or_404(Event)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(events, request)
serializer = EventSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = EventSerializer(events, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
"""
Create new event
---
serializer: administrator.serializers.EventSerializer
"""
serializer = EventSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class EventDetail(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, event_id, format=None):
"""
Get event details
---
serializer: administrator.serializers.EventSerializer
"""
event = get_object_or_404(Event, pk=event_id)
serializer = EventSerializer(event)
return Response(serializer.data)
def put(self, request, event_id, format=None):
"""
Edit event
---
serializer: administrator.serializers.EventSerializer
"""
        event = get_object_or_404(Event, pk=event_id)
serializer = EventSerializer(event, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, event_id, format=None):
"""
        Delete event (marks the event inactive; edit the is_active attribute to revert this change)
---
serializer: administrator.serializers.EventSerializer
"""
event = get_object_or_404(Event, pk=event_id)
event.is_active = False
event.save()
serializer = EventSerializer(event)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
class EventActivityList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, event_id, format=None):
"""
List all activities for an event
---
serializer: administrator.serializers.EventActivitySerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
event = get_object_or_404(Event, pk=event_id)
activities = EventActivity.objects.filter(event=event)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(activities, request)
serializer = EventActivitySerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = EventActivitySerializer(activities, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class EventActivityDetail(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, event_id, news_id, format=None):
"""
Get event activity detail
---
serializer: administrator.serializers.EventActivitySerializer
"""
event = get_object_or_404(Event, pk=event_id)
activity = get_object_or_404(EventActivity, event=event, pk=news_id)
serializer = EventActivitySerializer(activity)
return Response(serializer.data)
def delete(self, request, event_id, news_id, format=None):
"""
Delete event activity (you cannot revert this change)
"""
event = get_object_or_404(Event, pk=event_id)
activity = get_object_or_404(EventActivity, event=event, pk=news_id)
activity.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class KeywordList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, format=None):
"""
        List all keywords (tags, skills), or a filtered list if you pass ?search=<search terms>
---
parameters:
- name: search
required: false
type: string
paramType: query
- name: pagination
required: false
type: string
paramType: query
"""
if request.GET.get('search'):
request_terms = request.GET.get('search')
search_terms_array = request_terms.split()
initial_term = search_terms_array[0]
keywords = Keyword.objects.filter(Q(name__icontains=initial_term))
if len(search_terms_array) > 1:
for term in range(1, len(search_terms_array)):
keywords = keywords.filter(Q(name__icontains=search_terms_array[term]))
else:
keywords = get_list_or_404(Keyword)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(keywords, request)
serializer = KeywordSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = KeywordSerializer(keywords, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
"""
Create new keyword (tag, skill)
---
serializer: administrator.serializers.KeywordSerializer
"""
serializer = KeywordSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class KeywordDetail(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, keyword_id, format=None):
"""
Get keyword detail
"""
keyword = get_object_or_404(Keyword, pk=keyword_id)
serializer = KeywordSerializer(keyword)
return Response(serializer.data)
def put(self, request, keyword_id, format=None):
"""
Edit keyword
---
serializer: administrator.serializers.KeywordSerializer
"""
keyword = get_object_or_404(Keyword, pk=keyword_id)
serializer = KeywordSerializer(keyword, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, keyword_id, format=None):
"""
        Delete keyword (marks the keyword inactive; edit the is_active attribute to revert this change)
---
serializer: administrator.serializers.KeywordSerializer
"""
keyword = get_object_or_404(Keyword, pk=keyword_id)
keyword.is_active = False
keyword.save()
serializer = KeywordSerializer(keyword)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
class LocationList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, format=None):
"""
        List all locations
---
serializer: administrator.serializers.LocationSerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
locations = get_list_or_404(Location)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(locations, request)
serializer = LocationSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = LocationSerializer(locations, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
"""
Create new location
---
serializer: administrator.serializers.LocationSerializer
"""
serializer = LocationSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class LocationDetail(APIView):
permission_classes = (IsAuthenticated, IsAdminUser)
def get(self, request, location_id, format=None):
"""
Get location detail
"""
location = get_object_or_404(Location, pk=location_id)
serializer = LocationSerializer(location)
return Response(serializer.data)
def put(self, request, location_id, format=None):
"""
Edit location
---
serializer: administrator.serializers.LocationSerializer
"""
location = get_object_or_404(Location, pk=location_id)
serializer = LocationSerializer(location, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, location_id, format=None):
"""
        Deactivate location; edit the is_active attribute to revert this change
---
serializer: administrator.serializers.LocationSerializer
"""
location = get_object_or_404(Location, pk=location_id)
location.is_active = False
location.save()
serializer = LocationSerializer(location)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
class MessageList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, format=None):
"""
List all messages
---
serializer: administrator.serializers.MessageSerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
messages = get_list_or_404(Message)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(messages, request)
serializer = MessageSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = MessageSerializer(messages, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class MessageDetail(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, message_id, format=None):
"""
Get message detail
---
serializer: administrator.serializers.MessageSerializer
"""
message = get_object_or_404(Message, pk=message_id)
serializer = MessageSerializer(message)
return Response(serializer.data)
def delete(self, request, message_id, format=None):
"""
Delete message (you cannot revert this change)
---
serializer: administrator.serializers.MessageSerializer
"""
        message = get_object_or_404(Message, pk=message_id)
message.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class MessageListFromEmployee(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, employee_id, format=None):
"""
List all messages from employee
---
serializer: administrator.serializers.MessageSerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
employee = get_object_or_404(Employee, pk=employee_id)
messages = get_list_or_404(Message, from_user=employee)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(messages, request)
serializer = MessageSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = MessageSerializer(messages, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class PositionList(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, format=None):
"""
List all employee positions
---
serializer: administrator.serializers.PositionSerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
positions = get_list_or_404(Position)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(positions, request)
serializer = PositionSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = PositionSerializer(positions, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
"""
Create new position
---
serializer: administrator.serializers.PositionSerializer
"""
serializer = PositionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class PositionDetail(APIView):
permission_classes = (IsAuthenticated, IsAdminUser)
def get(self, request, position_id, format=None):
"""
Get position detail
"""
position = get_object_or_404(Position, pk=position_id)
serializer = PositionSerializer(position)
return Response(serializer.data)
def put(self, request, position_id, format=None):
"""
Edit position
---
serializer: administrator.serializers.PositionSerializer
"""
position = get_object_or_404(Position, pk=position_id)
serializer = PositionSerializer(position, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, position_id, format=None):
"""
Deactivate position; to revert this change, set the is_active attribute back to True
---
serializer: administrator.serializers.PositionSerializer
"""
position = get_object_or_404(Position, pk=position_id)
position.is_active = False
position.save()
serializer = PositionSerializer(position)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
class RoleList(APIView):
permission_classes = (IsAuthenticated, IsAdminUser)
def get(self, request, format=None):
"""
List all roles
---
serializer: administrator.serializers.RoleSerializer
parameters:
- name: pagination
required: false
type: string
paramType: query
"""
roles = get_list_or_404(Role)
if request.GET.get('pagination'):
pagination = request.GET.get('pagination')
if pagination == 'true':
paginator = AdministratorPagination()
results = paginator.paginate_queryset(roles, request)
serializer = RoleSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = RoleSerializer(roles, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
"""
Create new Role
---
serializer: administrator.serializers.RoleSerializer
"""
serializer = RoleSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class RoleDetail(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def get(self, request, role_id, format=None):
"""
Get role detail
---
serializer: administrator.serializers.RoleSerializer
"""
role = get_object_or_404(Role, pk=role_id)
serializer = RoleSerializer(role)
return Response(serializer.data)
def put(self, request, role_id, format=None):
"""
Edit role
---
serializer: administrator.serializers.RoleSerializer
"""
role = get_object_or_404(Role, pk=role_id)
serializer = RoleSerializer(role, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, role_id, format=None):
"""
Deactivate role; to revert this change, set the is_active attribute back to True.
---
serializer: administrator.serializers.RoleSerializer
"""
role = get_object_or_404(Role, pk=role_id)
role.is_active = False
role.save()
serializer = RoleSerializer(role)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
class ObjectsDelete(APIView):
permission_classes = (IsAdminUser, IsAuthenticated)
def delete(self, request, id, kind, format=None):
"""
WARNING: Force delete
"""
if kind == 'badge':
kind = get_object_or_404(Badge, pk=id)
elif kind == 'category':
kind = get_object_or_404(Category, pk=id)
elif kind == 'event':
kind = get_object_or_404(Event, pk=id)
elif kind == 'keyword':
kind = get_object_or_404(Keyword, pk=id)
elif kind == 'location':
kind = get_object_or_404(Location, pk=id)
elif kind == 'position':
kind = get_object_or_404(Position, pk=id)
elif kind == 'role':
kind = get_object_or_404(Role, pk=id)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
kind.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
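# Alternative dispatch sketch (added; illustrative only): the if/elif chain above
# could be collapsed into a lookup table built from the same model classes this
# module already imports, e.g.
#     KIND_TO_MODEL = {'badge': Badge, 'category': Category, 'event': Event,
#                      'keyword': Keyword, 'location': Location,
#                      'position': Position, 'role': Role}
#     model = KIND_TO_MODEL.get(kind)
#     if model is None:
#         return Response(status=status.HTTP_404_NOT_FOUND)
#     get_object_or_404(model, pk=id).delete()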
class SiteInfoDetail(APIView):
def get(self, request, format=None):
"""
Get site info
---
serializer: administrator.serializers.SiteInfoSerializer
"""
email_domain = settings.EMAIL_DOMAIN_LIST[0]
current_site = Site.objects.get_current()
version = config.VERSION
data = {'site': current_site.domain,
'email_domain': email_domain,
'backend_version': version}
serializer = SiteInfoSerializer(data)
return Response(serializer.data, status=status.HTTP_200_OK)
|
|
'''gradient/Jacobian of normal and t loglikelihood
use chain rule
normal derivative wrt mu, sigma and beta
new version: loc-scale distributions, derivative wrt loc, scale
also includes "standardized" t distribution (for use in GARCH)
TODO:
* use sympy for derivative of loglike wrt shape parameters
it works for df of t distribution dlog(gamma(a))da = polygamma(0,a) check
polygamma is available in scipy.special
* get loc-scale example to work with mean = X*b
* write some full unit test examples
A: josef-pktd
'''
import numpy as np
from scipy import special
from scipy.special import gammaln
def norm_lls(y, params):
'''normal loglikelihood given observations and mean mu and variance sigma2
Parameters
----------
y : array, 1d
normally distributed random variable
params: array, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
Returns
-------
lls : array
contribution to loglikelihood for each observation
'''
mu, sigma2 = params.T
lls = -0.5*(np.log(2*np.pi) + np.log(sigma2) + (y-mu)**2/sigma2)
return lls
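# Illustrative sanity check (added; not part of the original module): norm_lls
# should agree with scipy.stats.norm.logpdf evaluated at scale = sqrt(sigma2).
def _check_norm_lls_against_scipy():
    from scipy import stats
    y = np.array([-1.0, 0.0, 2.0])
    params = np.column_stack((np.zeros(3), 2.0 * np.ones(3)))  # mu=0, sigma2=2
    return np.allclose(norm_lls(y, params),
                       stats.norm.logpdf(y, loc=0.0, scale=np.sqrt(2.0)))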
def norm_lls_grad(y, params):
'''Jacobian of normal loglikelihood wrt mean mu and variance sigma2
Parameters
----------
y : array, 1d
normally distributed random variable
params: array, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
Returns
-------
grad : array (nobs, 2)
derivative of loglikelihood for each observation wrt mean in first
column, and wrt variance in second column
Notes
-----
this is actually the derivative wrt sigma not sigma**2, but evaluated
with parameter sigma2 = sigma**2
'''
mu, sigma2 = params.T
dllsdmu = (y-mu)/sigma2
dllsdsigma2 = ((y-mu)**2/sigma2 - 1)/np.sqrt(sigma2)
return np.column_stack((dllsdmu, dllsdsigma2))
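# Illustrative finite-difference check (added; not part of the original module):
# the first column of norm_lls_grad is d lls / d mu.  Note that, as documented in
# the Notes above, the second column is the derivative wrt sigma, not sigma**2.
def _check_norm_lls_grad_fd(eps=1e-6):
    y = np.array([0.3, -1.2, 2.0])
    params = np.column_stack((0.1 * np.ones(3), 1.5 * np.ones(3)))  # mu, sigma2
    bump = np.array([eps, 0.0])
    dmu_fd = (norm_lls(y, params + bump) - norm_lls(y, params - bump)) / (2 * eps)
    return np.allclose(norm_lls_grad(y, params)[:, 0], dmu_fd)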
def mean_grad(x, beta):
'''gradient/Jacobian for d (x*beta)/ d beta
'''
return x
def normgrad(y, x, params):
'''Jacobian of the normal loglikelihood wrt regression coefficients beta and scale sigma
Parameters
----------
y : array, 1d
normally distributed random variable with mean x*beta, and variance sigma2
x : array, 2d
explanatory variables, observation in rows, variables in columns
params: array_like, (nvars + 1)
array of coefficients and variance (beta, sigma2)
Returns
-------
grad : array (nobs, nvars + 1)
derivative of loglikelihood for each observation wrt the regression
coefficients beta in the first nvars columns, and wrt scale (sigma) in the
last column
assume params = (beta, sigma2)
Notes
-----
TODO: for heteroscedasticity need sigma to be a 1d array
'''
beta = params[:-1]
sigma2 = params[-1]*np.ones((len(y),1))
dmudbeta = mean_grad(x, beta)
mu = np.dot(x, beta)
#print beta, sigma2
params2 = np.column_stack((mu,sigma2))
dllsdms = norm_lls_grad(y,params2)
# stack derivatives wrt the regression coefficients beta and, in the last
# column, wrt sigma (second column of dllsdms; see norm_lls_grad Notes)
grad = np.column_stack((dllsdms[:, :1]*dmudbeta, dllsdms[:, 1:]))
return grad
def tstd_lls(y, params, df):
'''t loglikelihood given observations and mean mu and variance sigma2 = 1
Parameters
----------
y : array, 1d
normally distributed random variable
params: array, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
df : integer
degrees of freedom of the t distribution
Returns
-------
lls : array
contribution to loglikelihood for each observation
Notes
-----
parameterized for garch
'''
mu, sigma2 = params.T
df = df*1.0
#lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
#lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2.)/sigma2) + 0.5 * np.log(sigma2)
lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2)/sigma2) + 0.5 * np.log(sigma2)
return lls
def norm_dlldy(y):
'''derivative of log pdf of standard normal with respect to y
'''
return -y
def ts_dlldy(y, df):
'''derivative of log pdf of standardized (?) t with respect to y
Notes
-----
parameterized for garch, with mean 0 and variance 1
'''
#(df+1)/2. / (1 + y**2/(df-2.)) * 2.*y/(df-2.)
#return -(df+1)/(df-2.) / (1 + y**2/(df-2.)) * y
return -(df+1)/(df) / (1 + y**2/(df)) * y
def tstd_pdf(x, df):
'''pdf for standardized (not standard) t distribution, variance is one
'''
r = np.array(df*1.0)
Px = np.exp(special.gammaln((r+1)/2.)-special.gammaln(r/2.))/np.sqrt((r-2)*np.pi)
Px /= (1+(x**2)/(r-2))**((r+1)/2.)
return Px
def ts_lls(y, params, df):
'''t loglikelihood given observations and mean mu and variance sigma2 = 1
Parameters
----------
y : array, 1d
normally distributed random variable
params: array, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
df : integer
degrees of freedom of the t distribution
Returns
-------
lls : array
contribution to loglikelihood for each observation
Notes
-----
parameterized for garch
normalized/rescaled so that sigma2 is the variance
>>> df = 10; sigma = 1.
>>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
(array(0.0), array(1.0))
>>> sigma = np.sqrt(2.)
>>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
(array(0.0), array(2.0))
'''
#print y, params, df  # leftover debug output, disabled
mu, sigma2 = params.T
df = df*1.0
#lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
#lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2.)/sigma2) + 0.5 * np.log(sigma2)
lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df)*np.pi)
lls -= (df+1.)/2. * np.log(1. + (y-mu)**2/(df)/sigma2) + 0.5 * np.log(sigma2)
return lls
def ts_dlldy(y, df):
'''derivative of log pdf of standard t with respect to y
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
df : array_like
degrees of freedom,shape parameters of log-likelihood function
of t distribution
Returns
-------
dlldy : array
derivative of loglikelihood wrt random variable y evaluated at the
points given in y
Notes
-----
with mean 0 and scale 1, but variance is df/(df-2)
'''
df = df*1.
#(df+1)/2. / (1 + y**2/(df-2.)) * 2.*y/(df-2.)
#return -(df+1)/(df-2.) / (1 + y**2/(df-2.)) * y
return -(df+1)/(df) / (1 + y**2/(df)) * y
def tstd_dlldy(y, df):
'''derivative of log pdf of standardized t with respect to y
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
df : array_like
degrees of freedom,shape parameters of log-likelihood function
of t distribution
Returns
-------
dlldy : array
derivative of loglikelihood wrt random variable y evaluated at the
points given in y
Notes
-----
parameterized for garch, standardized to variance=1
'''
#(df+1)/2. / (1 + y**2/(df-2.)) * 2.*y/(df-2.)
return -(df+1)/(df-2.) / (1 + y**2/(df-2.)) * y
#return (df+1)/(df) / (1 + y**2/(df)) * y
def locscale_grad(y, loc, scale, dlldy, *args):
'''derivative of log-likelihood with respect to location and scale
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
loc : float
location parameter of distribution
scale : float
scale parameter of distribution
dlldy : function
derivative of loglikelihood fuction wrt. random variable x
args : array_like
shape parameters of log-likelihood function
Returns
-------
dlldloc : array
derivative of loglikelihood wrt location evaluated at the
points given in y
dlldscale : array
derivative of loglikelihood wrt scale evaluated at the
points given in y
'''
yst = (y-loc)/scale #ystandardized
dlldloc = -dlldy(yst, *args) / scale
dlldscale = -1./scale - dlldy(yst, *args) * (y-loc)/scale**2
return dlldloc, dlldscale
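# Derivation sketch (added for clarity): for a location-scale family with density
#     f(y) = g((y - loc)/scale) / scale,
# the loglikelihood of one observation is ll(y) = log g(z) - log(scale) with
# z = (y - loc)/scale, so by the chain rule
#     d ll / d loc   = dlldy(z) * (-1/scale)
#     d ll / d scale = -1/scale + dlldy(z) * (-(y - loc)/scale**2),
# which is exactly what locscale_grad returns.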
if __name__ == '__main__':
verbose = 0
if verbose:
sig = 0.1
beta = np.ones(2)
rvs = np.random.randn(10,3)
x = rvs[:,1:]
y = np.dot(x,beta) + sig*rvs[:,0]
params = [1,1,1]
print normgrad(y, x, params)
dllfdbeta = (y-np.dot(x, beta))[:,None]*x #for sigma = 1
print dllfdbeta
print locscale_grad(y, np.dot(x, beta), 1, norm_dlldy)
print (y-np.dot(x, beta))
from scipy import stats, misc
def llt(y,loc,scale,df):
return np.log(stats.t.pdf(y, df, loc=loc, scale=scale))
def lltloc(loc,y,scale,df):
return np.log(stats.t.pdf(y, df, loc=loc, scale=scale))
def lltscale(scale,y,loc,df):
return np.log(stats.t.pdf(y, df, loc=loc, scale=scale))
def llnorm(y,loc,scale):
return np.log(stats.norm.pdf(y, loc=loc, scale=scale))
def llnormloc(loc,y,scale):
return np.log(stats.norm.pdf(y, loc=loc, scale=scale))
def llnormscale(scale,y,loc):
return np.log(stats.norm.pdf(y, loc=loc, scale=scale))
if verbose:
print '\ngradient of t'
print misc.derivative(llt, 1, dx=1e-6, n=1, args=(0,1,10), order=3)
print 't ', locscale_grad(1, 0, 1, tstd_dlldy, 10)
print 'ts', locscale_grad(1, 0, 1, ts_dlldy, 10)
print misc.derivative(llt, 1.5, dx=1e-10, n=1, args=(0,1,20), order=3),
print 'ts', locscale_grad(1.5, 0, 1, ts_dlldy, 20)
print misc.derivative(llt, 1.5, dx=1e-10, n=1, args=(0,2,20), order=3),
print 'ts', locscale_grad(1.5, 0, 2, ts_dlldy, 20)
print misc.derivative(llt, 1.5, dx=1e-10, n=1, args=(1,2,20), order=3),
print 'ts', locscale_grad(1.5, 1, 2, ts_dlldy, 20)
print misc.derivative(lltloc, 1, dx=1e-10, n=1, args=(1.5,2,20), order=3),
print misc.derivative(lltscale, 2, dx=1e-10, n=1, args=(1.5,1,20), order=3)
y,loc,scale,df = 1.5, 1, 2, 20
print 'ts', locscale_grad(y,loc,scale, ts_dlldy, 20)
print misc.derivative(lltloc, loc, dx=1e-10, n=1, args=(y,scale,df), order=3),
print misc.derivative(lltscale, scale, dx=1e-10, n=1, args=(y,loc,df), order=3)
print '\ngradient of norm'
print misc.derivative(llnorm, 1, dx=1e-6, n=1, args=(0,1), order=3)
print locscale_grad(1, 0, 1, norm_dlldy)
y,loc,scale = 1.5, 1, 2
print 'ts', locscale_grad(y,loc,scale, norm_dlldy)
print misc.derivative(llnormloc, loc, dx=1e-10, n=1, args=(y,scale), order=3),
print misc.derivative(llnormscale, scale, dx=1e-10, n=1, args=(y,loc), order=3)
y,loc,scale = 1.5, 0, 1
print 'ts', locscale_grad(y,loc,scale, norm_dlldy)
print misc.derivative(llnormloc, loc, dx=1e-10, n=1, args=(y,scale), order=3),
print misc.derivative(llnormscale, scale, dx=1e-10, n=1, args=(y,loc), order=3)
#print 'still something wrong with handling of scale and variance'
#looks ok now
print '\nloglike of t'
print tstd_lls(1, np.array([0,1]), 100), llt(1,0,1,100), 'differently standardized'
print tstd_lls(1, np.array([0,1]), 10), llt(1,0,1,10), 'differently standardized'
print ts_lls(1, np.array([0,1]), 10), llt(1,0,1,10)
print tstd_lls(1, np.array([0,1.*10./8.]), 10), llt(1.,0,1.,10)
print ts_lls(1, np.array([0,1]), 100), llt(1,0,1,100)
print tstd_lls(1, np.array([0,1]), 10), llt(1,0,1.*np.sqrt(8/10.),10)
from numpy.testing import assert_almost_equal
params =[(0, 1), (1.,1.), (0.,2.), ( 1., 2.)]
yt = np.linspace(-2.,2.,11)
for loc,scale in params:
dlldlo = misc.derivative(llnormloc, loc, dx=1e-10, n=1, args=(yt,scale), order=3)
dlldsc = misc.derivative(llnormscale, scale, dx=1e-10, n=1, args=(yt,loc), order=3)
gr = locscale_grad(yt, loc, scale, norm_dlldy)
assert_almost_equal(dlldlo, gr[0], 5, err_msg='deriv loc')
assert_almost_equal(dlldsc, gr[1], 5, err_msg='deriv scale')
for df in [3, 10, 100]:
for loc,scale in params:
dlldlo = misc.derivative(lltloc, loc, dx=1e-10, n=1, args=(yt,scale,df), order=3)
dlldsc = misc.derivative(lltscale, scale, dx=1e-10, n=1, args=(yt,loc,df), order=3)
gr = locscale_grad(yt, loc, scale, ts_dlldy, df)
assert_almost_equal(dlldlo, gr[0], 4, err_msg='deriv loc')
assert_almost_equal(dlldsc, gr[1], 4, err_msg='deriv scale')
assert_almost_equal(ts_lls(yt, np.array([loc, scale**2]), df),
llt(yt,loc,scale,df), 5,
err_msg='loglike')
assert_almost_equal(tstd_lls(yt, np.array([loc, scale**2]), df),
llt(yt,loc,scale*np.sqrt((df-2.)/df),df), 5,
err_msg='loglike')
|
|
"""Redis transport."""
from __future__ import absolute_import, unicode_literals
import numbers
import socket
from bisect import bisect
from collections import namedtuple
from contextlib import contextmanager
from time import time
from vine import promise
from kombu.exceptions import InconsistencyError, VersionMismatch
from kombu.five import Empty, values, string_t
from kombu.log import get_logger
from kombu.utils.compat import register_after_fork
from kombu.utils.eventio import poll, READ, ERR
from kombu.utils.encoding import bytes_to_str
from kombu.utils.json import loads, dumps
from kombu.utils.objects import cached_property
from kombu.utils.scheduling import cycle_by_name
from kombu.utils.url import _parse_url
from kombu.utils.uuid import uuid
from kombu.utils.compat import _detect_environment
from . import virtual
try:
import redis
except ImportError: # pragma: no cover
redis = None # noqa
try:
from redis import sentinel
except ImportError: # pragma: no cover
sentinel = None # noqa
logger = get_logger('kombu.transport.redis')
crit, warn = logger.critical, logger.warn
DEFAULT_PORT = 6379
DEFAULT_DB = 0
PRIORITY_STEPS = [0, 3, 6, 9]
error_classes_t = namedtuple('error_classes_t', (
'connection_errors', 'channel_errors',
))
NO_ROUTE_ERROR = """
Cannot route message for exchange {0!r}: Table empty or key no longer exists.
Probably the key ({1!r}) has been removed from the Redis database.
"""
# This implementation may seem overly complex, but I assure you there is
# a good reason for doing it this way.
#
# Consuming from several connections enables us to emulate channels,
# which means we can have different service guarantees for individual
# channels.
#
# So we need to consume messages from multiple connections simultaneously,
# and using epoll means we don't have to do so using multiple threads.
#
# Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout
# exchanges (broadcast), as an alternative to pushing messages to fanout-bound
# queues manually.
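# Illustrative usage sketch (added; not part of the transport itself): an
# application normally reaches this code through Kombu's public API.  The URL
# and queue name below are assumptions chosen for the example.
def _example_redis_transport_usage():  # pragma: no cover
    from kombu import Connection
    with Connection('redis://localhost:6379/0') as conn:
        queue = conn.SimpleQueue('example-queue')
        queue.put({'hello': 'world'})
        message = queue.get(block=True, timeout=1)
        message.ack()
        queue.close()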
def get_redis_error_classes():
"""Return tuple of redis error classes."""
from redis import exceptions
# This exception suddenly changed name between redis-py versions
if hasattr(exceptions, 'InvalidData'):
DataError = exceptions.InvalidData
else:
DataError = exceptions.DataError
return error_classes_t(
(virtual.Transport.connection_errors + (
InconsistencyError,
socket.error,
IOError,
OSError,
exceptions.ConnectionError,
exceptions.AuthenticationError,
exceptions.TimeoutError)),
(virtual.Transport.channel_errors + (
DataError,
exceptions.InvalidResponse,
exceptions.ResponseError)),
)
def get_redis_ConnectionError():
"""Return the redis ConnectionError exception class."""
from redis import exceptions
return exceptions.ConnectionError
class MutexHeld(Exception):
"""Raised when another party holds the lock."""
@contextmanager
def Mutex(client, name, expire):
"""The Redis lock implementation (probably shaky)."""
lock_id = uuid()
i_won = client.setnx(name, lock_id)
try:
if i_won:
client.expire(name, expire)
yield
else:
if not client.ttl(name):
client.expire(name, expire)
raise MutexHeld()
finally:
if i_won:
try:
with client.pipeline(True) as pipe:
pipe.watch(name)
if pipe.get(name) == lock_id:
pipe.multi()
pipe.delete(name)
pipe.execute()
pipe.unwatch()
except redis.WatchError:
pass
def _after_fork_cleanup_channel(channel):
channel._after_fork()
class QoS(virtual.QoS):
"""Redis Ack Emulation."""
restore_at_shutdown = True
def __init__(self, *args, **kwargs):
super(QoS, self).__init__(*args, **kwargs)
self._vrestore_count = 0
def append(self, message, delivery_tag):
delivery = message.delivery_info
EX, RK = delivery['exchange'], delivery['routing_key']
# TODO: Remove this once we rely solely on Redis-py 3.0.0+
if redis.VERSION[0] >= 3:
# Redis-py changed the format of zadd args in v3.0.0
zadd_args = [{delivery_tag: time()}]
else:
zadd_args = [time(), delivery_tag]
with self.pipe_or_acquire() as pipe:
pipe.zadd(self.unacked_index_key, *zadd_args) \
.hset(self.unacked_key, delivery_tag,
dumps([message._raw, EX, RK])) \
.execute()
super(QoS, self).append(message, delivery_tag)
def restore_unacked(self, client=None):
with self.channel.conn_or_acquire(client) as client:
for tag in self._delivered:
self.restore_by_tag(tag, client=client)
self._delivered.clear()
def ack(self, delivery_tag):
self._remove_from_indices(delivery_tag).execute()
super(QoS, self).ack(delivery_tag)
def reject(self, delivery_tag, requeue=False):
if requeue:
self.restore_by_tag(delivery_tag, leftmost=True)
self.ack(delivery_tag)
@contextmanager
def pipe_or_acquire(self, pipe=None, client=None):
if pipe:
yield pipe
else:
with self.channel.conn_or_acquire(client) as client:
yield client.pipeline()
def _remove_from_indices(self, delivery_tag, pipe=None):
with self.pipe_or_acquire(pipe) as pipe:
return pipe.zrem(self.unacked_index_key, delivery_tag) \
.hdel(self.unacked_key, delivery_tag)
def restore_visible(self, start=0, num=10, interval=10):
self._vrestore_count += 1
if (self._vrestore_count - 1) % interval:
return
with self.channel.conn_or_acquire() as client:
ceil = time() - self.visibility_timeout
try:
with Mutex(client, self.unacked_mutex_key,
self.unacked_mutex_expire):
env = _detect_environment()
if env == 'gevent':
ceil = time()
visible = client.zrevrangebyscore(
self.unacked_index_key, ceil, 0,
start=num and start, num=num, withscores=True)
for tag, score in visible or []:
self.restore_by_tag(tag, client)
except MutexHeld:
pass
def restore_by_tag(self, tag, client=None, leftmost=False):
with self.channel.conn_or_acquire(client) as client:
with client.pipeline() as pipe:
p, _, _ = self._remove_from_indices(
tag, pipe.hget(self.unacked_key, tag)).execute()
if p:
M, EX, RK = loads(bytes_to_str(p)) # json is unicode
self.channel._do_restore_message(M, EX, RK, client, leftmost)
@cached_property
def unacked_key(self):
return self.channel.unacked_key
@cached_property
def unacked_index_key(self):
return self.channel.unacked_index_key
@cached_property
def unacked_mutex_key(self):
return self.channel.unacked_mutex_key
@cached_property
def unacked_mutex_expire(self):
return self.channel.unacked_mutex_expire
@cached_property
def visibility_timeout(self):
return self.channel.visibility_timeout
class MultiChannelPoller(object):
"""Async I/O poller for Redis transport."""
eventflags = READ | ERR
#: Set by :meth:`get` while reading from the socket.
_in_protected_read = False
#: Set of one-shot callbacks to call after reading from socket.
after_read = None
def __init__(self):
# active channels
self._channels = set()
# file descriptor -> channel map.
self._fd_to_chan = {}
# channel -> socket map
self._chan_to_sock = {}
# poll implementation (epoll/kqueue/select)
self.poller = poll()
# one-shot callbacks called after reading from socket.
self.after_read = set()
def close(self):
for fd in values(self._chan_to_sock):
try:
self.poller.unregister(fd)
except (KeyError, ValueError):
pass
self._channels.clear()
self._fd_to_chan.clear()
self._chan_to_sock.clear()
def add(self, channel):
self._channels.add(channel)
def discard(self, channel):
self._channels.discard(channel)
def _on_connection_disconnect(self, connection):
try:
self.poller.unregister(connection._sock)
except (AttributeError, TypeError):
pass
def _register(self, channel, client, type):
if (channel, client, type) in self._chan_to_sock:
self._unregister(channel, client, type)
if client.connection._sock is None: # not connected yet.
client.connection.connect()
sock = client.connection._sock
self._fd_to_chan[sock.fileno()] = (channel, type)
self._chan_to_sock[(channel, client, type)] = sock
self.poller.register(sock, self.eventflags)
def _unregister(self, channel, client, type):
self.poller.unregister(self._chan_to_sock[(channel, client, type)])
def _client_registered(self, channel, client, cmd):
if getattr(client, 'connection', None) is None:
client.connection = client.connection_pool.get_connection('_')
return (client.connection._sock is not None and
(channel, client, cmd) in self._chan_to_sock)
def _register_BRPOP(self, channel):
"""Enable BRPOP mode for channel."""
ident = channel, channel.client, 'BRPOP'
if not self._client_registered(channel, channel.client, 'BRPOP'):
channel._in_poll = False
self._register(*ident)
if not channel._in_poll: # send BRPOP
channel._brpop_start()
def _register_LISTEN(self, channel):
"""Enable LISTEN mode for channel."""
if not self._client_registered(channel, channel.subclient, 'LISTEN'):
channel._in_listen = False
self._register(channel, channel.subclient, 'LISTEN')
if not channel._in_listen:
channel._subscribe() # send SUBSCRIBE
def on_poll_start(self):
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
def on_poll_init(self, poller):
self.poller = poller
for channel in self._channels:
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def maybe_restore_messages(self):
for channel in self._channels:
if channel.active_queues:
# only need to do this once, as they are not local to channel.
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def on_readable(self, fileno):
chan, type = self._fd_to_chan[fileno]
if chan.qos.can_consume():
chan.handlers[type]()
def handle_event(self, fileno, event):
if event & READ:
return self.on_readable(fileno), self
elif event & ERR:
chan, type = self._fd_to_chan[fileno]
chan._poll_error(type)
def get(self, callback, timeout=None):
self._in_protected_read = True
try:
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
events = self.poller.poll(timeout)
if events:
for fileno, event in events:
ret = self.handle_event(fileno, event)
if ret:
return
# - no new data, so try to restore messages.
# - reset active redis commands.
self.maybe_restore_messages()
raise Empty()
finally:
self._in_protected_read = False
while self.after_read:
try:
fun = self.after_read.pop()
except KeyError:
break
else:
fun()
@property
def fds(self):
return self._fd_to_chan
class Channel(virtual.Channel):
"""Redis Channel."""
QoS = QoS
_client = None
_subclient = None
_closing = False
supports_fanout = True
keyprefix_queue = '_kombu.binding.%s'
keyprefix_fanout = '/{db}.'
sep = '\x06\x16'
_in_poll = False
_in_listen = False
_fanout_queues = {}
ack_emulation = True
unacked_key = 'unacked'
unacked_index_key = 'unacked_index'
unacked_mutex_key = 'unacked_mutex'
unacked_mutex_expire = 300 # 5 minutes
unacked_restore_limit = None
visibility_timeout = 3600 # 1 hour
priority_steps = PRIORITY_STEPS
socket_timeout = None
socket_connect_timeout = None
socket_keepalive = None
socket_keepalive_options = None
max_connections = 10
#: Transport option to disable fanout keyprefix.
#: Can also be string, in which case it changes the default
#: prefix ('/{db}.') into something else. The prefix must
#: include a leading slash and a trailing dot.
#:
#: Enabled by default since Kombu 4.x.
#: Disable for backwards compatibility with Kombu 3.x.
fanout_prefix = True
#: If enabled the fanout exchange will support patterns in routing
#: and binding keys (like a topic exchange but using PUB/SUB).
#:
#: Enabled by default since Kombu 4.x.
#: Disable for backwards compatibility with Kombu 3.x.
fanout_patterns = True
#: Order in which we consume from queues.
#:
#: Can be either string alias, or a cycle strategy class
#:
#: - ``round_robin``
#: (:class:`~kombu.utils.scheduling.round_robin_cycle`).
#:
#: Make sure each queue has an equal opportunity to be consumed from.
#:
#: - ``sorted``
#: (:class:`~kombu.utils.scheduling.sorted_cycle`).
#:
#: Consume from queues in alphabetical order.
#: If the first queue in the sorted list always contains messages,
#: then the rest of the queues will never be consumed from.
#:
#: - ``priority``
#: (:class:`~kombu.utils.scheduling.priority_cycle`).
#:
#: Consume from queues in original order, so that if the first
#: queue always contains messages, the rest of the queues
#: in the list will never be consumed from.
#:
#: The default is to consume from queues in round robin.
queue_order_strategy = 'round_robin'
_async_pool = None
_pool = None
from_transport_options = (
virtual.Channel.from_transport_options +
('ack_emulation',
'unacked_key',
'unacked_index_key',
'unacked_mutex_key',
'unacked_mutex_expire',
'visibility_timeout',
'unacked_restore_limit',
'fanout_prefix',
'fanout_patterns',
'socket_timeout',
'socket_connect_timeout',
'socket_keepalive',
'socket_keepalive_options',
'queue_order_strategy',
'max_connections',
'priority_steps') # <-- do not add comma here!
)
connection_class = redis.Connection if redis else None
def __init__(self, *args, **kwargs):
super_ = super(Channel, self)
super_.__init__(*args, **kwargs)
if not self.ack_emulation: # disable visibility timeout
self.QoS = virtual.QoS
self._queue_cycle = cycle_by_name(self.queue_order_strategy)()
self.Client = self._get_client()
self.ResponseError = self._get_response_error()
self.active_fanout_queues = set()
self.auto_delete_queues = set()
self._fanout_to_queue = {}
self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive}
if self.fanout_prefix:
if isinstance(self.fanout_prefix, string_t):
self.keyprefix_fanout = self.fanout_prefix
else:
# previous versions did not set a fanout, so cannot enable
# by default.
self.keyprefix_fanout = ''
# Evaluate connection.
try:
self.client.ping()
except Exception:
self._disconnect_pools()
raise
self.connection.cycle.add(self) # add to channel poller.
# copy errors, in case the channel is closed but threads are
# still waiting for data.
self.connection_errors = self.connection.connection_errors
if register_after_fork is not None:
register_after_fork(self, _after_fork_cleanup_channel)
def _after_fork(self):
self._disconnect_pools()
def _disconnect_pools(self):
pool = self._pool
async_pool = self._async_pool
self._async_pool = self._pool = None
if pool is not None:
pool.disconnect()
if async_pool is not None:
async_pool.disconnect()
def _on_connection_disconnect(self, connection):
if self._in_poll is connection:
self._in_poll = None
if self._in_listen is connection:
self._in_listen = None
if self.connection and self.connection.cycle:
self.connection.cycle._on_connection_disconnect(connection)
def _do_restore_message(self, payload, exchange, routing_key,
client=None, leftmost=False):
with self.conn_or_acquire(client) as client:
try:
try:
payload['headers']['redelivered'] = True
except KeyError:
pass
for queue in self._lookup(exchange, routing_key):
(client.lpush if leftmost else client.rpush)(
queue, dumps(payload),
)
except Exception:
crit('Could not restore message: %r', payload, exc_info=True)
def _restore(self, message, leftmost=False):
if not self.ack_emulation:
return super(Channel, self)._restore(message)
tag = message.delivery_tag
with self.conn_or_acquire() as client:
with client.pipeline() as pipe:
P, _ = pipe.hget(self.unacked_key, tag) \
.hdel(self.unacked_key, tag) \
.execute()
if P:
M, EX, RK = loads(bytes_to_str(P)) # json is unicode
self._do_restore_message(M, EX, RK, client, leftmost)
def _restore_at_beginning(self, message):
return self._restore(message, leftmost=True)
def basic_consume(self, queue, *args, **kwargs):
if queue in self._fanout_queues:
exchange, _ = self._fanout_queues[queue]
self.active_fanout_queues.add(queue)
self._fanout_to_queue[exchange] = queue
ret = super(Channel, self).basic_consume(queue, *args, **kwargs)
# Update fair cycle between queues.
#
# We cycle between queues fairly to make sure that
# each queue is equally likely to be consumed from,
# so that a very busy queue will not block others.
#
# This works by using Redis's `BRPOP` command and
# by rotating the most recently used queue to the
# end of the list. See Kombu github issue #166 for
# more discussion of this method.
self._update_queue_cycle()
return ret
def basic_cancel(self, consumer_tag):
# If we are busy reading messages we may experience
# a race condition where a message is consumed after
# canceling, so we must delay this operation until reading
# is complete (Issue celery/celery#1773).
connection = self.connection
if connection:
if connection.cycle._in_protected_read:
return connection.cycle.after_read.add(
promise(self._basic_cancel, (consumer_tag,)),
)
return self._basic_cancel(consumer_tag)
def _basic_cancel(self, consumer_tag):
try:
queue = self._tag_to_queue[consumer_tag]
except KeyError:
return
try:
self.active_fanout_queues.remove(queue)
except KeyError:
pass
else:
self._unsubscribe_from(queue)
try:
exchange, _ = self._fanout_queues[queue]
self._fanout_to_queue.pop(exchange)
except KeyError:
pass
ret = super(Channel, self).basic_cancel(consumer_tag)
self._update_queue_cycle()
return ret
def _get_publish_topic(self, exchange, routing_key):
if routing_key and self.fanout_patterns:
return ''.join([self.keyprefix_fanout, exchange, '/', routing_key])
return ''.join([self.keyprefix_fanout, exchange])
def _get_subscribe_topic(self, queue):
exchange, routing_key = self._fanout_queues[queue]
return self._get_publish_topic(exchange, routing_key)
def _subscribe(self):
keys = [self._get_subscribe_topic(queue)
for queue in self.active_fanout_queues]
if not keys:
return
c = self.subclient
if c.connection._sock is None:
c.connection.connect()
self._in_listen = c.connection
c.psubscribe(keys)
def _unsubscribe_from(self, queue):
topic = self._get_subscribe_topic(queue)
c = self.subclient
if c.connection and c.connection._sock:
c.unsubscribe([topic])
def _handle_message(self, client, r):
if bytes_to_str(r[0]) == 'unsubscribe' and r[2] == 0:
client.subscribed = False
return
if bytes_to_str(r[0]) == 'pmessage':
type, pattern, channel, data = r[0], r[1], r[2], r[3]
else:
type, pattern, channel, data = r[0], None, r[1], r[2]
return {
'type': type,
'pattern': pattern,
'channel': channel,
'data': data,
}
def _receive(self):
c = self.subclient
ret = []
try:
ret.append(self._receive_one(c))
except Empty:
pass
if c.connection is not None:
while c.connection.can_read(timeout=0):
ret.append(self._receive_one(c))
return any(ret)
def _receive_one(self, c):
response = None
try:
response = c.parse_response()
except self.connection_errors:
self._in_listen = None
raise
if response is not None:
payload = self._handle_message(c, response)
if bytes_to_str(payload['type']).endswith('message'):
channel = bytes_to_str(payload['channel'])
if payload['data']:
if channel[0] == '/':
_, _, channel = channel.partition('.')
try:
message = loads(bytes_to_str(payload['data']))
except (TypeError, ValueError):
warn('Cannot process event on channel %r: %s',
channel, repr(payload)[:4096], exc_info=1)
raise Empty()
exchange = channel.split('/', 1)[0]
self.connection._deliver(
message, self._fanout_to_queue[exchange])
return True
def _brpop_start(self, timeout=1):
queues = self._queue_cycle.consume(len(self.active_queues))
if not queues:
return
keys = [self._q_for_pri(queue, pri) for pri in self.priority_steps
for queue in queues] + [timeout or 0]
self._in_poll = self.client.connection
self.client.connection.send_command('BRPOP', *keys)
def _brpop_read(self, **options):
try:
try:
dest__item = self.client.parse_response(self.client.connection,
'BRPOP',
**options)
except self.connection_errors:
# if there's a ConnectionError, disconnect so the next
# iteration will reconnect automatically.
self.client.connection.disconnect()
raise
if dest__item:
dest, item = dest__item
dest = bytes_to_str(dest).rsplit(self.sep, 1)[0]
self._queue_cycle.rotate(dest)
self.connection._deliver(loads(bytes_to_str(item)), dest)
return True
else:
raise Empty()
finally:
self._in_poll = None
def _poll_error(self, type, **options):
if type == 'LISTEN':
self.subclient.parse_response()
else:
self.client.parse_response(self.client.connection, type)
def _get(self, queue):
with self.conn_or_acquire() as client:
for pri in self.priority_steps:
item = client.rpop(self._q_for_pri(queue, pri))
if item:
return loads(bytes_to_str(item))
raise Empty()
def _size(self, queue):
with self.conn_or_acquire() as client:
with client.pipeline() as pipe:
for pri in self.priority_steps:
pipe = pipe.llen(self._q_for_pri(queue, pri))
sizes = pipe.execute()
return sum(size for size in sizes
if isinstance(size, numbers.Integral))
def _q_for_pri(self, queue, pri):
pri = self.priority(pri)
return '%s%s%s' % ((queue, self.sep, pri) if pri else (queue, '', ''))
def priority(self, n):
steps = self.priority_steps
return steps[bisect(steps, n) - 1]
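# Worked example (added for clarity): with the default priority_steps [0, 3, 6, 9],
# ``bisect(steps, n) - 1`` picks the highest step that does not exceed n, e.g.
# priority(0) -> 0, priority(4) -> 3, priority(7) -> 6, priority(10) -> 9.  Each
# message is then pushed to the Redis list '<queue>\x06\x16<step>' (or the bare
# queue name when the step is 0, see _q_for_pri above).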
def _put(self, queue, message, **kwargs):
"""Deliver message."""
pri = self._get_message_priority(message, reverse=False)
with self.conn_or_acquire() as client:
client.lpush(self._q_for_pri(queue, pri), dumps(message))
def _put_fanout(self, exchange, message, routing_key, **kwargs):
"""Deliver fanout message."""
with self.conn_or_acquire() as client:
client.publish(
self._get_publish_topic(exchange, routing_key),
dumps(message),
)
def _new_queue(self, queue, auto_delete=False, **kwargs):
if auto_delete:
self.auto_delete_queues.add(queue)
def _queue_bind(self, exchange, routing_key, pattern, queue):
if self.typeof(exchange).type == 'fanout':
# Mark exchange as fanout.
self._fanout_queues[queue] = (
exchange, routing_key.replace('#', '*'),
)
with self.conn_or_acquire() as client:
client.sadd(self.keyprefix_queue % (exchange,),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
def _delete(self, queue, exchange, routing_key, pattern, *args, **kwargs):
self.auto_delete_queues.discard(queue)
with self.conn_or_acquire(client=kwargs.get('client')) as client:
client.srem(self.keyprefix_queue % (exchange,),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
with client.pipeline() as pipe:
for pri in self.priority_steps:
pipe = pipe.delete(self._q_for_pri(queue, pri))
pipe.execute()
def _has_queue(self, queue, **kwargs):
with self.conn_or_acquire() as client:
with client.pipeline() as pipe:
for pri in self.priority_steps:
pipe = pipe.exists(self._q_for_pri(queue, pri))
return any(pipe.execute())
def get_table(self, exchange):
key = self.keyprefix_queue % exchange
with self.conn_or_acquire() as client:
values = client.smembers(key)
if not values:
raise InconsistencyError(NO_ROUTE_ERROR.format(exchange, key))
return [tuple(bytes_to_str(val).split(self.sep)) for val in values]
def _purge(self, queue):
with self.conn_or_acquire() as client:
with client.pipeline() as pipe:
for pri in self.priority_steps:
priq = self._q_for_pri(queue, pri)
pipe = pipe.llen(priq).delete(priq)
sizes = pipe.execute()
return sum(sizes[::2])
def close(self):
self._closing = True
if not self.closed:
# remove from channel poller.
self.connection.cycle.discard(self)
# delete fanout bindings
client = self.__dict__.get('client') # only if property cached
if client is not None:
for queue in self._fanout_queues:
if queue in self.auto_delete_queues:
self.queue_delete(queue, client=client)
self._disconnect_pools()
self._close_clients()
super(Channel, self).close()
def _close_clients(self):
# Close connections
for attr in 'client', 'subclient':
try:
client = self.__dict__[attr]
connection, client.connection = client.connection, None
connection.disconnect()
except (KeyError, AttributeError, self.ResponseError):
pass
def _prepare_virtual_host(self, vhost):
if not isinstance(vhost, numbers.Integral):
if not vhost or vhost == '/':
vhost = DEFAULT_DB
elif vhost.startswith('/'):
vhost = vhost[1:]
try:
vhost = int(vhost)
except ValueError:
raise ValueError(
'Database is int between 0 and limit - 1, not {0}'.format(
vhost,
))
return vhost
def _filter_tcp_connparams(self, socket_keepalive=None,
socket_keepalive_options=None, **params):
return params
def _connparams(self, asynchronous=False):
conninfo = self.connection.client
connparams = {
'host': conninfo.hostname or '127.0.0.1',
'port': conninfo.port or self.connection.default_port,
'virtual_host': conninfo.virtual_host,
'password': conninfo.password,
'max_connections': self.max_connections,
'socket_timeout': self.socket_timeout,
'socket_connect_timeout': self.socket_connect_timeout,
'socket_keepalive': self.socket_keepalive,
'socket_keepalive_options': self.socket_keepalive_options,
}
if conninfo.ssl:
# Connection(ssl={}) must be a dict containing the keys:
# 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile'
try:
connparams.update(conninfo.ssl)
connparams['connection_class'] = redis.SSLConnection
except TypeError:
pass
host = connparams['host']
if '://' in host:
scheme, _, _, _, password, path, query = _parse_url(host)
if scheme == 'socket':
connparams = self._filter_tcp_connparams(**connparams)
connparams.update({
'connection_class': redis.UnixDomainSocketConnection,
'path': '/' + path}, **query)
connparams.pop('socket_connect_timeout', None)
connparams.pop('socket_keepalive', None)
connparams.pop('socket_keepalive_options', None)
connparams['password'] = password
connparams.pop('host', None)
connparams.pop('port', None)
connparams['db'] = self._prepare_virtual_host(
connparams.pop('virtual_host', None))
channel = self
connection_cls = (
connparams.get('connection_class') or
self.connection_class
)
if asynchronous:
class Connection(connection_cls):
def disconnect(self):
# NOTE: see celery issue #3898
# redis-py Connection shutdown()s the socket
# which causes all copies of file descriptor
# to become unusable, however close() only
# affect process-local copies of fds.
# So we just override Connection's disconnect method.
self._parser.on_disconnect()
channel._on_connection_disconnect(self)
if self._sock is None:
return
try:
# self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except socket.error:
pass
self._sock = None
connection_cls = Connection
connparams['connection_class'] = connection_cls
return connparams
def _create_client(self, asynchronous=False):
if asynchronous:
return self.Client(connection_pool=self.async_pool)
return self.Client(connection_pool=self.pool)
def _get_pool(self, asynchronous=False):
params = self._connparams(asynchronous=asynchronous)
self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db'])
return redis.ConnectionPool(**params)
def _get_client(self):
if redis.VERSION < (2, 10, 5):
raise VersionMismatch(
'Redis transport requires redis-py versions 2.10.5 or later. '
'You have {0.__version__}'.format(redis))
return redis.StrictRedis
@contextmanager
def conn_or_acquire(self, client=None):
if client:
yield client
else:
yield self._create_client()
@property
def pool(self):
if self._pool is None:
self._pool = self._get_pool()
return self._pool
@property
def async_pool(self):
if self._async_pool is None:
self._async_pool = self._get_pool(asynchronous=True)
return self._async_pool
@cached_property
def client(self):
"""Client used to publish messages, BRPOP etc."""
return self._create_client(asynchronous=True)
@cached_property
def subclient(self):
"""Pub/Sub connection used to consume fanout queues."""
client = self._create_client(asynchronous=True)
return client.pubsub()
def _update_queue_cycle(self):
self._queue_cycle.update(self.active_queues)
def _get_response_error(self):
from redis import exceptions
return exceptions.ResponseError
@property
def active_queues(self):
"""Set of queues being consumed from (excluding fanout queues)."""
return {queue for queue in self._active_queues
if queue not in self.active_fanout_queues}
class Transport(virtual.Transport):
"""Redis Transport."""
Channel = Channel
polling_interval = None # disable sleep between unsuccessful polls.
default_port = DEFAULT_PORT
driver_type = 'redis'
driver_name = 'redis'
implements = virtual.Transport.implements.extend(
asynchronous=True,
exchange_type=frozenset(['direct', 'topic', 'fanout'])
)
def __init__(self, *args, **kwargs):
if redis is None:
raise ImportError('Missing redis library (pip install redis)')
super(Transport, self).__init__(*args, **kwargs)
# Get redis-py exceptions.
self.connection_errors, self.channel_errors = self._get_errors()
# All channels share the same poller.
self.cycle = MultiChannelPoller()
def driver_version(self):
return redis.__version__
def register_with_event_loop(self, connection, loop):
cycle = self.cycle
cycle.on_poll_init(loop.poller)
cycle_poll_start = cycle.on_poll_start
add_reader = loop.add_reader
on_readable = self.on_readable
def _on_disconnect(connection):
if connection._sock:
loop.remove(connection._sock)
cycle._on_connection_disconnect = _on_disconnect
def on_poll_start():
cycle_poll_start()
[add_reader(fd, on_readable, fd) for fd in cycle.fds]
loop.on_tick.add(on_poll_start)
loop.call_repeatedly(10, cycle.maybe_restore_messages)
def on_readable(self, fileno):
"""Handle AIO event for one of our file descriptors."""
self.cycle.on_readable(fileno)
def _get_errors(self):
"""Utility to import redis-py's exceptions at runtime."""
return get_redis_error_classes()
class SentinelChannel(Channel):
"""Channel with explicit Redis Sentinel knowledge.
Broker url is supposed to look like:
sentinel://0.0.0.0:26379;sentinel://0.0.0.0:26380/...
where each sentinel is separated by a `;`.
Other arguments for the sentinel should come from the transport options
(see :meth:`Celery.connection`, which is in charge of creating the
`Connection` object).
You must provide at least one option in Transport options:
* `master_name` - name of the redis group to poll
"""
from_transport_options = Channel.from_transport_options + (
'master_name',
'min_other_sentinels',
'sentinel_kwargs')
connection_class = sentinel.SentinelManagedConnection if sentinel else None
def _sentinel_managed_pool(self, asynchronous=False):
connparams = self._connparams(asynchronous)
additional_params = connparams.copy()
additional_params.pop('host', None)
additional_params.pop('port', None)
connection_list = []
for url in self.connection.client.alt:
if url and 'sentinel://' in url:
connection_list.append(url.split('/')[2].split(':'))
sentinel_inst = sentinel.Sentinel(
connection_list,
min_other_sentinels=getattr(self, 'min_other_sentinels', 0),
sentinel_kwargs=getattr(self, 'sentinel_kwargs', None),
**additional_params)
master_name = getattr(self, 'master_name', None)
return sentinel_inst.master_for(
master_name,
self.Client,
).connection_pool
def _get_pool(self, asynchronous=False):
return self._sentinel_managed_pool(asynchronous)
class SentinelTransport(Transport):
"""Redis Sentinel Transport."""
default_port = 26379
Channel = SentinelChannel
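# Illustrative sketch (added; not part of kombu itself): connecting through the
# Sentinel transport.  Hostnames, ports and the master name are assumptions.
def _example_sentinel_connection():  # pragma: no cover
    from kombu import Connection
    return Connection(
        'sentinel://localhost:26379;sentinel://localhost:26380/0',
        transport_options={'master_name': 'mymaster'},
    )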
|
|
import numpy as np
from menpo.visualize.base import Renderer
# The colour map used for all lines and markers
GLOBAL_CMAP = 'jet'
class MatplotlibRenderer(Renderer):
r"""
Abstract class for rendering visualizations using Matplotlib.
Parameters
----------
figure_id : `int` or ``None``
A figure id or ``None``. ``None`` assumes we maintain the Matplotlib
state machine and use `plt.gcf()`.
new_figure : `bool`
If ``True``, it creates a new figure to render on.
"""
def __init__(self, figure_id, new_figure):
super(MatplotlibRenderer, self).__init__(figure_id, new_figure)
# Set up data for saving
self._supported_ext = self.figure.canvas.get_supported_filetypes().keys()
# Create the extensions map, have to add . in front of the extensions
# and map every extension to the savefig method
n_ext = len(self._supported_ext)
func_list = [lambda obj, fp, **kwargs: self.figure.savefig(fp, **obj)] * n_ext
self._extensions_map = dict(zip(['.' + s for s in self._supported_ext],
func_list))
def get_figure(self):
r"""
Gets the figure specified by the combination of ``self.figure_id`` and
``self.new_figure``. If ``self.figure_id == None`` then ``plt.gcf()``
is used. ``self.figure_id`` is also set to the correct id of the figure
if a new figure is created.
Returns
-------
figure : Matplotlib figure object
The figure we will be rendering on.
"""
import matplotlib.pyplot as plt
if self.new_figure or self.figure_id is not None:
self.figure = plt.figure(self.figure_id)
else:
self.figure = plt.gcf()
self.figure_id = self.figure.number
return self.figure
def save_figure(self, filename, format='png', dpi=None, face_colour='w',
edge_colour='w', orientation='portrait',
paper_type='letter', transparent=False, pad_inches=0.1,
overwrite=False):
r"""
Method for saving the figure of the current `figure_id` to file.
Parameters
----------
filename : `str` or `file`-like object
The string path or file-like object to save the figure at/into.
format : `str`
The format to use. This must match the file path if the file path is
a `str`.
dpi : `int` > 0 or ``None``, optional
The resolution in dots per inch.
face_colour : See Below, optional
The face colour of the figure rectangle.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of len 3
edge_colour : See Below, optional
The edge colour of the figure rectangle.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of len 3
orientation : {``portrait``, ``landscape``}, optional
The page orientation.
paper_type : See Below, optional
The type of the paper.
Example options ::
{``letter``, ``legal``, ``executive``, ``ledger``,
``a0`` through ``a10``, ``b0`` through ``b10``}
transparent : `bool`, optional
If ``True``, the axes patches will all be transparent; the figure
patch will also be transparent unless `face_colour` and/or
`edge_colour` are specified. This is useful, for example, for
displaying a plot on top of a coloured background on a web page.
The transparency of these patches will be restored to their original
values upon exit of this function.
pad_inches : `float`, optional
Amount of padding around the figure.
overwrite : `bool`, optional
If ``True``, the file will be overwritten if it already exists.
"""
from menpo.io.output.base import _export
save_fig_args = {'dpi': dpi, 'facecolour': face_colour,
'edgecolour': edge_colour, 'orientation': orientation,
'papertype': paper_type, 'format': format,
'transparent': transparent, 'pad_inches': pad_inches,
'bbox_inches': 'tight', 'frameon': None}
# Use the export code so that we have a consistent interface
_export(save_fig_args, filename, self._extensions_map, format,
overwrite=overwrite)
def save_figure_widget(self):
r"""
Method for saving the figure of the current ``figure_id`` to file using
`menpowidgets.base.save_matplotlib_figure` widget.
"""
from menpowidgets import save_matplotlib_figure
save_matplotlib_figure(self)
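# A hedged usage sketch (not part of menpo): it assumes a random RGB image
# array and relies on the MatplotlibImageViewer2d subclass defined further
# below in this module. It only illustrates the intended flow of the
# abstract renderer: render on a new figure, then persist the result with
# MatplotlibRenderer.save_figure.
def _example_save_figure_sketch(out_path='example_render.png'):
    image = np.random.rand(64, 64, 3)  # hypothetical test image
    viewer = MatplotlibImageViewer2d(None, True, image)
    viewer.render(render_axes=False)
    viewer.save_figure(out_path, format='png', overwrite=True)
    return viewer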
class MatplotlibSubplots(object):
def _subplot_layout(self, num_subplots):
if num_subplots < 2:
return [1, 1]
while self._is_prime(num_subplots) and num_subplots > 4:
num_subplots += 1
p = self._factor(num_subplots)
if len(p) == 1:
p.insert(0, 1)
return p
while len(p) > 2:
if len(p) >= 4:
p[0] = p[0] * p[-2]
p[1] = p[1] * p[-1]
del p[-2:]
else:
p[0] = p[0] * p[1]
del p[1]
p.sort()
# Reformat if the column/row ratio is too large: we want a roughly
# square design
while (p[1] / p[0]) > 2.5:
p = self._subplot_layout(num_subplots + 1)
return p
def _factor(self, n):
gaps = [1, 2, 2, 4, 2, 4, 2, 4, 6, 2, 6]
length, cycle = 11, 3
f, fs, next_ind = 2, [], 0
while f * f <= n:
while n % f == 0:
fs.append(f)
n /= f
f += gaps[next_ind]
next_ind += 1
if next_ind == length:
next_ind = cycle
if n > 1:
fs.append(n)
return fs
def _is_prime(self, n):
if n == 2 or n == 3:
return True
if n < 2 or n % 2 == 0:
return False
if n < 9:
return True
if n % 3 == 0:
return False
r = int(n ** 0.5)
f = 5
while f <= r:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
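# Hedged illustration (not part of menpo): prints the [n_rows, n_cols] grid
# that _subplot_layout chooses for a few channel counts, showing how prime
# counts are bumped up and factor pairs are balanced towards a roughly
# square layout.
def _example_subplot_layout_sketch():
    layout_helper = MatplotlibSubplots()
    for n_channels in (1, 3, 6, 7, 12):
        print(n_channels, layout_helper._subplot_layout(n_channels))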
def _parse_cmap(cmap_name=None, image_shape_len=3):
import matplotlib.cm as cm
if cmap_name is not None:
return cm.get_cmap(cmap_name)
else:
if image_shape_len == 2:
# Single channels are viewed in Gray by default
return cm.gray
else:
return None
def _parse_axes_limits(min_x, max_x, min_y, max_y, axes_x_limits,
axes_y_limits):
if isinstance(axes_x_limits, int):
axes_x_limits = float(axes_x_limits)
if isinstance(axes_y_limits, int):
axes_y_limits = float(axes_y_limits)
if isinstance(axes_x_limits, float):
pad = (max_x - min_x) * axes_x_limits
axes_x_limits = [min_x - pad, max_x + pad]
if isinstance(axes_y_limits, float):
pad = (max_y - min_y) * axes_y_limits
axes_y_limits = [min_y - pad, max_y + pad]
return axes_x_limits, axes_y_limits
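# Hedged example (assumed values): a scalar limit is interpreted as a
# proportional padding of the data range, while an explicit [min, max]
# list is passed through untouched.
def _example_parse_axes_limits_sketch():
    x_lims, y_lims = _parse_axes_limits(0., 100., 0., 50.,
                                        axes_x_limits=0.1,
                                        axes_y_limits=[-5., 60.])
    # x_lims -> [-10.0, 110.0] (10% padding on each side)
    # y_lims -> [-5.0, 60.0]   (explicit limits are untouched)
    return x_lims, y_lims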
def _set_axes_options(ax, render_axes=True, inverted_y_axis=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None,
axes_y_ticks=None, axes_x_label=None, axes_y_label=None,
title=None):
if render_axes:
# render axes
ax.set_axis_on()
# set font options
for l in (ax.get_xticklabels() + ax.get_yticklabels()):
l.set_fontsize(axes_font_size)
l.set_fontname(axes_font_name)
l.set_fontstyle(axes_font_style)
l.set_fontweight(axes_font_weight)
# set ticks
if axes_x_ticks is not None:
ax.set_xticks(axes_x_ticks)
if axes_y_ticks is not None:
ax.set_yticks(axes_y_ticks)
# set labels and title
if axes_x_label is None:
axes_x_label = ''
if axes_y_label is None:
axes_y_label = ''
if title is None:
title = ''
ax.set_xlabel(
axes_x_label, fontsize=axes_font_size, fontname=axes_font_name,
fontstyle=axes_font_style, fontweight=axes_font_weight)
ax.set_ylabel(
axes_y_label, fontsize=axes_font_size, fontname=axes_font_name,
fontstyle=axes_font_style, fontweight=axes_font_weight)
ax.set_title(
title, fontsize=axes_font_size, fontname=axes_font_name,
fontstyle=axes_font_style, fontweight=axes_font_weight)
else:
# do not render axes
ax.set_axis_off()
# also remove the ticks to get rid of the white area
ax.set_xticks([])
ax.set_yticks([])
# set axes limits
if axes_x_limits is not None:
ax.set_xlim(np.sort(axes_x_limits))
if axes_y_limits is None:
axes_y_limits = ax.get_ylim()
if inverted_y_axis:
ax.set_ylim(np.sort(axes_y_limits)[::-1])
else:
ax.set_ylim(np.sort(axes_y_limits))
def _set_grid_options(render_grid=True, grid_line_style='--', grid_line_width=2):
import matplotlib.pyplot as plt
if render_grid:
plt.grid('on', linestyle=grid_line_style, linewidth=grid_line_width)
else:
plt.grid('off')
def _set_figure_size(fig, figure_size=(10, 8)):
if figure_size is not None:
fig.set_size_inches(np.asarray(figure_size))
def _set_numbering(ax, centers, render_numbering=True,
numbers_horizontal_align='center',
numbers_vertical_align='bottom',
numbers_font_name='sans-serif', numbers_font_size=10,
numbers_font_style='normal', numbers_font_weight='normal',
numbers_font_colour='k'):
if render_numbering:
for k, p in enumerate(centers):
ax.annotate(
str(k), xy=(p[0], p[1]),
horizontalalignment=numbers_horizontal_align,
verticalalignment=numbers_vertical_align,
size=numbers_font_size, family=numbers_font_name,
fontstyle=numbers_font_style, fontweight=numbers_font_weight,
color=numbers_font_colour)
def _set_legend(ax, legend_handles, render_legend=True, legend_title='',
legend_font_name='sans-serif',
legend_font_style='normal', legend_font_size=10,
legend_font_weight='normal', legend_marker_scale=None,
legend_location=2, legend_bbox_to_anchor=(1.05, 1.),
legend_border_axes_pad=None, legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None, legend_border=True,
legend_border_padding=None, legend_shadow=False,
legend_rounded_corners=False):
if render_legend:
# Options related to legend's font
prop = {'family': legend_font_name, 'size': legend_font_size,
'style': legend_font_style, 'weight': legend_font_weight}
# Render legend
ax.legend(
handles=legend_handles, title=legend_title, prop=prop,
loc=legend_location, bbox_to_anchor=legend_bbox_to_anchor,
borderaxespad=legend_border_axes_pad, ncol=legend_n_columns,
columnspacing=legend_horizontal_spacing,
labelspacing=legend_vertical_spacing, frameon=legend_border,
borderpad=legend_border_padding, shadow=legend_shadow,
fancybox=legend_rounded_corners, markerscale=legend_marker_scale)
class MatplotlibImageViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, image):
super(MatplotlibImageViewer2d, self).__init__(figure_id, new_figure)
self.image = image
self.axes_list = []
def render(self, interpolation='bilinear', cmap_name=None, alpha=1.,
render_axes=False, axes_font_name='sans-serif',
axes_font_size=10, axes_font_style='normal',
axes_font_weight='normal', axes_x_limits=None,
axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
import matplotlib.pyplot as plt
# parse colour map argument
cmap = _parse_cmap(cmap_name=cmap_name,
image_shape_len=len(self.image.shape))
# parse axes limits
axes_x_limits, axes_y_limits = _parse_axes_limits(
0., self.image.shape[1], 0., self.image.shape[0], axes_x_limits,
axes_y_limits)
# render image
plt.imshow(self.image, cmap=cmap, interpolation=interpolation,
alpha=alpha)
# store axes object
ax = plt.gca()
self.axes_list = [ax]
# set axes options
_set_axes_options(
ax, render_axes=render_axes, inverted_y_axis=True,
axes_font_name=axes_font_name, axes_font_size=axes_font_size,
axes_font_style=axes_font_style, axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks)
# set figure size
_set_figure_size(self.figure, figure_size)
return self
class MatplotlibImageSubplotsViewer2d(MatplotlibRenderer, MatplotlibSubplots):
def __init__(self, figure_id, new_figure, image):
super(MatplotlibImageSubplotsViewer2d, self).__init__(figure_id,
new_figure)
self.image = image
self.num_subplots = self.image.shape[2]
self.plot_layout = self._subplot_layout(self.num_subplots)
self.axes_list = []
def render(self, interpolation='bilinear', cmap_name=None, alpha=1.,
render_axes=False, axes_font_name='sans-serif',
axes_font_size=10, axes_font_style='normal',
axes_font_weight='normal', axes_x_limits=None,
axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
import matplotlib.pyplot as plt
# parse colour map argument
cmap = _parse_cmap(cmap_name=cmap_name, image_shape_len=2)
# parse axes limits
axes_x_limits, axes_y_limits = _parse_axes_limits(
0., self.image.shape[1], 0., self.image.shape[0], axes_x_limits,
axes_y_limits)
p = self.plot_layout
for i in range(self.image.shape[2]):
# create subplot and append the axes object
ax = plt.subplot(p[0], p[1], 1 + i)
self.axes_list.append(ax)
# render image
plt.imshow(self.image[:, :, i], cmap=cmap,
interpolation=interpolation, alpha=alpha)
# set axes options
_set_axes_options(
ax, render_axes=render_axes, inverted_y_axis=True,
axes_font_name=axes_font_name, axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks)
# set figure size
_set_figure_size(self.figure, figure_size)
return self
class MatplotlibPointGraphViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, points, edges):
super(MatplotlibPointGraphViewer2d, self).__init__(figure_id,
new_figure)
self.points = points
self.edges = edges
def render(self, image_view=False, render_lines=True, line_colour='r',
line_style='-', line_width=1, render_markers=True,
marker_style='o', marker_size=5, marker_face_colour='r',
marker_edge_colour='k', marker_edge_width=1.,
render_numbering=False, numbers_horizontal_align='center',
numbers_vertical_align='bottom',
numbers_font_name='sans-serif', numbers_font_size=10,
numbers_font_style='normal', numbers_font_weight='normal',
numbers_font_colour='k', render_axes=True,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None,
axes_y_ticks=None, figure_size=(10, 8), label=None):
from matplotlib import collections as mc
import matplotlib.pyplot as plt
# Flip x and y for viewing if points are tied to an image
points = self.points[:, ::-1] if image_view else self.points
# parse axes limits
min_x, min_y = np.min(points, axis=0)
max_x, max_y = np.max(points, axis=0)
axes_x_limits, axes_y_limits = _parse_axes_limits(
min_x, max_x, min_y, max_y, axes_x_limits, axes_y_limits)
# get current axes object
ax = plt.gca()
# Check if graph has edges to be rendered (for example a PointCloud
# won't have any edges)
if render_lines and np.array(self.edges).shape[0] > 0:
# Get edges to be rendered
lines = zip(points[self.edges[:, 0], :],
points[self.edges[:, 1], :])
# Draw line objects
lc = mc.LineCollection(lines, colors=line_colour,
linestyles=line_style, linewidths=line_width,
cmap=GLOBAL_CMAP, label=label)
ax.add_collection(lc)
            # If a label is defined, it should only be applied to the lines of
            # a PointGraph, which represent each one of the labels; it is left
            # for the markers only when a PointCloud (with no edges) is passed.
label = None
ax.autoscale()
if render_markers:
plt.plot(points[:, 0], points[:, 1], linewidth=0,
markersize=marker_size, marker=marker_style,
markeredgewidth=marker_edge_width,
markeredgecolor=marker_edge_colour,
markerfacecolor=marker_face_colour, label=label)
# set numbering
_set_numbering(ax, points, render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour)
# set axes options
_set_axes_options(
ax, render_axes=render_axes, inverted_y_axis=image_view,
axes_font_name=axes_font_name, axes_font_size=axes_font_size,
axes_font_style=axes_font_style, axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks)
# set equal aspect ratio
ax.set_aspect('equal', adjustable='box')
# set figure size
_set_figure_size(self.figure, figure_size)
return self
class MatplotlibLandmarkViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, group, pointcloud,
labels_to_masks):
super(MatplotlibLandmarkViewer2d, self).__init__(figure_id, new_figure)
self.group = group
self.pointcloud = pointcloud
self.labels_to_masks = labels_to_masks
def render(self, image_view=False, render_lines=True, line_colour='r',
line_style='-', line_width=1, render_markers=True,
marker_style='o', marker_size=5, marker_face_colour='r',
marker_edge_colour='k', marker_edge_width=1.,
render_numbering=False, numbers_horizontal_align='center',
numbers_vertical_align='bottom', numbers_font_name='sans-serif',
numbers_font_size=10, numbers_font_style='normal',
numbers_font_weight='normal', numbers_font_colour='k',
render_legend=True, legend_title='',
legend_font_name='sans-serif',
legend_font_style='normal', legend_font_size=10,
legend_font_weight='normal', legend_marker_scale=None,
legend_location=2, legend_bbox_to_anchor=(1.05, 1.),
legend_border_axes_pad=None, legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None, legend_border=True,
legend_border_padding=None, legend_shadow=False,
legend_rounded_corners=False, render_axes=True,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None,
axes_y_ticks=None, figure_size=(10, 8)):
import matplotlib.lines as mlines
from menpo.shape import TriMesh
from menpo.shape.graph import PointGraph
import matplotlib.pyplot as plt
        # Regarding the labels colours, we may get passed either no colours (in
        # which case we sample colours from the global colour map) or a single
        # colour to colour all the labels with.
# TODO: All marker and line options could be defined as lists...
n_labels = len(self.labels_to_masks)
line_colour = _check_colours_list(
render_lines, line_colour, n_labels,
'Must pass a list of line colours with length n_labels or a single '
'line colour for all labels.')
marker_face_colour = _check_colours_list(
render_markers, marker_face_colour, n_labels,
'Must pass a list of marker face colours with length n_labels or '
'a single marker face colour for all labels.')
marker_edge_colour = _check_colours_list(
render_markers, marker_edge_colour, n_labels,
'Must pass a list of marker edge colours with length n_labels or '
'a single marker edge colour for all labels.')
# check axes limits
if image_view:
min_y, min_x = np.min(self.pointcloud.points, axis=0)
max_y, max_x = np.max(self.pointcloud.points, axis=0)
else:
min_x, min_y = np.min(self.pointcloud.points, axis=0)
max_x, max_y = np.max(self.pointcloud.points, axis=0)
axes_x_limits, axes_y_limits = _parse_axes_limits(
min_x, max_x, min_y, max_y, axes_x_limits, axes_y_limits)
# get pointcloud of each label
sub_pointclouds = self._build_sub_pointclouds()
# initialize legend_handles list
legend_handles = []
# for each pointcloud
for i, (label, pc) in enumerate(sub_pointclouds):
# render pointcloud
pc.view(figure_id=self.figure_id, image_view=image_view,
render_lines=render_lines, line_colour=line_colour[i],
line_style=line_style, line_width=line_width,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour[i],
marker_edge_colour=marker_edge_colour[i],
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks,
figure_size=None)
# set legend entry
if render_legend:
tmp_line = 'None'
if (render_lines and
(isinstance(pc, PointGraph) or isinstance(pc, TriMesh))):
tmp_line = line_style
tmp_marker = marker_style if render_markers else 'None'
legend_handles.append(
mlines.Line2D([], [], linewidth=line_width,
linestyle=tmp_line, color=line_colour[i],
marker=tmp_marker,
markersize=marker_size ** 0.5,
markeredgewidth=marker_edge_width,
markeredgecolor=marker_edge_colour[i],
markerfacecolor=marker_face_colour[i],
label='{0}: {1}'.format(self.group, label)))
# set legend
_set_legend(plt.gca(), legend_handles, render_legend=render_legend,
legend_title=legend_title, legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners)
# set figure size
_set_figure_size(self.figure, figure_size)
# If no limits are set then ensure that all sub-pointclouds fit in the
# view
if axes_x_limits is None and axes_y_limits is None:
plt.autoscale()
return self
def _build_sub_pointclouds(self):
sub_pointclouds = []
        for label, mask in self.labels_to_masks.items():
            sub_pointclouds.append((label, self.pointcloud.from_mask(mask)))
return sub_pointclouds
class MatplotlibAlignmentViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, alignment_transform):
super(MatplotlibAlignmentViewer2d, self).__init__(figure_id,
new_figure)
self.alignment_transform = alignment_transform
def render(self, image=False, **kwargs):
r"""
Visualize how points are affected by the warp in 2 dimensions.
"""
import matplotlib.pyplot as plt
source = self.alignment_transform.source.points
target = self.alignment_transform.target.points
        # a factor by which the minimum and maximum x and y values of the warp
        # will be increased.
x_margin_factor, y_margin_factor = 0.5, 0.5
# the number of x and y samples to take
n_x, n_y = 50, 50
# {x y}_{min max} is the actual bounds on either source or target
# landmarks
x_min, y_min = np.vstack(
[target.min(0), source.min(0)]).min(0)
x_max, y_max = np.vstack(
[target.max(0), source.max(0)]).max(0)
x_margin = x_margin_factor * (x_max - x_min)
y_margin = y_margin_factor * (y_max - y_min)
# {x y}_{min max}_m is the bound once it has been grown by the factor
# of the spread in that dimension
x_min_m = x_min - x_margin
x_max_m = x_max + x_margin
y_min_m = y_min - y_margin
y_max_m = y_max + y_margin
# build sample points for the selected region
x = np.linspace(x_min_m, x_max_m, n_x)
y = np.linspace(y_min_m, y_max_m, n_y)
xx, yy = np.meshgrid(x, y)
sample_points = np.concatenate(
[xx.reshape([-1, 1]), yy.reshape([-1, 1])], axis=1)
warped_points = self.alignment_transform.apply(sample_points)
delta = warped_points - sample_points
# plot the sample points result
x, y, = 0, 1
if image:
# if we are overlaying points onto an image,
# we have to account for the fact that axis 0 is typically
# called 'y' and axis 1 is typically called 'x'. Flip them here
x, y = y, x
plt.quiver(sample_points[:, x], sample_points[:, y], delta[:, x],
delta[:, y])
delta = target - source
# plot how the landmarks move from source to target
plt.quiver(source[:, x], source[:, y], delta[:, x],
delta[:, y], angles='xy', scale_units='xy', scale=1)
# rescale to the bounds
plt.xlim((x_min_m, x_max_m))
plt.ylim((y_min_m, y_max_m))
if image:
# if we are overlaying points on an image, axis0 (the 'y' axis)
# is flipped.
plt.gca().invert_yaxis()
return self
class MatplotlibGraphPlotter(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, x_axis, y_axis, title=None,
legend_entries=None, x_label=None, y_label=None,
x_axis_limits=None, y_axis_limits=None, x_axis_ticks=None,
y_axis_ticks=None):
super(MatplotlibGraphPlotter, self).__init__(figure_id, new_figure)
self.x_axis = x_axis
self.y_axis = y_axis
if legend_entries is None:
legend_entries = ['Curve {}'.format(i) for i in range(len(y_axis))]
self.legend_entries = legend_entries
self.title = title
self.x_label = x_label
self.y_label = y_label
self.x_axis_ticks = x_axis_ticks
self.y_axis_ticks = y_axis_ticks
# parse axes limits
min_x = np.min(x_axis)
max_x = np.max(x_axis)
min_y = np.min([np.min(l) for l in y_axis])
max_y = np.max([np.max(l) for l in y_axis])
self.x_axis_limits, self.y_axis_limits = _parse_axes_limits(
min_x, max_x, min_y, max_y, x_axis_limits, y_axis_limits)
def render(self, render_lines=True, line_colour='r',
line_style='-', line_width=1, render_markers=True,
marker_style='o', marker_size=6, marker_face_colour='r',
marker_edge_colour='k', marker_edge_width=1.,
render_legend=True, legend_title='',
legend_font_name='sans-serif', legend_font_style='normal',
legend_font_size=10, legend_font_weight='normal',
legend_marker_scale=None, legend_location=2,
legend_bbox_to_anchor=(1.05, 1.), legend_border_axes_pad=None,
legend_n_columns=1, legend_horizontal_spacing=None,
legend_vertical_spacing=None, legend_border=True,
legend_border_padding=None, legend_shadow=False,
legend_rounded_corners=False, render_axes=True,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
figure_size=(10, 8), render_grid=True, grid_line_style='--',
grid_line_width=1):
import matplotlib.pyplot as plt
# Check the viewer options that can be different for each plotted curve
n_curves = len(self.y_axis)
render_lines = _check_render_flag(render_lines, n_curves,
'Must pass a list of different '
'render_lines flag for each curve or '
'a single render_lines flag for all '
'curves.')
render_markers = _check_render_flag(render_markers, n_curves,
'Must pass a list of different '
'render_markers flag for each '
'curve or a single render_markers '
'flag for all curves.')
line_colour = _check_colours_list(
True, line_colour, n_curves,
'Must pass a list of line colours with length n_curves or a single '
'line colour for all curves.')
line_style = _check_colours_list(
True, line_style, n_curves,
'Must pass a list of line styles with length n_curves or a single '
'line style for all curves.')
line_width = _check_colours_list(
True, line_width, n_curves,
'Must pass a list of line widths with length n_curves or a single '
'line width for all curves.')
marker_style = _check_colours_list(
True, marker_style, n_curves,
'Must pass a list of marker styles with length n_curves or a '
'single marker style for all curves.')
marker_size = _check_colours_list(
True, marker_size, n_curves,
'Must pass a list of marker sizes with length n_curves or a single '
'marker size for all curves.')
marker_face_colour = _check_colours_list(
True, marker_face_colour, n_curves,
'Must pass a list of marker face colours with length n_curves or a '
'single marker face colour for all curves.')
marker_edge_colour = _check_colours_list(
True, marker_edge_colour, n_curves,
'Must pass a list of marker edge colours with length n_curves or a '
'single marker edge colour for all curves.')
marker_edge_width = _check_colours_list(
True, marker_edge_width, n_curves,
'Must pass a list of marker edge widths with length n_curves or a '
'single marker edge width for all curves.')
# plot all curves
ax = plt.gca()
for i, y in enumerate(self.y_axis):
linestyle = line_style[i]
if not render_lines[i]:
linestyle = 'None'
marker = marker_style[i]
if not render_markers[i]:
marker = 'None'
plt.plot(self.x_axis, y, color=line_colour[i],
linestyle=linestyle,
linewidth=line_width[i], marker=marker,
markeredgecolor=marker_edge_colour[i],
markerfacecolor=marker_face_colour[i],
markeredgewidth=marker_edge_width[i],
markersize=marker_size[i], label=self.legend_entries[i])
# set legend
_set_legend(ax, legend_handles=None, render_legend=render_legend,
legend_title=legend_title, legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners)
# set axes options
_set_axes_options(
ax, render_axes=render_axes, inverted_y_axis=False,
axes_font_name=axes_font_name, axes_font_size=axes_font_size,
axes_font_style=axes_font_style, axes_font_weight=axes_font_weight,
axes_x_limits=self.x_axis_limits, axes_y_limits=self.y_axis_limits,
axes_x_ticks=self.x_axis_ticks, axes_y_ticks=self.y_axis_ticks,
axes_x_label=self.x_label, axes_y_label=self.y_label,
title=self.title)
# set grid options
_set_grid_options(render_grid=render_grid,
grid_line_style=grid_line_style,
grid_line_width=grid_line_width)
# set figure size
_set_figure_size(self.figure, figure_size)
return self
class MatplotlibMultiImageViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, image_list):
super(MatplotlibMultiImageViewer2d, self).__init__(figure_id,
new_figure)
self.image_list = image_list
def render(self, interval=50, **kwargs):
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.animation as animation
if len(self.image_list[0].shape) == 2:
# Single channels are viewed in Gray
_ax = plt.imshow(self.image_list[0], cmap=cm.Greys_r, **kwargs)
else:
_ax = plt.imshow(self.image_list[0], **kwargs)
def init():
return _ax,
def animate(j):
_ax.set_data(self.image_list[j])
return _ax,
self._ani = animation.FuncAnimation(self.figure, animate,
init_func=init,
frames=len(self.image_list),
interval=interval, blit=True)
return self
class MatplotlibMultiImageSubplotsViewer2d(MatplotlibRenderer,
MatplotlibSubplots):
def __init__(self, figure_id, new_figure, image_list):
super(MatplotlibMultiImageSubplotsViewer2d, self).__init__(figure_id,
new_figure)
self.image_list = image_list
self.num_subplots = self.image_list[0].shape[2]
self.plot_layout = self._subplot_layout(self.num_subplots)
def render(self, interval=50, **kwargs):
import matplotlib.cm as cm
import matplotlib.animation as animation
import matplotlib.pyplot as plt
p = self.plot_layout
_axs = []
for i in range(self.image_list[0].shape[2]):
plt.subplot(p[0], p[1], 1 + i)
# Hide the x and y labels
plt.axis('off')
_ax = plt.imshow(self.image_list[0][:, :, i], cmap=cm.Greys_r,
**kwargs)
_axs.append(_ax)
def init():
return _axs
def animate(j):
for k, _ax in enumerate(_axs):
_ax.set_data(self.image_list[j][:, :, k])
return _axs
self._ani = animation.FuncAnimation(self.figure, animate,
init_func=init,
frames=len(self.image_list),
interval=interval, blit=True)
return self
def sample_colours_from_colourmap(n_colours, colour_map):
import matplotlib.pyplot as plt
cm = plt.get_cmap(colour_map)
return [cm(1.*i/n_colours)[:3] for i in range(n_colours)]
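# Hedged example: sampling three evenly spaced RGB triplets from the
# module-wide GLOBAL_CMAP ('jet') colour map, as done when no explicit
# colours are passed for multiple labels or curves.
def _example_sample_colours_sketch():
    colours = sample_colours_from_colourmap(3, GLOBAL_CMAP)
    # -> a list of three (r, g, b) tuples with components in [0, 1]
    return colours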
def _check_colours_list(render_flag, colours_list, n_objects, error_str):
if render_flag:
if colours_list is None:
# sample colours from jet colour map
colours_list = sample_colours_from_colourmap(n_objects, GLOBAL_CMAP)
if isinstance(colours_list, list):
if len(colours_list) == 1:
colours_list *= n_objects
elif len(colours_list) != n_objects:
raise ValueError(error_str)
else:
colours_list = [colours_list] * n_objects
else:
colours_list = [None] * n_objects
return colours_list
def _check_render_flag(render_flag, n_objects, error_str):
if isinstance(render_flag, bool):
render_flag = [render_flag] * n_objects
elif isinstance(render_flag, list):
if len(render_flag) == 1:
render_flag *= n_objects
elif len(render_flag) != n_objects:
raise ValueError(error_str)
else:
raise ValueError(error_str)
return render_flag
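# Hedged illustration of the two validators above: a single option is
# broadcast to n_objects entries, while a list of the wrong length raises
# ValueError with the supplied message.
def _example_option_broadcast_sketch():
    assert _check_colours_list(True, 'r', 3, 'bad colours') == ['r', 'r', 'r']
    assert _check_render_flag(True, 3, 'bad flags') == [True, True, True]
    try:
        _check_render_flag([True, False], 3, 'bad flags')
    except ValueError as error:
        print(error)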
|
|
from __future__ import unicode_literals
from collections import OrderedDict, defaultdict
from copy import copy
import nose
from xvalidator.element import Element, NameSpace, create_element, \
get_result_tag, Document, create_document
nameSpaces = [
NameSpace(prefix='xsi', uri='http://www.w3.org/2001/XMLSchema-instance'),
NameSpace(prefix='spirit', uri='http://www.spiritconsortium.org/XMLSchema/SPIRIT/1685-2009'),
NameSpace(prefix='', uri='http://www.spiritconsortium.org/XMLSchema/SPIRIT/1685-2009'),
]
def test_element_pass():
el = Element('test')
assert el.tag == 'test'
def test_element_repr():
id_string = str('ID42')
actual = repr(Element('tagName', value=[1, 2, 3],
attributes=dict(id=id_string), path='/root-0,name'))
expected = 'Element(tag=\'tagName\', attributes={\'id\': \'ID42\'}, ' \
'path="/root-0,name", value=[1, 2, 3])'
nose.tools.eq_(actual, str(expected))
def test_element_str():
actual = str(Element('tagName', value=[1, 2, 3],
attributes=dict(id='ID42'), path='/root-0,name'))
nose.tools.eq_(actual, '<tagName>')
def test_get_result_key_pass():
actual = get_result_tag('spirit:name')
nose.tools.eq_(actual, 'spirit:name')
def test_create_element_root_only():
el = create_element('root', {}, nameSpaces)
nose.tools.eq_(el, Element('root', value=[], path='/root-0,'))
def test_create_element_attribute_text_pass():
value = OrderedDict([('text', OrderedDict([('@attr', '42'),
('#text', 'root value')]))])
el = create_element('root', value, nameSpaces)
nose.tools.eq_(el, Element(tag='root', path="/root-0,", value=[
Element(tag='text', attributes=OrderedDict([('attr', '42')]),
path="/root-0,/text-0,", value='root value')]))
register = OrderedDict([('@id', 'ID6'), ('name', 'reg6'),
('addressOffset', '0'),
('size', '2'),
('access', 'writeOnce'),
('reset', OrderedDict([('value', '0')]))])
def test_create_element_one_level():
el = create_element('register', register, nameSpaces)
register_element = Element(tag='register', attributes=OrderedDict([
('id', 'ID6')]), path="/register-0,reg6", value=[
Element(tag='name', path="/register-0,reg6/name-0,", value='reg6'),
Element(tag='addressOffset', path="/register-0,reg6/addressOffset-0,",
value='0'),
Element(tag='size', path="/register-0,reg6/size-0,",
value='2'),
Element(tag='access', path="/register-0,reg6/access-0,",
value='writeOnce'),
Element(tag='reset', path="/register-0,reg6/reset-0,",
value=[
Element(tag='value', path="/register-0,reg6/reset-0,/value-0,",
value='0')])])
nose.tools.eq_(el, register_element)
def test_create_element_two_levels():
register_copy = copy(register)
field10 = OrderedDict(
[('@id', 'ID10'), ('name', 'field10'), ('bitOffset', '0'), ('bitWidth', '2'), ('volatile', 'false')])
field11 = OrderedDict(
[('@id', 'ID11'), ('name', 'field11'), ('bitOffset', '2'), ('bitWidth', '4'), ('volatile', 'true')])
register_copy['field'] = [field10, field11]
el = create_element('register', register_copy, nameSpaces)
expected = [
Element(tag='field', attributes=OrderedDict([('id', 'ID10')]), path="/register-0,reg6/field-0,field10",
value=[
Element(tag='name', path="/register-0,reg6/field-0,field10/name-0,", value='field10'),
Element(tag='bitOffset', path="/register-0,reg6/field-0,field10/bitOffset-0,", value='0'),
Element(tag='bitWidth', path="/register-0,reg6/field-0,field10/bitWidth-0,", value='2'),
Element(tag='volatile', path="/register-0,reg6/field-0,field10/volatile-0,", value='false')]),
Element(tag='field', attributes=OrderedDict([('id', 'ID11')]), path="/register-0,reg6/field-1,field11",
value=[
Element(tag='name', path="/register-0,reg6/field-1,field11/name-0,", value='field11'),
Element(tag='bitOffset', path="/register-0,reg6/field-1,field11/bitOffset-0,", value='2'),
Element(tag='bitWidth', path="/register-0,reg6/field-1,field11/bitWidth-0,", value='4'),
Element(tag='volatile', path="/register-0,reg6/field-1,field11/volatile-0,", value='true')])]
nose.tools.eq_(el.value[5:], expected)
def test_create_element_stats():
register_copy = copy(register)
field10 = OrderedDict([
('@id', 'ID10'), ('name', 'field10'), ('bitOffset', '0'),
('bitWidth', '2'), ('volatile', 'false')])
field11 = OrderedDict([
('@id', 'ID11'), ('name', 'field11'), ('bitOffset', '2'),
('bitWidth', '4'), ('volatile', 'true')])
register_copy['field'] = [field10, field11]
stats = defaultdict(int)
create_element('register', register_copy, nameSpaces, stats=stats)
expected_stats = {'reset': 1, 'bitOffset': 2, 'name': 3, 'bitWidth': 2,
'field.@id': 2, 'register.@id': 1, 'register': 1,
'addressOffset': 1, 'value': 1, 'access': 1, 'field': 2,
'volatile': 2, 'size': 1}
nose.tools.eq_(stats, expected_stats)
def test_create_element_stats_simple_value_list():
stats = defaultdict(int)
test = OrderedDict([('multiple', [1, 2, 3])])
create_element('test', test, nameSpaces, path='/', stats=stats)
nose.tools.eq_(stats, {'multiple': 1, 'test': 1})
def test_create_document():
register_copy = copy(register)
register_copy['@xmlns'] = 'http://www.spiritconsortium.org/XMLSchema/SPIRIT/1685-2009'
field10 = OrderedDict([
('@id', 'ID10'), ('name', 'field10'), ('bitOffset', '0'),
('bitWidth', '2'), ('volatile', 'false')])
field11 = OrderedDict([
('@id', 'ID11'), ('name', 'field11'), ('bitOffset', '2'),
('bitWidth', '4'), ('volatile', 'true')])
register_copy['field'] = [field10, field11]
doc = create_document('test_elements.py: register', OrderedDict(
[('register', register_copy)]))
expected_stats = {'reset': 1, 'bitOffset': 2, 'name': 3, 'bitWidth': 2,
'field.@id': 2, 'register.@id': 1, 'register': 1,
'addressOffset': 1, 'value': 1, 'access': 1, 'field': 2,
'volatile': 2, 'size': 1}
nose.tools.eq_(doc.stats, expected_stats)
def test_element_to_dict_pass():
element = Element('test', value=False)
actual = element.to_dict
nose.tools.eq_(actual, 'false')
def test_element_to_dict_list_pass():
element = Element('test', value=[1, 2.0, 'name'])
actual = element.to_dict
nose.tools.eq_(actual, ['1', '2.0', 'name'])
def test_element_to_dict_attribute_no_value_pass():
element = Element('test', attributes={'valid': False})
actual = element.to_dict
nose.tools.eq_(actual, OrderedDict([('@valid', 'false')]))
def test_element_to_dict_attribute_value_empty_list_pass():
element = Element('test', value=[], attributes={'valid': False})
actual = element.to_dict
nose.tools.eq_(actual, OrderedDict([('@valid', 'false')]))
def test_element_to_dict_id_attribute_pass():
element = Element('test', value=True, attributes={'id': 'ID42'})
actual = element.to_dict
nose.tools.eq_(actual, OrderedDict([('@id', 'ID42'), ('#text', 'true')]))
def test_element_to_dict_three_levels_mix_ns_prefix_pass():
level2 = Element('post:level2', value='level2Name')
level1 = Element('pre:level1', value=[level2])
root = Element('parent', value=[level1])
actual = root.to_dict
nose.tools.eq_(actual, OrderedDict([
('pre:level1', OrderedDict([('post:level2', 'level2Name')]))]))
def test_element_to_dict_two_levels_three_children_pass():
child0 = Element('pre:child', value='childName1')
child1 = Element('pre:child', value='childName2')
child2 = Element('pre:child', value='childName3')
parent = Element('pre:parent', value=[child0, child1, child2])
actual = parent.to_dict
nose.tools.eq_(actual, OrderedDict([
('pre:child', ['childName1', 'childName2', 'childName3'])]))
def test_document_to_dict_two_levels_three_children_pass():
child0 = Element('xsi:child', value='childName1')
child1 = Element('xsi:child', value='childName2')
child2 = Element('xsi:child', value='childName3')
root = Element('test:parent', value=[child0, child1, child2])
doc = Document('test', [nameSpaces[0]], root)
actual = doc.to_dict
nose.tools.eq_(actual, OrderedDict([
('test:parent', OrderedDict([
('@xmlns:xsi', 'http://www.w3.org/2001/XMLSchema-instance'),
('xsi:child', ['childName1', 'childName2', 'childName3'])
]))]))
def test_document_to_dict_two_levels_empty_ns_prefix_pass():
child = Element('child', value='childName1')
root = Element('prefix:parent', value=[child])
doc = Document('test', [nameSpaces[2]], root)
actual = doc.to_dict
nose.tools.eq_(actual, OrderedDict([
('prefix:parent', OrderedDict([
('@xmlns', 'http://www.spiritconsortium.org/XMLSchema/SPIRIT/1685-2009'),
('child', 'childName1')
]))]))
def test_document_to_dict_two_levels_round_trip_pass():
p = '/xsi:parent-0,/xsi:child-'
child = Element('xsi:child', value='childName1', path=p + '0,')
root = Element('xsi:parent', value=[child], path='/xsi:parent-0,')
stats = defaultdict(int)
stats['xsi:parent'] = 1
stats['xsi:child'] = 1
doc = Document('test', [nameSpaces[0]], root, stats=stats)
xml_dict = doc.to_dict
doc_new = create_document('test', xml_dict)
nose.tools.eq_(doc.__dict__.items(), doc_new.__dict__.items())
def test_document_to_dict_two_levels_top_attribute_round_trip_pass():
p = '/xsi:parent-0,/xsi:child-'
child = Element('xsi:child', value='childName1', path=p + '0,')
root = Element('xsi:parent', value=[child], path='/xsi:parent-0,')
stats = defaultdict(int)
stats['xsi:parent'] = 1
stats['xsi:child'] = 1
stats['xsi:parent.@id'] = 1
doc = Document('test', [nameSpaces[0]], root, stats=stats,
attributes=OrderedDict([('id', 'ID42')])
)
xml_dict = doc.to_dict
doc_new = create_document('test', xml_dict)
nose.tools.eq_(doc.__dict__.items(), doc_new.__dict__.items())
|
|
#!/usr/bin/env python
# Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Runner for standalone program tests of Nuitka.
These tests aim at showing that one specific module works in standalone
mode, trying to find issues with that packaging.
"""
import os
import sys
# Find nuitka package relative to us. The replacement is for POSIX python
# and Windows paths on command line.
sys.path.insert(
0,
os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__.replace("\\", os.sep))), "..", ".."
)
),
)
# isort:start
from nuitka.freezer.RuntimeTracing import getRuntimeTraceOfLoadedFiles
from nuitka.tools.testing.Common import (
checkLoadedFileAccesses,
checkRequirements,
compareWithCPython,
createSearchMode,
decideFilenameVersionSkip,
displayFileContents,
displayFolderContents,
displayRuntimeTraces,
reportSkip,
setup,
test_logger,
)
from nuitka.utils.FileOperations import removeDirectory
from nuitka.utils.Timing import TimerReport
from nuitka.utils.Utils import getOS
def displayError(dirname, filename):
assert dirname is None
dist_path = filename[:-3] + ".dist"
displayFolderContents("dist folder", dist_path)
inclusion_log_path = filename[:-3] + ".py.inclusion.log"
displayFileContents("inclusion log", inclusion_log_path)
def main():
    # Complex stuff; even more of it should become common code or project options though.
# pylint: disable=too-many-branches,too-many-statements
python_version = setup(needs_io_encoding=True)
search_mode = createSearchMode()
for filename in sorted(os.listdir(".")):
if not filename.endswith(".py"):
continue
if not decideFilenameVersionSkip(filename):
continue
active = search_mode.consider(dirname=None, filename=filename)
if not active:
test_logger.info("Skipping %s" % filename)
continue
extra_flags = [
"expect_success",
"--standalone",
"remove_output",
# Cache the CPython results for re-use, they will normally not change.
"cpython_cache",
# To understand what is slow.
"timing",
]
# skip each test if their respective requirements are not met
requirements_met, error_message = checkRequirements(filename)
if not requirements_met:
reportSkip(error_message, ".", filename)
continue
if filename == "Urllib3Using.py" and os.name == "nt":
reportSkip(
"Socket module early import not working on Windows currently",
".",
filename,
)
continue
if "Idna" in filename:
# For the warnings of Python2.
if python_version < (3,):
extra_flags.append("ignore_stderr")
if filename == "CtypesUsing.py":
extra_flags.append("plugin_disable:pylint-warnings")
if filename == "GtkUsing.py":
# Don't test on platforms not supported by current Debian testing, and
# which should be considered irrelevant by now.
if python_version < (2, 7):
reportSkip("irrelevant Python version", ".", filename)
continue
# For the warnings.
extra_flags.append("ignore_warnings")
if filename.startswith("Win"):
if os.name != "nt":
reportSkip("Windows only test", ".", filename)
continue
if filename == "TkInterUsing.py":
if getOS() == "Darwin":
reportSkip("Not working macOS yet", ".", filename)
continue
if getOS() == "Windows":
reportSkip("Can hang on Windows CI.", ".", filename)
continue
# For the plug-in information.
extra_flags.append("plugin_enable:tk-inter")
if filename == "FlaskUsing.py":
# For the warnings.
extra_flags.append("ignore_warnings")
if filename == "NumpyUsing.py":
# TODO: Disabled for now.
reportSkip("numpy.test not fully working yet", ".", filename)
continue
if filename == "PandasUsing.py":
extra_flags.append("plugin_enable:numpy")
extra_flags.append("plugin_disable:pylint-warnings")
extra_flags.append("plugin_disable:pyqt5")
extra_flags.append("plugin_disable:pyside2")
extra_flags.append("plugin_disable:pyside6")
if filename == "PmwUsing.py":
extra_flags.append("plugin_enable:pmw-freezer")
if filename == "OpenGLUsing.py":
# For the warnings.
extra_flags.append("ignore_warnings")
if filename == "GlfwUsing.py":
# For the warnings.
extra_flags.append("plugin_enable:glfw")
extra_flags.append("plugin_enable:numpy")
if filename == "PasslibUsing.py":
# For the warnings.
extra_flags.append("ignore_warnings")
if filename == "Win32ComUsing.py":
# For the warnings.
extra_flags.append("ignore_warnings")
if filename.startswith(("PySide2", "PySide6", "PyQt5", "PyQt6")):
# Don't test on platforms not supported by current Debian testing, and
# which should be considered irrelevant by now.
if python_version < (2, 7) or ((3,) <= python_version < (3, 7)):
reportSkip("irrelevant Python version", ".", filename)
continue
# For the plug-in information
if filename.startswith("PySide2"):
extra_flags.append("plugin_enable:pyside6")
elif filename.startswith("PySide6"):
extra_flags.append("plugin_enable:pyside6")
elif filename.startswith("PyQt5"):
extra_flags.append("plugin_enable:pyqt5")
elif filename.startswith("PyQt6"):
extra_flags.append("plugin_enable:pyqt6")
test_logger.info(
"Consider output of standalone mode compiled program: %s" % filename
)
# First compare so we know the program behaves identical.
compareWithCPython(
dirname=None,
filename=filename,
extra_flags=extra_flags,
search_mode=search_mode,
needs_2to3=False,
on_error=displayError,
)
# Second check if glibc libraries haven't been accidentally
# shipped with the standalone executable
found_glibc_libs = []
for dist_filename in os.listdir(os.path.join(filename[:-3] + ".dist")):
if os.path.basename(dist_filename).startswith(
(
"ld-linux-x86-64.so",
"libc.so.",
"libpthread.so.",
"libm.so.",
"libdl.so.",
"libBrokenLocale.so.",
"libSegFault.so",
"libanl.so.",
"libcidn.so.",
"libcrypt.so.",
"libmemusage.so",
"libmvec.so.",
"libnsl.so.",
"libnss_compat.so.",
"libnss_db.so.",
"libnss_dns.so.",
"libnss_files.so.",
"libnss_hesiod.so.",
"libnss_nis.so.",
"libnss_nisplus.so.",
"libpcprofile.so",
"libresolv.so.",
"librt.so.",
"libthread_db-1.0.so",
"libthread_db.so.",
"libutil.so.",
)
):
found_glibc_libs.append(dist_filename)
if found_glibc_libs:
test_logger.warning(
"Should not ship glibc libraries with the standalone executable (found %s)"
% found_glibc_libs
)
sys.exit(1)
binary_filename = os.path.join(
filename[:-3] + ".dist", filename[:-3] + (".exe" if os.name == "nt" else "")
)
# Then use "strace" on the result.
with TimerReport(
"Determining run time loaded files took %.2f", logger=test_logger
):
loaded_filenames = getRuntimeTraceOfLoadedFiles(
logger=test_logger, command=[binary_filename]
)
illegal_accesses = checkLoadedFileAccesses(
loaded_filenames=loaded_filenames, current_dir=os.getcwd()
)
if illegal_accesses:
displayError(None, filename)
displayRuntimeTraces(test_logger, binary_filename)
test_logger.warning(
"Should not access these file(s): '%r'." % illegal_accesses
)
search_mode.onErrorDetected(1)
removeDirectory(filename[:-3] + ".dist", ignore_errors=True)
search_mode.finish()
if __name__ == "__main__":
main()
|
|
"""Factory functions for asymmetric cryptography.
@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey,
parseAsPrivateKey
"""
from compat import *
from RSAKey import RSAKey
from Python_RSAKey import Python_RSAKey
import cryptomath
if cryptomath.m2cryptoLoaded:
from OpenSSL_RSAKey import OpenSSL_RSAKey
if cryptomath.pycryptoLoaded:
from PyCrypto_RSAKey import PyCrypto_RSAKey
# **************************************************************************
# Factory Functions for RSA Keys
# **************************************************************************
def generateRSAKey(bits, implementations=["openssl", "python"]):
"""Generate an RSA key with the specified bit length.
@type bits: int
@param bits: Desired bit length of the new key's modulus.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: A new RSA private key.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey.generate(bits)
elif implementation == "python":
return Python_RSAKey.generate(bits)
raise ValueError("No acceptable implementations")
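# Hedged usage sketch (not part of tlslite): generate a 2048-bit key and
# derive its public half. The implementation list above makes this fall
# back to the pure-Python key when M2Crypto is unavailable.
def _exampleGenerateKeySketch():
    privateKey = generateRSAKey(2048)
    publicKey = _createPublicKey(privateKey)
    return privateKey, publicKey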
def parseXMLKey(s, private=False, public=False, implementations=["python"]):
"""Parse an XML-format key.
The XML format used here is specific to tlslite and cryptoIDlib. The
format can store the public component of a key, or the public and
private components. For example::
<publicKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
</publicKey>
<privateKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
<d>JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy...
<p>5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc...
<q>/E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ...
<dP>mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6...
<dQ>qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB...
<qInv>j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr...
</privateKey>
@type s: str
@param s: A string containing an XML public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the private
key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will be
discarded, so this function will always return a public key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "python":
key = Python_RSAKey.parseXML(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
# Parse as an OpenSSL or Python key
def parsePEMKey(s, private=False, public=False, passwordCallback=None,
implementations=["openssl", "python"]):
"""Parse a PEM-format key.
The PEM format is used by OpenSSL and other tools. The
format is typically used to store both the public and private
components of a key. For example::
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+
dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH
dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB
AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc
esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO
gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl
aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV
VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV
CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv
i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP
wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG
6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH
h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe
-----END RSA PRIVATE KEY-----
To generate a key like this with OpenSSL, run::
openssl genrsa 2048 > key.pem
This format also supports password-encrypted private keys. TLS
Lite can only handle password-encrypted private keys when OpenSSL
and M2Crypto are installed. In this case, passwordCallback will be
invoked to query the user for the password.
@type s: str
@param s: A string containing a PEM-encoded public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the
private key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will
be discarded, so this function will always return a public key.
@type passwordCallback: callable
@param passwordCallback: This function will be called, with no
arguments, if the PEM-encoded private key is password-encrypted.
The callback should return the password string. If the password is
incorrect, SyntaxError will be raised. If no callback is passed
and the key is password-encrypted, a prompt will be displayed at
the console.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
key = OpenSSL_RSAKey.parse(s, passwordCallback)
break
elif implementation == "python":
key = Python_RSAKey.parsePEM(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
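# Hedged usage sketch (hypothetical file name and callback): read a PEM
# private key from disk, prompting for a password only if the key turns
# out to be encrypted, then derive the matching public key.
def _exampleParsePEMKeySketch(pemPath="server-key.pem"):
    def askPassword():
        import getpass
        return getpass.getpass("Key password: ")
    with open(pemPath) as keyFile:
        privateKey = parsePEMKey(keyFile.read(), private=True,
                                 passwordCallback=askPassword)
    publicKey = _createPublicKey(privateKey)
    return privateKey, publicKey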
def _parseKeyHelper(key, private, public):
if private:
if not key.hasPrivateKey():
raise SyntaxError("Not a private key!")
if public:
return _createPublicKey(key)
if private:
if hasattr(key, "d"):
return _createPrivateKey(key)
else:
return key
return key
def parseAsPublicKey(s):
"""Parse an XML or PEM-formatted public key.
@type s: str
@param s: A string containing an XML or PEM-encoded public or private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA public key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, public=True)
except:
return parseXMLKey(s, public=True)
def parsePrivateKey(s):
"""Parse an XML or PEM-formatted private key.
@type s: str
@param s: A string containing an XML or PEM-encoded private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA private key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, private=True)
except:
return parseXMLKey(s, private=True)
def _createPublicKey(key):
"""
Create a new public key. Discard any private component,
and return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
return _createPublicRSAKey(key.n, key.e)
def _createPrivateKey(key):
"""
Create a new private key. Return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
if not key.hasPrivateKey():
raise AssertionError()
return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,
key.dQ, key.qInv)
def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto",
"python"]):
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey(n, e)
elif implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e)
elif implementation == "python":
return Python_RSAKey(n, e)
raise ValueError("No acceptable implementations")
def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv,
implementations = ["pycrypto", "python"]):
for implementation in implementations:
if implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv)
elif implementation == "python":
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
raise ValueError("No acceptable implementations")
|
|
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
import shutil
import subprocess
import sys
import util
def get_platform_cmd_prefix():
if sys.platform == 'win32':
return ['cmd', '/S', '/C']
    return ['python2'] # The official test262.py isn't python3 compatible, but has a python shebang.
def get_arguments():
execution_runtime = os.environ.get('RUNTIME', '')
parser = argparse.ArgumentParser()
parser.add_argument('--runtime', metavar='FILE', default=execution_runtime,
help='Execution runtime (e.g. qemu)')
parser.add_argument('--engine', metavar='FILE', required=True,
help='JerryScript binary to run tests with')
parser.add_argument('--test-dir', metavar='DIR', required=True,
help='Directory contains test262 test suite')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--es51', action='store_true',
help='Run test262 ES5.1 version')
group.add_argument('--es2015', default=False, const='default',
nargs='?', choices=['default', 'all', 'update'],
help='Run test262 - ES2015. default: all tests except excludelist, ' +
'all: all tests, update: all tests and update excludelist')
group.add_argument('--esnext', default=False, const='default',
nargs='?', choices=['default', 'all', 'update'],
help='Run test262 - ES.next. default: all tests except excludelist, ' +
'all: all tests, update: all tests and update excludelist')
parser.add_argument('--test262-test-list', metavar='LIST',
help='Add a comma separated list of tests or directories to run in test262 test suite')
args = parser.parse_args()
if args.es2015:
args.test_dir = os.path.join(args.test_dir, 'es2015')
args.test262_harness_dir = os.path.abspath(os.path.dirname(__file__))
args.test262_git_hash = 'fd44cd73dfbce0b515a2474b7cd505d6176a9eb5'
args.excludelist_path = os.path.join('tests', 'test262-es6-excludelist.xml')
elif args.esnext:
args.test_dir = os.path.join(args.test_dir, 'esnext')
args.test262_harness_dir = os.path.abspath(os.path.dirname(__file__))
args.test262_git_hash = '281eb10b2844929a7c0ac04527f5b42ce56509fd'
args.excludelist_path = os.path.join('tests', 'test262-esnext-excludelist.xml')
else:
args.test_dir = os.path.join(args.test_dir, 'es51')
args.test262_harness_dir = args.test_dir
args.test262_git_hash = 'es5-tests'
args.mode = args.es2015 or args.esnext
return args
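# Hedged illustration (not part of the runner, argv values are hypothetical):
# parsing a typical ES.next invocation routes test_dir to the 'esnext'
# sub-directory and selects the ES.next excludelist and git revision.
def _example_esnext_arguments_sketch():
    saved_argv = sys.argv
    sys.argv = ['run-test262.py', '--engine', 'build/bin/jerry',
                '--test-dir', 'tests/test262', '--esnext']
    try:
        args = get_arguments()
        print(args.test_dir, args.excludelist_path, args.mode)
    finally:
        sys.argv = saved_argv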
def prepare_test262_test_suite(args):
if os.path.isdir(os.path.join(args.test_dir, '.git')):
return 0
return_code = subprocess.call(['git', 'clone', '--no-checkout',
'https://github.com/tc39/test262.git', args.test_dir])
if return_code:
print('Cloning test262 repository failed.')
return return_code
return_code = subprocess.call(['git', 'checkout', args.test262_git_hash], cwd=args.test_dir)
    assert not return_code, 'Checking out test262 repository failed - invalid git revision.'
if args.es51:
path_to_remove = os.path.join(args.test_dir, 'test', 'suite', 'bestPractice')
if os.path.isdir(path_to_remove):
shutil.rmtree(path_to_remove)
path_to_remove = os.path.join(args.test_dir, 'test', 'suite', 'intl402')
if os.path.isdir(path_to_remove):
shutil.rmtree(path_to_remove)
return 0
def update_exclude_list(args):
print("=== Summary - updating excludelist ===\n")
passing_tests = set()
failing_tests = set()
new_passing_tests = set()
with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'r') as report_file:
for line in report_file:
match = re.match('(=== )?(.*) (?:failed|passed) in (?:non-strict|strict)', line)
if match:
(unexpected, test) = match.groups()
test = test.replace('\\', '/')
if unexpected:
failing_tests.add(test + '.js')
else:
passing_tests.add(test + '.js')
    # Tests that pass in strict mode but fail in non-strict mode (or vice versa) should be considered failures
passing_tests = passing_tests - failing_tests
with open(args.excludelist_path, 'r+') as exclude_file:
lines = exclude_file.readlines()
exclude_file.seek(0)
exclude_file.truncate()
# Skip the last line "</excludeList>" to be able to insert new failing tests.
for line in lines[:-1]:
match = re.match(r" <test id=\"(\S*)\">", line)
if match:
test = match.group(1)
if test in failing_tests:
failing_tests.remove(test)
exclude_file.write(line)
elif test in passing_tests:
new_passing_tests.add(test)
else:
exclude_file.write(line)
else:
exclude_file.write(line)
if failing_tests:
print("New failing tests added to the excludelist")
for test in sorted(failing_tests):
exclude_file.write(' <test id="' + test + '"><reason></reason></test>\n')
print(" " + test)
print("")
exclude_file.write('</excludeList>\n')
if new_passing_tests:
print("New passing tests removed from the excludelist")
for test in sorted(new_passing_tests):
print(" " + test)
print("")
if failing_tests or new_passing_tests:
print("Excludelist was updated succesfully.")
return 1
print("Excludelist was already up-to-date.")
return 0
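# Illustrative sketch, not part of the original script: shows how the report
# parsing regex used in update_exclude_list() matches one line of test262.report.
# The sample line and test path below are made up for demonstration only.
def _example_report_line_match():
    sample = '=== built-ins/Array/length failed in strict mode'
    match = re.match('(=== )?(.*) (?:failed|passed) in (?:non-strict|strict)', sample)
    # groups() -> ('=== ', 'built-ins/Array/length'); the leading '=== ' marker
    # flags an unexpected result, which update_exclude_list() treats as a failure.
    return match.groups() if match else None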
def main(args):
return_code = prepare_test262_test_suite(args)
if return_code:
return return_code
if sys.platform == 'win32':
original_timezone = util.get_timezone()
util.set_sighdl_to_reset_timezone(original_timezone)
util.set_timezone('Pacific Standard Time')
command = (args.runtime + ' ' + args.engine).strip()
if args.es2015 or args.esnext:
try:
subprocess.check_output(["timeout", "--version"])
command = "timeout 5 " + command
        except (subprocess.CalledProcessError, OSError):
pass
kwargs = {}
if sys.version_info.major >= 3:
kwargs['errors'] = 'ignore'
if args.es51:
test262_harness_path = os.path.join(args.test262_harness_dir, 'tools/packaging/test262.py')
else:
test262_harness_path = os.path.join(args.test262_harness_dir, 'test262-harness.py')
test262_command = get_platform_cmd_prefix() + \
[test262_harness_path,
'--command', command,
'--tests', args.test_dir,
'--summary']
if 'excludelist_path' in args and args.mode == 'default':
test262_command.extend(['--exclude-list', args.excludelist_path])
if args.test262_test_list:
test262_command.extend(args.test262_test_list.split(','))
proc = subprocess.Popen(test262_command,
universal_newlines=True,
stdout=subprocess.PIPE,
**kwargs)
return_code = 1
with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'w') as output_file:
counter = 0
summary_found = False
summary_end_found = False
while True:
output = proc.stdout.readline()
if not output:
break
output_file.write(output)
if output.startswith('=== Summary ==='):
summary_found = True
print('')
if summary_found:
if not summary_end_found:
print(output, end='')
if not output.strip():
summary_end_found = True
if 'All tests succeeded' in output:
return_code = 0
elif re.search('in (non-)?strict mode', output):
counter += 1
if (counter % 100) == 0:
print(".", end='')
if (counter % 5000) == 0:
print(" Executed %d tests." % counter)
proc.wait()
if sys.platform == 'win32':
util.set_timezone(original_timezone)
if args.mode == 'update':
return_code = update_exclude_list(args)
return return_code
if __name__ == "__main__":
sys.exit(main(get_arguments()))
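# Example invocation of this runner (not from the original script; the script name,
# engine and test directory paths below are placeholders):
#   python <this-script>.py --engine /path/to/jerry --test-dir /path/to/test262 --esnext update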
|
|
#!/usr/bin/env python2
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import sys
import json
import urllib2
import functools
import subprocess
from sparktestsupport import SPARK_HOME, ERROR_CODES
from sparktestsupport.shellutils import run_cmd
def print_err(msg):
"""
    Prints the given message to the STDERR stream
"""
print(msg, file=sys.stderr)
def post_message_to_github(msg, ghprb_pull_id):
print("Attempting to post to Github...")
url = "https://api.github.com/repos/apache/spark/issues/" + ghprb_pull_id + "/comments"
github_oauth_key = os.environ["GITHUB_OAUTH_KEY"]
posted_message = json.dumps({"body": msg})
request = urllib2.Request(url,
headers={
"Authorization": "token %s" % github_oauth_key,
"Content-Type": "application/json"
},
data=posted_message)
try:
response = urllib2.urlopen(request)
if response.getcode() == 201:
print(" > Post successful.")
except urllib2.HTTPError as http_e:
print_err("Failed to post message to Github.")
print_err(" > http_code: %s" % http_e.code)
print_err(" > api_response: %s" % http_e.read())
print_err(" > data: %s" % posted_message)
except urllib2.URLError as url_e:
print_err("Failed to post message to Github.")
print_err(" > urllib2_status: %s" % url_e.reason[1])
print_err(" > data: %s" % posted_message)
def pr_message(build_display_name,
build_url,
ghprb_pull_id,
short_commit_hash,
commit_url,
msg,
post_msg=''):
# align the arguments properly for string formatting
str_args = (build_display_name,
msg,
build_url,
ghprb_pull_id,
short_commit_hash,
commit_url,
str(' ' + post_msg + '.') if post_msg else '.')
return '**[Test build %s %s](%stestReport)** for PR %s at commit [`%s`](%s)%s' % str_args
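# Illustrative sketch, not part of the original script: demonstrates the argument
# order pr_message() expects. Every value below is a made-up placeholder.
def _example_pr_message():
    return pr_message('#1234',
                      'https://jenkins.example/job/SparkPullRequestBuilder/1234/',
                      '2606',
                      'abc1234',
                      'https://github.com/apache/spark/commit/abc1234',
                      'has started')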
def run_pr_checks(pr_tests, ghprb_actual_commit, sha1):
"""
Executes a set of pull request checks to ease development and report issues with various
components such as style, linting, dependencies, compatibilities, etc.
@return a list of messages to post back to Github
"""
# Ensure we save off the current HEAD to revert to
current_pr_head = run_cmd(['git', 'rev-parse', 'HEAD'], return_output=True).strip()
pr_results = list()
for pr_test in pr_tests:
test_name = pr_test + '.sh'
pr_results.append(run_cmd(['bash', os.path.join(SPARK_HOME, 'dev', 'tests', test_name),
ghprb_actual_commit, sha1],
return_output=True).rstrip())
# Ensure, after each test, that we're back on the current PR
run_cmd(['git', 'checkout', '-f', current_pr_head])
return pr_results
def run_tests(tests_timeout):
"""
Runs the `dev/run-tests` script and responds with the correct error message
under the various failure scenarios.
@return a tuple containing the test result code and the result note to post to Github
"""
test_result_code = subprocess.Popen(['timeout',
tests_timeout,
os.path.join(SPARK_HOME, 'dev', 'run-tests')]).wait()
failure_note_by_errcode = {
1: 'executing the `dev/run-tests` script', # error to denote run-tests script failures
ERROR_CODES["BLOCK_GENERAL"]: 'some tests',
ERROR_CODES["BLOCK_RAT"]: 'RAT tests',
ERROR_CODES["BLOCK_SCALA_STYLE"]: 'Scala style tests',
ERROR_CODES["BLOCK_JAVA_STYLE"]: 'Java style tests',
ERROR_CODES["BLOCK_PYTHON_STYLE"]: 'Python style tests',
ERROR_CODES["BLOCK_R_STYLE"]: 'R style tests',
ERROR_CODES["BLOCK_DOCUMENTATION"]: 'to generate documentation',
ERROR_CODES["BLOCK_BUILD"]: 'to build',
ERROR_CODES["BLOCK_BUILD_TESTS"]: 'build dependency tests',
ERROR_CODES["BLOCK_MIMA"]: 'MiMa tests',
ERROR_CODES["BLOCK_SPARK_UNIT_TESTS"]: 'Spark unit tests',
ERROR_CODES["BLOCK_PYSPARK_UNIT_TESTS"]: 'PySpark unit tests',
ERROR_CODES["BLOCK_PYSPARK_PIP_TESTS"]: 'PySpark pip packaging tests',
ERROR_CODES["BLOCK_SPARKR_UNIT_TESTS"]: 'SparkR unit tests',
ERROR_CODES["BLOCK_TIMEOUT"]: 'from timeout after a configured wait of \`%s\`' % (
tests_timeout)
}
if test_result_code == 0:
test_result_note = ' * This patch passes all tests.'
else:
note = failure_note_by_errcode.get(
test_result_code, "due to an unknown error code, %s" % test_result_code)
test_result_note = ' * This patch **fails %s**.' % note
return [test_result_code, test_result_note]
def main():
# Important Environment Variables
# ---
# $ghprbActualCommit
# This is the hash of the most recent commit in the PR.
# The merge-base of this and master is the commit from which the PR was branched.
# $sha1
# If the patch merges cleanly, this is a reference to the merge commit hash
# (e.g. "origin/pr/2606/merge").
# If the patch does not merge cleanly, it is equal to $ghprbActualCommit.
# The merge-base of this and master in the case of a clean merge is the most recent commit
# against master.
ghprb_pull_id = os.environ["ghprbPullId"]
ghprb_actual_commit = os.environ["ghprbActualCommit"]
ghprb_pull_title = os.environ["ghprbPullTitle"]
sha1 = os.environ["sha1"]
# Marks this build as a pull request build.
os.environ["AMP_JENKINS_PRB"] = "true"
# Switch to a Maven-based build if the PR title contains "test-maven":
if "test-maven" in ghprb_pull_title:
os.environ["AMPLAB_JENKINS_BUILD_TOOL"] = "maven"
# Switch the Hadoop profile based on the PR title:
if "test-hadoop2.6" in ghprb_pull_title:
os.environ["AMPLAB_JENKINS_BUILD_PROFILE"] = "hadoop2.6"
if "test-hadoop2.7" in ghprb_pull_title:
os.environ["AMPLAB_JENKINS_BUILD_PROFILE"] = "hadoop2.7"
build_display_name = os.environ["BUILD_DISPLAY_NAME"]
build_url = os.environ["BUILD_URL"]
commit_url = "https://github.com/apache/spark/commit/" + ghprb_actual_commit
# GitHub doesn't auto-link short hashes when submitted via the API, unfortunately. :(
short_commit_hash = ghprb_actual_commit[0:7]
# format: http://linux.die.net/man/1/timeout
# must be less than the timeout configured on Jenkins (currently 400m)
tests_timeout = "340m"
# Array to capture all test names to run on the pull request. These tests are represented
# by their file equivalents in the dev/tests/ directory.
#
# To write a PR test:
# * the file must reside within the dev/tests directory
# * be an executable bash script
# * accept three arguments on the command line, the first being the Github PR long commit
# hash, the second the Github SHA1 hash, and the final the current PR hash
# * and, lastly, return string output to be included in the pr message output that will
# be posted to Github
pr_tests = [
"pr_merge_ability",
"pr_public_classes"
]
    # `github_message` binds the common build arguments of `pr_message` to generate messages for Github posting
github_message = functools.partial(pr_message,
build_display_name,
build_url,
ghprb_pull_id,
short_commit_hash,
commit_url)
# post start message
post_message_to_github(github_message('has started'), ghprb_pull_id)
pr_check_results = run_pr_checks(pr_tests, ghprb_actual_commit, sha1)
test_result_code, test_result_note = run_tests(tests_timeout)
# post end message
result_message = github_message('has finished')
result_message += '\n' + test_result_note + '\n'
result_message += '\n'.join(pr_check_results)
post_message_to_github(result_message, ghprb_pull_id)
sys.exit(test_result_code)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AWS IAM Functionality"""
import datetime
import json
import os
from typing import TYPE_CHECKING, Optional, Tuple
from libcloudforensics import errors
from libcloudforensics import logging_utils
from libcloudforensics.providers.aws.internal import common
if TYPE_CHECKING:
# TYPE_CHECKING is always False at runtime, therefore it is safe to ignore
  # the following cyclic import, as it is only used for type hints
from libcloudforensics.providers.aws.internal import account # pylint: disable=cyclic-import
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
IAM_POLICY_DIR = './iampolicies'
# Policy allowing an instance to image a volume
EBS_COPY_POLICY_DOC = 'ebs_copy_to_s3_policy.json'
# Policy doc to allow EC2 to assume the role. Necessary for instance profiles
EC2_ASSUME_ROLE_POLICY_DOC = 'ec2_assume_role_policy.json'
# Policy to deny all session tokens generated after a date
IAM_DENY_ALL_AFTER_TOKEN_ISSUE_DATE = 'revoke_old_sessions.json'
class IAM:
"""Class that represents AWS IAM services"""
def __init__(self,
aws_account: 'account.AWSAccount') -> None:
"""Initialize the AWS IAM client object.
Args:
aws_account (AWSAccount): An AWS account object.
"""
self.aws_account = aws_account
self.client = self.aws_account.ClientApi(common.IAM_SERVICE)
def CheckInstanceProfileExists(self, profile_name: str) -> bool:
"""Check if an instance role exists.
Args:
profile_name (str): Instance profile name.
Returns:
bool: True if the Instance Profile exists, false otherwise.
"""
try:
self.client.get_instance_profile(InstanceProfileName=profile_name)
return True
except self.client.exceptions.NoSuchEntityException:
return False
def CreatePolicy(self, name: str, policy_doc: str) -> Tuple[str, bool]:
"""Creates an IAM policy using the name and policy doc passed in.
If the policy exists already, return the Arn of the existing policy.
Args:
name (str): Name for the policy
policy_doc (str): IAM Policy document as a json string.
Returns:
Tuple[str, bool]: A tuple containing:
str: The policy Amazon Resource Name (ARN).
bool: True if the policy was created, False if it existed already.
Raises:
ResourceNotFoundError: If the policy failed creation due to already
existing, but then could not be found
"""
logger.info('Creating IAM policy {0:s}'.format(name))
try:
policy = self.client.create_policy(
PolicyName=name, PolicyDocument=policy_doc)
return str(policy['Policy']['Arn']), True
except self.client.exceptions.EntityAlreadyExistsException as exception:
logger.info('Policy exists already, using existing')
policies = self.client.list_policies(Scope='Local')
while True:
for policy in policies['Policies']:
if policy['PolicyName'] == name:
return str(policy['Arn']), False
if not policies['IsTruncated']:
# If we reached here it means the policy was deleted between the
# creation failure and lookup
# pylint: disable=line-too-long
raise errors.ResourceNotFoundError('Could not locate policy with name {0:s} after creation failure due to EntityAlreadyExistsException'
# pylint: enable=line-too-long
.format(name), __name__) from exception
policies = self.client.list_policies(
Scope='Local', Marker=policies['Marker'])
def DeletePolicy(self, arn: str) -> None:
"""Deletes the IAM policy with the given name.
Args:
      arn (str): The ARN of the policy to delete.
"""
logger.info('Deleting IAM policy {0:s}'.format(arn))
try:
self.client.delete_policy(PolicyArn=arn)
except self.client.exceptions.NoSuchEntityException:
logger.info('IAM policy {0:s} did not exist'.format(arn))
def CreateInstanceProfile(self, name: str) -> Tuple[str, bool]:
"""Create an EC2 instance Profile. If the profile exists already, returns
the Arn of the existing.
Args:
name (str): The name of the instance profile.
Returns:
Tuple[str, bool]: A tuple containing:
str: The instance profile Amazon Resource Name (ARN).
bool: True if the instance profile was created, False if it existed
already.
Raises:
ResourceNotFoundError: If the profile failed creation due to already
existing, but then could not be found
"""
logger.info('Creating IAM Instance Profile {0:s}'.format(name))
try:
profile = self.client.create_instance_profile(InstanceProfileName=name)
return str(profile['InstanceProfile']['Arn']), True
except self.client.exceptions.EntityAlreadyExistsException as exception:
logger.info('Instance Profile exists already, using existing')
profiles = self.client.list_instance_profiles()
while True:
for profile in profiles['InstanceProfiles']:
if profile['InstanceProfileName'] == name:
return str(profile['Arn']), False
if not profiles['IsTruncated']:
# If we reached here it means the profile was deleted between the
# creation failure and lookup
# pylint: disable=line-too-long
raise errors.ResourceNotFoundError('Could not locate instance profile with name {0:s} after creation failure due to EntityAlreadyExistsException'
# pylint: enable=line-too-long
.format(name), __name__) from exception
profiles = self.client.list_instance_profiles(Marker=profiles['Marker'])
def DeleteInstanceProfile(self, profile_name: str) -> None:
"""Deletes an instance profile.
Args:
profile_name (str): The name of the instance profile to delete.
"""
logger.info('Deleting instance profile {0:s}'.format(profile_name))
try:
self.client.delete_instance_profile(InstanceProfileName=profile_name)
except self.client.exceptions.NoSuchEntityException:
      logger.info('Instance profile {0:s} did not exist'.format(profile_name))
def CreateRole(self, name: str, assume_role_policy_doc: str) \
-> Tuple[str, bool]:
"""Create an AWS IAM role. If it exists, return the existing.
    Args:
name (str): The name of the role.
assume_role_policy_doc (str): Assume Role policy doc.
Returns:
Tuple[str, bool]: A tuple
str: The Arn of the role.
bool: True if the role was created, false if it existed already.
Raises:
ResourceNotFoundError: If the role failed creation due to already
existing, but then could not be found
"""
logger.info('Creating IAM Role {0:s}'.format(name))
try:
role = self.client.create_role(RoleName=name,
AssumeRolePolicyDocument=assume_role_policy_doc)
return str(role['Role']['Arn']), True
except self.client.exceptions.EntityAlreadyExistsException as exception:
logger.info('Role exists already, using existing')
roles = self.client.list_roles()
while True:
for role in roles['Roles']:
if role['RoleName'] == name:
return str(role['Arn']), False
if not roles['IsTruncated']:
# If we reached here it means the role was deleted between the
# creation failure and lookup
# pylint: disable=line-too-long
raise errors.ResourceNotFoundError('Could not locate role with name {0:s} after creation failure due to EntityAlreadyExistsException'
# pylint: enable=line-too-long
.format(name), __name__) from exception
roles = self.client.list_roles(Marker=roles['Marker'])
def DeleteRole(self, role_name: str) -> None:
"""Delete an IAM role.
Args:
role_name (str): The name of the role to delete.
"""
logger.info('Deleting IAM role {0:s}'.format(role_name))
try:
self.client.delete_role(RoleName=role_name)
except self.client.exceptions.NoSuchEntityException:
logger.info('IAM role {0:s} did not exist'.format(role_name))
def AttachPolicyToRole(self, policy_arn: str, role_name: str) -> None:
"""Attaches an IAM policy to an IAM role.
Args:
policy_arn (str): The Policy Arn.
role_name (str): The Role Name.
"""
logger.info('Attaching policy {0:s} to role {1:s}'
.format(policy_arn, role_name))
try:
self.client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn)
except Exception as e:
raise errors.ResourceNotFoundError(
'Attaching policy {0:s} to role {1:s} failed'
.format(policy_arn, role_name), __name__) from e
  def DetachPolicyFromRole(self, policy_arn: str, role_name: str) -> None:
"""Detach a policy from a role.
Args:
policy_arn (str): The Arn of the policy to remove.
role_name (str): The name of the role.
"""
logger.info('Detaching policy {0:s} from role {1:s}'
.format(policy_arn, role_name))
try:
self.client.detach_role_policy(
RoleName=role_name, PolicyArn=policy_arn)
except self.client.exceptions.NoSuchEntityException:
pass
# It doesn't matter if this fails.
def AttachInstanceProfileToRole(self,
instance_profile_name: str,
role_name: str) -> None:
"""Attach a role to an instance profile.
Args:
      instance_profile_name: The name of the instance profile.
role_name: The role name.
"""
logger.info('Attaching role {0:s} to instance profile {1:s}'
.format(role_name, instance_profile_name))
try:
self.client.add_role_to_instance_profile(
InstanceProfileName=instance_profile_name, RoleName=role_name)
except self.client.exceptions.LimitExceededException:
# pylint: disable=line-too-long
logger.info('Instance profile {0:s} already has a role attached. Proceeding on assumption this is the correct attachment'
# pylint: enable=line-too-long
.format(instance_profile_name))
def DetachInstanceProfileFromRole(self, role_name: str, profile_name: str) \
-> None:
"""Detach a role from an instance profile.
Args:
role_name (str): The name of the role.
profile_name (str): The name of the instance profile.
"""
logger.info('Detaching role {0:s} from instance profile {1:s}'
.format(role_name, profile_name))
try:
self.client.remove_role_from_instance_profile(
InstanceProfileName=profile_name, RoleName=role_name)
except self.client.exceptions.NoSuchEntityException:
pass
# It doesn't matter if this fails.
def RevokeOldSessionsForRole(self, role_name: str) -> None:
"""Revoke old session tokens for a role.
This is achieved by adding an inline policy to the role, Deny *:* on the
condition of TokenIssueTime.
Args:
role_name (str): The role name to act on.
"""
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z")
policy = json.loads(ReadPolicyDoc(IAM_DENY_ALL_AFTER_TOKEN_ISSUE_DATE))
policy['Statement'][0]['Condition']['DateLessThan']['aws:TokenIssueTime']\
= now
policy = json.dumps(policy)
try:
self.client.put_role_policy(
RoleName=role_name,
PolicyName='RevokeOldSessions',
PolicyDocument=policy
)
except self.client.exceptions.ClientError as exception:
raise errors.ResourceNotFoundError(
'Could not add inline policy to IAM role {0:s}: {1!s}'.format(
role_name, exception), __name__) from exception
def ReadPolicyDoc(filename: str) -> str:
"""Read and return the IAM policy doc at filename.
Args:
filename (str): the name of the policy file in the iampolicies directory.
Returns:
str: The policy doc.
Raises:
OSError: If the policy file cannot be opened, read or closed.
"""
try:
policy_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), IAM_POLICY_DIR, filename)
with open(policy_path, encoding='utf-8') as policy_doc:
return policy_doc.read()
except OSError as exception:
raise OSError(
'Could not open/read/close the policy doc {0:s}: {1:s}'.format(
policy_path, str(exception))) from exception
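# Illustrative sketch, not part of this module: one possible way to combine the
# helpers above into an instance profile wired up for EBS-to-S3 copying. The
# 'example-*' names are arbitrary placeholders chosen for the sketch.
def _ExampleBuildInstanceProfile(aws_account: 'account.AWSAccount') -> str:
  """Returns the ARN of an example instance profile with the copy policy."""
  iam = IAM(aws_account)
  # Create (or reuse) the policy and the role EC2 instances will assume.
  policy_arn, _ = iam.CreatePolicy(
      'example-ebs-copy', ReadPolicyDoc(EBS_COPY_POLICY_DOC))
  iam.CreateRole('example-ebs-copy-role', ReadPolicyDoc(EC2_ASSUME_ROLE_POLICY_DOC))
  iam.AttachPolicyToRole(policy_arn, 'example-ebs-copy-role')
  # Wrap the role in an instance profile so it can be attached to an instance.
  profile_arn, _ = iam.CreateInstanceProfile('example-ebs-copy-profile')
  iam.AttachInstanceProfileToRole('example-ebs-copy-profile', 'example-ebs-copy-role')
  return profile_arn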
|
|
################################################################################
#
# Copyright 2015 Crown copyright (c)
# Land Information New Zealand and the New Zealand Government.
# All rights reserved
#
# This program is released under the terms of the 3 clause BSD license. See the
# LICENSE file for more information.
#
################################################################################
from datetime import datetime as DT
#from functools import wraps
import os
import sys
import re
import logging
import threading
from AimsApi import AimsApi
from AimsUtility import FeedRef,ActionType,ApprovalType,GroupActionType,GroupApprovalType,UserActionType,FeatureType,FeedType,SupplementalHack
from AimsUtility import AimsException
from Const import ENABLE_ENTITY_EVALUATION,MERGE_RESPONSE,MERGE_EXCLUDE,MAX_FEATURE_COUNT
from Address import Entity, EntityValidation, EntityAddress
from AimsLogging import Logger
from FeatureFactory import FeatureFactory
from Observable import Observable
aimslog = None
class DataUpdaterSelectionException(AimsException):pass
class DataUpdater(Observable):
    '''Maintenance thread controlling data updates and API interaction.
    Instantiates an AimsApi instance with wrappers for initialisation of the local data store
and change/resolution feed updating
'''
#et = FeatureType.ADDRESS
#ft = FeedType.FEATURES
address = None
pno = 0
changeId = 0
global aimslog
aimslog = Logger.setup()
getfeat = None
def __init__(self,params,queue):
'''DataUpdater base initialiser.
@param params: List of configuration parameters
@type params: List<?>
@param queue: Response queue
        @type queue: Queue.Queue
'''
super(DataUpdater,self).__init__()
self.ref,self.conf,self.factory = params
self.queue = queue
#self._stop = threading.Event()
self.api = AimsApi(self.conf)
def setup(self,etft,sw,ne,pno):
'''Parameter setup.
@param etft: Feed/Feature identifier
@type etft: FeedRef
@param sw: South-West corner, coordinate value pair
@type sw: List<Double>{2}
@param ne: North-East corner, coordinate value pair
@type ne: List<Double>{2}
@param pno: Feed page number
@type pno: Integer
'''
self.etft = etft
self.sw,self.ne = sw,ne
self.pno = pno
def run(self):
'''Main updater run method fetching single page of addresses from API'''
aimslog.info('GET.{} {} - Page{}'.format(self.ref,self.etft,self.pno))
featlist = []
#for page in self.api.getOnePage(self.etft,self.sw,self.ne,self.pno):
# featlist.append(self.processPage(page,self.etft))
ce,pages = self.api.getOnePage(self.etft,self.sw,self.ne,self.pno)
if any(ce.values()): aimslog.error('Single-page request failure {}'.format(ce))
if pages.has_key('entities'):
for page in pages['entities']:
featlist.append(self.processPage(page,self.etft))
else:
aimslog.error('Single-page response missing entities')
self.queue.put(featlist)
self.notify(self.ref)
def processPage(self,page,etft):
'''Process an individual page. If page is resolution type optionally re-query at individual level
@param page: Processed results from pno request
@type page: Dict<?>
@param etft: Feed/Feature identifier
@type etft: FeedRef
'''
if etft.ft == FeedType.RESOLUTIONFEED and ENABLE_ENTITY_EVALUATION:
cid = self.cid(page)
ce,feat = self.api.getOneFeature(etft,cid)
if any(ce.values()): aimslog.error('Single-feature request failure {}'.format(ce))
if feat == {u'class': [u'error']}:
#if the pno request returns the not-supposed-to-happen error, it gets special treatment
aimslog.error('Invalid API response {}'.format(feat))
#return self.factory.get(model=pno['properties'])
else:
return self._processEntity(feat,cid,etft)
else:
#just return the main feedlevel address objects
return self.factory.get(model=page['properties'])
def _processEntity(self,feat,cid,etft):
        '''Identify and select group, address or entity processing for a resolutionfeed feature
@param feat: dict representation of feature before object processing
@type feat: Dict
@param cid: Change ID or group change ID
@type cid: Integer
@param etft: Feed/Feature identifier
@type etft: FeedRef
@return: Instantiated feature object
'''
if feat['class'][0] == 'validation':
return self._processValidationEntity(feat)
#e = EntityValidation.getInstance(feat)# self.getEntityInstance()
#-------------------------------
# Resolution Group
elif feat['class'][0] == 'resolutiongroup':
return self._processResolutionGroup(feat,cid,etft)
# Address List
elif feat['class'][0] == 'addressresolution':
return self._processAddressResolution(feat)
#--------------------------------
# Simple Entity object
else:
return self._processSimpleEntity(self.factory.get,feat)
def _processValidationEntity(self,feat):
'''Wraps call to validation entity instantiator
@param feat: dict representation of feature before object processing
@type feat: Dict
@return: Instantiated validation Entity
'''
return EntityValidation.getInstance(feat)
def _processAddressEntity(self,feat):
'''Processes feature data into address object
@param feat: dict representation of feature before object processing
@type feat: Dict
@return: Instantiated Address entity
'''
#return EntityAddress.getInstance(feat)
return self._processSimpleEntity(FeatureFactory.getInstance(FeedRef((FeatureType.ADDRESS,FeedType.RESOLUTIONFEED))).get,feat)
def _processSimpleEntity(self,fact,feat):
'''Default processor for generic entities but the same as address resolution processor (below).
@param fact: Link to factory, object instantiation method
@type fact: <Function>
@param feat: dict representation of feature before object processing
@type feat: Dict
@return: Instantiated Address entity
'''
featurelist = []
a = fact(model=feat['properties'])
if feat.has_key('entities'):
for e in feat['entities']:
featurelist.append(self._populateEntity(e))
a._setEntities(featurelist)
return a
def _processAddressResolution(self,feat):
'''Processes entries in the addressresolution entities list
@param feat: dict representation of feature before object processing
@type feat: Dict
@return: Instantiated Address entity
'''
featurelist = []
a = self.factory.get(model=feat['properties'])
for e in feat['entities']:
featurelist.append(self._populateEntity(e))
a._setEntities(featurelist)
return a
def _processResolutionGroup(self,feat,cid,etft):
'''Processes the res-address objects in a res-group. Subsequently populates the sub entities as feature-addresses.
@param feat: dict representation of feature before object processing
@type feat: Dict
@param cid: Change ID or group change ID
@type cid: Integer
@param etft: Feed/Feature identifier
@type etft: FeedRef
@return: Instantiated feature object
'''
featurelist = []
g = self.factory.get(model=feat['properties'])#group
#HACK subst cid for cid+count string
ce,feat2 = self.api.getOneFeature(etft,'{}/address?count={}'.format(cid,MAX_FEATURE_COUNT))#group entity/adr list
if any(ce.values()): aimslog.error('Single-feature request failure {}'.format(ce))
etft2 = FeedRef((FeatureType.ADDRESS,FeedType.RESOLUTIONFEED))
factory2 = FeatureFactory.getInstance(etft2)
for f in feat2['entities']:
a = factory2.get(model=f['properties'])
elist2 = []
for e in f['entities']:
elist2.append(self._populateEntity(e))
a._setEntities(elist2)
featurelist.append(a)
g._setEntities(featurelist)
return g
def _populateEntity(self,ent):
'''Selects type and instantiates appropriate entity object.
@param ent: dict representation of feature before object processing
@type ent: Dict
'''
if ent['class'][0] == 'validation':
return self._processValidationEntity(ent)
elif ent['class'][0] == 'address':
###res factory might work here instead
#etft3 = FeedRef((FeatureType.ADDRESS,FeedType.FEATURES))
#factory3 = FeatureFactory.getInstance(etft3)
#return factory3.get(model=e['properties'])
return self._processAddressEntity(ent)
else:
return Entity.getInstance(ent)
@staticmethod
def getInstance(etft):
        '''Based on the provided FeedRef this getInstance returns a group, address or user updater object
@param etft: Feed/Feature identifier
@type etft: FeedRef
'''
if etft.et == FeatureType.GROUPS: return DataUpdaterGroup
elif etft.et == FeatureType.ADDRESS: return DataUpdaterAddress
elif etft.et == FeatureType.USERS: return DataUpdaterUser
else: raise DataUpdaterSelectionException('Select Address,Groups or Users')
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def close(self):
aimslog.info('Queue {} stopped'.format(self.queue.qsize()))
self.queue.task_done()
#executed by subclass
def cid(self,_): pass
#---------------------------------------------------------------
#simple subclasses to assign getaddress/getgroup function
class DataUpdaterAddress(DataUpdater):
'''Dataupdater subclass for Address objects'''
def __init__(self,params,queue):
'''Initialises Address DataUpdater
@param params: List of configuration parameters
@type params: List<?>
@param queue: Response queue
        @type queue: Queue.Queue
'''
super(DataUpdaterAddress,self).__init__(params,queue)
def cid(self,f):
return f['properties']['changeId']
# def getEntityInstance(self,ref):
# return EntityValidation(ref)
class DataUpdaterGroup(DataUpdater):
'''Dataupdater subclass for Group objects'''
def __init__(self,params,queue):
'''Initialises Group DataUpdater
@param params: List of configuration parameters
@type params: List<?>
@param queue: Response queue
        @type queue: Queue.Queue
'''
super(DataUpdaterGroup,self).__init__(params,queue)
def cid(self,f):
return f['properties']['changeGroupId']
# def getEntityInstance(self,ref):
# return EntityAddress(ref)
class DataUpdaterUser(DataUpdater):
'''Dataupdater subclass for User objects'''
def __init__(self,params,queue):
'''Initialises User DataUpdater
@param params: List of configuration parameters
@type params: List<?>
@param queue: Response queue
        @type queue: Queue.Queue
'''
super(DataUpdaterUser,self).__init__(params,queue)
#TODO Consolidate group/address + action/approve subclasses. There might be enough variation to retain separate classes
#NOTES variables incl; oft=FF/RF,id=addressId/changeId/groupChangeId, action=approve/action/groupaction
class DataUpdaterDRC(DataUpdater):
'''Super class for DRC DataUpdater classes'''
#instantiated in subclass
oft,etft,identifier,payload,actiontype,action,agu,at,build,requestId = 10*(None,)
def version(self):
'''Quick self checker for existing version number to save additional request
I{This functionality is under development in the API}
'''
return self.agu._version if hasattr(self.agu,'_version') and self.agu._version else self._version()
def _version(self):
'''Function to read AIMS version value from single Feature pages
@return: Integer. Feature version number
'''
_,cid = SupplementalHack.strip(self.identifier)
ce,jc = self.api.getOneFeature(FeedRef((self.etft.et,self.oft)),cid)
if any(ce.values()): aimslog.error('Single-feature request failure {}'.format(ce))
if jc['properties'].has_key('version'):
return jc['properties']['version']
else:
#WORKAROUND
aimslog.warn('No version number available for address/groupId={}'.format(self.identifier))
return 1
def run(self):
        '''One pass run method to interact with the API and return single page results
- Call appropriate API method
- Parse response Entities and attach to feature
- Attach error messages and request ID
- Merge response object with request object
        - Put feature on output queue
- Notify listeners
'''
aimslog.info('DUr.{} {} - AGU{}'.format(self.ref,self.actiontype.reverse[self.at],self.agu))
payload = self.factory.convert(self.agu,self.at)
err,resp = self.action(self.at,payload,self.identifier)
featurelist = []
feature = self.factory.get(model=resp['properties'])
        if 'entities' in resp:
for e in resp['entities']:
featurelist.append(self._populateEntity(e))
feature._setEntities(featurelist)
#feature = self.processPage(feature,self.etft)
#print 'feature',feature
if err: feature.setErrors(err)
if self.requestId: feature.setRequestId(self.requestId)
if MERGE_RESPONSE:
aimslog.info('Merge req/res for {}'.format(self.agu))
self.agu.setVersion(None)
self.agu.merge(feature,MERGE_EXCLUDE)
self.queue.put(self.agu)
else: self.queue.put(feature)
self.notify(self.ref)
class DataUpdaterAction(DataUpdaterDRC):
'''DataUpdater class for Address Action requests on the changefeed'''
#et = FeatureType.ADDRESS
#ft = FeedType.CHANGEFEED
oft = FeedType.FEATURES
def setup(self,etft,aat,address,_):
'''Set Address Action specific parameters
@param etft: Validation Entity feedref
@type etft: FeedRef
@param aat: Action type for this address
@type aat: ActionType
@param address: Address object detailing action changes
@type address: Address
'''
self.etft = etft
self.at = aat
self.agu = address
self.identifier = self.agu.getAddressId()
self.requestId = self.agu.getRequestId()
if aat != ActionType.ADD: self.agu.setVersion(self.version())
#run actions
self.actiontype = ActionType
self.action = self.api.addressAction
class DataUpdaterApproval(DataUpdaterDRC):
'''DataUpdater class for Address Approval requests on the resolutionfeed'''
#et = FeatureType.ADDRESS
#ft = FeedType.RESOLUTIONFEED
oft = FeedType.RESOLUTIONFEED
def setup(self,etft,aat,address,_):
'''Set Address Approval specific parameters
@param etft: Validation Entity feedref
@type etft: FeedRef
@param aat: Approval type for this address
@type aat: ApprovalType
@param address: Address object detailing approval action
@type address: Address
'''
self.etft = etft
self.at = aat
self.agu = address
self.identifier = self.agu.getChangeId()
self.requestId = self.agu.getRequestId()
self.agu.setVersion(self.version())
#run actions
self.actiontype = ApprovalType
self.action = self.api.addressApprove
class DataUpdaterGroupAction(DataUpdaterDRC):
'''DataUpdater class for Group Action requests on the changefeed'''
#et = FeatureType.ADDRESS
#ft = FeedType.CHANGEFEED
oft = FeedType.FEATURES
def setup(self,etft,gat,group,_):
'''Set Group Action specific parameters
@param etft: Validation Entity feedref
@type etft: FeedRef
@param gat: Group action type for this address
@type gat: GroupActionType
        @param group: Group object detailing group action changes
        @type group: Group
'''
self.etft = etft
self.at = gat
self.agu = group
self.identifier = self.agu.getChangeGroupId()
self.requestId = self.agu.getRequestId()
self.agu.setVersion(self.version())
#run actions
self.actiontype = GroupActionType
self.action = self.api.groupAction
class DataUpdaterGroupApproval(DataUpdaterDRC):
'''DataUpdater class for Group Approval requests on the resolutionfeed'''
#et = FeatureType.ADDRESS
#ft = FeedType.CHANGEFEED
oft = FeedType.RESOLUTIONFEED
def setup(self,etft,gat,group,_):
'''Set Group Approval specific parameters
@param etft: Validation Entity feedref
@type etft: FeedRef
@param gat: Group approval type for this address
@type gat: GroupApprovalType
        @param group: Group object detailing group approval action
        @type group: Group
'''
self.etft = etft
self.at = gat
self.agu = group
self.identifier = self.agu.getChangeGroupId()
self.requestId = self.agu.getRequestId()
self.agu.setVersion(self.version())
#run actions
self.actiontype = GroupApprovalType
self.action = self.api.groupApprove
class DataUpdaterUserAction(DataUpdaterDRC):
'''DataUpdater class for User Action requests on the adminfeed'''
#et = FeatureType.ADDRESS
#ft = FeedType.CHANGEFEED
oft = FeedType.ADMIN
def setup(self,etft,uat,user,_):
'''Set User specific parameters
@param etft: Validation Entity feedref
@type etft: FeedRef
@param uat: User action type for this user
@type uat: UserActionType
@param user: User object detailing admin action
@type user: User
'''
self.etft = etft
self.at = uat
self.agu = user
self.identifier = self.agu.getUserId()
self.requestId = self.agu.getRequestId()
#self.agu.setVersion(self.version())
#run actions
self.actiontype = UserActionType
self.action = self.api.userAction
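#Illustrative sketch only, not part of the original module: shows how the
#DataUpdater.getInstance() selector above is combined with setup()/run(). The ref
#label, configuration, response queue and bounding box values are assumed placeholders.
def _example_select_updater(ref, config, response_queue):
    etft = FeedRef((FeatureType.ADDRESS, FeedType.FEATURES))
    updater_cls = DataUpdater.getInstance(etft)  #-> DataUpdaterAddress
    updater = updater_cls((ref, config, FeatureFactory.getInstance(etft)), response_queue)
    updater.setup(etft, (174.75, -41.29), (174.79, -41.27), 1)
    return updater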
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ctc_ops.ctc_decoder_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import zip_longest
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import ctc_ops
from tensorflow.python.platform import test
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks."""
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def flatten(list_of_lists):
"""Flatten one level of nesting."""
return itertools.chain.from_iterable(list_of_lists)
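# Illustrative sketch, not part of the original test: grouper() and flatten() are
# used to pack SparseTensor components into a single fetch list and to regroup the
# fetched values afterwards. The string triples below are placeholders.
def _example_flatten_and_regroup():
  triples = [('ix0', 'val0', 'shape0'), ('ix1', 'val1', 'shape1')]
  packed = list(flatten(triples))  # ['ix0', 'val0', 'shape0', 'ix1', ...]
  return list(grouper(packed, 3))  # [('ix0', 'val0', 'shape0'), ('ix1', ...)]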
class CTCGreedyDecoderTest(test.TestCase):
def _testCTCDecoder(self,
decoder,
inputs,
seq_lens,
log_prob_truth,
decode_truth,
expected_err_re=None,
**decoder_args):
inputs_t = [ops.convert_to_tensor(x) for x in inputs]
# convert inputs_t into a [max_time x batch_size x depth] tensor
# from a len time python list of [batch_size x depth] tensors
inputs_t = array_ops.stack(inputs_t)
with self.cached_session(use_gpu=False) as sess:
decoded_list, log_probability = decoder(
inputs_t, sequence_length=seq_lens, **decoder_args)
decoded_unwrapped = list(
flatten([(st.indices, st.values, st.dense_shape) for st in
decoded_list]))
if expected_err_re is None:
outputs = sess.run(decoded_unwrapped + [log_probability])
# Group outputs into (ix, vals, shape) tuples
output_sparse_tensors = list(grouper(outputs[:-1], 3))
output_log_probability = outputs[-1]
# Check the number of decoded outputs (top_paths) match
self.assertEqual(len(output_sparse_tensors), len(decode_truth))
# For each SparseTensor tuple, compare (ix, vals, shape)
for out_st, truth_st, tf_st in zip(output_sparse_tensors, decode_truth,
decoded_list):
self.assertAllEqual(out_st[0], truth_st[0]) # ix
self.assertAllEqual(out_st[1], truth_st[1]) # vals
self.assertAllEqual(out_st[2], truth_st[2]) # shape
# Compare the shapes of the components with the truth. The
# `None` elements are not known statically.
self.assertEqual([None, truth_st[0].shape[1]],
tf_st.indices.get_shape().as_list())
self.assertEqual([None], tf_st.values.get_shape().as_list())
self.assertShapeEqual(truth_st[2], tf_st.dense_shape)
# Make sure decoded probabilities match
self.assertAllClose(output_log_probability, log_prob_truth, atol=1e-6)
else:
with self.assertRaisesOpError(expected_err_re):
sess.run(decoded_unwrapped + [log_probability])
@test_util.run_deprecated_v1
def testCTCGreedyDecoder(self):
"""Test two batch entries - best path decoder."""
max_time_steps = 6
# depth == 4
seq_len_0 = 4
input_prob_matrix_0 = np.asarray(
[
[1.0, 0.0, 0.0, 0.0], # t=0
[0.0, 0.0, 0.4, 0.6], # t=1
[0.0, 0.0, 0.4, 0.6], # t=2
[0.0, 0.9, 0.1, 0.0], # t=3
[0.0, 0.0, 0.0, 0.0], # t=4 (ignored)
[0.0, 0.0, 0.0, 0.0]
], # t=5 (ignored)
dtype=np.float32)
input_log_prob_matrix_0 = np.log(input_prob_matrix_0)
seq_len_1 = 5
# dimensions are time x depth
input_prob_matrix_1 = np.asarray(
[
[0.1, 0.9, 0.0, 0.0], # t=0
[0.0, 0.9, 0.1, 0.0], # t=1
[0.0, 0.0, 0.1, 0.9], # t=2
[0.0, 0.9, 0.1, 0.1], # t=3
[0.9, 0.1, 0.0, 0.0], # t=4
[0.0, 0.0, 0.0, 0.0] # t=5 (ignored)
],
dtype=np.float32)
input_log_prob_matrix_1 = np.log(input_prob_matrix_1)
# len max_time_steps array of batch_size x depth matrices
inputs = np.array([
np.vstack(
[input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
for t in range(max_time_steps)
])
# batch_size length vector of sequence_lengths
seq_lens = np.array([seq_len_0, seq_len_1], dtype=np.int32)
# batch_size length vector of negative log probabilities
log_prob_truth = np.array([
np.sum(-np.log([1.0, 0.6, 0.6, 0.9])),
np.sum(-np.log([0.9, 0.9, 0.9, 0.9, 0.9]))
], np.float32)[:, np.newaxis]
# decode_truth: one SparseTensor (ix, vals, shape)
decode_truth = [
(
np.array(
[
[0, 0], # batch 0, 2 outputs
[0, 1],
[1, 0], # batch 1, 3 outputs
[1, 1],
[1, 2]
],
dtype=np.int64),
np.array(
[
0, # batch 0, 2 values
1,
1, # batch 1, 3 values
1,
0
],
dtype=np.int64),
# shape is batch x max_decoded_length
np.array([2, 3], dtype=np.int64)),
]
# Test without defining blank_index
self._testCTCDecoder(ctc_ops.ctc_greedy_decoder, inputs, seq_lens,
log_prob_truth, decode_truth)
# Shift blank_index to be somewhere in the middle of inputs
blank_index = 2
inputs = np.concatenate(
(inputs[:, :, :blank_index], inputs[:, :, -1:], inputs[:, :,
blank_index:-1]),
axis=2)
# Test positive value in blank_index
self._testCTCDecoder(
ctc_ops.ctc_greedy_decoder,
inputs,
seq_lens,
log_prob_truth,
decode_truth,
blank_index=2)
# Test negative value in blank_index
self._testCTCDecoder(
ctc_ops.ctc_greedy_decoder,
inputs,
seq_lens,
log_prob_truth,
decode_truth,
blank_index=-2)
@test_util.run_deprecated_v1
def testCTCDecoderBeamSearch(self):
"""Test one batch, two beams - hibernating beam search."""
# max_time_steps == 8
depth = 6
seq_len_0 = 5
input_prob_matrix_0 = np.asarray(
[
[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
[0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
[0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
[0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
[0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
# Random entry added in at time=5
[0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]
],
dtype=np.float32)
# Add arbitrary offset - this is fine
input_prob_matrix_0 = input_prob_matrix_0 + 2.0
# len max_time_steps array of batch_size x depth matrices
inputs = ([
input_prob_matrix_0[t, :][np.newaxis, :] for t in range(seq_len_0)
] # Pad to max_time_steps = 8
+ 2 * [np.zeros(
(1, depth), dtype=np.float32)])
# batch_size length vector of sequence_lengths
seq_lens = np.array([seq_len_0], dtype=np.int32)
# batch_size length vector of log probabilities
log_prob_truth = np.array(
[
-5.811451, # output beam 0
-6.63339 # output beam 1
],
np.float32)[np.newaxis, :]
# decode_truth: two SparseTensors, (ix, values, shape)
decode_truth = [
# beam 0, batch 0, two outputs decoded
(np.array(
[[0, 0], [0, 1]], dtype=np.int64), np.array(
[1, 0], dtype=np.int64), np.array(
[1, 2], dtype=np.int64)),
# beam 1, batch 0, one output decoded
(np.array(
[[0, 0]], dtype=np.int64), np.array(
[1], dtype=np.int64), np.array(
[1, 1], dtype=np.int64)),
]
# Test correct decoding.
self._testCTCDecoder(
ctc_ops.ctc_beam_search_decoder,
inputs,
seq_lens,
log_prob_truth,
decode_truth,
beam_width=2,
top_paths=2)
# Requesting more paths than the beam width allows.
with self.assertRaisesRegex(errors.InvalidArgumentError,
(".*requested more paths than the beam "
"width.*")):
self._testCTCDecoder(
ctc_ops.ctc_beam_search_decoder,
inputs,
seq_lens,
log_prob_truth,
decode_truth,
beam_width=2,
top_paths=3)
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import time
from datetime import datetime
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from desktop.conf import TIME_ZONE
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.json_utils import JSONEncoderForHTML
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str, smart_unicode
from desktop.lib.rest.http_client import RestException
from desktop.lib.view_util import format_duration_in_millis
from desktop.log.access import access_warn
from desktop.models import Document, Document2
from hadoop.fs.hadoopfs import Hdfs
from liboozie.oozie_api import get_oozie
from liboozie.credentials import Credentials
from liboozie.submission2 import Submission
from oozie.conf import OOZIE_JOBS_COUNT, ENABLE_CRON_SCHEDULING, ENABLE_V2
from oozie.forms import RerunForm, ParameterForm, RerunCoordForm, RerunBundleForm, UpdateCoordinatorForm
from oozie.models import Workflow as OldWorkflow, Job, utc_datetime_format, Bundle, Coordinator, get_link, History as OldHistory
from oozie.models2 import History, Workflow, WORKFLOW_NODE_PROPERTIES
from oozie.settings import DJANGO_APPS
from oozie.utils import convert_to_server_timezone
def get_history():
if ENABLE_V2.get():
return History
else:
return OldHistory
def get_workflow():
if ENABLE_V2.get():
return Workflow
else:
return OldWorkflow
LOG = logging.getLogger(__name__)
"""
Permissions:
A Workflow/Coordinator/Bundle can:
* be accessed only by its owner or a superuser or by a user with 'dashboard_jobs_access' permissions
* be submitted/modified only by its owner or a superuser
Permissions checking happens by calling:
* check_job_access_permission()
* check_job_edition_permission()
"""
def _get_workflows(user):
return [{
'name': workflow.name,
'owner': workflow.owner.username,
'value': workflow.uuid,
'id': workflow.id
} for workflow in [d.content_object for d in Document.objects.get_docs(user, Document2, extra='workflow2')]
]
def manage_oozie_jobs(request, job_id, action):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage an Oozie job.'))
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
response = {'status': -1, 'data': ''}
try:
oozie_api = get_oozie(request.user)
params = None
if action == 'change':
pause_time_val = request.POST.get('pause_time')
if request.POST.get('clear_pause_time') == 'true':
pause_time_val = ''
end_time_val = request.POST.get('end_time')
if end_time_val:
end_time_val = convert_to_server_timezone(end_time_val, TIME_ZONE.get())
if pause_time_val:
pause_time_val = convert_to_server_timezone(pause_time_val, TIME_ZONE.get())
params = {'value': 'endtime=%s' % (end_time_val) + ';'
'pausetime=%s' % (pause_time_val) + ';'
'concurrency=%s' % (request.POST.get('concurrency'))}
elif action == 'ignore':
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'type': 'action',
'scope': ','.join(job.aggreate(request.POST.get('actions').split())),
}
response['data'] = oozie_api.job_control(job_id, action, parameters=params)
response['status'] = 0
if 'notification' in request.POST:
request.info(_(request.POST.get('notification')))
except RestException, ex:
ex_message = ex.message
if ex._headers.get('oozie-error-message'):
ex_message = ex._headers.get('oozie-error-message')
msg = "Error performing %s on Oozie job %s: %s." % (action, job_id, ex_message)
LOG.exception(msg)
response['data'] = _(msg)
return JsonResponse(response)
def bulk_manage_oozie_jobs(request):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage the Oozie jobs.'))
response = {'status': -1, 'data': ''}
if 'job_ids' in request.POST and 'action' in request.POST:
jobs = request.POST.get('job_ids').split()
response = {'totalRequests': len(jobs), 'totalErrors': 0, 'messages': ''}
oozie_api = get_oozie(request.user)
for job_id in jobs:
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
try:
oozie_api.job_control(job_id, request.POST.get('action'))
except RestException, ex:
LOG.exception("Error performing bulk operation for job_id=%s", job_id)
response['totalErrors'] = response['totalErrors'] + 1
response['messages'] += str(ex)
return JsonResponse(response)
def show_oozie_error(view_func):
def decorate(request, *args, **kwargs):
try:
return view_func(request, *args, **kwargs)
except RestException, ex:
LOG.exception("Error communicating with Oozie in %s", view_func.__name__)
detail = ex._headers.get('oozie-error-message', ex)
if 'Max retries exceeded with url' in str(detail) or 'Connection refused' in str(detail):
detail = _('The Oozie server is not running')
raise PopupException(_('An error occurred with Oozie.'), detail=detail)
return wraps(view_func)(decorate)
@show_oozie_error
def list_oozie_workflows(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
if request.GET.get('format') == 'json':
just_sla = request.GET.get('justsla') == 'true'
if request.GET.get('startcreatedtime'):
kwargs['filters'].extend([('startcreatedtime', request.GET.get('startcreatedtime'))])
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
wf_list = oozie_api.get_workflows(**kwargs)
json_jobs = wf_list.jobs
total_jobs = wf_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_job(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user, just_sla)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_workflows.mako', request, {
'user': request.user,
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_coordinators(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
if request.GET.get('format') == 'json':
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
co_list = oozie_api.get_coordinators(**kwargs)
json_jobs = co_list.jobs
total_jobs = co_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_coordinator(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_coordinators.mako', request, {
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
'enable_cron_scheduling': enable_cron_scheduling,
})
@show_oozie_error
def list_oozie_bundles(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
if request.GET.get('format') == 'json':
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
bundle_list = oozie_api.get_bundles(**kwargs)
json_jobs = bundle_list.jobs
total_jobs = bundle_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_coordinator(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_bundles.mako', request, {
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_workflow(request, job_id):
oozie_workflow = check_job_access_permission(request, job_id)
oozie_coordinator = None
if request.GET.get('coordinator_job_id'):
oozie_coordinator = check_job_access_permission(request, request.GET.get('coordinator_job_id'))
oozie_bundle = None
if request.GET.get('bundle_job_id'):
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
if oozie_coordinator is not None:
setattr(oozie_workflow, 'oozie_coordinator', oozie_coordinator)
if oozie_bundle is not None:
setattr(oozie_workflow, 'oozie_bundle', oozie_bundle)
oozie_parent = oozie_workflow.get_parent_job_id()
if oozie_parent:
oozie_parent = check_job_access_permission(request, oozie_parent)
workflow_data = None
credentials = None
doc = None
hue_workflow = None
workflow_graph = 'MISSING' # default to prevent loading the graph tab for deleted workflows
full_node_list = None
if ENABLE_V2.get():
try:
# To update with the new History document model
hue_coord = get_history().get_coordinator_from_config(oozie_workflow.conf_dict)
hue_workflow = (hue_coord and hue_coord.workflow) or get_history().get_workflow_from_config(oozie_workflow.conf_dict)
if hue_coord and hue_coord.workflow: hue_coord.workflow.document.doc.get().can_read_or_exception(request.user)
if hue_workflow: hue_workflow.document.doc.get().can_read_or_exception(request.user)
if hue_workflow:
workflow_graph = ''
full_node_list = hue_workflow.nodes
workflow_id = hue_workflow.id
wid = {
'id': workflow_id
}
doc = Document2.objects.get(type='oozie-workflow2', **wid)
new_workflow = get_workflow()(document=doc)
workflow_data = new_workflow.get_data()
credentials = Credentials()
else:
# For workflows submitted from CLI or deleted in the editor
# Until better parsing in https://issues.cloudera.org/browse/HUE-2659
workflow_graph, full_node_list = OldWorkflow.gen_status_graph_from_xml(request.user, oozie_workflow)
except:
LOG.exception("Error generating full page for running workflow %s" % job_id)
else:
history = get_history().cross_reference_submission_history(request.user, job_id)
hue_coord = history and history.get_coordinator() or get_history().get_coordinator_from_config(oozie_workflow.conf_dict)
hue_workflow = (hue_coord and hue_coord.workflow) or (history and history.get_workflow()) or get_history().get_workflow_from_config(oozie_workflow.conf_dict)
if hue_coord and hue_coord.workflow: Job.objects.can_read_or_exception(request, hue_coord.workflow.id)
if hue_workflow: Job.objects.can_read_or_exception(request, hue_workflow.id)
if hue_workflow:
workflow_graph = hue_workflow.gen_status_graph(oozie_workflow)
full_node_list = hue_workflow.node_list
else:
workflow_graph, full_node_list = get_workflow().gen_status_graph_from_xml(request.user, oozie_workflow)
parameters = oozie_workflow.conf_dict.copy()
for action in oozie_workflow.actions:
action.oozie_coordinator = oozie_coordinator
action.oozie_bundle = oozie_bundle
if request.GET.get('format') == 'json':
return_obj = {
'id': oozie_workflow.id,
'status': oozie_workflow.status,
'progress': oozie_workflow.get_progress(full_node_list),
'graph': workflow_graph,
'actions': massaged_workflow_actions_for_json(oozie_workflow.get_working_actions(), oozie_coordinator, oozie_bundle)
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
oozie_slas = []
if oozie_workflow.has_sla:
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'id': oozie_workflow.id,
'parent_id': oozie_workflow.id
}
oozie_slas = oozie_api.get_oozie_slas(**params)
return render('dashboard/list_oozie_workflow.mako', request, {
'oozie_workflow': oozie_workflow,
'oozie_coordinator': oozie_coordinator,
'oozie_bundle': oozie_bundle,
'oozie_parent': oozie_parent,
'oozie_slas': oozie_slas,
'hue_workflow': hue_workflow,
'hue_coord': hue_coord,
'parameters': parameters,
'has_job_edition_permission': has_job_edition_permission,
'workflow_graph': workflow_graph,
'layout_json': json.dumps(workflow_data['layout'], cls=JSONEncoderForHTML) if workflow_data else '',
'workflow_json': json.dumps(workflow_data['workflow'], cls=JSONEncoderForHTML) if workflow_data else '',
'credentials_json': json.dumps(credentials.credentials.keys(), cls=JSONEncoderForHTML) if credentials else '',
'workflow_properties_json': json.dumps(WORKFLOW_NODE_PROPERTIES, cls=JSONEncoderForHTML),
'doc1_id': doc.doc.get().id if doc else -1,
'subworkflows_json': json.dumps(_get_workflows(request.user), cls=JSONEncoderForHTML),
'can_edit_json': json.dumps(doc is None or doc.doc.get().is_editable(request.user))
})
@show_oozie_error
def list_oozie_coordinator(request, job_id):
kwargs = {'cnt': 50, 'filters': []}
kwargs['offset'] = request.GET.get('offset', 1)
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
oozie_coordinator = check_job_access_permission(request, job_id, **kwargs)
# Cross reference the submission history (if any)
coordinator = get_history().get_coordinator_from_config(oozie_coordinator.conf_dict)
try:
if not ENABLE_V2.get():
coordinator = get_history().objects.get(oozie_job_id=job_id).job.get_full_node()
except:
LOG.exception("Ignoring error getting oozie job coordinator for job_id=%s", job_id)
oozie_bundle = None
if request.GET.get('bundle_job_id'):
try:
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
except:
LOG.exception("Ignoring error getting oozie bundle for job_id=%s", job_id)
if request.GET.get('format') == 'json':
actions = massaged_coordinator_actions_for_json(oozie_coordinator, oozie_bundle)
return_obj = {
'id': oozie_coordinator.id,
'status': oozie_coordinator.status,
'progress': oozie_coordinator.get_progress(),
'nextTime': format_time(oozie_coordinator.nextMaterializedTime),
'endTime': format_time(oozie_coordinator.endTime),
'actions': actions,
'total_actions': oozie_coordinator.total
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
oozie_slas = []
if oozie_coordinator.has_sla:
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'id': oozie_coordinator.id,
'parent_id': oozie_coordinator.id
}
oozie_slas = oozie_api.get_oozie_slas(**params)
enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
update_coord_form = UpdateCoordinatorForm(oozie_coordinator=oozie_coordinator)
return render('dashboard/list_oozie_coordinator.mako', request, {
'oozie_coordinator': oozie_coordinator,
'oozie_slas': oozie_slas,
'coordinator': coordinator,
'oozie_bundle': oozie_bundle,
'has_job_edition_permission': has_job_edition_permission,
'enable_cron_scheduling': enable_cron_scheduling,
'update_coord_form': update_coord_form,
})
@show_oozie_error
def list_oozie_bundle(request, job_id):
oozie_bundle = check_job_access_permission(request, job_id)
# Cross reference the submission history (if any)
bundle = None
try:
if ENABLE_V2.get():
bundle = get_history().get_bundle_from_config(oozie_bundle.conf_dict)
else:
bundle = get_history().objects.get(oozie_job_id=job_id).job.get_full_node()
except:
LOG.exception("Ignoring error getting oozie job bundle for job_id=%s", job_id)
if request.GET.get('format') == 'json':
return_obj = {
'id': oozie_bundle.id,
'status': oozie_bundle.status,
'progress': oozie_bundle.get_progress(),
'endTime': format_time(oozie_bundle.endTime),
'actions': massaged_bundle_actions_for_json(oozie_bundle)
}
return HttpResponse(json.dumps(return_obj).replace('\\\\', '\\'), content_type="application/json")
return render('dashboard/list_oozie_bundle.mako', request, {
'oozie_bundle': oozie_bundle,
'bundle': bundle,
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_workflow_action(request, action):
try:
action = get_oozie(request.user).get_action(action)
workflow = check_job_access_permission(request, action.id.split('@')[0])
except RestException, ex:
msg = _("Error accessing Oozie action %s.") % (action,)
LOG.exception(msg)
raise PopupException(msg, detail=ex.message)
oozie_coordinator = None
if request.GET.get('coordinator_job_id'):
oozie_coordinator = check_job_access_permission(request, request.GET.get('coordinator_job_id'))
oozie_bundle = None
if request.GET.get('bundle_job_id'):
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
workflow.oozie_coordinator = oozie_coordinator
workflow.oozie_bundle = oozie_bundle
oozie_parent = workflow.get_parent_job_id()
if oozie_parent:
oozie_parent = check_job_access_permission(request, oozie_parent)
return render('dashboard/list_oozie_workflow_action.mako', request, {
'action': action,
'workflow': workflow,
'oozie_coordinator': oozie_coordinator,
'oozie_bundle': oozie_bundle,
'oozie_parent': oozie_parent,
})
@show_oozie_error
def get_oozie_job_log(request, job_id):
oozie_api = get_oozie(request.user, api_version="v2")
check_job_access_permission(request, job_id)
kwargs = {'logfilter' : []}
if request.GET.get('format') == 'json':
if request.GET.get('recent'):
kwargs['logfilter'].extend([('recent', val) for val in request.GET.get('recent').split(':')])
if request.GET.get('limit'):
kwargs['logfilter'].extend([('limit', request.GET.get('limit'))])
if request.GET.get('loglevel'):
kwargs['logfilter'].extend([('loglevel', request.GET.get('loglevel'))])
if request.GET.get('text'):
kwargs['logfilter'].extend([('text', request.GET.get('text'))])
status_resp = oozie_api.get_job_status(job_id)
log = oozie_api.get_job_log(job_id, **kwargs)
return_obj = {
'id': job_id,
'status': status_resp['status'],
'log': log,
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
@show_oozie_error
def list_oozie_info(request):
api = get_oozie(request.user)
configuration = api.get_configuration()
oozie_status = api.get_oozie_status()
instrumentation = {}
metrics = {}
if 'org.apache.oozie.service.MetricsInstrumentationService' in [c.strip() for c in configuration.get('oozie.services.ext', '').split(',')]:
api2 = get_oozie(request.user, api_version="v2")
metrics = api2.get_metrics()
else:
instrumentation = api.get_instrumentation()
return render('dashboard/list_oozie_info.mako', request, {
'instrumentation': instrumentation,
'metrics': metrics,
'configuration': configuration,
'oozie_status': oozie_status,
})
@show_oozie_error
def list_oozie_sla(request):
oozie_api = get_oozie(request.user, api_version="v2")
if request.method == 'POST':
params = {}
job_name = request.POST.get('job_name')
if re.match('.*-oozie-oozi-[WCB]', job_name):
params['id'] = job_name
params['parent_id'] = job_name
else:
params['app_name'] = job_name
if 'useDates' in request.POST:
if request.POST.get('start'):
params['nominal_start'] = request.POST.get('start')
if request.POST.get('end'):
params['nominal_end'] = request.POST.get('end')
oozie_slas = oozie_api.get_oozie_slas(**params)
else:
oozie_slas = [] # or get latest?
if request.REQUEST.get('format') == 'json':
massaged_slas = []
for sla in oozie_slas:
massaged_slas.append(massaged_sla_for_json(sla, request))
return HttpResponse(json.dumps({'oozie_slas': massaged_slas}), content_type="text/json")
configuration = oozie_api.get_configuration()
show_slas_hint = 'org.apache.oozie.sla.service.SLAService' not in configuration.get('oozie.services.ext', '')
return render('dashboard/list_oozie_sla.mako', request, {
'oozie_slas': oozie_slas,
'show_slas_hint': show_slas_hint
})
def massaged_sla_for_json(sla, request):
massaged_sla = {
'slaStatus': sla['slaStatus'],
'id': sla['id'],
'appType': sla['appType'],
'appName': sla['appName'],
'appUrl': get_link(sla['id']),
'user': sla['user'],
'nominalTime': sla['nominalTime'],
'expectedStart': sla['expectedStart'],
'actualStart': sla['actualStart'],
'expectedEnd': sla['expectedEnd'],
'actualEnd': sla['actualEnd'],
'jobStatus': sla['jobStatus'],
'expectedDuration': sla['expectedDuration'],
'actualDuration': sla['actualDuration'],
'lastModified': sla['lastModified']
}
return massaged_sla
@show_oozie_error
def sync_coord_workflow(request, job_id):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
hue_coord = get_history().get_coordinator_from_config(job.conf_dict)
hue_wf = (hue_coord and hue_coord.workflow) or get_history().get_workflow_from_config(job.conf_dict)
wf_application_path = job.conf_dict.get('wf_application_path') and Hdfs.urlsplit(job.conf_dict['wf_application_path'])[2] or ''
coord_application_path = job.conf_dict.get('oozie.coord.application.path') and Hdfs.urlsplit(job.conf_dict['oozie.coord.application.path'])[2] or ''
properties = hue_coord and hue_coord.properties and dict([(param['name'], param['value']) for param in hue_coord.properties]) or None
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
if params_form.is_valid():
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
# Update workflow params in coordinator
hue_coord.clear_workflow_params()
properties = dict([(param['name'], param['value']) for param in hue_coord.properties])
# Deploy WF XML
submission = Submission(user=request.user, job=hue_wf, fs=request.fs, jt=request.jt, properties=properties)
submission._create_file(wf_application_path, hue_wf.XML_FILE_NAME, hue_wf.to_xml(mapping=properties), do_as=True)
# Deploy Coordinator XML
job.conf_dict.update(mapping)
submission = Submission(user=request.user, job=hue_coord, fs=request.fs, jt=request.jt, properties=job.conf_dict, oozie_id=job.id)
submission._create_file(coord_application_path, hue_coord.XML_FILE_NAME, hue_coord.to_xml(mapping=job.conf_dict), do_as=True)
# Server picks up deployed Coordinator XML changes after running 'update' action
submission.update_coord()
request.info(_('Successfully updated Workflow definition'))
return redirect(reverse('oozie:list_oozie_coordinator', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s') % params_form.errors)
else:
new_params = hue_wf and hue_wf.find_all_parameters() or []
new_params = dict([(param['name'], param['value']) for param in new_params])
# Set previous values
if properties:
new_params = dict([(key, properties[key]) if key in properties.keys() else (key, new_params[key]) for key, value in new_params.iteritems()])
initial_params = ParameterForm.get_initial_params(new_params)
params_form = ParametersFormSet(initial=initial_params)
popup = render('editor2/submit_job_popup.mako', request, {
'params_form': params_form,
'name': _('Job'),
'header': _('Sync Workflow definition?'),
'action': reverse('oozie:sync_coord_workflow', kwargs={'job_id': job_id})
}, force_template=True).content
return JsonResponse(popup, safe=False)
@show_oozie_error
def rerun_oozie_job(request, job_id, app_path):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
oozie_workflow = check_job_access_permission(request, job_id)
check_job_edition_permission(oozie_workflow, request.user)
if request.method == 'POST':
rerun_form = RerunForm(request.POST, oozie_workflow=oozie_workflow)
params_form = ParametersFormSet(request.POST)
if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
args = {}
if request.POST['rerun_form_choice'] == 'fail_nodes':
args['fail_nodes'] = 'true'
else:
args['skip_nodes'] = ','.join(rerun_form.cleaned_data['skip_nodes'])
args['deployment_dir'] = app_path
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
_rerun_workflow(request, job_id, args, mapping)
request.info(_('Workflow re-running.'))
return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s %s') % (rerun_form.errors, params_form.errors))
else:
rerun_form = RerunForm(oozie_workflow=oozie_workflow)
initial_params = ParameterForm.get_initial_params(oozie_workflow.conf_dict)
params_form = ParametersFormSet(initial=initial_params)
popup = render('dashboard/rerun_job_popup.mako', request, {
'rerun_form': rerun_form,
'params_form': params_form,
'action': reverse('oozie:rerun_oozie_job', kwargs={'job_id': job_id, 'app_path': app_path}),
}, force_template=True).content
return JsonResponse(popup, safe=False)
def _rerun_workflow(request, oozie_id, run_args, mapping):
try:
submission = Submission(user=request.user, fs=request.fs, jt=request.jt, properties=mapping, oozie_id=oozie_id)
job_id = submission.rerun(**run_args)
return job_id
except RestException, ex:
msg = _("Error re-running workflow %s.") % (oozie_id,)
LOG.exception(msg)
raise PopupException(msg, detail=ex._headers.get('oozie-error-message', ex))
@show_oozie_error
def rerun_oozie_coordinator(request, job_id, app_path):
oozie_coordinator = check_job_access_permission(request, job_id)
check_job_edition_permission(oozie_coordinator, request.user)
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
rerun_form = RerunCoordForm(request.POST, oozie_coordinator=oozie_coordinator)
if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
args = {}
args['deployment_dir'] = app_path
params = {
'type': 'action',
'scope': ','.join(oozie_coordinator.aggreate(rerun_form.cleaned_data['actions'])),
'refresh': rerun_form.cleaned_data['refresh'],
'nocleanup': rerun_form.cleaned_data['nocleanup'],
}
properties = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
_rerun_coordinator(request, job_id, args, params, properties)
request.info(_('Coordinator re-running.'))
return redirect(reverse('oozie:list_oozie_coordinator', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s') % smart_unicode(rerun_form.errors))
return list_oozie_coordinator(request, job_id)
else:
rerun_form = RerunCoordForm(oozie_coordinator=oozie_coordinator)
initial_params = ParameterForm.get_initial_params(oozie_coordinator.conf_dict)
params_form = ParametersFormSet(initial=initial_params)
popup = render('dashboard/rerun_coord_popup.mako', request, {
'rerun_form': rerun_form,
'params_form': params_form,
'action': reverse('oozie:rerun_oozie_coord', kwargs={'job_id': job_id, 'app_path': app_path}),
}, force_template=True).content
return JsonResponse(popup, safe=False)
def _rerun_coordinator(request, oozie_id, args, params, properties):
try:
submission = Submission(user=request.user, fs=request.fs, jt=request.jt, oozie_id=oozie_id, properties=properties)
job_id = submission.rerun_coord(params=params, **args)
return job_id
except RestException, ex:
msg = _("Error re-running coordinator %s.") % (oozie_id,)
LOG.exception(msg)
raise PopupException(msg, detail=ex._headers.get('oozie-error-message', ex))
@show_oozie_error
def rerun_oozie_bundle(request, job_id, app_path):
oozie_bundle = check_job_access_permission(request, job_id)
check_job_edition_permission(oozie_bundle, request.user)
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
rerun_form = RerunBundleForm(request.POST, oozie_bundle=oozie_bundle)
if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
args = {}
args['deployment_dir'] = app_path
params = {
'coord-scope': ','.join(rerun_form.cleaned_data['coordinators']),
'refresh': rerun_form.cleaned_data['refresh'],
'nocleanup': rerun_form.cleaned_data['nocleanup'],
}
if rerun_form.cleaned_data['start'] and rerun_form.cleaned_data['end']:
date = {
'date-scope':
'%(start)s::%(end)s' % {
'start': utc_datetime_format(rerun_form.cleaned_data['start']),
'end': utc_datetime_format(rerun_form.cleaned_data['end'])
}
}
params.update(date)
properties = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
_rerun_bundle(request, job_id, args, params, properties)
request.info(_('Bundle re-running.'))
return redirect(reverse('oozie:list_oozie_bundle', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s') % (rerun_form.errors,))
return list_oozie_bundle(request, job_id)
else:
rerun_form = RerunBundleForm(oozie_bundle=oozie_bundle)
initial_params = ParameterForm.get_initial_params(oozie_bundle.conf_dict)
params_form = ParametersFormSet(initial=initial_params)
popup = render('dashboard/rerun_bundle_popup.mako', request, {
'rerun_form': rerun_form,
'params_form': params_form,
'action': reverse('oozie:rerun_oozie_bundle', kwargs={'job_id': job_id, 'app_path': app_path}),
}, force_template=True).content
return JsonResponse(popup, safe=False)
def _rerun_bundle(request, oozie_id, args, params, properties):
try:
submission = Submission(user=request.user, fs=request.fs, jt=request.jt, oozie_id=oozie_id, properties=properties)
job_id = submission.rerun_bundle(params=params, **args)
return job_id
except RestException, ex:
msg = _("Error re-running bundle %s.") % (oozie_id,)
LOG.exception(msg)
raise PopupException(msg, detail=ex._headers.get('oozie-error-message', ex))
def submit_external_job(request, application_path):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
if params_form.is_valid():
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
mapping['dryrun'] = request.POST.get('dryrun_checkbox') == 'on'
application_name = os.path.basename(application_path)
application_class = Bundle if application_name == 'bundle.xml' else Coordinator if application_name == 'coordinator.xml' else get_workflow()
mapping[application_class.get_application_path_key()] = application_path
try:
submission = Submission(request.user, fs=request.fs, jt=request.jt, properties=mapping)
job_id = submission.run(application_path)
except RestException, ex:
detail = ex._headers.get('oozie-error-message', ex)
if 'Max retries exceeded with url' in str(detail):
detail = '%s: %s' % (_('The Oozie server is not running'), detail)
LOG.exception(smart_str(detail))
raise PopupException(_("Error submitting job %s") % (application_path,), detail=detail)
request.info(_('Oozie job submitted'))
view = 'list_oozie_bundle' if application_name == 'bundle.xml' else 'list_oozie_coordinator' if application_name == 'coordinator.xml' else 'list_oozie_workflow'
return redirect(reverse('oozie:%s' % view, kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s') % params_form.errors)
else:
parameters = Submission(request.user, fs=request.fs, jt=request.jt).get_external_parameters(application_path)
initial_params = ParameterForm.get_initial_params(parameters)
params_form = ParametersFormSet(initial=initial_params)
popup = render('editor/submit_job_popup.mako', request, {
'params_form': params_form,
'name': _('Job'),
'action': reverse('oozie:submit_external_job', kwargs={'application_path': application_path}),
'show_dryrun': os.path.basename(application_path) != 'bundle.xml'
}, force_template=True).content
return JsonResponse(popup, safe=False)
def massaged_workflow_actions_for_json(workflow_actions, oozie_coordinator, oozie_bundle):
actions = []
for action in workflow_actions:
if oozie_coordinator is not None:
setattr(action, 'oozie_coordinator', oozie_coordinator)
if oozie_bundle is not None:
setattr(action, 'oozie_bundle', oozie_bundle)
massaged_action = {
'id': action.id,
'log': action.get_absolute_log_url(),
'url': action.get_absolute_url(),
'name': action.name,
'type': action.type,
'status': action.status,
'externalIdUrl': action.get_external_id_url(),
'externalId': action.externalId,
'startTime': format_time(action.startTime),
'endTime': format_time(action.endTime),
'retries': action.retries,
'errorCode': action.errorCode,
'errorMessage': action.errorMessage,
'transition': action.transition,
'data': action.data,
}
actions.append(massaged_action)
return actions
def massaged_coordinator_actions_for_json(coordinator, oozie_bundle):
coordinator_id = coordinator.id
coordinator_actions = coordinator.get_working_actions()
actions = []
related_job_ids = []
related_job_ids.append('coordinator_job_id=%s' % coordinator_id)
if oozie_bundle is not None:
related_job_ids.append('bundle_job_id=%s' % oozie_bundle.id)
for action in coordinator_actions:
massaged_action = {
'id': action.id,
'url': action.externalId and reverse('oozie:list_oozie_workflow', kwargs={'job_id': action.externalId}) + '?%s' % '&'.join(related_job_ids) or '',
'number': action.actionNumber,
'type': action.type,
'status': action.status,
'externalId': action.externalId or '-',
'externalIdUrl': action.externalId and reverse('oozie:list_oozie_workflow_action', kwargs={'action': action.externalId}) or '',
'nominalTime': format_time(action.nominalTime),
'title': action.title,
'createdTime': format_time(action.createdTime),
'lastModifiedTime': format_time(action.lastModifiedTime),
'errorCode': action.errorCode,
'errorMessage': action.errorMessage,
'missingDependencies': action.missingDependencies
}
actions.append(massaged_action)
# Sorting for Oozie < 4.1 backward compatibility
actions.sort(key=lambda k: k['number'], reverse=True)
return actions
def massaged_bundle_actions_for_json(bundle):
bundle_actions = bundle.get_working_actions()
actions = []
for action in bundle_actions:
massaged_action = {
'id': action.coordJobId,
'url': action.coordJobId and reverse('oozie:list_oozie_coordinator', kwargs={'job_id': action.coordJobId}) + '?bundle_job_id=%s' % bundle.id or '',
'name': action.coordJobName,
'type': action.type,
'status': action.status,
'externalId': action.coordExternalId or '-',
'frequency': action.frequency,
'timeUnit': action.timeUnit,
'nextMaterializedTime': action.nextMaterializedTime,
'concurrency': action.concurrency,
'pauseTime': action.pauseTime,
'user': action.user,
'acl': action.acl,
'timeOut': action.timeOut,
'coordJobPath': action.coordJobPath,
'executionPolicy': action.executionPolicy,
'startTime': action.startTime,
'endTime': action.endTime,
'lastAction': action.lastAction
}
actions.insert(0, massaged_action)
return actions
def format_time(st_time):
if st_time is None:
return '-'
elif type(st_time) == time.struct_time:
return time.strftime("%a, %d %b %Y %H:%M:%S", st_time)
else:
return st_time
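# Illustrative usage only (not part of the original module); format_time()
# accepts a time.struct_time or passes other values straight through:
#   format_time(time.gmtime(0))   # -> 'Thu, 01 Jan 1970 00:00:00'
#   format_time(None)             # -> '-'
#   format_time('2015-01-01')     # -> '2015-01-01' (returned unchanged)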
def catch_unicode_time(u_time):
if type(u_time) == time.struct_time:
return u_time
else:
return datetime.strptime(u_time, '%a, %d %b %Y %H:%M:%S %Z').timetuple()
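# Illustrative usage only, assuming timestamps arrive as strings in the same
# '%a, %d %b %Y %H:%M:%S %Z' shape parsed above (e.g. from the Oozie API):
#   catch_unicode_time('Thu, 01 Jan 1970 00:00:00 GMT')  # -> time.struct_time
#   catch_unicode_time(time.gmtime(0))                   # -> returned unchanged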
def massaged_oozie_jobs_for_json(oozie_jobs, user, just_sla=False):
jobs = []
for job in oozie_jobs:
if not just_sla or (just_sla and job.has_sla) and job.appName != 'pig-app-hue-script':
last_modified_time_millis = hasattr(job, 'lastModTime') and job.lastModTime and (time.time() - time.mktime(job.lastModTime)) * 1000 or 0
duration_millis = job.endTime and job.startTime and ((time.mktime(job.endTime) - time.mktime(job.startTime)) * 1000) or 0
massaged_job = {
'id': job.id,
'lastModTime': hasattr(job, 'lastModTime') and job.lastModTime and format_time(job.lastModTime) or None,
'lastModTimeInMillis': last_modified_time_millis,
'lastModTimeFormatted': last_modified_time_millis and format_duration_in_millis(last_modified_time_millis) or None,
'kickoffTime': hasattr(job, 'kickoffTime') and job.kickoffTime and format_time(job.kickoffTime) or '',
'kickoffTimeInMillis': hasattr(job, 'kickoffTime') and job.kickoffTime and time.mktime(catch_unicode_time(job.kickoffTime)) or 0,
'nextMaterializedTime': hasattr(job, 'nextMaterializedTime') and job.nextMaterializedTime and format_time(job.nextMaterializedTime) or '',
'nextMaterializedTimeInMillis': hasattr(job, 'nextMaterializedTime') and job.nextMaterializedTime and time.mktime(job.nextMaterializedTime) or 0,
'timeOut': hasattr(job, 'timeOut') and job.timeOut or None,
'endTime': job.endTime and format_time(job.endTime) or None,
'pauseTime': hasattr(job, 'pauseTime') and job.pauseTime and format_time(job.pauseTime) or None,
'concurrency': hasattr(job, 'concurrency') and job.concurrency or None,
'endTimeInMillis': job.endTime and time.mktime(job.endTime) or 0,
'status': job.status,
'isRunning': job.is_running(),
'duration': duration_millis and format_duration_in_millis(duration_millis) or None,
'durationInMillis': duration_millis,
'appName': job.appName,
'progress': job.get_progress(),
'user': job.user,
'absoluteUrl': job.get_absolute_url(),
'canEdit': has_job_edition_permission(job, user),
'killUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'kill'}),
'suspendUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'suspend'}),
'resumeUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'resume'}),
'created': hasattr(job, 'createdTime') and job.createdTime and format_time(job.createdTime) or '',
'createdInMillis': hasattr(job, 'createdTime') and job.createdTime and time.mktime(catch_unicode_time(job.createdTime)) or 0,
'startTime': hasattr(job, 'startTime') and format_time(job.startTime) or None,
'startTimeInMillis': hasattr(job, 'startTime') and job.startTime and time.mktime(job.startTime) or 0,
'run': hasattr(job, 'run') and job.run or 0,
'frequency': hasattr(job, 'frequency') and Coordinator.CRON_MAPPING.get(job.frequency, job.frequency) or None,
'timeUnit': hasattr(job, 'timeUnit') and job.timeUnit or None,
'parentUrl': hasattr(job, 'parentId') and job.parentId and get_link(job.parentId) or '',
'submittedManually': hasattr(job, 'parentId') and _submitted_manually(job, user)
}
jobs.append(massaged_job)
return { 'jobs': jobs }
def check_job_access_permission(request, job_id, **kwargs):
"""
Ensures that the user has access to the job submitted to Oozie.
Arg: Oozie 'workflow', 'coordinator' or 'bundle' ID.
Return: the Oozie workflow, coordinator or bundle, or raise an exception.
Note: it takes an id as input and returns the full object (not an id).
"""
if job_id is not None:
oozie_api = get_oozie(request.user)
if job_id.endswith('W'):
get_job = oozie_api.get_job
elif job_id.endswith('C'):
get_job = oozie_api.get_coordinator
else:
get_job = oozie_api.get_bundle
try:
if job_id.endswith('C'):
oozie_job = get_job(job_id, **kwargs)
else:
oozie_job = get_job(job_id)
except RestException, ex:
msg = _("Error accessing Oozie job %s.") % (job_id,)
LOG.exception(msg)
raise PopupException(msg, detail=ex._headers.get('oozie-error-message', ''))
if request.user.is_superuser \
or oozie_job.user == request.user.username \
or has_dashboard_jobs_access(request.user):
return oozie_job
else:
message = _("Permission denied. %(username)s does not have the permissions to access job %(id)s.") % \
{'username': request.user.username, 'id': oozie_job.id}
access_warn(request, message)
raise PopupException(message)
def check_job_edition_permission(oozie_job, user):
if has_job_edition_permission(oozie_job, user):
return oozie_job
else:
message = _("Permission denied. %(username)s does not have the permissions to modify job %(id)s.") % \
{'username': user.username, 'id': oozie_job.id}
raise PopupException(message)
def has_job_edition_permission(oozie_job, user):
return user.is_superuser or oozie_job.user == user.username
def has_dashboard_jobs_access(user):
return user.is_superuser or user.has_hue_permission(action="dashboard_jobs_access", app=DJANGO_APPS[0])
def _submitted_manually(job, user):
parent_id = job.parentId
if not parent_id:
return True
if 'C@' in parent_id:
return False
oozie_api = get_oozie(user)
if parent_id.endswith('W'):
get_job = oozie_api.get_job
elif parent_id.endswith('C'):
get_job = oozie_api.get_coordinator
else:
get_job = oozie_api.get_bundle
try:
job = get_job(parent_id)
except:
LOG.exception('failed to get job')
return True
return _submitted_manually(job, user)
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import List, Optional, Set
from unittest import skip
from pants.backend.codegen.thrift.java.java_thrift_library import JavaThriftLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.resources import Resources
from pants.build_graph.target import Target
from pants.rules.core import filedeps
from pants.testutil.goal_rule_test_base import GoalRuleTestBase
class FileDepsTest(GoalRuleTestBase):
goal_cls = filedeps.Filedeps
@classmethod
def rules(cls):
return super().rules() + filedeps.rules()
@classmethod
def alias_groups(cls) -> BuildFileAliases:
return BuildFileAliases(
targets={
"target": Target,
"resources": Resources,
"java_library": JavaLibrary,
"java_thrift_library": JavaThriftLibrary,
"jvm_app": JvmApp,
"jvm_binary": JvmBinary,
"scala_library": ScalaLibrary,
"python_library": PythonLibrary,
},
)
def create_python_library(
self,
path: str,
*,
sources: Optional[List[str]] = None,
dependencies: Optional[List[str]] = None
) -> None:
self.create_library(
path=path,
target_type="python_library",
name="target",
sources=sources or [],
dependencies=dependencies or [],
)
def assert_filedeps(
self, *, targets: List[str], expected: Set[str], globs: bool = False
) -> None:
args = ["--no-filedeps2-absolute"]
if globs:
args.append("--filedeps2-globs")
self.assert_console_output(*expected, args=args + targets)
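# Usage sketch (mirrors the tests below; the flags are exactly the ones this
# helper already passes, nothing new is assumed):
#   self.assert_filedeps(targets=["some/target"], expected={"some/target/BUILD"})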
def test_no_target(self) -> None:
self.assert_filedeps(targets=[], expected=set())
def test_one_target_no_source(self) -> None:
self.add_to_build_file("some/target", target="target()")
self.assert_filedeps(targets=["some/target"], expected={"some/target/BUILD"})
def test_one_target_one_source(self) -> None:
self.create_python_library("some/target", sources=["file.py"])
self.assert_filedeps(
targets=["some/target"], expected={"some/target/BUILD", "some/target/file.py"}
)
def test_one_target_multiple_source(self) -> None:
self.create_python_library("some/target", sources=["file1.py", "file2.py"])
self.assert_filedeps(
targets=["some/target"],
expected={"some/target/BUILD", "some/target/file1.py", "some/target/file2.py"},
)
def test_one_target_no_source_one_dep(self) -> None:
self.create_python_library("dep/target", sources=["file.py"])
self.create_python_library("some/target", dependencies=["dep/target"])
self.assert_filedeps(
targets=["some/target"],
expected={"some/target/BUILD", "dep/target/BUILD", "dep/target/file.py"},
)
def test_one_target_one_source_with_dep(self) -> None:
self.create_python_library("dep/target", sources=["file.py"])
self.create_python_library("some/target", sources=["file.py"], dependencies=["dep/target"])
self.assert_filedeps(
targets=["some/target"],
expected={
"some/target/BUILD",
"some/target/file.py",
"dep/target/BUILD",
"dep/target/file.py",
},
)
def test_multiple_targets_one_source(self) -> None:
self.create_python_library("some/target", sources=["file.py"])
self.create_python_library("other/target", sources=["file.py"])
self.assert_filedeps(
targets=["some/target", "other/target"],
expected={
"some/target/BUILD",
"some/target/file.py",
"other/target/BUILD",
"other/target/file.py",
},
)
def test_multiple_targets_one_source_with_dep(self) -> None:
self.create_python_library("dep1/target", sources=["file.py"])
self.create_python_library("dep2/target", sources=["file.py"])
self.create_python_library("some/target", sources=["file.py"], dependencies=["dep1/target"])
self.create_python_library(
"other/target", sources=["file.py"], dependencies=["dep2/target"]
)
self.assert_filedeps(
targets=["some/target", "other/target"],
expected={
"some/target/BUILD",
"some/target/file.py",
"other/target/BUILD",
"other/target/file.py",
"dep1/target/BUILD",
"dep1/target/file.py",
"dep2/target/BUILD",
"dep2/target/file.py",
},
)
def test_multiple_targets_one_source_overlapping(self) -> None:
self.create_python_library("dep/target", sources=["file.py"])
self.create_python_library("some/target", sources=["file.py"], dependencies=["dep/target"])
self.create_python_library("other/target", sources=["file.py"], dependencies=["dep/target"])
self.assert_filedeps(
targets=["some/target", "other/target"],
expected={
"some/target/BUILD",
"some/target/file.py",
"other/target/BUILD",
"other/target/file.py",
"dep/target/BUILD",
"dep/target/file.py",
},
)
def test_globs(self) -> None:
self.create_files("some/target", ["test1.py", "test2.py"])
self.add_to_build_file("some/target", target="target(name='target', sources=['test*.py'])")
self.assert_filedeps(
targets=["some/target"],
expected={"some/target/BUILD", "some/target/test*.py"},
globs=True,
)
def test_build_with_file_ext(self) -> None:
self.create_file("some/target/BUILD.ext", contents="target()")
self.assert_filedeps(targets=["some/target"], expected={"some/target/BUILD.ext"})
def test_resources(self) -> None:
self.create_resources("src/resources", "data", "data.json")
self.assert_filedeps(
targets=["src/resources:data"],
expected={"src/resources/BUILD", "src/resources/data.json"},
)
@skip(
"V2 does not yet hydrate java_sources for scala_library targets. Once this happens, "
"we must teach filedeps.py to check the target_adaptor for java_sources."
)
def test_scala_with_java_sources(self) -> None:
self.create_file("src/java/j.java")
self.create_file("src/scala/s.scala")
self.add_to_build_file(
"src/java", target="java_library(sources=['j.java'], dependencies=['src/scala'])"
)
self.add_to_build_file(
"src/scala", target="scala_library(sources=['s.scala'], java_sources=['src/java'])"
)
expected = {"src/java/BUILD", "src/java/j.java", "src/scala/BUILD", "src/scala/s.scala"}
self.assert_filedeps(targets=["src/java"], expected=expected)
self.assert_filedeps(targets=["src/scala"], expected=expected)
def test_filter_out_synthetic_targets(self) -> None:
self.create_library(
path="src/thrift/storage",
target_type="java_thrift_library",
name="storage",
sources=["data_types.thrift"],
)
java_lib = self.create_library(
path="src/java/lib",
target_type="java_library",
name="lib",
sources=["lib1.java"],
dependencies=["src/thrift/storage"],
)
self.create_file(".pants.d/gen/thrift/java/storage/Angle.java")
synthetic_java_lib = self.make_target(
spec=".pants.d/gen/thrift/java/storage",
target_type=JavaLibrary,
derived_from=self.target("src/thrift/storage"),
sources=["Angle.java"],
)
java_lib.inject_dependency(synthetic_java_lib.address)
self.assert_filedeps(
targets=["src/java/lib"],
expected={
"src/java/lib/BUILD",
"src/java/lib/lib1.java",
"src/thrift/storage/BUILD",
"src/thrift/storage/data_types.thrift",
},
)
@skip(
"V2 does not yet hydrate bundles or binary attributes for jvm_app. After this is added,"
"we must add similar logic to the V1 filedeps goal for jvm_apps."
)
def test_jvm_app(self) -> None:
self.create_library(
path="src/thrift/storage",
target_type="java_thrift_library",
name="storage",
sources=["data_types.thrift"],
)
self.create_library(
path="src/java/lib",
target_type="java_library",
name="lib",
sources=["lib1.java"],
dependencies=["src/thrift/storage"],
)
self.create_library(
path="src/java/bin",
target_type="jvm_binary",
name="bin",
sources=["main.java"],
dependencies=["src/java/lib"],
)
self.create_file("project/config/app.yaml")
self.create_library(
path="project", target_type="jvm_app", name="app",
)
self.assert_filedeps(
targets=["project:app"],
expected={
"project/BUILD",
"project/config/app.yaml",
"src/java/bin/BUILD",
"src/java/bin/main.java",
"src/java/lib/BUILD",
"src/java/lib/lib1.java",
"src/thrift/storage/BUILD" "src/thrift/storage/data_types.thrift",
},
)
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for modules/review/review.py."""
__author__ = [
'[email protected] (John Cox)',
]
import types
from models import models
from models import student_work
from modules.review import domain
from modules.review import peer
from modules.review import review as review_module
from tests.functional import actions
from google.appengine.ext import db
class ManagerTest(actions.TestBase):
"""Tests for review.Manager."""
def setUp(self):
super(ManagerTest, self).setUp()
self.reviewee = models.Student(key_name='[email protected]')
self.reviewee_key = self.reviewee.put()
self.reviewer = models.Student(key_name='[email protected]')
self.reviewer_key = self.reviewer.put()
self.unit_id = '1'
self.submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
reviewee_key=self.reviewee_key, unit_id=self.unit_id))
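# Sketch of the datastore relationships exercised below (inferred from the
# assertions in these tests, not from separate documentation):
#   peer.ReviewSummary - one per (reviewee, submission, unit); tracks the
#                        assigned/completed/expired counters.
#   peer.ReviewStep    - one per reviewer assignment; points at its summary
#                        via review_summary_key and carries the review state.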
def test_add_reviewer_adds_new_step_and_summary(self):
step_key = review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step = db.get(step_key)
summary = db.get(step.review_summary_key)
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(self.reviewee_key, step.reviewee_key)
self.assertEqual(self.reviewer_key, step.reviewer_key)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(self.submission_key, step.submission_key)
self.assertEqual(self.unit_id, step.unit_id)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.completed_count)
self.assertEqual(0, summary.expired_count)
self.assertEqual(self.reviewee_key, summary.reviewee_key)
self.assertEqual(self.submission_key, summary.submission_key)
self.assertEqual(self.unit_id, summary.unit_id)
def test_add_reviewer_existing_raises_assertion_when_summary_missing(self):
missing_key = db.Key.from_path(
peer.ReviewSummary.kind(), 'no_summary_found_for_key')
peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=missing_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
AssertionError, review_module.Manager.add_reviewer, self.unit_id,
self.submission_key, self.reviewee_key, self.reviewer_key)
def test_add_reviewer_existing_raises_transition_error_when_assigned(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.add_reviewer,
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
def test_add_reviewer_existing_raises_transition_error_when_completed(self):
summary_key = peer.ReviewSummary(
completed_count=1, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.add_reviewer,
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
def test_add_reviewer_unremoved_existing_changes_expired_to_assigned(self):
summary_key = peer.ReviewSummary(
expired_count=1, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertFalse(step.removed)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.expired_count)
def test_add_reviewer_removed_unremoves_assigned_step(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertFalse(step.removed)
self.assertEqual(1, summary.assigned_count)
def test_add_reviewer_removed_unremoves_completed_step(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
self.assertFalse(step.removed)
self.assertEqual(1, summary.completed_count)
def test_add_reviewer_removed_unremoves_and_assigns_expired_step(self):
summary_key = peer.ReviewSummary(
expired_count=1, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertFalse(step.removed)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.expired_count)
def test_delete_reviewer_marks_step_removed_and_decrements_summary(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
step, summary = db.get([step_key, summary_key])
self.assertFalse(step.removed)
self.assertEqual(1, summary.assigned_count)
deleted_key = review_module.Manager.delete_reviewer(step_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(step_key, deleted_key)
self.assertTrue(step.removed)
self.assertEqual(0, summary.assigned_count)
def test_delete_reviewer_raises_key_error_when_step_missing(self):
self.assertRaises(
KeyError, review_module.Manager.delete_reviewer,
db.Key.from_path(peer.ReviewStep.kind(), 'missing_step_key'))
def test_delete_reviewer_raises_key_error_when_summary_missing(self):
missing_key = db.Key.from_path(
peer.ReviewSummary.kind(), 'missing_review_summary_key')
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=missing_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
KeyError, review_module.Manager.delete_reviewer, step_key)
def test_delete_reviewer_raises_removed_error_if_already_removed(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.RemovedError, review_module.Manager.delete_reviewer,
step_key)
def test_expire_review_raises_key_error_when_step_missing(self):
self.assertRaises(
KeyError, review_module.Manager.expire_review,
db.Key.from_path(peer.ReviewStep.kind(), 'missing_step_key'))
def test_expire_review_raises_key_error_when_summary_missing(self):
missing_key = db.Key.from_path(
peer.ReviewSummary.kind(), 'missing_review_summary_key')
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=missing_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
KeyError, review_module.Manager.expire_review, step_key)
def test_expire_review_raises_transition_error_when_state_completed(self):
summary_key = peer.ReviewSummary(
completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.expire_review,
step_key)
def test_expire_review_raises_transition_error_when_state_expired(self):
summary_key = peer.ReviewSummary(
expired_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.expire_review,
step_key)
def test_expire_review_raises_removed_error_when_step_removed(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.RemovedError, review_module.Manager.expire_review, step_key)
def test_expire_review_transitions_state_and_updates_summary(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
step, summary = db.get([step_key, summary_key])
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.expired_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
expired_key = review_module.Manager.expire_review(step_key)
step, summary = db.get([expired_key, summary_key])
self.assertEqual(0, summary.assigned_count)
self.assertEqual(1, summary.expired_count)
self.assertEqual(domain.REVIEW_STATE_EXPIRED, step.state)
def test_expire_old_reviews_for_unit_expires_found_reviews(self):
summary_key = peer.ReviewSummary(
assigned_count=2, completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
first_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
second_reviewee_key = models.Student(
key_name='[email protected]').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
second_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=second_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=second_submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
review_module.Manager.expire_old_reviews_for_unit(0, self.unit_id)
first_step, second_step, summary = db.get(
[first_step_key, second_step_key, summary_key])
self.assertEqual(
[domain.REVIEW_STATE_EXPIRED, domain.REVIEW_STATE_EXPIRED],
[step.state for step in [first_step, second_step]])
self.assertEqual(0, summary.assigned_count)
self.assertEqual(2, summary.expired_count)
def test_expire_old_reviews_skips_errors_and_continues_processing(self):
# Create and bind a function that we can swap in to generate a query
# that will pick up bad results so we can tell that we skip them.
query_containing_unprocessable_entities = peer.ReviewStep.all(
keys_only=True)
query_fn = types.MethodType(
lambda x, y, z: query_containing_unprocessable_entities,
review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, 'get_expiry_query', query_fn)
summary_key = peer.ReviewSummary(
assigned_count=1, completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
processable_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
second_reviewee_key = models.Student(
key_name='[email protected]').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
error_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=second_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=second_submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
review_module.Manager.expire_old_reviews_for_unit(0, self.unit_id)
processed_step, error_step, summary = db.get(
[processable_step_key, error_step_key, summary_key])
self.assertEqual(domain.REVIEW_STATE_COMPLETED, error_step.state)
self.assertEqual(domain.REVIEW_STATE_EXPIRED, processed_step.state)
self.assertEqual(0, summary.assigned_count)
self.assertEqual(1, summary.completed_count)
self.assertEqual(1, summary.expired_count)
def test_get_assignment_candidates_query_filters_and_orders_correctly(self):
unused_wrong_unit_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=str(int(self.unit_id) + 1)
).put()
second_reviewee_key = models.Student(
key_name='[email protected]').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
older_assigned_and_completed_key = peer.ReviewSummary(
assigned_count=1, completed_count=1,
reviewee_key=second_reviewee_key,
submission_key=second_submission_key, unit_id=self.unit_id
).put()
third_reviewee_key = models.Student(
key_name='[email protected]').put()
third_submission_key = student_work.Submission(
reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
younger_assigned_and_completed_key = peer.ReviewSummary(
assigned_count=1, completed_count=1,
reviewee_key=third_reviewee_key,
submission_key=third_submission_key, unit_id=self.unit_id
).put()
fourth_reviewee_key = models.Student(
key_name='[email protected]').put()
fourth_submission_key = student_work.Submission(
reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
completed_but_not_assigned_key = peer.ReviewSummary(
assigned_count=0, completed_count=1,
reviewee_key=fourth_reviewee_key,
submission_key=fourth_submission_key, unit_id=self.unit_id
).put()
fifth_reviewee_key = models.Student(
key_name='[email protected]').put()
fifth_submission_key = student_work.Submission(
reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
assigned_but_not_completed_key = peer.ReviewSummary(
assigned_count=1, completed_count=0,
reviewee_key=fifth_reviewee_key,
submission_key=fifth_submission_key, unit_id=self.unit_id
).put()
results = review_module.Manager.get_assignment_candidates_query(
self.unit_id).fetch(5)
self.assertEqual([
assigned_but_not_completed_key,
completed_but_not_assigned_key,
older_assigned_and_completed_key,
younger_assigned_and_completed_key
], [r.key() for r in results])
def test_get_expiry_query_filters_and_orders_correctly(self):
summary_key = peer.ReviewSummary(
assigned_count=2, completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
unused_completed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
second_reviewee_key = models.Student(
key_name='[email protected]').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
unused_removed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=second_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=second_submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
third_reviewee_key = models.Student(
key_name='[email protected]').put()
third_submission_key = student_work.Submission(
reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
unused_other_unit_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=third_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=third_submission_key,
state=domain.REVIEW_STATE_ASSIGNED,
unit_id=str(int(self.unit_id) + 1)
).put()
fourth_reviewee_key = models.Student(
key_name='[email protected]').put()
fourth_submission_key = student_work.Submission(
reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
first_assigned_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=fourth_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=fourth_submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
fifth_reviewee_key = models.Student(
key_name='[email protected]').put()
fifth_submission_key = student_work.Submission(
reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
second_assigned_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=fifth_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=fifth_submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
zero_review_window_query = review_module.Manager.get_expiry_query(
0, self.unit_id)
future_review_window_query = review_module.Manager.get_expiry_query(
1, self.unit_id)
self.assertEqual(
[first_assigned_step_key, second_assigned_step_key],
zero_review_window_query.fetch(3))
# No items are > 1 minute old, so we expect an empty result set.
self.assertEqual(None, future_review_window_query.get())
def test_get_new_review_creates_step_and_updates_summary(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
summary = db.get(summary_key)
self.assertEqual(0, summary.assigned_count)
step_key = review_module.Manager.get_new_review(
self.unit_id, self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
self.assertEqual(summary.key(), step.review_summary_key)
self.assertEqual(self.reviewee_key, step.reviewee_key)
self.assertEqual(self.reviewer_key, step.reviewer_key)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(self.submission_key, step.submission_key)
self.assertEqual(self.unit_id, step.unit_id)
self.assertEqual(1, summary.assigned_count)
def test_get_new_review_raises_key_error_when_summary_missing(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
        # Create and bind a function that we can swap in to pick the review
        # candidate but that, as a side effect, deletes the review summary,
        # causing the subsequent lookup by key to fail.
def pick_and_remove(unused_cls, candidates):
db.delete(summary_key)
return candidates[0]
fn = types.MethodType(
pick_and_remove, review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, '_choose_assignment_candidate', fn)
self.assertRaises(
KeyError, review_module.Manager.get_new_review, self.unit_id,
self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_already_assigned(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
unused_already_assigned_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_already_completed(self):
summary_key = peer.ReviewSummary(
            completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
already_completed_unremoved_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
db.delete(already_completed_unremoved_step_key)
unused_already_completed_removed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_review_is_for_self(self):
peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewer_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id,
self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_no_candidates(self):
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_retry_limit_hit(self):
higher_priority_summary = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
higher_priority_summary_key = higher_priority_summary.put()
second_reviewee_key = models.Student(
key_name='[email protected]').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
lower_priority_summary_key = peer.ReviewSummary(
completed_count=1, reviewee_key=second_reviewee_key,
submission_key=second_submission_key, unit_id=self.unit_id
).put()
self.assertEqual( # Ensure we'll process higher priority first.
[higher_priority_summary_key, lower_priority_summary_key],
[c.key() for c in
review_module.Manager.get_assignment_candidates_query(
self.unit_id).fetch(2)])
# Create and bind a function that we can swap in to pick the review
# candidate but as a side-effect updates the highest priority candidate
# so we'll skip it and retry.
def pick_and_update(unused_cls, candidates):
db.put(higher_priority_summary)
return candidates[0]
fn = types.MethodType(
pick_and_update, review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, '_choose_assignment_candidate', fn)
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key, max_retries=0)
def test_get_new_review_raises_not_assignable_when_summary_updated(self):
summary = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
summary.put()
# Create and bind a function that we can swap in to pick the review
# candidate but as a side-effect updates the summary so we'll reject it
# as a candidate.
def pick_and_update(unused_cls, candidates):
db.put(summary)
return candidates[0]
fn = types.MethodType(
pick_and_update, review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, '_choose_assignment_candidate', fn)
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
def test_get_new_review_reassigns_removed_assigned_step(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
unused_already_assigned_removed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
step_key = review_module.Manager.get_new_review(
self.unit_id, self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
self.assertFalse(step.removed)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(1, summary.assigned_count)
def test_get_new_review_reassigns_removed_expired_step(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
unused_already_expired_removed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
step_key = review_module.Manager.get_new_review(
self.unit_id, self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
self.assertFalse(step.removed)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.expired_count)
def test_get_new_review_retries_successfully(self):
higher_priority_summary = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
higher_priority_summary_key = higher_priority_summary.put()
second_reviewee_key = models.Student(
key_name='[email protected]').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
lower_priority_summary_key = peer.ReviewSummary(
completed_count=1, reviewee_key=second_reviewee_key,
submission_key=second_submission_key, unit_id=self.unit_id
).put()
self.assertEqual( # Ensure we'll process higher priority first.
[higher_priority_summary_key, lower_priority_summary_key],
[c.key() for c in
review_module.Manager.get_assignment_candidates_query(
self.unit_id).fetch(2)])
# Create and bind a function that we can swap in to pick the review
# candidate but as a side-effect updates the highest priority candidate
# so we'll skip it and retry.
def pick_and_update(unused_cls, candidates):
db.put(higher_priority_summary)
return candidates[0]
fn = types.MethodType(
pick_and_update, review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, '_choose_assignment_candidate', fn)
step_key = review_module.Manager.get_new_review(
self.unit_id, self.reviewer_key)
step = db.get(step_key)
self.assertEqual(lower_priority_summary_key, step.review_summary_key)
def test_get_review_step_keys_by_returns_list_of_keys(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
matching_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
non_matching_reviewer = models.Student(key_name='[email protected]')
non_matching_reviewer_key = non_matching_reviewer.put()
unused_non_matching_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=non_matching_reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED,
unit_id=self.unit_id
).put()
self.assertEqual(
[matching_step_key],
review_module.Manager.get_review_step_keys_by(
self.unit_id, self.reviewer_key))
def test_get_review_step_keys_by_returns_keys_in_sorted_order(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
first_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
second_reviewee_key = models.Student(
key_name='[email protected]').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
second_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=second_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=second_submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertEqual(
[first_step_key, second_step_key],
review_module.Manager.get_review_step_keys_by(
self.unit_id, self.reviewer_key))
def test_get_review_step_keys_by_returns_empty_list_when_no_matches(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
non_matching_reviewer = models.Student(key_name='[email protected]')
non_matching_reviewer_key = non_matching_reviewer.put()
unused_non_matching_step_different_reviewer_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=non_matching_reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED,
unit_id=self.unit_id,
).put()
unused_non_matching_step_different_unit_id_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED,
unit_id=str(int(self.unit_id) + 1),
).put()
self.assertEqual(
[], review_module.Manager.get_review_step_keys_by(
self.unit_id, self.reviewer_key))
def test_get_review_steps_by_keys(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
second_reviewer_key = models.Student(
key_name='[email protected]').put()
missing_step_key = db.Key.from_path(
peer.ReviewStep.kind(),
peer.ReviewStep.key_name(
self.submission_key, second_reviewer_key))
model_objects = db.get([step_key, missing_step_key])
domain_objects = review_module.Manager.get_review_steps_by_keys(
[step_key, missing_step_key])
model_step, model_miss = model_objects
domain_step, domain_miss = domain_objects
self.assertEqual(2, len(model_objects))
self.assertEqual(2, len(domain_objects))
self.assertIsNone(model_miss)
self.assertIsNone(domain_miss)
self.assertEqual(model_step.assigner_kind, domain_step.assigner_kind)
self.assertEqual(model_step.change_date, domain_step.change_date)
self.assertEqual(model_step.create_date, domain_step.create_date)
self.assertEqual(model_step.key(), domain_step.key)
self.assertEqual(model_step.removed, domain_step.removed)
self.assertEqual(model_step.review_key, domain_step.review_key)
self.assertEqual(
model_step.review_summary_key, domain_step.review_summary_key)
self.assertEqual(model_step.reviewee_key, domain_step.reviewee_key)
self.assertEqual(model_step.reviewer_key, domain_step.reviewer_key)
self.assertEqual(model_step.state, domain_step.state)
self.assertEqual(model_step.submission_key, domain_step.submission_key)
self.assertEqual(model_step.unit_id, domain_step.unit_id)
def test_get_reviews_by_keys(self):
review_key = student_work.Review(
contents='contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, unit_id=self.unit_id
).put()
missing_review_key = db.Key.from_path(
student_work.Review.kind(),
student_work.Review.key_name(
str(int(self.unit_id) + 1), self.reviewee_key,
self.reviewer_key))
model_objects = db.get([review_key, missing_review_key])
domain_objects = review_module.Manager.get_reviews_by_keys(
[review_key, missing_review_key])
model_review, model_miss = model_objects
domain_review, domain_miss = domain_objects
self.assertEqual(2, len(model_objects))
self.assertEqual(2, len(domain_objects))
self.assertIsNone(model_miss)
self.assertIsNone(domain_miss)
self.assertEqual(model_review.contents, domain_review.contents)
self.assertEqual(model_review.key(), domain_review.key)
def test_get_submission_and_review_step_keys_no_steps(self):
student_work.Submission(
reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
self.assertEqual(
(self.submission_key, []),
review_module.Manager.get_submission_and_review_step_keys(
self.unit_id, self.reviewee_key))
def test_get_submission_and_review_step_keys_with_steps(self):
student_work.Submission(
reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
matching_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
non_matching_reviewee_key = models.Student(
key_name='[email protected]').put()
non_matching_submission_key = student_work.Submission(
contents='contents2', reviewee_key=non_matching_reviewee_key,
unit_id=self.unit_id).put()
unused_non_matching_step_different_submission_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key,
reviewee_key=non_matching_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=non_matching_submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertEqual(
(self.submission_key, [matching_step_key]),
review_module.Manager.get_submission_and_review_step_keys(
self.unit_id, self.reviewee_key))
def test_get_submission_and_review_step_keys_returns_none_on_miss(self):
self.assertIsNone(
review_module.Manager.get_submission_and_review_step_keys(
self.unit_id, self.reviewee_key))
def test_get_submissions_by_keys(self):
submission_key = student_work.Submission(
contents='contents', reviewee_key=self.reviewee_key,
unit_id=self.unit_id).put()
missing_submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
str(int(self.unit_id) + 1), self.reviewee_key))
        model_objects = db.get([submission_key, missing_submission_key])
        domain_objects = review_module.Manager.get_submissions_by_keys(
            [submission_key, missing_submission_key])
        model_submission, model_miss = model_objects
        domain_submission, domain_miss = domain_objects
        self.assertEqual(2, len(model_objects))
self.assertEqual(2, len(domain_objects))
self.assertIsNone(model_miss)
self.assertIsNone(domain_miss)
self.assertEqual(model_submission.contents, domain_submission.contents)
self.assertEqual(model_submission.key(), domain_submission.key)
def test_start_review_process_for_succeeds(self):
key = review_module.Manager.start_review_process_for(
self.unit_id, self.submission_key, self.reviewee_key)
summary = db.get(key)
self.assertEqual(self.reviewee_key, summary.reviewee_key)
self.assertEqual(self.submission_key, summary.submission_key)
self.assertEqual(self.unit_id, summary.unit_id)
def test_start_review_process_for_throws_if_already_started(self):
collision = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
collision.put()
self.assertRaises(
domain.ReviewProcessAlreadyStartedError,
review_module.Manager.start_review_process_for,
self.unit_id, self.submission_key, self.reviewee_key)
def test_write_review_raises_constraint_error_if_key_but_no_review(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.ConstraintError, review_module.Manager.write_review,
step_key, 'payload')
def test_write_review_raises_constraint_error_if_no_summary(self):
missing_summary_key = db.Key.from_path(
peer.ReviewSummary.kind(),
peer.ReviewSummary.key_name(self.submission_key))
review_key = student_work.Review(
contents='contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key,
unit_id=self.unit_id).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=missing_summary_key,
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED,
unit_id=self.unit_id
).put()
self.assertRaises(
domain.ConstraintError, review_module.Manager.write_review,
step_key, 'payload')
def test_write_review_raises_key_error_if_no_step(self):
bad_step_key = db.Key.from_path(peer.ReviewStep.kind(), 'missing')
self.assertRaises(
KeyError, review_module.Manager.write_review, bad_step_key,
'payload')
def test_write_review_raises_removed_error_if_step_removed(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.RemovedError, review_module.Manager.write_review, step_key,
'payload')
def test_write_review_raises_transition_error_if_step_completed(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.write_review,
step_key, 'payload')
def test_write_review_with_mark_completed_false(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
review_key = student_work.Review(
contents='old_contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, unit_id=self.unit_id).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=summary_key,
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
updated_step_key = review_module.Manager.write_review(
step_key, 'new_contents', mark_completed=False)
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.completed_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual('new_contents', updated_review.contents)
def test_write_review_with_no_review_mark_completed_false(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertIsNone(db.get(step_key).review_key)
updated_step_key = review_module.Manager.write_review(
step_key, 'contents', mark_completed=False)
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.completed_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(step.review_key, updated_review.key())
self.assertEqual('contents', updated_review.contents)
def test_write_review_with_no_review_mark_completed_true(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertIsNone(db.get(step_key).review_key)
updated_step_key = review_module.Manager.write_review(
step_key, 'contents')
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(0, summary.assigned_count)
self.assertEqual(1, summary.completed_count)
self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
self.assertEqual(step.review_key, updated_review.key())
self.assertEqual('contents', updated_review.contents)
def test_write_review_with_state_assigned_and_mark_completed_true(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
review_key = student_work.Review(
contents='old_contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, unit_id=self.unit_id).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=summary_key,
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
updated_step_key = review_module.Manager.write_review(
step_key, 'new_contents')
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(0, summary.assigned_count)
self.assertEqual(1, summary.completed_count)
self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
self.assertEqual('new_contents', updated_review.contents)
def test_write_review_with_state_expired_and_mark_completed_true(self):
summary_key = peer.ReviewSummary(
expired_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
review_key = student_work.Review(
contents='old_contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, unit_id=self.unit_id).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=summary_key,
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
updated_step_key = review_module.Manager.write_review(
step_key, 'new_contents')
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(1, summary.completed_count)
self.assertEqual(0, summary.expired_count)
self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
self.assertEqual('new_contents', updated_review.contents)
def test_write_review_with_two_students_creates_different_reviews(self):
reviewee1 = models.Student(key_name='[email protected]')
reviewee1_key = reviewee1.put()
reviewee2 = models.Student(key_name='[email protected]')
reviewee2_key = reviewee2.put()
submission1_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
reviewee_key=reviewee1_key, unit_id=self.unit_id))
submission2_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
reviewee_key=reviewee2_key, unit_id=self.unit_id))
summary1_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=reviewee1_key,
submission_key=submission1_key, unit_id=self.unit_id
).put()
step1_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_summary_key=summary1_key, reviewee_key=reviewee1_key,
reviewer_key=self.reviewer_key, submission_key=submission1_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertIsNone(db.get(step1_key).review_key)
updated_step1_key = review_module.Manager.write_review(
step1_key, 'contents1', mark_completed=False)
self.assertEqual(step1_key, updated_step1_key)
summary2_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=reviewee2_key,
submission_key=submission2_key, unit_id=self.unit_id
).put()
step2_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_summary_key=summary2_key, reviewee_key=reviewee2_key,
reviewer_key=self.reviewer_key, submission_key=submission2_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertIsNone(db.get(step2_key).review_key)
updated_step2_key = review_module.Manager.write_review(
step2_key, 'contents2', mark_completed=False)
self.assertEqual(step2_key, updated_step2_key)
step1, summary1 = db.get([updated_step1_key, summary1_key])
updated_review = db.get(step1.review_key)
self.assertEqual(1, summary1.assigned_count)
self.assertEqual(0, summary1.completed_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step1.state)
self.assertEqual(step1.review_key, updated_review.key())
self.assertEqual('contents1', updated_review.contents)
step2, summary2 = db.get([updated_step2_key, summary2_key])
updated_review = db.get(step2.review_key)
self.assertEqual(1, summary2.assigned_count)
self.assertEqual(0, summary2.completed_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step2.state)
self.assertEqual(step2.review_key, updated_review.key())
self.assertEqual('contents2', updated_review.contents)
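# Hedged usage sketch (illustration only, not a test case): the Manager
# lifecycle exercised by the cases above, assuming the module-level imports
# used throughout this file (review_module, peer, student_work) and a reviewer
# who is not the reviewee of the submission.
def _example_manager_flow(unit_id, reviewee_key, submission_key, reviewer_key):
    # Open the review process for a submission; creates its ReviewSummary.
    summary_key = review_module.Manager.start_review_process_for(
        unit_id, submission_key, reviewee_key)
    # Assign a review step to the reviewer; increments assigned_count.
    step_key = review_module.Manager.get_new_review(unit_id, reviewer_key)
    # Write the review body; by default this marks the step completed.
    review_module.Manager.write_review(step_key, 'review contents')
    return summary_key, step_key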
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mimetypes
import os
import socket
from swift import gettext_ as _
from random import shuffle
from time import time
import itertools
import functools
import sys
from eventlet import Timeout
from swift import __canonical_version__ as swift_version
from swift.common import constraints
from swift.common.storage_policy import POLICIES
from swift.common.ring import Ring
from swift.common.utils import cache_from_env, get_logger, \
get_remote_client, split_path, config_true_value, generate_trans_id, \
affinity_key_function, affinity_locality_predicate, list_from_csv, \
register_swift_info
from swift.common.constraints import check_utf8, valid_api_version
from swift.proxy.controllers import AccountController, ContainerController, \
ObjectControllerRouter, InfoController, MigrationController
from swift.proxy.controllers.base import get_container_info
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \
HTTPServerError, HTTPException, Request, HTTPServiceUnavailable
from swift.common.exceptions import APIVersionError
#mjw dedupe
from swift.dedupe.deduplication import ChunkStore, InformationDatabase
# List of entry points for mandatory middlewares.
#
# Fields:
#
# "name" (required) is the entry point name from setup.py.
#
# "after_fn" (optional) a function that takes a PipelineWrapper object as its
# single argument and returns a list of middlewares that this middleware
# should come after. Any middlewares in the returned list that are not present
# in the pipeline will be ignored, so you can safely name optional middlewares
# to come after. For example, ["catch_errors", "bulk"] would install this
# middleware after catch_errors and bulk if both were present, but if bulk
# were absent, would just install it after catch_errors.
required_filters = [
{'name': 'catch_errors'},
{'name': 'gatekeeper',
'after_fn': lambda pipe: (['catch_errors']
if pipe.startswith('catch_errors')
else [])},
{'name': 'dlo', 'after_fn': lambda _junk: [
'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']},
{'name': 'versioned_writes', 'after_fn': lambda _junk: [
'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']}]
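# Hedged illustration (hypothetical entry, not appended to required_filters):
# the expected shape of a spec whose middleware should be installed after
# catch_errors and gatekeeper whenever those are present in the pipeline.
_example_filter_spec = {
    'name': 'example_filter',  # hypothetical entry point name
    'after_fn': lambda pipe: ['catch_errors', 'gatekeeper'],
}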
class Application(object):
"""WSGI application for the proxy server."""
def __init__(self, conf, memcache=None, logger=None, account_ring=None,
container_ring=None):
if conf is None:
conf = {}
if logger is None:
self.logger = get_logger(conf, log_route='proxy-server')
else:
self.logger = logger
self._error_limiting = {}
#mjwtom: deduplication
self.deduplication = config_true_value(conf.get('deduplication', 'false'))
if self.deduplication:
self.chunk_store = ChunkStore(conf, self)
self.info_database = InformationDatabase(conf)
self.fixed_chunk = config_true_value(conf.get('fixed_chunk', 'false'))
self.chunk_size = int(conf.get('chunk_size', 4096))
swift_dir = conf.get('swift_dir', '/etc/swift')
self.swift_dir = swift_dir
self.node_timeout = int(conf.get('node_timeout', 10))
self.recoverable_node_timeout = int(
conf.get('recoverable_node_timeout', self.node_timeout))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.put_queue_depth = int(conf.get('put_queue_depth', 10))
self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
self.trans_id_suffix = conf.get('trans_id_suffix', '')
self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
self.error_suppression_interval = \
int(conf.get('error_suppression_interval', 60))
self.error_suppression_limit = \
int(conf.get('error_suppression_limit', 10))
self.recheck_container_existence = \
int(conf.get('recheck_container_existence', 60))
self.recheck_account_existence = \
int(conf.get('recheck_account_existence', 60))
self.allow_account_management = \
config_true_value(conf.get('allow_account_management', 'no'))
self.object_post_as_copy = \
config_true_value(conf.get('object_post_as_copy', 'true'))
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
self.account_ring = account_ring or Ring(swift_dir,
ring_name='account')
# ensure rings are loaded for all configured storage policies
for policy in POLICIES:
policy.load_ring(swift_dir)
self.obj_controller_router = ObjectControllerRouter()
self.memcache = memcache
mimetypes.init(mimetypes.knownfiles +
[os.path.join(swift_dir, 'mime.types')])
self.account_autocreate = \
config_true_value(conf.get('account_autocreate', 'no'))
self.auto_create_account_prefix = (
conf.get('auto_create_account_prefix') or '.')
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
self.max_containers_per_account = \
int(conf.get('max_containers_per_account') or 0)
self.max_containers_whitelist = [
a.strip()
for a in conf.get('max_containers_whitelist', '').split(',')
if a.strip()]
self.deny_host_headers = [
host.strip() for host in
conf.get('deny_host_headers', '').split(',') if host.strip()]
self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
self.cors_allow_origin = [
a.strip()
for a in conf.get('cors_allow_origin', '').split(',')
if a.strip()]
self.strict_cors_mode = config_true_value(
conf.get('strict_cors_mode', 't'))
self.node_timings = {}
self.timing_expiry = int(conf.get('timing_expiry', 300))
self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
self.max_large_object_get_time = float(
conf.get('max_large_object_get_time', '86400'))
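        # request_node_count accepts either a bare integer (e.g. '3') or the
        # form '<N> * replicas' (e.g. the default '2 * replicas'), which
        # scales the number of nodes tried per request with the replica count.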
value = conf.get('request_node_count', '2 * replicas').lower().split()
if len(value) == 1:
rnc_value = int(value[0])
self.request_node_count = lambda replicas: rnc_value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
rnc_value = int(value[0])
self.request_node_count = lambda replicas: rnc_value * replicas
else:
raise ValueError(
'Invalid request_node_count value: %r' % ''.join(value))
try:
self._read_affinity = read_affinity = conf.get('read_affinity', '')
self.read_affinity_sort_key = affinity_key_function(read_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid read_affinity value: %r (%s)" %
(read_affinity, err.message))
try:
write_affinity = conf.get('write_affinity', '')
self.write_affinity_is_local_fn \
= affinity_locality_predicate(write_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid write_affinity value: %r (%s)" %
(write_affinity, err.message))
value = conf.get('write_affinity_node_count',
'2 * replicas').lower().split()
if len(value) == 1:
wanc_value = int(value[0])
self.write_affinity_node_count = lambda replicas: wanc_value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
wanc_value = int(value[0])
self.write_affinity_node_count = \
lambda replicas: wanc_value * replicas
else:
raise ValueError(
'Invalid write_affinity_node_count value: %r' % ''.join(value))
# swift_owner_headers are stripped by the account and container
# controllers; we should extend header stripping to object controller
# when a privileged object header is implemented.
swift_owner_headers = conf.get(
'swift_owner_headers',
'x-container-read, x-container-write, '
'x-container-sync-key, x-container-sync-to, '
'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
'x-account-access-control')
self.swift_owner_headers = [
name.strip().title()
for name in swift_owner_headers.split(',') if name.strip()]
# Initialization was successful, so now apply the client chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
        # NOTE WELL: This is a class setting, so until we can set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
socket._fileobject.default_bufsize = self.client_chunk_size
self.expose_info = config_true_value(
conf.get('expose_info', 'yes'))
self.disallowed_sections = list_from_csv(
conf.get('disallowed_sections', 'swift.valid_api_versions'))
self.admin_key = conf.get('admin_key', None)
register_swift_info(
version=swift_version,
strict_cors_mode=self.strict_cors_mode,
policies=POLICIES.get_policy_info(),
allow_account_management=self.allow_account_management,
account_autocreate=self.account_autocreate,
**constraints.EFFECTIVE_CONSTRAINTS)
def check_config(self):
"""
Check the configuration for possible errors
"""
if self._read_affinity and self.sorting_method != 'affinity':
self.logger.warn("sorting_method is set to '%s', not 'affinity'; "
"read_affinity setting will have no effect." %
self.sorting_method)
def get_object_ring(self, policy_idx):
"""
Get the ring object to use to handle a request based on its policy.
:param policy_idx: policy index as defined in swift.conf
:returns: appropriate ring object
"""
return POLICIES.get_object_ring(policy_idx, self.swift_dir)
def get_controller(self, req):
"""
Get the controller to handle a request.
:param req: the request
:returns: tuple of (controller class, path dictionary)
:raises: ValueError (thrown by split_path) if given invalid path
"""
if req.path == '/info':
d = dict(version=None,
expose_info=self.expose_info,
disallowed_sections=self.disallowed_sections,
admin_key=self.admin_key)
return InfoController, d
        #mjw: MigrationController is the only controller that deals with disk failure
if req.method == 'DISK_FAILURE':
version, account, device = split_path(req.path, 1, 3, True)
d = dict(version=version,
account_name=account,
device = device)
return MigrationController, d
version, account, container, obj = split_path(req.path, 1, 4, True)
d = dict(version=version,
account_name=account,
container_name=container,
object_name=obj)
if account and not valid_api_version(version):
raise APIVersionError('Invalid path')
if obj and container and account:
info = get_container_info(req.environ, self)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
info['storage_policy'])
policy = POLICIES.get_by_index(policy_index)
if not policy:
# This indicates that a new policy has been created,
# with rings, deployed, released (i.e. deprecated =
# False), used by a client to create a container via
# another proxy that was restarted after the policy
# was released, and is now cached - all before this
# worker was HUPed to stop accepting new
# connections. There should never be an "unknown"
# index - but when there is - it's probably operator
# error and hopefully temporary.
raise HTTPServiceUnavailable('Unknown Storage Policy')
return self.obj_controller_router[policy], d
elif container and account:
return ContainerController, d
elif account and not container and not obj:
return AccountController, d
return None, d
def __call__(self, env, start_response):
"""
WSGI entry point.
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
try:
if self.memcache is None:
self.memcache = cache_from_env(env, True)
req = self.update_request(Request(env))
return self.handle_request(req)(env, start_response)
except UnicodeError:
err = HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
return err(env, start_response)
except (Exception, Timeout):
start_response('500 Server Error',
[('Content-Type', 'text/plain')])
return ['Internal server error.\n']
def update_request(self, req):
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
return req
def handle_request(self, req):
"""
Entry point for proxy server.
Should return a WSGI-style callable (such as swob.Response).
:param req: swob.Request object
"""
try:
self.logger.set_statsd_prefix('proxy-server')
if req.content_length and req.content_length < 0:
self.logger.increment('errors')
return HTTPBadRequest(request=req,
body='Invalid Content-Length')
try:
if not check_utf8(req.path_info):
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
except UnicodeError:
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
try:
controller, path_parts = self.get_controller(req)
p = req.path_info
if isinstance(p, unicode):
p = p.encode('utf-8')
except APIVersionError:
self.logger.increment('errors')
return HTTPBadRequest(request=req)
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
if not controller:
self.logger.increment('errors')
return HTTPPreconditionFailed(request=req, body='Bad URL')
if self.deny_host_headers and \
req.host.split(':')[0] in self.deny_host_headers:
return HTTPForbidden(request=req, body='Invalid host header')
self.logger.set_statsd_prefix('proxy-server.' +
controller.server_type.lower())
controller = controller(self, **path_parts)
if 'swift.trans_id' not in req.environ:
# if this wasn't set by an earlier middleware, set it now
trans_id_suffix = self.trans_id_suffix
trans_id_extra = req.headers.get('x-trans-id-extra')
if trans_id_extra:
trans_id_suffix += '-' + trans_id_extra[:32]
trans_id = generate_trans_id(trans_id_suffix)
req.environ['swift.trans_id'] = trans_id
self.logger.txn_id = trans_id
req.headers['x-trans-id'] = req.environ['swift.trans_id']
controller.trans_id = req.environ['swift.trans_id']
self.logger.client_ip = get_remote_client(req)
try:
handler = getattr(controller, req.method)
getattr(handler, 'publicly_accessible')
except AttributeError:
allowed_methods = getattr(controller, 'allowed_methods', set())
return HTTPMethodNotAllowed(
request=req, headers={'Allow': ', '.join(allowed_methods)})
old_authorize = None
if 'swift.authorize' in req.environ:
# We call authorize before the handler, always. If authorized,
                # we remove the swift.authorize hook so it isn't ever called
# again. If not authorized, we return the denial unless the
# controller's method indicates it'd like to gather more
# information and try again later.
resp = req.environ['swift.authorize'](req)
if not resp and not req.headers.get('X-Copy-From-Account') \
and not req.headers.get('Destination-Account'):
# No resp means authorized, no delayed recheck required.
old_authorize = req.environ['swift.authorize']
else:
# Response indicates denial, but we might delay the denial
# and recheck later. If not delayed, return the error now.
if not getattr(handler, 'delay_denial', None):
return resp
# Save off original request method (GET, POST, etc.) in case it
# gets mutated during handling. This way logging can display the
# method the client actually sent.
req.environ['swift.orig_req_method'] = req.method
try:
if old_authorize:
req.environ.pop('swift.authorize', None)
return handler(req)
finally:
if old_authorize:
req.environ['swift.authorize'] = old_authorize
except HTTPException as error_response:
return error_response
except (Exception, Timeout):
self.logger.exception(_('ERROR Unhandled exception in request'))
return HTTPServerError(request=req)
def sort_nodes(self, nodes):
'''
Sorts nodes in-place (and returns the sorted list) according to
the configured strategy. The default "sorting" is to randomly
shuffle the nodes. If the "timing" strategy is chosen, the nodes
are sorted according to the stored timing data.
'''
# In the case of timing sorting, shuffling ensures that close timings
# (ie within the rounding resolution) won't prefer one over another.
# Python's sort is stable (http://wiki.python.org/moin/HowTo/Sorting/)
shuffle(nodes)
if self.sorting_method == 'timing':
now = time()
def key_func(node):
timing, expires = self.node_timings.get(node['ip'], (-1.0, 0))
return timing if expires > now else -1.0
nodes.sort(key=key_func)
elif self.sorting_method == 'affinity':
nodes.sort(key=self.read_affinity_sort_key)
return nodes
def set_node_timing(self, node, timing):
if self.sorting_method != 'timing':
return
now = time()
timing = round(timing, 3) # sort timings to the millisecond
self.node_timings[node['ip']] = (timing, now + self.timing_expiry)
def _error_limit_node_key(self, node):
return "{ip}:{port}/{device}".format(**node)
def error_limited(self, node):
"""
Check if the node is currently error limited.
:param node: dictionary of node to check
:returns: True if error limited, False otherwise
"""
now = time()
node_key = self._error_limit_node_key(node)
error_stats = self._error_limiting.get(node_key)
if error_stats is None or 'errors' not in error_stats:
return False
if 'last_error' in error_stats and error_stats['last_error'] < \
now - self.error_suppression_interval:
self._error_limiting.pop(node_key, None)
return False
limited = error_stats['errors'] > self.error_suppression_limit
if limited:
self.logger.debug(
_('Node error limited %(ip)s:%(port)s (%(device)s)'), node)
return limited
def error_limit(self, node, msg):
"""
Mark a node as error limited. This immediately pretends the
node received enough errors to trigger error suppression. Use
this for errors like Insufficient Storage. For other errors
use :func:`error_occurred`.
:param node: dictionary of node to error limit
:param msg: error message
"""
node_key = self._error_limit_node_key(node)
error_stats = self._error_limiting.setdefault(node_key, {})
error_stats['errors'] = self.error_suppression_limit + 1
error_stats['last_error'] = time()
self.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
def _incr_node_errors(self, node):
node_key = self._error_limit_node_key(node)
error_stats = self._error_limiting.setdefault(node_key, {})
error_stats['errors'] = error_stats.get('errors', 0) + 1
error_stats['last_error'] = time()
def error_occurred(self, node, msg):
"""
Handle logging, and handling of errors.
:param node: dictionary of node to handle errors for
:param msg: error message
"""
self._incr_node_errors(node)
self.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
def iter_nodes(self, ring, partition, node_iter=None):
"""
Yields nodes for a ring partition, skipping over error
limited nodes and stopping at the configurable number of nodes. If a
node yielded subsequently gets error limited, an extra node will be
yielded to take its place.
Note that if you're going to iterate over this concurrently from
multiple greenthreads, you'll want to use a
swift.common.utils.GreenthreadSafeIterator to serialize access.
Otherwise, you may get ValueErrors from concurrent access. (You also
may not, depending on how logging is configured, the vagaries of
socket IO and eventlet, and the phase of the moon.)
:param ring: ring to get yield nodes from
:param partition: ring partition to yield nodes for
:param node_iter: optional iterable of nodes to try. Useful if you
want to filter or reorder the nodes.
"""
part_nodes = ring.get_part_nodes(partition)
if node_iter is None:
node_iter = itertools.chain(part_nodes,
ring.get_more_nodes(partition))
num_primary_nodes = len(part_nodes)
# Use of list() here forcibly yanks the first N nodes (the primary
# nodes) from node_iter, so the rest of its values are handoffs.
primary_nodes = self.sort_nodes(
list(itertools.islice(node_iter, num_primary_nodes)))
handoff_nodes = node_iter
nodes_left = self.request_node_count(len(primary_nodes))
log_handoffs_threshold = nodes_left - len(primary_nodes)
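        # Handoffs within this allowance (the extra nodes implied by
        # request_node_count beyond the primaries) are expected; only usage
        # beyond it is counted and warned about below.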
for node in primary_nodes:
if not self.error_limited(node):
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
handoffs = 0
for node in handoff_nodes:
if not self.error_limited(node):
handoffs += 1
if self.log_handoffs and handoffs > log_handoffs_threshold:
self.logger.increment('handoff_count')
self.logger.warning(
'Handoff requested (%d)' % handoffs)
if handoffs - log_handoffs_threshold == len(primary_nodes):
self.logger.increment('handoff_all_count')
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
def exception_occurred(self, node, typ, additional_info,
**kwargs):
"""
Handle logging of generic exceptions.
:param node: dictionary of node to log the error for
:param typ: server type
:param additional_info: additional information to log
"""
self._incr_node_errors(node)
if 'level' in kwargs:
log = functools.partial(self.logger.log, kwargs.pop('level'))
if 'exc_info' not in kwargs:
kwargs['exc_info'] = sys.exc_info()
else:
log = self.logger.exception
log(_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s'
' re: %(info)s'),
{'type': typ, 'ip': node['ip'],
'port': node['port'], 'device': node['device'],
'info': additional_info},
**kwargs)
def modify_wsgi_pipeline(self, pipe):
"""
Called during WSGI pipeline creation. Modifies the WSGI pipeline
context to ensure that mandatory middleware is present in the pipeline.
:param pipe: A PipelineWrapper object
"""
pipeline_was_modified = False
for filter_spec in reversed(required_filters):
filter_name = filter_spec['name']
if filter_name not in pipe:
afters = filter_spec.get('after_fn', lambda _junk: [])(pipe)
insert_at = 0
for after in afters:
try:
insert_at = max(insert_at, pipe.index(after) + 1)
except ValueError: # not in pipeline; ignore it
pass
self.logger.info(
'Adding required filter %s to pipeline at position %d' %
(filter_name, insert_at))
ctx = pipe.create_filter(filter_name)
pipe.insert_filter(ctx, index=insert_at)
pipeline_was_modified = True
if pipeline_was_modified:
self.logger.info("Pipeline was modified. New pipeline is \"%s\".",
pipe)
else:
self.logger.debug("Pipeline is \"%s\"", pipe)
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI proxy apps."""
conf = global_conf.copy()
conf.update(local_conf)
app = Application(conf)
app.check_config()
return app
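# Hedged usage note (assumed example, not shipped configuration): with
# paste.deploy this factory is typically wired up from proxy-server.conf, e.g.
#
#   [app:proxy-server]
#   use = egg:swift#proxy
#   deduplication = true
#   fixed_chunk = true
#   chunk_size = 4096
#
# The three dedupe options mirror the keys read in Application.__init__ above.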
|
|
import os
import signal
import asyncio
import subprocess
import shlex
import sys
import psutil
import argparse
import socket
import logging
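# Process watchdog: a UDP-controlled killer. The UDP server prints the port it
# bound to on stdout, then accepts the datagram commands b'start', b'stop',
# b'reset' and b'shutdown' to drive a timeout clock that signals the watched
# pid (SIGINT at the soft timeout, SIGABRT at the hard timeout, SIGTERM at the
# critical timeout). KillerClient spawns this module as a subprocess and talks
# to it over that UDP port.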
class KillerUDPServer(
asyncio.DatagramProtocol,
):
def __init__(
self,
async_loop,
killer,
):
super().__init__()
self.async_loop = async_loop
self.killer = killer
def connection_made(
self,
transport,
):
self.transport = transport
        udp_port = self.transport.get_extra_info('sockname')[1]
sys.stdout.write(str(udp_port) + '\n')
sys.stdout.flush()
def datagram_received(
self,
data,
addr,
):
if data == b'start':
self.killer.start()
elif data == b'stop':
self.killer.stop()
elif data == b'reset':
self.killer.reset()
elif data == b'shutdown':
logging.getLogger('asyncio').disabled = True
sys.exit(1)
class Killer:
def __init__(
self,
async_loop,
pid_to_kill,
sleep_interval,
soft_timeout,
hard_timeout,
critical_timeout,
):
self.pid_to_kill = pid_to_kill
self.async_loop = async_loop
self.sleep_interval = sleep_interval
self.time_elapsed = 0.0
self.soft_timeout = soft_timeout
self.hard_timeout = hard_timeout
self.critical_timeout = critical_timeout
self.soft_timeout_signal = signal.SIGINT
self.hard_timeout_signal = signal.SIGABRT
self.critical_timeout_signal = signal.SIGTERM
self.kill_loop_task = async_loop.create_task(
coro=self.kill_loop(),
)
self.running = False
async def kill_loop(
self,
):
while True:
if not psutil.pid_exists(
pid=self.pid_to_kill,
):
sys.exit(1)
if self.running:
if self.soft_timeout != 0 and self.time_elapsed >= self.soft_timeout:
self.kill_process(
pid=self.pid_to_kill,
signal=self.soft_timeout_signal,
)
if self.hard_timeout != 0 and self.time_elapsed >= self.hard_timeout:
self.kill_process(
pid=self.pid_to_kill,
signal=self.hard_timeout_signal,
)
if self.critical_timeout != 0 and self.time_elapsed >= self.critical_timeout:
self.kill_process(
pid=self.pid_to_kill,
signal=self.critical_timeout_signal,
)
self.time_elapsed += self.sleep_interval
await asyncio.sleep(
delay=self.sleep_interval,
loop=self.async_loop,
)
def kill_process(
self,
pid,
signal,
):
try:
os.kill(pid, signal)
except Exception as exception:
print(repr(exception))
def start(
self,
):
self.running = True
def stop(
self,
):
self.running = False
def reset(
self,
):
self.time_elapsed = 0.0
class KillerClient:
def __init__(
self,
port,
):
self.port = port
        self.killer_socket = socket.socket(
family=socket.AF_INET,
type=socket.SOCK_DGRAM,
)
self.address = (
'127.0.0.1',
self.port,
)
def start(
self,
):
        self.killer_socket.sendto(b'start', self.address)
def stop(
self,
):
        self.killer_socket.sendto(b'stop', self.address)
def reset(
self,
):
        self.killer_socket.sendto(b'reset', self.address)
def shutdown(
self,
):
        self.killer_socket.sendto(b'shutdown', self.address)
def __del__(
self,
):
self.shutdown()
@staticmethod
def create_a_killer(
pid_to_kill,
sleep_interval,
soft_timeout,
hard_timeout,
critical_timeout,
):
killer_process = subprocess.Popen(
args=shlex.split(
s='python3 -m killer --pid-to-kill {pid_to_kill} --sleep-interval {sleep_interval} --soft-timeout {soft_timeout} --hard-timeout {hard_timeout} --critical-timeout {critical_timeout}'.format(
pid_to_kill=pid_to_kill,
sleep_interval=sleep_interval,
soft_timeout=soft_timeout,
hard_timeout=hard_timeout,
critical_timeout=critical_timeout,
),
),
cwd=os.path.join(
os.path.dirname(
p=os.path.realpath(
filename=__file__,
),
),
),
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=True,
)
port = killer_process.stdout.readline()
killer_process.stdout.close()
killer_client = KillerClient(
port=int(port),
)
return killer_client
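# Illustrative sketch (not part of the original module): driving the watchdog
# for the current process over the UDP control protocol defined above. It
# assumes this file is importable/runnable as the `killer` module, exactly as
# the `python3 -m killer` invocation in create_a_killer already requires.
def _example_killer_usage():
    killer_client = KillerClient.create_a_killer(
        pid_to_kill=os.getpid(),
        sleep_interval=1.0,
        soft_timeout=25.0,
        hard_timeout=30.0,
        critical_timeout=35.0,
    )
    killer_client.start()     # start accumulating time towards the timeouts
    killer_client.reset()     # zero the elapsed time, e.g. after a unit of work
    killer_client.stop()      # pause the watchdog without resetting it
    killer_client.shutdown()  # terminate the watchdog process entirely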
def main():
parser = argparse.ArgumentParser(
description='Process Killer',
)
parser.add_argument(
'--pid-to-kill',
help='pid to kill on timeouts',
type=int,
required=True,
dest='pid_to_kill',
)
parser.add_argument(
'--sleep-interval',
help='time to sleep between checks',
type=float,
required=True,
dest='sleep_interval',
)
parser.add_argument(
'--soft-timeout',
help='soft timeout',
type=float,
required=True,
dest='soft_timeout',
)
parser.add_argument(
'--hard-timeout',
help='hard timeout',
type=float,
required=True,
dest='hard_timeout',
)
parser.add_argument(
'--critical-timeout',
help='critical timeout',
type=float,
required=True,
dest='critical_timeout',
)
args = parser.parse_args()
async_loop = asyncio.new_event_loop()
killer_obj = Killer(
async_loop=async_loop,
pid_to_kill=args.pid_to_kill,
sleep_interval=args.sleep_interval,
soft_timeout=args.soft_timeout,
hard_timeout=args.hard_timeout,
critical_timeout=args.critical_timeout,
)
killer_udp_server_endpoint = async_loop.create_datagram_endpoint(
protocol_factory=lambda: KillerUDPServer(async_loop, killer_obj),
local_addr=(
'127.0.0.1',
0,
),
)
async_loop.run_until_complete(killer_udp_server_endpoint)
async_loop.run_forever()
async_loop.close()
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2012-2013 Paul Tagliamonte <[email protected]>
# Copyright (c) 2013 Leo Cavaille <[email protected]>
# Copyright (c) 2014 Jon Severinsson <[email protected]>
# Copyright (c) 2014-2015 Clement Schreiner <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
from sqlalchemy.sql import exists
from debile.utils.log import start_logging
from debile.master.utils import session
from debile.master.orm import Person, Builder, Job
from debile.master.interface import NAMESPACE, DebileMasterInterface
import SocketServer
import signal
import hashlib
import logging
import logging.handlers
import os.path
import ssl
def check_shutdown():
with session() as s:
shutdown = not s.query(exists().where(
(Job.assigned_at != None) & (Job.finished_at == None))
).scalar()
if shutdown:
raise SystemExit(0)
class DebileMasterAuthMixIn(SimpleXMLRPCRequestHandler):
def authenticate(self):
cert = self.connection.getpeercert(True)
fingerprint = hashlib.sha1(cert).hexdigest().upper()
NAMESPACE.machine = NAMESPACE.session.query(Builder).filter_by(
ssl=fingerprint
).first()
NAMESPACE.user = NAMESPACE.session.query(Person).filter_by(
ssl=fingerprint
).first()
return NAMESPACE.machine or NAMESPACE.user
def parse_request(self, *args):
if SimpleXMLRPCRequestHandler.parse_request(self, *args):
if self.authenticate():
return True
else:
self.send_error(401, 'Authentication failed')
return False
def handle_one_request(self):
try:
with session() as s:
NAMESPACE.session = s
SimpleXMLRPCRequestHandler.handle_one_request(self)
finally:
NAMESPACE.session = None
NAMESPACE.machine = None
NAMESPACE.user = None
if DebileMasterInterface.shutdown_request:
check_shutdown()
class DebileMasterSimpleAuthMixIn(SimpleXMLRPCRequestHandler):
def authenticate(self):
client_address, _ = self.client_address
NAMESPACE.machine = NAMESPACE.session.query(Builder).filter_by(
ip=client_address
).first()
NAMESPACE.user = NAMESPACE.session.query(Person).filter_by(
ip=client_address
).first()
return NAMESPACE.machine or NAMESPACE.user
def parse_request(self, *args):
if SimpleXMLRPCRequestHandler.parse_request(self, *args):
if self.authenticate():
return True
else:
self.send_error(401, 'Authentication failed')
return False
def handle_one_request(self):
try:
with session() as s:
NAMESPACE.session = s
SimpleXMLRPCRequestHandler.handle_one_request(self)
finally:
NAMESPACE.session = None
NAMESPACE.machine = None
NAMESPACE.user = None
if DebileMasterInterface.shutdown_request:
check_shutdown()
class SimpleAsyncXMLRPCServer(SocketServer.ThreadingMixIn,
DebileMasterSimpleAuthMixIn):
pass
class AsyncXMLRPCServer(SocketServer.ThreadingMixIn, DebileMasterAuthMixIn):
pass
class SimpleAuthXMLRPCServer(SimpleXMLRPCServer):
def __init__(self, addr,
requestHandler=SimpleXMLRPCRequestHandler,
bind_and_activate=True,
allow_none=False):
SimpleXMLRPCServer.__init__(self, addr,
requestHandler=requestHandler,
bind_and_activate=bind_and_activate,
allow_none=allow_none)
class SecureXMLRPCServer(SimpleXMLRPCServer):
def __init__(
self, addr, keyfile, certfile, ca_certs,
requestHandler=SimpleXMLRPCRequestHandler, logRequests=True,
allow_none=False, encoding=None, bind_and_activate=True
):
SimpleXMLRPCServer.__init__(self, addr,
requestHandler=requestHandler,
logRequests=logRequests,
allow_none=allow_none,
encoding=encoding,
bind_and_activate=False)
cert_reqs = (ssl.CERT_NONE if ca_certs is None
else ssl.CERT_REQUIRED)
self.socket = ssl.wrap_socket(self.socket,
keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs, cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_TLSv1)
if bind_and_activate:
self.server_bind()
self.server_activate()
def serve(server_addr, port, auth_method,
keyfile=None, certfile=None, ssl_keyring=None, pgp_keyring=None):
logger = logging.getLogger('debile')
logger.info("Serving on `{server_addr}' on port `{port}'".format(**locals()))
logger.info("Authentication method: {0}".format(auth_method))
if auth_method == 'ssl':
logger.info("Using keyfile=`{keyfile}', certfile=`{certfile}', "
"ssl_keyring=`{ssl_keyring}'".format(**locals()))
logger.info("Using pgp_keyring=`{pgp_keyring}'".format(**locals()))
server = None
if auth_method == 'simple':
server = SimpleAuthXMLRPCServer((server_addr, port),
requestHandler=SimpleAsyncXMLRPCServer,
allow_none=True)
else:
server = SecureXMLRPCServer((server_addr, port), keyfile, certfile,
ca_certs=ssl_keyring,
requestHandler=AsyncXMLRPCServer,
allow_none=True)
server.register_introspection_functions()
server.register_instance(DebileMasterInterface(ssl_keyring, pgp_keyring))
server.serve_forever()
def system_exit_handler(signum, frame):
raise SystemExit(1)
def shutdown_request_handler(signum, frame):
DebileMasterInterface.shutdown_request = True
check_shutdown()
def main(args, config):
start_logging(args)
signal.signal(signal.SIGQUIT, system_exit_handler)
signal.signal(signal.SIGABRT, system_exit_handler)
signal.signal(signal.SIGTERM, system_exit_handler)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
signal.signal(signal.SIGUSR1, shutdown_request_handler)
logger = logging.getLogger('debile')
if not os.path.isfile(config['keyrings']['pgp']):
logger.info("Can not find pgp keyring `{file}'".format(file=config['keyrings']['pgp']))
if args.auth_method == 'ssl':
if not os.path.isfile(config['xmlrpc']['keyfile']):
logger.error("Can not find ssl keyfile `{file}'".format(file=config['xmlrpc']['keyfile']))
if not os.path.isfile(config['xmlrpc']['certfile']):
logger.error("Can not find ssl certfile `{file}'".format(file=config['xmlrpc']['certfile']))
if not os.path.isfile(config['keyrings']['ssl']):
logger.error("Can not find ssl keyring `{file}'".format(file=config['keyrings']['ssl']))
serve(config['xmlrpc']['addr'], config['xmlrpc']['port'],
args.auth_method,
config['xmlrpc'].get('keyfile'),
config['xmlrpc'].get('certfile'),
config['keyrings'].get('ssl'),
config["keyrings"].get('pgp'))
|
|
from statsmodels.compat.python import range, lrange, lmap, lzip, zip_longest
import numpy as np
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.tableformatting import (gen_fmt, fmt_2,
fmt_params, fmt_base, fmt_2cols)
#from statsmodels.iolib.summary2d import summary_params_2dflat
#from summary2d import summary_params_2dflat
def forg(x, prec=3):
if prec == 3:
#for 3 decimals
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%9.3g' % x
else:
return '%9.3f' % x
elif prec == 4:
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%10.4g' % x
else:
return '%10.4f' % x
else:
raise NotImplementedError
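# Illustrative examples (not part of the original module) of forg's switch
# between fixed and general formatting: magnitudes outside [1e-4, 1e4) fall
# back to '%g' scientific notation at the same precision.
def _forg_examples():
    assert forg(1234.5678) == ' 1234.568'
    assert forg(0.000012345) == ' 1.23e-05'
    assert forg(1234.5678, prec=4) == ' 1234.5678'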
def summary(self, yname=None, xname=None, title=0, alpha=.05,
returns='text', model_info=None):
"""
    Parameters
    ----------
    yname : string
        optional, Default is `Y`
    xname : list of strings
        optional, Default is `X.#` for # in the range of the number of regressors
    Confidence interval : (0, 1) not implemented
    title : string
        optional, Default is 'Generalized linear model'
    returns : string
        'text', 'table', 'csv', 'latex', 'html'
    Returns
    -------
    Default :
    returns='print'
        Prints the summarized results
    Option :
    returns='text'
        Prints the summarized results
    Option :
    returns='table'
        SimpleTable instance : summarizing the fit of a linear model.
    Option :
    returns='csv'
        returns a string of csv of the results, to import into a spreadsheet
    Option :
    returns='latex'
        Not implemented yet
    Option :
    returns='HTML'
        Not implemented yet
    Examples (needs updating)
    --------
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.longley.load()
    >>> data.exog = sm.add_constant(data.exog)
    >>> ols_results = sm.OLS(data.endog, data.exog).fit()
    >>> print(ols_results.summary())
...
Notes
-----
conf_int calculated from normal dist.
"""
import time as time
#TODO Make sure all self.model.__class__.__name__ are listed
model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
                   'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'GLM' : 'Generalized linear model'
}
model_methods = {'OLS' : 'Least Squares',
'GLS' : 'Least Squares',
'GLSAR' : 'Least Squares',
'WLS' : 'Least Squares',
'RLM' : '?',
'GLM' : '?'
}
if title==0:
title = model_types[self.model.__class__.__name__]
if yname is None:
try:
yname = self.model.endog_names
except AttributeError:
yname = 'y'
if xname is None:
try:
xname = self.model.exog_names
except AttributeError:
xname = ['var_%d' % i for i in range(len(self.params))]
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
modeltype = self.model.__class__.__name__
#dist_family = self.model.family.__class__.__name__
nobs = self.nobs
df_model = self.df_model
df_resid = self.df_resid
#General part of the summary table, Applicable to all? models
#------------------------------------------------------------
#TODO: define this generically, overwrite in model classes
#replace definition of stubs data by single list
#e.g.
gen_left = [('Model type:', [modeltype]),
('Date:', [date]),
('Dependent Variable:', yname), #What happens with multiple names?
('df model', [df_model])
]
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_title = title
gen_header = None
## gen_stubs_left = ('Model type:',
## 'Date:',
## 'Dependent Variable:',
## 'df model'
## )
## gen_data_left = [[modeltype],
## [date],
## yname, #What happens with multiple names?
## [df_model]
## ]
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = gen_fmt
)
gen_stubs_right = ('Method:',
'Time:',
'Number of Obs:',
'df resid'
)
gen_data_right = ([modeltype], #was dist family need to look at more
time_of_day,
[nobs],
[df_resid]
)
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = gen_fmt
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
tstats = {'OLS' : self.t(),
'GLS' : self.t(),
'GLSAR' : self.t(),
'WLS' : self.t(),
'RLM' : self.t(),
'GLM' : self.t()
}
prob_stats = {'OLS' : self.pvalues,
'GLS' : self.pvalues,
'GLSAR' : self.pvalues,
'WLS' : self.pvalues,
'RLM' : self.pvalues,
'GLM' : self.pvalues
}
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
param_header = {
'OLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLSAR' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'WLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLM' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'], #glm uses t-distribution
        'RLM' : ['coef', 'std err', 'z', 'P>|z|', alp + ' Conf. Interval'] #check z
}
params_stubs = xname
params = self.params
conf_int = self.conf_int(alpha)
std_err = self.bse
exog_len = lrange(len(xname))
tstat = tstats[modeltype]
prob_stat = prob_stats[modeltype]
    # SimpleTable should be able to handle the formatting
params_data = lzip(["%#6.4g" % (params[i]) for i in exog_len],
["%#6.4f" % (std_err[i]) for i in exog_len],
["%#6.4f" % (tstat[i]) for i in exog_len],
["%#6.4f" % (prob_stat[i]) for i in exog_len],
["(%#5g, %#5g)" % tuple(conf_int[i]) for i in \
exog_len]
)
parameter_table = SimpleTable(params_data,
param_header[modeltype],
params_stubs,
title = None,
txt_fmt = fmt_2, #gen_fmt,
)
#special table
#-------------
#TODO: exists in linear_model, what about other models
#residual diagnostics
#output options
#--------------
#TODO: JP the rest needs to be fixed, similar to summary in linear_model
def ols_printer():
"""
print summary table for ols models
"""
table = str(general_table)+'\n'+str(parameter_table)
return table
def ols_to_csv():
"""
exports ols summary data to csv
"""
pass
def glm_printer():
table = str(general_table)+'\n'+str(parameter_table)
return table
pass
printers = {'OLS': ols_printer,
'GLM' : glm_printer
}
if returns=='print':
try:
return printers[modeltype]()
except KeyError:
return printers['OLS']()
def _getnames(self, yname=None, xname=None):
'''extract names from model or construct names
'''
if yname is None:
if hasattr(self.model, 'endog_names') and (
not self.model.endog_names is None):
yname = self.model.endog_names
else:
yname = 'y'
if xname is None:
if hasattr(self.model, 'exog_names') and (
not self.model.exog_names is None):
xname = self.model.exog_names
else:
xname = ['var_%d' % i for i in range(len(self.params))]
return yname, xname
def summary_top(results, title=None, gleft=None, gright=None, yname=None, xname=None):
'''generate top table(s)
TODO: this still uses predefined model_methods
? allow gleft, gright to be 1 element tuples instead of filling with None?
'''
#change of names ?
gen_left, gen_right = gleft, gright
#time and names are always included
import time
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
yname, xname = _getnames(results, yname=yname, xname=xname)
#create dictionary with default
#use lambdas because some values raise exception if they are not available
#alternate spellings are commented out to force unique labels
default_items = dict([
('Dependent Variable:', lambda: [yname]),
('Dep. Variable:', lambda: [yname]),
('Model:', lambda: [results.model.__class__.__name__]),
#('Model type:', lambda: [results.model.__class__.__name__]),
('Date:', lambda: [date]),
('Time:', lambda: time_of_day),
('Number of Obs:', lambda: [results.nobs]),
#('No. of Observations:', lambda: ["%#6d" % results.nobs]),
('No. Observations:', lambda: ["%#6d" % results.nobs]),
#('Df model:', lambda: [results.df_model]),
('Df Model:', lambda: ["%#6d" % results.df_model]),
#TODO: check when we have non-integer df
('Df Residuals:', lambda: ["%#6d" % results.df_resid]),
#('Df resid:', lambda: [results.df_resid]),
#('df resid:', lambda: [results.df_resid]), #check capitalization
('Log-Likelihood:', lambda: ["%#8.5g" % results.llf]) #doesn't exist for RLM - exception
#('Method:', lambda: [???]), #no default for this
])
if title is None:
        title = results.model.__class__.__name__ + ' Regression Results'
if gen_left is None:
#default: General part of the summary table, Applicable to all? models
gen_left = [('Dep. Variable:', None),
('Model type:', None),
('Date:', None),
('No. Observations:', None),
('Df model:', None),
('Df resid:', None)]
try:
llf = results.llf
gen_left.append(('Log-Likelihood', None))
except: #AttributeError, NotImplementedError
pass
gen_right = []
gen_title = title
gen_header = None
#needed_values = [k for k,v in gleft + gright if v is None] #not used anymore
#replace missing (None) values with default values
gen_left_ = []
for item, value in gen_left:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_left_.append((item, value))
gen_left = gen_left_
if gen_right:
gen_right_ = []
for item, value in gen_right:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_right_.append((item, value))
gen_right = gen_right_
#check
missing_values = [k for k,v in gen_left + gen_right if v is None]
assert missing_values == [], missing_values
#pad both tables to equal number of rows
if gen_right:
if len(gen_right) < len(gen_left):
#fill up with blank lines to same length
gen_right += [(' ', ' ')] * (len(gen_left) - len(gen_right))
elif len(gen_right) > len(gen_left):
#fill up with blank lines to same length, just to keep it symmetric
gen_left += [(' ', ' ')] * (len(gen_right) - len(gen_left))
#padding in SimpleTable doesn't work like I want
#force extra spacing and exact string length in right table
gen_right = [('%-21s' % (' '+k), v) for k,v in gen_right]
gen_stubs_right, gen_data_right = zip_longest(*gen_right) #transpose row col
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = fmt_2cols #gen_fmt
)
else:
        gen_table_right = []  #because .extend_right seems to work with []
#moved below so that we can pad if needed to match length of gen_right
#transpose rows and columns, `unzip`
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = fmt_2cols
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
return general_table #, gen_table_left, gen_table_right
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, title=None):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'[' + alp + ' Conf. Int.]']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'[' + alp + ' Conf. Int.]']
if skip_header:
param_header = None
_, xname = _getnames(results, yname=yname, xname=xname)
params_stubs = xname
exog_idx = lrange(len(xname))
#center confidence intervals if they are unequal lengths
# confint = ["(%#6.3g, %#6.3g)" % tuple(conf_int[i]) for i in \
# exog_idx]
confint = ["%s %s" % tuple(lmap(forg, conf_int[i])) for i in \
exog_idx]
len_ci = lmap(len, confint)
max_ci = max(len_ci)
min_ci = min(len_ci)
if min_ci < max_ci:
confint = [ci.center(max_ci) for ci in confint]
#explicit f/g formatting, now uses forg, f or g depending on values
# params_data = lzip(["%#6.4g" % (params[i]) for i in exog_idx],
# ["%#6.4f" % (std_err[i]) for i in exog_idx],
# ["%#6.3f" % (tvalues[i]) for i in exog_idx],
# ["%#6.3f" % (pvalues[i]) for i in exog_idx],
# confint
## ["(%#6.3g, %#6.3g)" % tuple(conf_int[i]) for i in \
## exog_idx]
# )
params_data = lzip([forg(params[i], prec=4) for i in exog_idx],
[forg(std_err[i]) for i in exog_idx],
[forg(tvalues[i]) for i in exog_idx],
["%#6.3f" % (pvalues[i]) for i in exog_idx],
confint
# ["(%#6.3g, %#6.3g)" % tuple(conf_int[i]) for i in \
# exog_idx]
)
parameter_table = SimpleTable(params_data,
param_header,
params_stubs,
title = title,
txt_fmt = fmt_params #gen_fmt #fmt_2, #gen_fmt,
)
return parameter_table
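# Illustrative sketch (not part of the original module): building a parameter
# table with summary_params from a fitted OLS model, mirroring the longley
# example in the __main__ block at the bottom of this file.
def _example_summary_params():
    import statsmodels.api as sm
    data = sm.datasets.longley.load()
    data.exog = sm.add_constant(data.exog)
    res = sm.OLS(data.endog, data.exog).fit()
    return summary_params(res, alpha=0.05, use_t=True)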
def summary_params_frame(results, yname=None, xname=None, alpha=.05,
use_t=True):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'Conf. Int. Low', 'Conf. Int. Upp.']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'Conf. Int. Low', 'Conf. Int. Upp.']
_, xname = _getnames(results, yname=yname, xname=xname)
#------------------
from pandas import DataFrame
table = np.column_stack((params, std_err, tvalues, pvalues, conf_int))
return DataFrame(table, columns=param_header, index=xname)
def summary_params_2d(result, extras=None, endog_names=None, exog_names=None,
title=None):
'''create summary table of regression parameters with several equations
This allows interleaving of parameters with bse and/or tvalues
Parameters
----------
result : result instance
the result instance with params and attributes in extras
extras : list of strings
additional attributes to add below a parameter row, e.g. bse or tvalues
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
title : None or string
Returns
-------
tables : list of SimpleTable
        this contains a list of all separate Subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
if endog_names is None:
#TODO: note the [1:] is specific to current MNLogit
endog_names = ['endog_%d' % i for i in
np.unique(result.model.endog)[1:]]
if exog_names is None:
exog_names = ['var%d' %i for i in range(len(result.params))]
#TODO: check formatting options with different values
#res_params = [['%10.4f'%item for item in row] for row in result.params]
res_params = [[forg(item, prec=4) for item in row] for row in result.params]
if extras: #not None or non-empty
#maybe this should be a simple triple loop instead of list comprehension?
#below_list = [[['%10s' % ('('+('%10.3f'%v).strip()+')')
extras_list = [[['%10s' % ('(' + forg(v, prec=3).strip() + ')')
for v in col]
for col in getattr(result, what)]
for what in extras
]
data = lzip(res_params, *extras_list)
data = [i for j in data for i in j] #flatten
stubs = lzip(endog_names, *[['']*len(endog_names)]*len(extras))
stubs = [i for j in stubs for i in j] #flatten
#return SimpleTable(data, headers=exog_names, stubs=stubs)
else:
data = res_params
stubs = endog_names
# return SimpleTable(data, headers=exog_names, stubs=stubs,
# data_fmts=['%10.4f'])
import copy
txt_fmt = copy.deepcopy(fmt_params)
txt_fmt.update(dict(data_fmts = ["%s"]*result.params.shape[1]))
return SimpleTable(data, headers=exog_names,
stubs=stubs,
title=title,
# data_fmts = ["%s"]),
txt_fmt = txt_fmt)
def summary_params_2dflat(result, endog_names=None, exog_names=None, alpha=0.05,
use_t=True, keep_headers=True, endog_cols=False):
#skip_headers2=True):
'''summary table for parameters that are 2d, e.g. multi-equation models
Parameters
----------
result : result instance
the result instance with params, bse, tvalues and conf_int
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
    alpha : float
        significance level for the confidence intervals, default is 0.05
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
keep_headers : bool
If true (default), then sub-tables keep their headers. If false, then
        only the first headers are kept, the other headers are blanked out
endog_cols : bool
If false (default) then params and other result statistics have
equations by rows. If true, then equations are assumed to be in columns.
Not implemented yet.
Returns
-------
tables : list of SimpleTable
        this contains a list of all separate Subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
res = result
params = res.params
if params.ndim == 2: # we've got multiple equations
n_equ = params.shape[1]
if not len(endog_names) == params.shape[1]:
raise ValueError('endog_names has wrong length')
else:
if not len(endog_names) == len(params):
raise ValueError('endog_names has wrong length')
n_equ = 1
#VAR doesn't have conf_int
#params = res.params.T # this is a convention for multi-eq models
if not isinstance(endog_names, list):
#this might be specific to multinomial logit type, move?
if endog_names is None:
endog_basename = 'endog'
else:
endog_basename = endog_names
#TODO: note, the [1:] is specific to current MNLogit
endog_names = res.model.endog_names[1:]
#check if we have the right length of names
tables = []
for eq in range(n_equ):
restup = (res, res.params[:,eq], res.bse[:,eq], res.tvalues[:,eq],
res.pvalues[:,eq], res.conf_int(alpha)[eq])
#not used anymore in current version
# if skip_headers2:
# skiph = (row != 0)
# else:
# skiph = False
skiph = False
tble = summary_params(restup, yname=endog_names[eq],
xname=exog_names, alpha=alpha, use_t=use_t,
skip_header=skiph)
tables.append(tble)
#add titles, they will be moved to header lines in table_extend
for i in range(len(endog_names)):
tables[i].title = endog_names[i]
table_all = table_extend(tables, keep_headers=keep_headers)
return tables, table_all
def table_extend(tables, keep_headers=True):
'''extend a list of SimpleTables, adding titles to header of subtables
This function returns the merged table as a deepcopy, in contrast to the
SimpleTable extend method.
Parameters
----------
tables : list of SimpleTable instances
keep_headers : bool
        If true, then all headers are kept. If false, then the headers of
subtables are blanked out.
Returns
-------
table_all : SimpleTable
merged tables as a single SimpleTable instance
'''
from copy import deepcopy
for ii, t in enumerate(tables[:]): #[1:]:
t = deepcopy(t)
#move title to first cell of header
#TODO: check if we have multiline headers
if t[0].datatype == 'header':
t[0][0].data = t.title
t[0][0]._datatype = None
t[0][0].row = t[0][1].row
if not keep_headers and (ii > 0):
for c in t[0][1:]:
c.data = ''
#add separating line and extend tables
if ii == 0:
table_all = t
else:
r1 = table_all[-1]
r1.add_format('txt', row_dec_below='-')
table_all.extend(t)
table_all.title = None
return table_all
def summary_return(tables, return_fmt='text'):
######## Return Summary Tables ########
# join table parts then print
if return_fmt == 'text':
strdrop = lambda x: str(x).rsplit('\n',1)[0]
#convert to string drop last line
return '\n'.join(lmap(strdrop, tables[:-1]) + [str(tables[-1])])
elif return_fmt == 'tables':
return tables
elif return_fmt == 'csv':
return '\n'.join(map(lambda x: x.as_csv(), tables))
elif return_fmt == 'latex':
#TODO: insert \hline after updating SimpleTable
import copy
table = copy.deepcopy(tables[0])
del table[-1]
for part in tables[1:]:
table.extend(part)
return table.as_latex_tabular()
elif return_fmt == 'html':
return "\n".join(table.as_html() for table in tables)
else:
raise ValueError('available output formats are text, csv, latex, html')
class Summary(object):
'''class to hold tables for result summary presentation
Construction does not take any parameters. Tables and text can be added
with the `add_` methods.
Attributes
----------
tables : list of tables
        Contains the list of SimpleTable instances; horizontally concatenated tables are not saved separately.
extra_txt : string
extra lines that are added to the text output, used for warnings and explanations.
'''
def __init__(self):
self.tables = []
self.extra_txt = None
def __str__(self):
return self.as_text()
def __repr__(self):
#return '<' + str(type(self)) + '>\n"""\n' + self.__str__() + '\n"""'
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_table_2cols(self, res, title=None, gleft=None, gright=None,
yname=None, xname=None):
'''add a double table, 2 tables with one column merged horizontally
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
title : string or None
if None, then a default title is used.
gleft : list of tuples
elements for the left table, tuples are (name, value) pairs
If gleft is None, then a default table is created
gright : list of tuples or None
elements for the right table, tuples are (name, value) pairs
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
Returns
-------
None : tables are attached
'''
table = summary_top(res, title=title, gleft=gleft, gright=gright,
yname=yname, xname=xname)
self.tables.append(table)
def add_table_params(self, res, yname=None, xname=None, alpha=.05,
use_t=True):
'''create and add a table for the parameter estimates
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
None : table is attached
'''
if res.params.ndim == 1:
table = summary_params(res, yname=yname, xname=xname, alpha=alpha,
use_t=use_t)
elif res.params.ndim == 2:
# _, table = summary_params_2dflat(res, yname=yname, xname=xname,
# alpha=alpha, use_t=use_t)
_, table = summary_params_2dflat(res, endog_names=yname,
exog_names=xname,
alpha=alpha, use_t=use_t)
else:
raise ValueError('params has to be 1d or 2d')
self.tables.append(table)
def add_extra_txt(self, etext):
'''add additional text that will be added at the end in text format
Parameters
----------
        etext : list of strings
            lines that are added to the text output, joined with newlines.
'''
self.extra_txt = '\n'.join(etext)
def as_text(self):
'''return tables as string
Returns
-------
txt : string
summary tables and extra text as one string
'''
txt = summary_return(self.tables, return_fmt='text')
if not self.extra_txt is None:
txt = txt + '\n\n' + self.extra_txt
return txt
def as_latex(self):
'''return tables as string
Returns
-------
latex : string
summary tables and extra text as string of Latex
Notes
-----
This currently merges tables with different number of columns.
It is recommended to use `as_latex_tabular` directly on the individual
tables.
'''
return summary_return(self.tables, return_fmt='latex')
def as_csv(self):
'''return tables as string
Returns
-------
csv : string
concatenated summary tables in comma delimited format
'''
return summary_return(self.tables, return_fmt='csv')
def as_html(self):
'''return tables as string
Returns
-------
html : string
concatenated summary tables in HTML format
'''
return summary_return(self.tables, return_fmt='html')
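# Illustrative sketch (not part of the original module): assembling a Summary
# by hand from a fitted results instance. When values are left as None, the
# gleft/gright labels must be keys of `default_items` in summary_top above.
def _example_build_summary(res):
    smry = Summary()
    smry.add_table_2cols(res,
                         gleft=[('Dep. Variable:', None),
                                ('Model:', None),
                                ('Date:', None)],
                         gright=[('No. Observations:', None),
                                 ('Df Model:', None),
                                 ('Df Residuals:', None)])
    smry.add_table_params(res, alpha=0.05, use_t=True)
    smry.add_extra_txt(['Standard errors assume the model is correctly specified.'])
    return smry.as_text()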
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog)
res = sm.OLS(data.endog, data.exog).fit()
#summary(
|
|
# -*- coding: utf-8 -*-
"""
Pdb debugger class.
Modified from the standard pdb.Pdb class to avoid including readline, so that
the command line completion of other programs which include this isn't
damaged.
In the future, this class will be expanded with improvements over the standard
pdb.
The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
changes. Licensing should therefore be under the standard Python terms. For
details on the PSF (Python Software Foundation) standard license, see:
http://www.python.org/2.2.3/license.html"""
#*****************************************************************************
#
# This file is licensed under the PSF license.
#
# Copyright (C) 2001 Python Software Foundation, www.python.org
# Copyright (C) 2005-2006 Fernando Perez. <[email protected]>
#
#
#*****************************************************************************
from __future__ import print_function
import bdb
import functools
import linecache
import sys
from IPython import get_ipython
from IPython.utils import PyColorize, ulinecache
from IPython.utils import coloransi, io, py3compat
from IPython.core.excolors import exception_colors
from IPython.testing.skipdoctest import skip_doctest
# See if we can use pydb.
has_pydb = False
prompt = 'ipdb> '
#We have to check this directly from sys.argv, config struct not yet available
if '--pydb' in sys.argv:
try:
import pydb
if hasattr(pydb.pydb, "runl") and pydb.version>'1.17':
# Version 1.17 is broken, and that's what ships with Ubuntu Edgy, so we
# better protect against it.
has_pydb = True
except ImportError:
print("Pydb (http://bashdb.sourceforge.net/pydb/) does not seem to be available")
if has_pydb:
from pydb import Pdb as OldPdb
#print "Using pydb for %run -d and post-mortem" #dbg
prompt = 'ipydb> '
else:
from pdb import Pdb as OldPdb
# Allow the set_trace code to operate outside of an ipython instance, even if
# it does so with some limitations. The rest of this support is implemented in
# the Tracer constructor.
def BdbQuit_excepthook(et, ev, tb, excepthook=None):
"""Exception hook which handles `BdbQuit` exceptions.
All other exceptions are processed using the `excepthook`
parameter.
"""
if et==bdb.BdbQuit:
print('Exiting Debugger.')
elif excepthook is not None:
excepthook(et, ev, tb)
else:
# Backwards compatibility. Raise deprecation warning?
BdbQuit_excepthook.excepthook_ori(et,ev,tb)
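# Illustrative sketch (not part of the original module): outside of IPython the
# hook above can be chained with the current excepthook so that BdbQuit exits
# quietly; this mirrors what Tracer.__init__ does further below.
def _example_install_bdbquit_hook():
    sys.excepthook = functools.partial(BdbQuit_excepthook,
                                       excepthook=sys.excepthook)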
def BdbQuit_IPython_excepthook(self,et,ev,tb,tb_offset=None):
print('Exiting Debugger.')
class Tracer(object):
"""Class for local debugging, similar to pdb.set_trace.
Instances of this class, when called, behave like pdb.set_trace, but
providing IPython's enhanced capabilities.
This is implemented as a class which must be initialized in your own code
and not as a standalone function because we need to detect at runtime
whether IPython is already active or not. That detection is done in the
constructor, ensuring that this code plays nicely with a running IPython,
while functioning acceptably (though with limitations) if outside of it.
"""
@skip_doctest
def __init__(self,colors=None):
"""Create a local debugger instance.
Parameters
----------
colors : str, optional
The name of the color scheme to use, it must be one of IPython's
valid color schemes. If not given, the function will default to
the current IPython scheme when running inside IPython, and to
'NoColor' otherwise.
Examples
--------
::
from IPython.core.debugger import Tracer; debug_here = Tracer()
Later in your code::
debug_here() # -> will open up the debugger at that point.
Once the debugger activates, you can use all of its regular commands to
step through code, set breakpoints, etc. See the pdb documentation
from the Python standard library for usage details.
"""
ip = get_ipython()
if ip is None:
# Outside of ipython, we set our own exception hook manually
sys.excepthook = functools.partial(BdbQuit_excepthook,
excepthook=sys.excepthook)
def_colors = 'NoColor'
try:
# Limited tab completion support
import readline
readline.parse_and_bind('tab: complete')
except ImportError:
pass
else:
# In ipython, we use its custom exception handler mechanism
def_colors = ip.colors
ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
if colors is None:
colors = def_colors
# The stdlib debugger internally uses a modified repr from the `repr`
# module, that limits the length of printed strings to a hardcoded
# limit of 30 characters. That much trimming is too aggressive, let's
# at least raise that limit to 80 chars, which should be enough for
# most interactive uses.
try:
try:
from reprlib import aRepr # Py 3
except ImportError:
from repr import aRepr # Py 2
aRepr.maxstring = 80
except:
# This is only a user-facing convenience, so any error we encounter
# here can be warned about but can be otherwise ignored. These
# printouts will tell us about problems if this API changes
import traceback
traceback.print_exc()
self.debugger = Pdb(colors)
def __call__(self):
"""Starts an interactive debugger at the point where called.
This is similar to the pdb.set_trace() function from the std lib, but
using IPython's enhanced debugger."""
self.debugger.set_trace(sys._getframe().f_back)
def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
"""Make new_fn have old_fn's doc string. This is particularly useful
for the ``do_...`` commands that hook into the help system.
    Adapted from a comp.lang.python posting
by Duncan Booth."""
def wrapper(*args, **kw):
return new_fn(*args, **kw)
if old_fn.__doc__:
wrapper.__doc__ = old_fn.__doc__ + additional_text
return wrapper
def _file_lines(fname):
"""Return the contents of a named file as a list of lines.
This function never raises an IOError exception: if the file can't be
read, it simply returns an empty list."""
try:
outfile = open(fname)
except IOError:
return []
else:
out = outfile.readlines()
outfile.close()
return out
class Pdb(OldPdb):
"""Modified Pdb class, does not load readline."""
def __init__(self,color_scheme='NoColor',completekey=None,
stdin=None, stdout=None):
# Parent constructor:
if has_pydb and completekey is None:
OldPdb.__init__(self,stdin=stdin,stdout=io.stdout)
else:
OldPdb.__init__(self,completekey,stdin,stdout)
self.prompt = prompt # The default prompt is '(Pdb)'
# IPython changes...
self.is_pydb = has_pydb
self.shell = get_ipython()
if self.shell is None:
# No IPython instance running, we must create one
from IPython.terminal.interactiveshell import \
TerminalInteractiveShell
self.shell = TerminalInteractiveShell.instance()
if self.is_pydb:
# interactiveshell.py's ipalias seems to want pdb's checkline
            # which is located in pydb.fns
import pydb.fns
self.checkline = lambda filename, lineno: \
pydb.fns.checkline(self, filename, lineno)
self.curframe = None
self.do_restart = self.new_do_restart
self.old_all_completions = self.shell.Completer.all_completions
self.shell.Completer.all_completions=self.all_completions
self.do_list = decorate_fn_with_doc(self.list_command_pydb,
OldPdb.do_list)
self.do_l = self.do_list
self.do_frame = decorate_fn_with_doc(self.new_do_frame,
OldPdb.do_frame)
self.aliases = {}
# Create color table: we copy the default one from the traceback
# module and add a few attributes needed for debugging
self.color_scheme_table = exception_colors()
# shorthands
C = coloransi.TermColors
cst = self.color_scheme_table
cst['NoColor'].colors.breakpoint_enabled = C.NoColor
cst['NoColor'].colors.breakpoint_disabled = C.NoColor
cst['Linux'].colors.breakpoint_enabled = C.LightRed
cst['Linux'].colors.breakpoint_disabled = C.Red
cst['LightBG'].colors.breakpoint_enabled = C.LightRed
cst['LightBG'].colors.breakpoint_disabled = C.Red
self.set_colors(color_scheme)
# Add a python parser so we can syntax highlight source while
# debugging.
self.parser = PyColorize.Parser()
def set_colors(self, scheme):
"""Shorthand access to the color table scheme selector method."""
self.color_scheme_table.set_active_scheme(scheme)
def interaction(self, frame, traceback):
self.shell.set_completer_frame(frame)
while True:
try:
OldPdb.interaction(self, frame, traceback)
except KeyboardInterrupt:
self.shell.write('\n' + self.shell.get_exception_only())
break
else:
break
def new_do_up(self, arg):
OldPdb.do_up(self, arg)
self.shell.set_completer_frame(self.curframe)
do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)
def new_do_down(self, arg):
OldPdb.do_down(self, arg)
self.shell.set_completer_frame(self.curframe)
do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)
def new_do_frame(self, arg):
OldPdb.do_frame(self, arg)
self.shell.set_completer_frame(self.curframe)
def new_do_quit(self, arg):
if hasattr(self, 'old_all_completions'):
self.shell.Completer.all_completions=self.old_all_completions
# Pdb sets readline delimiters, so set them back to our own
if self.shell.readline is not None:
self.shell.readline.set_completer_delims(self.shell.readline_delims)
return OldPdb.do_quit(self, arg)
do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
def new_do_restart(self, arg):
"""Restart command. In the context of ipython this is exactly the same
thing as 'quit'."""
self.msg("Restart doesn't make sense here. Using 'quit' instead.")
return self.do_quit(arg)
def postloop(self):
self.shell.set_completer_frame(None)
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno, context = 5)
except KeyboardInterrupt:
pass
def print_stack_entry(self,frame_lineno,prompt_prefix='\n-> ',
context = 3):
#frame, lineno = frame_lineno
print(self.format_stack_entry(frame_lineno, '', context), file=io.stdout)
# vds: >>
frame, lineno = frame_lineno
filename = frame.f_code.co_filename
self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
# vds: <<
def format_stack_entry(self, frame_lineno, lprefix=': ', context = 3):
try:
import reprlib # Py 3
except ImportError:
import repr as reprlib # Py 2
ret = []
Colors = self.color_scheme_table.active_colors
ColorsNormal = Colors.Normal
tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
ColorsNormal)
frame, lineno = frame_lineno
return_value = ''
if '__return__' in frame.f_locals:
rv = frame.f_locals['__return__']
#return_value += '->'
return_value += reprlib.repr(rv) + '\n'
ret.append(return_value)
#s = filename + '(' + `lineno` + ')'
filename = self.canonic(frame.f_code.co_filename)
link = tpl_link % py3compat.cast_unicode(filename)
if frame.f_code.co_name:
func = frame.f_code.co_name
else:
func = "<lambda>"
call = ''
if func != '?':
if '__args__' in frame.f_locals:
args = reprlib.repr(frame.f_locals['__args__'])
else:
args = '()'
call = tpl_call % (func, args)
# The level info should be generated in the same format pdb uses, to
# avoid breaking the pdbtrack functionality of python-mode in *emacs.
if frame is self.curframe:
ret.append('> ')
else:
ret.append(' ')
ret.append(u'%s(%s)%s\n' % (link,lineno,call))
start = lineno - 1 - context//2
lines = ulinecache.getlines(filename)
start = min(start, len(lines) - context)
start = max(start, 0)
lines = lines[start : start + context]
for i,line in enumerate(lines):
show_arrow = (start + 1 + i == lineno)
linetpl = (frame is self.curframe or show_arrow) \
and tpl_line_em \
or tpl_line
ret.append(self.__format_line(linetpl, filename,
start + 1 + i, line,
arrow = show_arrow) )
return ''.join(ret)
def __format_line(self, tpl_line, filename, lineno, line, arrow = False):
bp_mark = ""
bp_mark_color = ""
scheme = self.color_scheme_table.active_scheme_name
new_line, err = self.parser.format2(line, 'str', scheme)
if not err: line = new_line
bp = None
if lineno in self.get_file_breaks(filename):
bps = self.get_breaks(filename, lineno)
bp = bps[-1]
if bp:
Colors = self.color_scheme_table.active_colors
bp_mark = str(bp.number)
bp_mark_color = Colors.breakpoint_enabled
if not bp.enabled:
bp_mark_color = Colors.breakpoint_disabled
numbers_width = 7
if arrow:
# This is the line with the error
pad = numbers_width - len(str(lineno)) - len(bp_mark)
if pad >= 3:
marker = '-'*(pad-3) + '-> '
elif pad == 2:
marker = '> '
elif pad == 1:
marker = '>'
else:
marker = ''
num = '%s%s' % (marker, str(lineno))
line = tpl_line % (bp_mark_color + bp_mark, num, line)
else:
num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
line = tpl_line % (bp_mark_color + bp_mark, num, line)
return line
def list_command_pydb(self, arg):
"""List command to use if we have a newer pydb installed"""
filename, first, last = OldPdb.parse_list_cmd(self, arg)
if filename is not None:
self.print_list_lines(filename, first, last)
def print_list_lines(self, filename, first, last):
"""The printing (as opposed to the parsing part of a 'list'
command."""
try:
Colors = self.color_scheme_table.active_colors
ColorsNormal = Colors.Normal
tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
src = []
if filename == "<string>" and hasattr(self, "_exec_filename"):
filename = self._exec_filename
for lineno in range(first, last+1):
line = ulinecache.getline(filename, lineno)
if not line:
break
if lineno == self.curframe.f_lineno:
line = self.__format_line(tpl_line_em, filename, lineno, line, arrow = True)
else:
line = self.__format_line(tpl_line, filename, lineno, line, arrow = False)
src.append(line)
self.lineno = lineno
print(''.join(src), file=io.stdout)
except KeyboardInterrupt:
pass
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print('*** Error in argument:', repr(arg))
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
self.print_list_lines(self.curframe.f_code.co_filename, first, last)
# vds: >>
lineno = first
filename = self.curframe.f_code.co_filename
self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
# vds: <<
do_l = do_list
def do_pdef(self, arg):
"""Print the call signature for any callable object.
The debugger interface to %pdef"""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)
def do_pdoc(self, arg):
"""Print the docstring for an object.
The debugger interface to %pdoc."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pdoc')(arg, namespaces=namespaces)
def do_pfile(self, arg):
"""Print (or run through pager) the file where an object is defined.
The debugger interface to %pfile.
"""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pfile')(arg, namespaces=namespaces)
def do_pinfo(self, arg):
"""Provide detailed information about an object.
The debugger interface to %pinfo, i.e., obj?."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pinfo')(arg, namespaces=namespaces)
def do_pinfo2(self, arg):
"""Provide extra detailed information about an object.
The debugger interface to %pinfo2, i.e., obj??."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pinfo2')(arg, namespaces=namespaces)
def do_psource(self, arg):
"""Print (or run through pager) the source code for an object."""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('psource')(arg, namespaces=namespaces)
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
#######################################################################
# XXX Hack! Use python-2.5 compatible code for this call, because with
# all of our changes, we've drifted from the pdb api in 2.6. For now,
# changing:
#
#line = linecache.getline(filename, lineno, self.curframe.f_globals)
# to:
#
line = linecache.getline(filename, lineno)
#
# does the trick. But in reality, we need to fix this by reconciling
# our updates with the new Pdb APIs in Python 2.6.
#
# End hack. The rest of this method is copied verbatim from 2.6 pdb.py
#######################################################################
if not line:
print('End of file', file=self.stdout)
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
print('*** Blank or comment', file=self.stdout)
return 0
return lineno
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova common internal object model"""
import collections
import contextlib
import copy
import datetime
import functools
import traceback
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovoo_base
import six
from nova import context
from nova import exception
from nova.i18n import _, _LE
from nova import objects
from nova.objects import fields as obj_fields
from nova.openstack.common import versionutils
from nova import utils
LOG = logging.getLogger('object')
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_' + name
def make_class_properties(cls):
# NOTE(danms/comstud): Inherit fields from super classes.
# mro() returns the current class first and returns 'object' last, so
# those can be skipped. Also be careful to not overwrite any fields
# that already exist. And make sure each cls has its own copy of
# fields and that it is not sharing the dict with a super class.
cls.fields = dict(cls.fields)
for supercls in cls.mro()[1:-1]:
if not hasattr(supercls, 'fields'):
continue
for name, field in supercls.fields.items():
if name not in cls.fields:
cls.fields[name] = field
for name, field in cls.fields.iteritems():
if not isinstance(field, obj_fields.Field):
raise exception.ObjectFieldInvalid(
field=name, objname=cls.obj_name())
def getter(self, name=name):
attrname = get_attrname(name)
if not hasattr(self, attrname):
self.obj_load_attr(name)
return getattr(self, attrname)
def setter(self, value, name=name, field=field):
attrname = get_attrname(name)
field_value = field.coerce(self, name, value)
if field.read_only and hasattr(self, attrname):
                # Note(yjiang5): _from_db_object() may iterate over every
                # field and write them; do not raise an exception in that case.
if getattr(self, attrname) != field_value:
raise exception.ReadOnlyFieldError(field=name)
else:
return
self._changed_fields.add(name)
try:
return setattr(self, attrname, field_value)
except Exception:
attr = "%s.%s" % (self.obj_name(), name)
LOG.exception(_LE('Error setting %(attr)s'), {'attr': attr})
raise
def deleter(self, name=name):
attrname = get_attrname(name)
if not hasattr(self, attrname):
raise AttributeError('No such attribute `%s' % name)
delattr(self, get_attrname(name))
setattr(cls, name, property(getter, setter, deleter))
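# Illustrative note (not part of the original module): after
# make_class_properties runs, each declared field behaves roughly as the
# hypothetical lines below sketch -- reading a never-set field triggers
# obj_load_attr(), and assignment goes through the field's coerce() while the
# name is recorded in _changed_fields:
#
#     instance.host            # loads lazily if '_host' has not been set yet
#     instance.host = 'node1'  # coerced by the field, then tracked as changed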
class NovaObjectMetaclass(type):
"""Metaclass that allows tracking of object classes."""
# NOTE(danms): This is what controls whether object operations are
# remoted. If this is not None, use it to remote things over RPC.
indirection_api = None
def __init__(cls, names, bases, dict_):
if not hasattr(cls, '_obj_classes'):
# This means this is a base class using the metaclass. I.e.,
# the 'NovaObject' class.
cls._obj_classes = collections.defaultdict(list)
return
def _vers_tuple(obj):
return tuple([int(x) for x in obj.VERSION.split(".")])
# Add the subclass to NovaObject._obj_classes. If the
# same version already exists, replace it. Otherwise,
# keep the list with newest version first.
make_class_properties(cls)
obj_name = cls.obj_name()
for i, obj in enumerate(cls._obj_classes[obj_name]):
if cls.VERSION == obj.VERSION:
cls._obj_classes[obj_name][i] = cls
# Update nova.objects with this newer class.
setattr(objects, obj_name, cls)
break
if _vers_tuple(cls) > _vers_tuple(obj):
# Insert before.
cls._obj_classes[obj_name].insert(i, cls)
if i == 0:
# Later version than we've seen before. Update
# nova.objects.
setattr(objects, obj_name, cls)
break
else:
cls._obj_classes[obj_name].append(cls)
# Either this is the first time we've seen the object or it's
            # an older version than anything we've seen. Update nova.objects
# only if it's the first time we've seen this object name.
if not hasattr(objects, obj_name):
setattr(objects, obj_name, cls)
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object. In that
# case, the object implementation on the remote end will perform the
# requested action and the result will be returned here.
def remotable_classmethod(fn):
"""Decorator for remotable classmethods."""
@functools.wraps(fn)
def wrapper(cls, context, *args, **kwargs):
if NovaObject.indirection_api:
result = NovaObject.indirection_api.object_class_action(
context, cls.obj_name(), fn.__name__, cls.VERSION,
args, kwargs)
else:
result = fn(cls, context, *args, **kwargs)
if isinstance(result, NovaObject):
result._context = context
return result
# NOTE(danms): Make this discoverable
wrapper.remotable = True
wrapper.original_fn = fn
return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this uses the context stashed in the object; passing a
# context explicitly is deprecated. If no context is stashed, the object
# is "orphaned" and remotable methods cannot be called.
def remotable(fn):
"""Decorator for remotable object methods."""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
if args and isinstance(args[0], context.RequestContext):
raise exception.ObjectActionError(
action=fn.__name__,
reason='Calling remotables with context is deprecated')
if self._context is None:
raise exception.OrphanedObjectError(method=fn.__name__,
objtype=self.obj_name())
if NovaObject.indirection_api:
updates, result = NovaObject.indirection_api.object_action(
self._context, self, fn.__name__, args, kwargs)
for key, value in updates.iteritems():
if key in self.fields:
field = self.fields[key]
# NOTE(ndipanov): Since NovaObjectSerializer will have
# deserialized any object fields into objects already,
# we do not try to deserialize them again here.
if isinstance(value, NovaObject):
setattr(self, key, value)
else:
setattr(self, key,
field.from_primitive(self, key, value))
self.obj_reset_changes()
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
else:
return fn(self, *args, **kwargs)
wrapper.remotable = True
wrapper.original_fn = fn
return wrapper
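# Illustrative sketch (assumed example class, not defined in this module):
# the decorators above are applied to object methods like
#
#     class Flavor(NovaObject):
#         @remotable_classmethod
#         def get_by_id(cls, context, flavor_id):
#             ...
#
#         @remotable
#         def save(self):
#             ...
#
# With indirection_api unset the wrapped function runs locally; with it set,
# the call is forwarded over RPC and the result (plus any field updates) is
# applied to the local object.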
@six.add_metaclass(NovaObjectMetaclass)
class NovaObject(object):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
# Object versioning rules
#
# Each service has its set of objects, each with a version attached. When
# a client attempts to call an object method, the server checks to see if
# the version of that object matches (in a compatible way) its object
# implementation. If so, cool, and if not, fail.
#
# This version is allowed to have three parts, X.Y.Z, where the .Z element
# is reserved for stable branch backports. The .Z is ignored for the
# purposes of triggering a backport, which means anything changed under
# a .Z must be additive and non-destructive such that a node that knows
# about X.Y can consider X.Y.Z equivalent.
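    # For example (illustrative only, using the semantics described above):
    # an implementation at '1.1.2' can serve a request for '1.1', since the
    # .z part is ignored for compatibility purposes, while a request for
    # '1.2' made against a '1.1' implementation triggers a backport, e.g.
    #
    #     versionutils.is_compatible('1.1', '1.1.2')   # True
    #     versionutils.is_compatible('1.2', '1.1')     # False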
VERSION = '1.0'
# The fields present in this object as key:field pairs. For example:
#
# fields = { 'foo': fields.IntegerField(),
# 'bar': fields.StringField(),
# }
fields = {}
obj_extra_fields = []
# Table of sub-object versioning information
#
# This contains a list of version mappings, by the field name of
# the subobject. The mappings must be in order of oldest to
# newest, and are tuples of (my_version, subobject_version). A
# request to backport this object to $my_version will cause the
# subobject to be backported to $subobject_version.
#
# obj_relationships = {
# 'subobject1': [('1.2', '1.1'), ('1.4', '1.2')],
# 'subobject2': [('1.2', '1.0')],
# }
#
# In the above example:
#
# - If we are asked to backport our object to version 1.3,
# subobject1 will be backported to version 1.1, since it was
# bumped to version 1.2 when our version was 1.4.
# - If we are asked to backport our object to version 1.5,
# no changes will be made to subobject1 or subobject2, since
# they have not changed since version 1.4.
# - If we are asked to backlevel our object to version 1.1, we
# will remove both subobject1 and subobject2 from the primitive,
# since they were not added until version 1.2.
obj_relationships = {}
def __init__(self, context=None, **kwargs):
self._changed_fields = set()
self._context = context
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def __repr__(self):
return '%s(%s)' % (
self.obj_name(),
','.join(['%s=%s' % (name,
(self.obj_attr_is_set(name) and
field.stringify(getattr(self, name)) or
'<?>'))
for name, field in sorted(self.fields.items())]))
@classmethod
def obj_name(cls):
"""Return a canonical name for this object which will be used over
the wire for remote hydration.
"""
return cls.__name__
@classmethod
def obj_class_from_name(cls, objname, objver):
"""Returns a class from the registry based on a name and version."""
if objname not in cls._obj_classes:
LOG.error(_LE('Unable to instantiate unregistered object type '
'%(objtype)s'), dict(objtype=objname))
raise exception.UnsupportedObjectError(objtype=objname)
# NOTE(comstud): If there's not an exact match, return the highest
# compatible version. The objects stored in the class are sorted
# such that highest version is first, so only set compatible_match
# once below.
compatible_match = None
for objclass in cls._obj_classes[objname]:
if objclass.VERSION == objver:
return objclass
if (not compatible_match and
versionutils.is_compatible(objver, objclass.VERSION)):
compatible_match = objclass
if compatible_match:
return compatible_match
# As mentioned above, latest version is always first in the list.
latest_ver = cls._obj_classes[objname][0].VERSION
raise exception.IncompatibleObjectVersion(objname=objname,
objver=objver,
supported=latest_ver)
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = cls()
self._context = context
self.VERSION = objver
objdata = primitive['nova_object.data']
changes = primitive.get('nova_object.changes', [])
for name, field in self.fields.items():
if name in objdata:
setattr(self, name, field.from_primitive(self, name,
objdata[name]))
self._changed_fields = set([x for x in changes if x in self.fields])
return self
@classmethod
def obj_from_primitive(cls, primitive, context=None):
"""Object field-by-field hydration."""
if primitive['nova_object.namespace'] != 'nova':
# NOTE(danms): We don't do anything with this now, but it's
# there for "the future"
raise exception.UnsupportedObjectError(
objtype='%s.%s' % (primitive['nova_object.namespace'],
primitive['nova_object.name']))
objname = primitive['nova_object.name']
objver = primitive['nova_object.version']
objclass = cls.obj_class_from_name(objname, objver)
return objclass._obj_from_primitive(context, objver, primitive)
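    # Illustrative sketch (object and field names assumed): the primitive
    # consumed here is the dict produced by obj_to_primitive(), roughly
    #
    #     {'nova_object.name': 'Flavor',
    #      'nova_object.namespace': 'nova',
    #      'nova_object.version': '1.1',
    #      'nova_object.data': {'flavorid': 'm1.tiny', 'vcpus': 1},
    #      'nova_object.changes': ['vcpus']}
    #
    # so a round trip looks like
    #
    #     primitive = obj.obj_to_primitive()
    #     clone = NovaObject.obj_from_primitive(primitive, context=context)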
def __deepcopy__(self, memo):
"""Efficiently make a deep copy of this object."""
# NOTE(danms): A naive deepcopy would copy more than we need,
# and since we have knowledge of the volatile bits of the
# object, we can be smarter here. Also, nested entities within
# some objects may be uncopyable, so we can avoid those sorts
# of issues by copying only our field data.
nobj = self.__class__()
nobj._context = self._context
for name in self.fields:
if self.obj_attr_is_set(name):
nval = copy.deepcopy(getattr(self, name), memo)
setattr(nobj, name, nval)
nobj._changed_fields = set(self._changed_fields)
return nobj
def obj_clone(self):
"""Create a copy."""
return copy.deepcopy(self)
def obj_calculate_child_version(self, target_version, child):
"""Calculate the appropriate version for a child object.
This is to be used when backporting an object for an older client.
A sub-object will need to be backported to a suitable version for
the client as well, and this method will calculate what that
version should be, based on obj_relationships.
:param target_version: Version this object is being backported to
:param child: The child field for which the appropriate version
is to be calculated
:returns: None if the child should be omitted from the backport,
otherwise, the version to which the child should be
backported
"""
target_version = utils.convert_version_to_tuple(target_version)
for index, versions in enumerate(self.obj_relationships[child]):
my_version, child_version = versions
my_version = utils.convert_version_to_tuple(my_version)
if target_version < my_version:
if index == 0:
# We're backporting to a version from before this
# subobject was added: delete it from the primitive.
return None
else:
# We're in the gap between index-1 and index, so
# backport to the older version
return self.obj_relationships[child][index - 1][1]
elif target_version == my_version:
# This is the first mapping that satisfies the
# target_version request: backport the object.
return child_version
# No need to backport, as far as we know, so return the latest
# version of the sub-object we know about
return self.obj_relationships[child][-1][1]
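    # Worked example (illustrative only), using the obj_relationships sample
    # documented on this class:
    #
    #     obj_relationships = {'subobject1': [('1.2', '1.1'), ('1.4', '1.2')]}
    #
    #     obj_calculate_child_version('1.1', 'subobject1')  # -> None
    #     obj_calculate_child_version('1.3', 'subobject1')  # -> '1.1'
    #     obj_calculate_child_version('1.5', 'subobject1')  # -> '1.2'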
def _obj_make_obj_compatible(self, primitive, target_version, field):
"""Backlevel a sub-object based on our versioning rules.
This is responsible for backporting objects contained within
this object's primitive according to a set of rules we
maintain about version dependencies between objects. This
requires that the obj_relationships table in this object is
correct and up-to-date.
:param:primitive: The primitive version of this object
:param:target_version: The version string requested for this object
:param:field: The name of the field in this object containing the
sub-object to be backported
"""
def _do_backport(to_version):
obj = getattr(self, field)
if obj is None:
return
if isinstance(obj, NovaObject):
if to_version != primitive[field]['nova_object.version']:
obj.obj_make_compatible(
primitive[field]['nova_object.data'],
to_version)
primitive[field]['nova_object.version'] = to_version
elif isinstance(obj, list):
for i, element in enumerate(obj):
element.obj_make_compatible(
primitive[field][i]['nova_object.data'],
to_version)
primitive[field][i]['nova_object.version'] = to_version
child_version = self.obj_calculate_child_version(target_version, field)
if child_version is None:
del primitive[field]
else:
_do_backport(child_version)
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version.
This is responsible for taking the primitive representation of
an object and making it suitable for the given target_version.
This may mean converting the format of object attributes, removing
attributes that have been added since the target version, etc. In
general:
- If a new version of an object adds a field, this routine
should remove it for older versions.
- If a new version changed or restricted the format of a field, this
should convert it back to something a client knowing only of the
older version will tolerate.
- If an object that this object depends on is bumped, then this
object should also take a version bump. Then, this routine should
backlevel the dependent object (by calling its obj_make_compatible())
if the requested version of this object is older than the version
where the new dependent object was added.
:param:primitive: The result of self.obj_to_primitive()
:param:target_version: The version string requested by the recipient
of the object
:raises: nova.exception.UnsupportedObjectError if conversion
is not possible for some reason
"""
for key, field in self.fields.items():
if not isinstance(field, (obj_fields.ObjectField,
obj_fields.ListOfObjectsField)):
continue
if not self.obj_attr_is_set(key):
continue
if key not in self.obj_relationships:
# NOTE(danms): This is really a coding error and shouldn't
# happen unless we miss something
raise exception.ObjectActionError(
action='obj_make_compatible',
reason='No rule for %s' % key)
self._obj_make_obj_compatible(primitive, target_version, key)
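    # Illustrative sketch (assumed subclass, not part of this module): an
    # object that added a 'foo' field in its version 1.2 would typically
    # extend this method as
    #
    #     def obj_make_compatible(self, primitive, target_version):
    #         super(MyObject, self).obj_make_compatible(primitive,
    #                                                   target_version)
    #         target_version = utils.convert_version_to_tuple(target_version)
    #         if target_version < (1, 2) and 'foo' in primitive:
    #             del primitive['foo']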
def obj_to_primitive(self, target_version=None):
"""Simple base-case dehydration.
This calls to_primitive() for each item in fields.
"""
primitive = dict()
for name, field in self.fields.items():
if self.obj_attr_is_set(name):
primitive[name] = field.to_primitive(self, name,
getattr(self, name))
if target_version:
self.obj_make_compatible(primitive, target_version)
obj = {'nova_object.name': self.obj_name(),
'nova_object.namespace': 'nova',
'nova_object.version': target_version or self.VERSION,
'nova_object.data': primitive}
if self.obj_what_changed():
obj['nova_object.changes'] = list(self.obj_what_changed())
return obj
def obj_set_defaults(self, *attrs):
if not attrs:
attrs = [name for name, field in self.fields.items()
if field.default != obj_fields.UnspecifiedDefault]
for attr in attrs:
default = copy.deepcopy(self.fields[attr].default)
if default is obj_fields.UnspecifiedDefault:
raise exception.ObjectActionError(
action='set_defaults',
reason='No default set for field %s' % attr)
if not self.obj_attr_is_set(attr):
setattr(self, attr, default)
def obj_load_attr(self, attrname):
"""Load an additional attribute from the real object.
This should use self._conductor, and cache any data that might
be useful for future load operations.
"""
raise NotImplementedError(
_("Cannot load '%s' in the base class") % attrname)
def save(self, context):
"""Save the changed fields back to the store.
This is optional for subclasses, but is presented here in the base
class for consistency among those that do.
"""
raise NotImplementedError(_('Cannot save anything in the base class'))
def obj_what_changed(self):
"""Returns a set of fields that have been modified."""
changes = set(self._changed_fields)
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(getattr(self, field), NovaObject) and
getattr(self, field).obj_what_changed()):
changes.add(field)
return changes
def obj_get_changes(self):
"""Returns a dict of changed fields and their new values."""
changes = {}
for key in self.obj_what_changed():
changes[key] = getattr(self, key)
return changes
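    # Sketch of the change tracking above (illustrative field name):
    #
    #     obj = SomeObject(context)
    #     obj.obj_what_changed()       # set()
    #     obj.foo = 'bar'
    #     obj.obj_what_changed()       # set(['foo'])
    #     obj.obj_get_changes()        # {'foo': 'bar'}
    #     obj.obj_reset_changes()
    #     obj.obj_what_changed()       # set()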
def obj_reset_changes(self, fields=None, recursive=False):
"""Reset the list of fields that have been changed.
:param fields: List of fields to reset, or "all" if None.
:param recursive: Call obj_reset_changes(recursive=True) on
any sub-objects within the list of fields
being reset.
NOTE: This is NOT "revert to previous values"
NOTE: Specifying fields on recursive resets will only be
honored at the top level. Everything below the top
will reset all.
"""
if recursive:
for field in self.obj_get_changes():
# Ignore fields not in requested set (if applicable)
if fields and field not in fields:
continue
# Skip any fields that are unset
if not self.obj_attr_is_set(field):
continue
value = getattr(self, field)
# Don't reset nulled fields
if value is None:
continue
# Reset straight Object and ListOfObjects fields
if isinstance(self.fields[field], obj_fields.ObjectField):
value.obj_reset_changes(recursive=True)
elif isinstance(self.fields[field],
obj_fields.ListOfObjectsField):
for thing in value:
thing.obj_reset_changes(recursive=True)
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
def obj_attr_is_set(self, attrname):
"""Test object to see if attrname is present.
Returns True if the named attribute has a value set, or
False if not. Raises AttributeError if attrname is not
a valid attribute for this object.
"""
if attrname not in self.obj_fields:
raise AttributeError(
_("%(objname)s object has no attribute '%(attrname)s'") %
{'objname': self.obj_name(), 'attrname': attrname})
return hasattr(self, get_attrname(attrname))
@property
def obj_fields(self):
return self.fields.keys() + self.obj_extra_fields
# NOTE(danms): This is nova-specific, so don't copy this to o.vo
@contextlib.contextmanager
def obj_alternate_context(self, context):
original_context = self._context
self._context = context
try:
yield
finally:
self._context = original_context
@contextlib.contextmanager
def obj_as_admin(self):
"""Context manager to make an object call as an admin.
This temporarily modifies the context embedded in an object to
be elevated() and restores it after the call completes. Example
usage:
with obj.obj_as_admin():
obj.save()
"""
if self._context is None:
raise exception.OrphanedObjectError(method='obj_as_admin',
objtype=self.obj_name())
original_context = self._context
self._context = self._context.elevated()
try:
yield
finally:
self._context = original_context
class NovaObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
pass
class NovaTimestampObject(object):
"""Mixin class for db backed objects with timestamp fields.
    SQLAlchemy models that inherit from the oslo_db TimestampMixin will include
these fields and the corresponding objects will benefit from this mixin.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
}
class NovaPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
'deleted_at': obj_fields.DateTimeField(nullable=True),
'deleted': obj_fields.BooleanField(default=False),
}
class ObjectListBase(object):
"""Mixin class for lists of objects.
This mixin class can be added as a base class for an object that
is implementing a list of objects. It adds a single field of 'objects',
which is the list store, and behaves like a list itself. It supports
serialization of the list of objects automatically.
"""
fields = {
'objects': obj_fields.ListOfObjectsField('NovaObject'),
}
# This is a dictionary of my_version:child_version mappings so that
# we can support backleveling our contents based on the version
# requested of the list object.
child_versions = {}
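    # For example (illustrative only): a list object at version '1.1' whose
    # items were bumped to '1.2' at that point would declare
    #
    #     child_versions = {
    #         '1.0': '1.0',
    #         '1.1': '1.2',
    #     }
    #
    # so obj_make_compatible() below backports each contained object to the
    # version matching the requested list version.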
def __init__(self, *args, **kwargs):
super(ObjectListBase, self).__init__(*args, **kwargs)
if 'objects' not in kwargs:
self.objects = []
self._changed_fields.discard('objects')
def __iter__(self):
"""List iterator interface."""
return iter(self.objects)
def __len__(self):
"""List length."""
return len(self.objects)
def __getitem__(self, index):
"""List index access."""
if isinstance(index, slice):
new_obj = self.__class__()
new_obj.objects = self.objects[index]
# NOTE(danms): We must be mixed in with a NovaObject!
new_obj.obj_reset_changes()
new_obj._context = self._context
return new_obj
return self.objects[index]
def __contains__(self, value):
"""List membership test."""
return value in self.objects
def count(self, value):
"""List count of value occurrences."""
return self.objects.count(value)
def index(self, value):
"""List index of value."""
return self.objects.index(value)
def sort(self, cmp=None, key=None, reverse=False):
self.objects.sort(cmp=cmp, key=key, reverse=reverse)
def obj_make_compatible(self, primitive, target_version):
primitives = primitive['objects']
child_target_version = self.child_versions.get(target_version, '1.0')
for index, item in enumerate(self.objects):
self.objects[index].obj_make_compatible(
primitives[index]['nova_object.data'],
child_target_version)
primitives[index]['nova_object.version'] = child_target_version
def obj_what_changed(self):
changes = set(self._changed_fields)
for child in self.objects:
if child.obj_what_changed():
changes.add('objects')
return changes
class NovaObjectSerializer(messaging.NoOpSerializer):
"""A NovaObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize NovaObject entities. Any service
that needs to accept or return NovaObjects as arguments or result values
should pass this to its RPCClient and RPCServer objects.
"""
@property
def conductor(self):
if not hasattr(self, '_conductor'):
from nova import conductor
self._conductor = conductor.API()
return self._conductor
def _process_object(self, context, objprim):
try:
objinst = NovaObject.obj_from_primitive(objprim, context=context)
except exception.IncompatibleObjectVersion as e:
objver = objprim['nova_object.version']
if objver.count('.') == 2:
# NOTE(danms): For our purposes, the .z part of the version
# should be safe to accept without requiring a backport
objprim['nova_object.version'] = \
'.'.join(objver.split('.')[:2])
return self._process_object(context, objprim)
objinst = self.conductor.object_backport(context, objprim,
e.kwargs['supported'])
return objinst
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable(**{k: action_fn(context, v)
for k, v in six.iteritems(values)})
else:
# NOTE(danms, gibi) A set can't have an unhashable value inside,
# such as a dict. Convert the set to list, which is fine, since we
# can't send them over RPC anyway. We convert it to list as this
# way there will be no semantic change between the fake rpc driver
# used in functional test and a normal rpc driver.
if iterable == set:
iterable = list
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A NovaObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, NovaObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The NovaObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
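# Illustrative sketch (assumed object and DB API names): a typical list class
# builds itself with obj_make_list() like
#
#     @base.remotable_classmethod
#     def get_all(cls, context):
#         db_flavors = db.flavor_get_all(context)
#         return base.obj_make_list(context, cls(context), objects.Flavor,
#                                   db_flavors)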
def serialize_args(fn):
"""Decorator that will do the arguments serialization before remoting."""
def wrapper(obj, *args, **kwargs):
args = [timeutils.strtime(at=arg) if isinstance(arg, datetime.datetime)
else arg for arg in args]
for k, v in kwargs.iteritems():
if k == 'exc_val' and v:
kwargs[k] = str(v)
elif k == 'exc_tb' and v and not isinstance(v, six.string_types):
kwargs[k] = ''.join(traceback.format_tb(v))
elif isinstance(v, datetime.datetime):
kwargs[k] = timeutils.strtime(at=v)
if hasattr(fn, '__call__'):
return fn(obj, *args, **kwargs)
# NOTE(danms): We wrap a descriptor, so use that protocol
return fn.__get__(None, obj)(*args, **kwargs)
# NOTE(danms): Make this discoverable
wrapper.remotable = getattr(fn, 'remotable', False)
wrapper.original_fn = fn
return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__')
else classmethod(wrapper))
|
|
# STANDARD LIB
from urlparse import urlparse
# LIBRARIES
from django.contrib.auth import get_user_model, get_user, BACKEND_SESSION_KEY
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import ValidationError
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.hashers import make_password
from google.appengine.api import users
# DJANGAE
from djangae.contrib.gauth.datastore.models import GaeDatastoreUser, Group, get_permission_choices
from djangae.contrib.gauth.datastore.backends import AppEngineUserAPIBackend
from djangae.contrib.gauth.middleware import AuthenticationMiddleware
from djangae.contrib.gauth.settings import AUTHENTICATION_BACKENDS
from djangae.contrib.gauth.utils import get_switch_accounts_url
from djangae.contrib import sleuth
class BackendTests(TestCase):
""" Tests for the AppEngineUserAPIBackend auth backend. """
def test_invalid_credentials_cause_typeerror(self):
""" If the `authenticate` method is passed credentials which it doesn't understand then
Django expects it to raise a TypeError.
"""
backend = AppEngineUserAPIBackend()
credentials = {'username': 'ted', 'password': 'secret'}
self.assertRaises(TypeError, backend.authenticate, **credentials)
@override_settings(DJANGAE_CREATE_UNKNOWN_USER=True)
def test_authenticate_creates_user_object(self):
""" If `authenticate` is called with valid credentials then a User object should be created
"""
User = get_user_model()
self.assertEqual(User.objects.count(), 0)
email = '[email protected]'
google_user = users.User(email, _user_id='111111111100000000001')
backend = AppEngineUserAPIBackend()
user = backend.authenticate(google_user=google_user,)
self.assertEqual(user.email, '[email protected]') # Domain is lower cased
self.assertEqual(user.email_lower, email.lower())
self.assertEqual(User.objects.count(), 1)
# Calling authenticate again with the same credentials should not create another user
user2 = backend.authenticate(google_user=google_user)
self.assertEqual(user.pk, user2.pk)
@override_settings(DJANGAE_CREATE_UNKNOWN_USER=True)
def test_user_pre_creation_create_unknown(self):
""" User objects for Google-Accounts-based users should be able to be pre-created in DB and
then matched by email address when they log in - even if unknown users are allowed.
"""
User = get_user_model()
backend = AppEngineUserAPIBackend()
email = '[email protected]'
# Pre-create our user
User.objects.pre_create_google_user(email)
# Now authenticate this user via the Google Accounts API
google_user = users.User(email=email, _user_id='111111111100000000001')
user = backend.authenticate(google_user=google_user)
# Check things
self.assertEqual(user.email, email)
self.assertIsNotNone(user.last_login)
self.assertFalse(user.has_usable_password())
@override_settings(DJANGAE_CREATE_UNKNOWN_USER=False)
def test_user_pre_creation_no_create_unknown(self):
""" User objects for Google-Accounts-based users should be able to be pre-created in DB and
then matched by email address when they log in - even if unknown users are not allowed.
"""
User = get_user_model()
backend = AppEngineUserAPIBackend()
email = '[email protected]'
# Pre-create our user
User.objects.pre_create_google_user(email)
# Now authenticate this user via the Google Accounts API
google_user = users.User(email=email, _user_id='111111111100000000001')
user = backend.authenticate(google_user=google_user)
# Check things
self.assertEqual(user.email, email)
self.assertIsNotNone(user.last_login)
self.assertFalse(user.has_usable_password())
def test_user_pre_created_users_are_authenticated_case_insensitively(self):
""" When a user is pre-created their email address may not have been saved with the same
            upper/lower casing as the one they end up logging in with, so the matching needs
            to be done case-insensitively.
"""
User = get_user_model()
backend = AppEngineUserAPIBackend()
email = '[email protected]'
# Pre-create our user
User.objects.pre_create_google_user(email)
# Now authenticate this user via the Google Accounts API
google_user = users.User(email='[email protected]', _user_id='111111111100000000001')
user = backend.authenticate(google_user=google_user)
# Check things
self.assertEqual(user.username, '111111111100000000001')
# We expect the email address to have been updated to the one which they logged in with
self.assertEqual(user.email, google_user.email())
self.assertIsNotNone(user.last_login)
self.assertFalse(user.has_usable_password())
@override_settings(DJANGAE_CREATE_UNKNOWN_USER=True)
def test_user_id_switch(self):
""" Users sometimes login with the same email, but a different google user id. We handle those cases by
blanking out the email on the old user object and creating a new one with the new user id.
"""
email = '[email protected]'
old_user = users.User(email=email, _user_id='111111111100000000001')
new_user = users.User(email=email, _user_id='111111111100000000002')
User = get_user_model()
backend = AppEngineUserAPIBackend()
# Authenticate 1st time, creating the user
user1 = backend.authenticate(google_user=old_user)
self.assertEqual(user1.email, email)
self.assertTrue(user1.username.endswith('1'))
self.assertEqual(1, User.objects.count())
# Now another user logs in using the same email
user2 = backend.authenticate(google_user=new_user)
self.assertEqual(user2.email, email)
self.assertTrue(user2.username.endswith('2'))
self.assertEqual(2, User.objects.count())
# The old account is kept around, but the email is blanked
user1 = User.objects.get(pk=user1.pk)
self.assertEqual(user1.email, "")
@override_settings(DJANGAE_FORCE_USER_PRE_CREATION=True)
def test_force_user_pre_creation(self):
User = get_user_model()
self.assertEqual(User.objects.count(), 0)
google_user = users.User('[email protected]', _user_id='111111111100000000001')
backend = AppEngineUserAPIBackend()
self.assertIsNone(backend.authenticate(google_user=google_user,))
self.assertEqual(User.objects.count(), 0)
# superusers don't need pre-creation of User object.
self.assertEqual(User.objects.count(), 0)
with sleuth.switch('google.appengine.api.users.is_current_user_admin', lambda: True):
user = backend.authenticate(google_user=google_user,)
self.assertEqual(User.objects.count(), 1)
            self.assertEqual(User.objects.get(), user)
@override_settings(DJANGAE_CREATE_UNKNOWN_USER=True)
def test_user_creation_race_condition(self):
""" If a user double clicks a 'login' button or something, causing 2 threads to be
authenticating the same user at the same time, ensure it doesn't die.
"""
email = "[email protected]"
user_id = "111111111100000000001"
original_user_get = get_user_model().objects.get
def crazy_user_get_patch(*args, **kwargs):
""" Patch for User.objects.get which simulates another thread creating the same user
                immediately after this is called (by doing it as part of this function). """
User = get_user_model()
try:
return original_user_get(*args, **kwargs) # We patched .get()
except User.DoesNotExist:
# This is horrible, but... the backend first tries get() by username and then tries
# get() by email, and we only want to create our user after that second call
if kwargs.keys() != ['username']:
User.objects.create_user(username=user_id, email=email)
raise
backend = AppEngineUserAPIBackend()
google_user = users.User(email, _user_id=user_id)
user_class_path = "djangae.contrib.gauth.datastore.models.GaeDatastoreUser.objects.get"
with sleuth.switch(user_class_path, crazy_user_get_patch):
backend.authenticate(google_user)
@override_settings(AUTHENTICATION_BACKENDS=AUTHENTICATION_BACKENDS)
class MiddlewareTests(TestCase):
""" Tests for the AuthenticationMiddleware. """
@override_settings(DJANGAE_CREATE_UNKNOWN_USER=True)
def test_login(self):
def _get_current_user():
return users.User('[email protected]', _user_id='111111111100000000001')
request = HttpRequest()
SessionMiddleware().process_request(request) # Make the damn sessions work
request.session[BACKEND_SESSION_KEY] = 'djangae.contrib.gauth.datastore.backends.AppEngineUserAPIBackend'
middleware = AuthenticationMiddleware()
# Check that we're not logged in already
user = get_user(request)
self.assertFalse(user.is_authenticated())
# Check that running the middleware when the Google users API doesn't know the current
        # user still leaves us as an anonymous user.
with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: None):
middleware.process_request(request)
        # Check that we are still not logged in
user = get_user(request)
self.assertFalse(user.is_authenticated())
# Now check that when the Google users API *does* know who we are, that we are logged in.
with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', _get_current_user):
middleware.process_request(request)
# Check that the middleware successfully logged us in
user = get_user(request)
self.assertTrue(user.is_authenticated())
self.assertEqual(user.email, '[email protected]')
self.assertEqual(user.username, '111111111100000000001')
@override_settings(DJANGAE_CREATE_UNKNOWN_USER=True)
def test_account_switch(self):
user1 = users.User('[email protected]', _user_id='111111111100000000001')
user2 = users.User('[email protected]', _user_id='222222222200000000002')
request = HttpRequest()
SessionMiddleware().process_request(request) # Make the damn sessions work
request.session[BACKEND_SESSION_KEY] = 'djangae.contrib.gauth.datastore.backends.AppEngineUserAPIBackend'
middleware = AuthenticationMiddleware()
with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user1):
middleware.process_request(request)
self.assertEqual(user1.user_id(), request.user.username)
with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user2):
middleware.process_request(request)
self.assertEqual(user2.user_id(), request.user.username)
@override_settings(DJANGAE_CREATE_UNKNOWN_USER=True)
def test_user_id_switch(self):
""" Users sometimes login with the same email, but a different google user id. We handle those cases by
blanking out the email on the old user object and creating a new one with the new user id.
"""
email = '[email protected]'
user1 = users.User(email, _user_id='111111111100000000001')
user2 = users.User(email, _user_id='222222222200000000002')
User = get_user_model()
request = HttpRequest()
SessionMiddleware().process_request(request) # Make the damn sessions work
request.session[BACKEND_SESSION_KEY] = 'djangae.contrib.gauth.datastore.backends.AppEngineUserAPIBackend'
middleware = AuthenticationMiddleware()
with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user1):
middleware.process_request(request)
self.assertEqual(1, User.objects.count())
django_user1 = request.user
self.assertEqual(user1.user_id(), django_user1.username)
self.assertEqual(user1.email(), django_user1.email)
with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user2):
middleware.process_request(request)
self.assertEqual(2, User.objects.count())
django_user2 = request.user
self.assertEqual(user2.user_id(), django_user2.username)
self.assertEqual(user2.email(), django_user2.email)
django_user1 = User.objects.get(pk=django_user1.pk)
self.assertEqual(django_user1.email, "")
@override_settings(DJANGAE_FORCE_USER_PRE_CREATION=True)
def test_force_user_pre_creation(self):
email = '[email protected]'
user1 = users.User(email, _user_id='111111111100000000001')
with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user1):
request = HttpRequest()
SessionMiddleware().process_request(request) # Make the damn sessions work
middleware = AuthenticationMiddleware()
middleware.process_request(request)
# We expect request.user to be AnonymousUser(), because there was no User object in the DB
# and so with pre-creation required, authentication should have failed
self.assertTrue(isinstance(request.user, AnonymousUser))
@override_settings(
AUTH_USER_MODEL='djangae.GaeDatastoreUser',
AUTHENTICATION_BACKENDS=('djangae.contrib.gauth.datastore.backends.AppEngineUserAPIBackend',)
)
class CustomPermissionsUserModelBackendTest(TestCase):
"""
Tests for the ModelBackend using the CustomPermissionsUser model.
As with the ExtensionUser test, this isn't a perfect test, because both
the User and CustomPermissionsUser are synchronized to the database,
    which wouldn't ordinarily happen in production.
"""
UserModel = GaeDatastoreUser
def setUp(self):
# Fix Django so that we can use our custom user model.
# TODO: Submit a fix to Django to allow override_settings(AUTH_USER_MODEL='something') to
# work, even if the project has already set AUTH_USER_MODEL to a custom user
super(CustomPermissionsUserModelBackendTest, self).setUp()
GaeDatastoreUser.objects = GaeDatastoreUser._default_manager
GaeDatastoreUser.base_manager_name = 'objects'
self.user = GaeDatastoreUser.objects.create(
username='test1',
email='[email protected]',
password=make_password(None),
is_active=True,
)
self.superuser = GaeDatastoreUser.objects.create(
username='test2',
email='[email protected]',
is_superuser=True,
password=make_password(None),
is_active=True,
)
def tearDown(self):
GaeDatastoreUser.objects.all().delete()
super(CustomPermissionsUserModelBackendTest, self).tearDown()
def test_has_perm(self):
user = self.UserModel.objects.get(pk=self.user.pk)
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = False
user.is_superuser = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = self.UserModel.objects.get(pk=self.user.pk)
user.user_permissions = ['auth.test']
user.save()
# reloading user to purge the _perm_cache
user = self.UserModel.objects.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions() == set(['auth.test']), True)
self.assertEqual(user.get_group_permissions(), set([]))
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
user.user_permissions.extend(['auth.test2', 'auth.test3'])
user.save()
user = self.UserModel.objects.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions(), set(['auth.test2', 'auth.test', 'auth.test3']))
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
group = Group.objects.create(name='test_group')
group.permissions = ['auth.test_group']
group.save()
user.groups = [group]
user.save()
user = self.UserModel.objects.get(pk=self.user.pk)
exp = set(['auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'])
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), set(['auth.test_group']))
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = self.UserModel.objects.get(pk=self.user.pk)
user.user_permissions = ['auth.test']
user.save()
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set([]))
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), set(['auth.test']))
def test_get_all_superuser_permissions(self):
"""A superuser has all permissions. Refs #14795."""
user = self.UserModel.objects.get(pk=self.superuser.pk)
self.assertEqual(len(user.get_all_permissions()), len(get_permission_choices()))
@override_settings(
AUTH_USER_MODEL='djangae.GaeDatastoreUser',
AUTHENTICATION_BACKENDS=('djangae.contrib.gauth.datastore.backends.AppEngineUserAPIBackend',)
)
class SwitchAccountsTests(TestCase):
""" Tests for the switch accounts functionality. """
@override_settings(DJANGAE_CREATE_UNKNOWN_USER=True)
def test_switch_accounts(self):
gcu = 'djangae.contrib.gauth.middleware.users.get_current_user'
final_destination = '/death/' # there's no escaping it
switch_accounts_url = get_switch_accounts_url(next=final_destination)
any_url = '/_ah/warmup'
jekyll = users.User(email='[email protected]', _user_id='1')
hyde = users.User(email='[email protected]', _user_id='2')
# we start our scenario with the user logged in
with sleuth.switch(gcu, lambda: jekyll):
response = self.client.get(any_url)
# Check that the user is logged in
expected_user_query = GaeDatastoreUser.objects.filter(username=jekyll.user_id())
self.assertEqual(len(expected_user_query), 1)
self.assertEqual(int(self.client._session()['_auth_user_id']), expected_user_query[0].pk)
# Now call the switch_accounts view, which should give us a redirect to the login page
response = self.client.get(switch_accounts_url, follow=False)
self.assertEqual(response.status_code, 302)
self.assertEqual(response['location'], users.create_login_url(switch_accounts_url))
# In tests, we don't have dev_appserver fired up, so we can't actually call the login
# URL, but let's suppose that the user wasn't logged into multiple accounts at once
# and so the login page redirected us straight back to the switch_accounts view.
# It should detect this, and should now redirect us to the log*out* URL with a
# destination of the log*in* URL
response = self.client.get(switch_accounts_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response['location'],
users.create_logout_url(users.create_login_url(switch_accounts_url))
)
            # Now emulate the scenario where we have logged in with a different
            # account, so re-mock that
with sleuth.switch(gcu, lambda: hyde):
# Now that we're logged in as a different user, we expect request.user to get set to
# the equivalent Django user and to be redirected to our final destination
response = self.client.get(switch_accounts_url)
redirect_path = urlparse(response['location']).path # it has the host name as well
self.assertEqual(redirect_path, final_destination)
expected_user_query = GaeDatastoreUser.objects.filter(username=hyde.user_id())
self.assertEqual(len(expected_user_query), 1)
self.assertEqual(int(self.client._session()['_auth_user_id']), expected_user_query[0].pk)
class ModelTests(TestCase):
def test_email_uniqueness_validation_raised_correctly(self):
""" GaeAbstractBaseUser has an `email_lower` field whcih is unique, but it's really a proxy
for uniqueness on the `email` field.
"""
no_pass = make_password(None)
User = get_user_model()
user1 = User.objects.create_user("111111111111111111111", email="[email protected]", password=no_pass)
user2 = User(username="111111111111111111112", email="[email protected]", password=no_pass)
# We expect the second user to have a unique violation on the `email_lower` field, but it
# should be attached to the (editable) `email` field
try:
user2.full_clean()
except ValidationError as e:
self.assertTrue("email" in e.error_dict)
self.assertFalse("email_lower" in e.error_dict)
# We should still be able to edit the existing user though
user1.email = "[email protected]"
user1.full_clean()
|
|
from collections import defaultdict
from datetime import date, datetime, timedelta
import json
import time
from django import http
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.datastructures import SortedDict
from django.views.decorators.cache import never_cache
from tower import ugettext as _
import amo
from abuse.models import AbuseReport
from access import acl
from addons.decorators import addon_view, addon_view_factory
from addons.models import Addon, Version
from amo.decorators import json_view, post_required
from amo.utils import paginate
from amo.urlresolvers import reverse
from devhub.models import ActivityLog, AddonLog, CommentLog
from editors import forms
from editors.models import (AddonCannedResponse, EditorSubscription, EventLog,
PerformanceGraph, ReviewerScore,
ViewFastTrackQueue, ViewFullReviewQueue,
ViewPendingQueue, ViewPreliminaryQueue,
ViewQueue,
ViewUnlistedFullReviewQueue,
ViewUnlistedPendingQueue,
ViewUnlistedPreliminaryQueue)
from editors.helpers import (ViewFastTrackQueueTable, ViewFullReviewQueueTable,
ViewPendingQueueTable, ViewPreliminaryQueueTable,
ViewUnlistedFullReviewQueueTable,
ViewUnlistedPendingQueueTable,
ViewUnlistedPreliminaryQueueTable)
from reviews.forms import ReviewFlagFormSet
from reviews.models import Review, ReviewFlag
from users.models import UserProfile
from zadmin.models import get_config, set_config
from .decorators import (addons_reviewer_required, any_reviewer_required,
unlisted_addons_reviewer_required)
def context(**kw):
ctx = dict(motd=get_config('editors_review_motd'),
queue_counts=queue_counts(),
unlisted_queue_counts=queue_counts(unlisted=True))
ctx.update(kw)
return ctx
@addons_reviewer_required
def eventlog(request):
form = forms.EventLogForm(request.GET)
eventlog = ActivityLog.objects.editor_events()
if form.is_valid():
if form.cleaned_data['start']:
eventlog = eventlog.filter(created__gte=form.cleaned_data['start'])
if form.cleaned_data['end']:
eventlog = eventlog.filter(created__lt=form.cleaned_data['end'])
if form.cleaned_data['filter']:
eventlog = eventlog.filter(action=form.cleaned_data['filter'].id)
pager = amo.utils.paginate(request, eventlog, 50)
data = context(form=form, pager=pager)
return render(request, 'editors/eventlog.html', data)
@addons_reviewer_required
def eventlog_detail(request, id):
log = get_object_or_404(ActivityLog.objects.editor_events(), pk=id)
review = None
# I really cannot express the depth of the insanity incarnate in
# our logging code...
if len(log.arguments) > 1 and isinstance(log.arguments[1], Review):
review = log.arguments[1]
is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
can_undelete = review and review.deleted and (
is_admin or request.user.pk == log.user.pk)
if request.method == 'POST':
# A Form seems overkill for this.
if request.POST['action'] == 'undelete':
if not can_undelete:
raise PermissionDenied
ReviewerScore.award_moderation_points(
log.user, review.addon, review.id, undo=True)
review.undelete()
return redirect('editors.eventlog.detail', id)
data = context(log=log, can_undelete=can_undelete)
return render(request, 'editors/eventlog_detail.html', data)
@any_reviewer_required
def home(request):
if (not acl.action_allowed(request, 'Addons', 'Review') and
acl.action_allowed(request, 'Personas', 'Review')):
return http.HttpResponseRedirect(reverse('editors.themes.home'))
durations = (('new', _('New Add-ons (Under 5 days)')),
('med', _('Passable (5 to 10 days)')),
('old', _('Overdue (Over 10 days)')))
progress, percentage = _editor_progress()
unlisted_progress, unlisted_percentage = _editor_progress(unlisted=True)
reviews_max_display = getattr(settings, 'EDITOR_REVIEWS_MAX_DISPLAY', 5)
reviews_total = ActivityLog.objects.total_reviews()[:reviews_max_display]
reviews_monthly = (
ActivityLog.objects.monthly_reviews()[:reviews_max_display])
reviews_total_count = ActivityLog.objects.user_approve_reviews(
request.user).count()
reviews_monthly_count = (
ActivityLog.objects.current_month_user_approve_reviews(
request.user).count())
# Try to read user position from retrieved reviews.
# If not available, query for it.
reviews_total_position = (
ActivityLog.objects.user_position(reviews_total, request.user)
or ActivityLog.objects.total_reviews_user_position(request.user))
reviews_monthly_position = (
ActivityLog.objects.user_position(reviews_monthly, request.user)
or ActivityLog.objects.monthly_reviews_user_position(request.user))
data = context(
reviews_total=reviews_total,
reviews_monthly=reviews_monthly,
reviews_total_count=reviews_total_count,
reviews_monthly_count=reviews_monthly_count,
reviews_total_position=reviews_total_position,
reviews_monthly_position=reviews_monthly_position,
new_editors=EventLog.new_editors(),
eventlog=ActivityLog.objects.editor_events()[:6],
progress=progress,
unlisted_progress=unlisted_progress,
percentage=percentage,
unlisted_percentage=unlisted_percentage,
durations=durations,
reviews_max_display=reviews_max_display)
return render(request, 'editors/home.html', data)
def _editor_progress(unlisted=False):
"""Return the progress (number of add-ons still unreviewed for a given
period of time) and the percentage (out of all add-ons of that type)."""
types = ['nominated', 'prelim', 'pending']
progress = {'new': queue_counts(types, days_max=4, unlisted=unlisted),
'med': queue_counts(types, days_min=5, days_max=10,
unlisted=unlisted),
'old': queue_counts(types, days_min=11, unlisted=unlisted)}
# Return the percent of (p)rogress out of (t)otal.
def pct(p, t):
return (p / float(t)) * 100 if p > 0 else 0
percentage = {}
for t in types:
total = progress['new'][t] + progress['med'][t] + progress['old'][t]
percentage[t] = {}
for duration in ('new', 'med', 'old'):
percentage[t][duration] = pct(progress[duration][t], total)
return (progress, percentage)
@addons_reviewer_required
def performance(request, user_id=False):
user = request.amo_user
editors = _recent_editors()
is_admin = (acl.action_allowed(request, 'Admin', '%') or
acl.action_allowed(request, 'ReviewerAdminTools', 'View'))
if is_admin and user_id:
try:
user = UserProfile.objects.get(pk=user_id)
except UserProfile.DoesNotExist:
pass # Use request.amo_user from above.
monthly_data = _performance_by_month(user.id)
performance_total = _performance_total(monthly_data)
# Incentive point breakdown.
today = date.today()
month_ago = today - timedelta(days=30)
year_ago = today - timedelta(days=365)
point_total = ReviewerScore.get_total(user)
totals = ReviewerScore.get_breakdown(user)
months = ReviewerScore.get_breakdown_since(user, month_ago)
years = ReviewerScore.get_breakdown_since(user, year_ago)
def _sum(iter, types):
return sum(s.total for s in iter if s.atype in types)
breakdown = {
'month': {
'addons': _sum(months, amo.GROUP_TYPE_ADDON),
'themes': _sum(months, amo.GROUP_TYPE_THEME),
},
'year': {
'addons': _sum(years, amo.GROUP_TYPE_ADDON),
'themes': _sum(years, amo.GROUP_TYPE_THEME),
},
'total': {
'addons': _sum(totals, amo.GROUP_TYPE_ADDON),
'themes': _sum(totals, amo.GROUP_TYPE_THEME),
}
}
data = context(monthly_data=json.dumps(monthly_data),
performance_month=performance_total['month'],
performance_year=performance_total['year'],
breakdown=breakdown, point_total=point_total,
editors=editors, current_user=user, is_admin=is_admin,
is_user=(request.amo_user.id == user.id))
return render(request, 'editors/performance.html', data)
def _recent_editors(days=90):
since_date = datetime.now() - timedelta(days=days)
editors = (UserProfile.objects
.filter(activitylog__action__in=amo.LOG_REVIEW_QUEUE,
activitylog__created__gt=since_date)
.order_by('display_name')
.distinct())
return editors
def _performance_total(data):
# TODO(gkoberger): Fix this so it's the past X, rather than this X to date.
# (ex: March 15-April 15, not April 1 - April 15)
total_yr = dict(usercount=0, teamamt=0, teamcount=0, teamavg=0)
total_month = dict(usercount=0, teamamt=0, teamcount=0, teamavg=0)
current_year = datetime.now().year
for k, val in data.items():
if k.startswith(str(current_year)):
total_yr['usercount'] = total_yr['usercount'] + val['usercount']
total_yr['teamamt'] = total_yr['teamamt'] + val['teamamt']
total_yr['teamcount'] = total_yr['teamcount'] + val['teamcount']
current_label_month = datetime.now().isoformat()[:7]
if current_label_month in data:
total_month = data[current_label_month]
return dict(month=total_month, year=total_yr)
def _performance_by_month(user_id, months=12, end_month=None, end_year=None):
monthly_data = SortedDict()
now = datetime.now()
if not end_month:
end_month = now.month
if not end_year:
end_year = now.year
end_time = time.mktime((end_year, end_month + 1, 1, 0, 0, 0, 0, 0, -1))
start_time = time.mktime((end_year, end_month + 1 - months,
1, 0, 0, 0, 0, 0, -1))
sql = (PerformanceGraph.objects
.filter_raw('log_activity.created >=',
date.fromtimestamp(start_time).isoformat())
.filter_raw('log_activity.created <',
date.fromtimestamp(end_time).isoformat()))
for row in sql.all():
label = row.approval_created.isoformat()[:7]
if label not in monthly_data:
xaxis = row.approval_created.strftime('%b %Y')
monthly_data[label] = dict(teamcount=0, usercount=0,
teamamt=0, label=xaxis)
monthly_data[label]['teamamt'] = monthly_data[label]['teamamt'] + 1
monthly_data_count = monthly_data[label]['teamcount']
monthly_data[label]['teamcount'] = monthly_data_count + row.total
if row.user_id == user_id:
user_count = monthly_data[label]['usercount']
monthly_data[label]['usercount'] = user_count + row.total
# Calculate averages
for i, vals in monthly_data.items():
average = round(vals['teamcount'] / float(vals['teamamt']), 1)
monthly_data[i]['teamavg'] = str(average) # floats aren't valid json
return monthly_data
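# Illustrative sketch of the structure returned by _performance_by_month()
# (values assumed):
#
#     {'2015-04': {'label': 'Apr 2015', 'teamamt': 3, 'teamcount': 120,
#                  'usercount': 40, 'teamavg': '40.0'},
#      ...}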
@addons_reviewer_required
def motd(request):
form = None
if acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit'):
form = forms.MOTDForm(
initial={'motd': get_config('editors_review_motd')})
data = context(form=form)
return render(request, 'editors/motd.html', data)
@addons_reviewer_required
@post_required
def save_motd(request):
if not acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit'):
raise PermissionDenied
form = forms.MOTDForm(request.POST)
if form.is_valid():
set_config('editors_review_motd', form.cleaned_data['motd'])
return redirect(reverse('editors.motd'))
data = context(form=form)
return render(request, 'editors/motd.html', data)
def _queue(request, TableObj, tab, qs=None, unlisted=False):
if qs is None:
qs = TableObj.Meta.model.objects.all()
if request.GET:
search_form = forms.QueueSearchForm(request.GET)
if search_form.is_valid():
qs = search_form.filter_qs(qs)
else:
search_form = forms.QueueSearchForm()
order_by = request.GET.get('sort', TableObj.default_order_by())
order_by = TableObj.translate_sort_cols(order_by)
table = TableObj(data=qs, order_by=order_by)
default = 100
per_page = request.GET.get('per_page', default)
try:
per_page = int(per_page)
except ValueError:
per_page = default
if per_page <= 0 or per_page > 200:
per_page = default
page = paginate(request, table.rows, per_page=per_page)
table.set_page(page)
return render(request, 'editors/queue.html',
context(table=table, page=page, tab=tab,
search_form=search_form,
point_types=amo.REVIEWED_AMO,
unlisted=unlisted))
def queue_counts(type=None, unlisted=False, **kw):
def construct_query(query_type, days_min=None, days_max=None):
def apply_query(query, *args):
query = query.having(*args)
return query
query = query_type.objects
if days_min:
query = apply_query(query, 'waiting_time_days >=', days_min)
if days_max:
query = apply_query(query, 'waiting_time_days <=', days_max)
return query.count
counts = {'pending': construct_query(ViewPendingQueue, **kw),
'nominated': construct_query(ViewFullReviewQueue, **kw),
'prelim': construct_query(ViewPreliminaryQueue, **kw),
'fast_track': construct_query(ViewFastTrackQueue, **kw),
'moderated': (
Review.objects.filter(reviewflag__isnull=False,
editorreview=1).count)}
if unlisted:
counts = {
'pending': construct_query(ViewUnlistedPendingQueue, **kw),
'nominated': construct_query(ViewUnlistedFullReviewQueue, **kw),
'prelim': construct_query(ViewUnlistedPreliminaryQueue, **kw)}
rv = {}
if isinstance(type, basestring):
return counts[type]()
for k, v in counts.items():
if not isinstance(type, list) or k in type:
rv[k] = v()
return rv
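# Editorial usage sketch for queue_counts above (the counts in the comments are made
# up): a plain call returns a dict covering every queue, a string narrows it to a
# single integer, a list restricts the dict, and days_min/days_max filter on the
# waiting time of the add-ons.
def _example_queue_counts_usage():
    everything = queue_counts()                      # {'pending': 12, 'nominated': 3, ...}
    pending = queue_counts('pending')                # 12
    subset = queue_counts(['pending', 'nominated'])  # {'pending': 12, 'nominated': 3}
    aged = queue_counts('pending', days_min=5)       # pending add-ons waiting >= 5 days
    return everything, pending, subset, aged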
@addons_reviewer_required
def queue(request):
return redirect(reverse('editors.queue_pending'))
@addons_reviewer_required
def queue_nominated(request):
return _queue(request, ViewFullReviewQueueTable, 'nominated')
@addons_reviewer_required
def queue_pending(request):
return _queue(request, ViewPendingQueueTable, 'pending')
@addons_reviewer_required
def queue_prelim(request):
return _queue(request, ViewPreliminaryQueueTable, 'prelim')
@addons_reviewer_required
def queue_fast_track(request):
return _queue(request, ViewFastTrackQueueTable, 'fast_track')
@addons_reviewer_required
def queue_moderated(request):
rf = (Review.objects.exclude(Q(addon__isnull=True) |
Q(reviewflag__isnull=True))
.filter(editorreview=1)
.order_by('reviewflag__created'))
page = paginate(request, rf, per_page=20)
flags = dict(ReviewFlag.FLAGS)
reviews_formset = ReviewFlagFormSet(request.POST or None,
queryset=page.object_list,
request=request)
if request.method == 'POST':
if reviews_formset.is_valid():
reviews_formset.save()
else:
amo.messages.error(
request, ' '.join(e.as_text() or _('An unknown error occurred')
for e in reviews_formset.errors))
return redirect(reverse('editors.queue_moderated'))
return render(request, 'editors/queue.html',
context(reviews_formset=reviews_formset,
tab='moderated', page=page, flags=flags,
search_form=None,
point_types=amo.REVIEWED_AMO))
@unlisted_addons_reviewer_required
def unlisted_queue(request):
return redirect(reverse('editors.unlisted_queue_pending'))
@unlisted_addons_reviewer_required
def unlisted_queue_nominated(request):
return _queue(request, ViewUnlistedFullReviewQueueTable, 'nominated',
unlisted=True)
@unlisted_addons_reviewer_required
def unlisted_queue_pending(request):
return _queue(request, ViewUnlistedPendingQueueTable, 'pending',
unlisted=True)
@unlisted_addons_reviewer_required
def unlisted_queue_prelim(request):
return _queue(request, ViewUnlistedPreliminaryQueueTable, 'prelim',
unlisted=True)
@addons_reviewer_required
@post_required
@json_view
def application_versions_json(request):
app_id = request.POST['application_id']
f = forms.QueueSearchForm()
return {'choices': f.version_choices_for_app_id(app_id)}
@addons_reviewer_required
@addon_view_factory(qs=Addon.with_unlisted.all)
def review(request, addon):
if not addon.is_listed and not acl.check_unlisted_addons_reviewer(request):
raise http.Http404
version = addon.latest_version
if not settings.ALLOW_SELF_REVIEWS and addon.has_author(request.amo_user):
amo.messages.warning(request, _('Self-reviews are not allowed.'))
return redirect(reverse('editors.queue'))
form = forms.get_review_form(request.POST or None, request=request,
addon=addon, version=version)
queue_type = (form.helper.review_type if form.helper.review_type
!= 'preliminary' else 'prelim')
if addon.is_listed:
redirect_url = reverse('editors.queue_%s' % queue_type)
else:
redirect_url = reverse('editors.unlisted_queue_%s' % queue_type)
is_admin = acl.action_allowed(request, 'Addons', 'Edit')
if request.method == 'POST' and form.is_valid():
form.helper.process()
if form.cleaned_data.get('notify'):
EditorSubscription.objects.get_or_create(user=request.amo_user,
addon=addon)
if form.cleaned_data.get('adminflag') and is_admin:
addon.update(admin_review=False)
amo.messages.success(request, _('Review successfully processed.'))
return redirect(redirect_url)
canned = AddonCannedResponse.objects.all()
actions = form.helper.actions.items()
statuses = [amo.STATUS_PUBLIC, amo.STATUS_LITE,
amo.STATUS_LITE_AND_NOMINATED]
try:
show_diff = (addon.versions.exclude(id=version.id)
.filter(files__isnull=False,
created__lt=version.created,
files__status__in=statuses)
.latest())
except Version.DoesNotExist:
show_diff = None
# The actions we should show a minimal form from.
actions_minimal = [k for (k, a) in actions if not a.get('minimal')]
# We only allow the user to check/uncheck files for "pending"
allow_unchecking_files = form.helper.review_type == "pending"
versions = (Version.objects.filter(addon=addon)
.exclude(files__status=amo.STATUS_BETA)
.order_by('-created')
.transform(Version.transformer_activity)
.transform(Version.transformer))
class PseudoVersion(object):
def __init__(self):
self.all_activity = []
all_files = ()
approvalnotes = None
compatible_apps_ordered = ()
releasenotes = None
        status = 'Deleted'
@property
def created(self):
return self.all_activity[0].created
@property
def version(self):
return (self.all_activity[0].activity_log
.details.get('version', '[deleted]'))
# Grab review history for deleted versions of this add-on
comments = (CommentLog.objects
.filter(activity_log__action__in=amo.LOG_REVIEW_QUEUE,
activity_log__versionlog=None,
activity_log__addonlog__addon=addon)
.order_by('created')
.select_related('activity_log'))
comment_versions = defaultdict(PseudoVersion)
for c in comments:
c.version = c.activity_log.details.get('version', c.created)
comment_versions[c.version].all_activity.append(c)
all_versions = comment_versions.values()
all_versions.extend(versions)
all_versions.sort(key=lambda v: v.created,
reverse=True)
pager = amo.utils.paginate(request, all_versions, 10)
num_pages = pager.paginator.num_pages
count = pager.paginator.count
try:
flags = ViewQueue.objects.get(id=addon.id).flags
except ViewQueue.DoesNotExist:
flags = []
user_changes_actions = [
amo.LOG.ADD_USER_WITH_ROLE.id,
amo.LOG.CHANGE_USER_WITH_ROLE.id,
amo.LOG.REMOVE_USER_WITH_ROLE.id]
user_changes_log = AddonLog.objects.filter(
activity_log__action__in=user_changes_actions,
addon=addon).order_by('id')
ctx = context(version=version, addon=addon,
pager=pager, num_pages=num_pages, count=count, flags=flags,
form=form, canned=canned, is_admin=is_admin,
show_diff=show_diff,
allow_unchecking_files=allow_unchecking_files,
actions=actions, actions_minimal=actions_minimal,
whiteboard_form=forms.WhiteboardForm(instance=addon),
user_changes=user_changes_log,
unlisted=not addon.is_listed)
return render(request, 'editors/review.html', ctx)
@never_cache
@json_view
@addons_reviewer_required
def review_viewing(request):
if 'addon_id' not in request.POST:
return {}
addon_id = request.POST['addon_id']
user_id = request.amo_user.id
current_name = ''
is_user = 0
key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, addon_id)
interval = amo.EDITOR_VIEWING_INTERVAL
# Check who is viewing.
currently_viewing = cache.get(key)
# If nobody is viewing or current user is, set current user as viewing
if not currently_viewing or currently_viewing == user_id:
# We want to save it for twice as long as the ping interval,
# just to account for latency and the like.
cache.set(key, user_id, interval * 2)
currently_viewing = user_id
current_name = request.amo_user.name
is_user = 1
else:
current_name = UserProfile.objects.get(pk=currently_viewing).name
return {'current': currently_viewing, 'current_name': current_name,
'is_user': is_user, 'interval_seconds': interval}
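# Editorial sketch of the soft-locking scheme used by review_viewing above: one cache
# key per add-on holds the id of the reviewer currently on the page, and the TTL is
# twice the client ping interval so a slightly late ping does not drop the lock.
def _example_review_viewing_lock(addon_id, user_id):
    key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, addon_id)
    cache.set(key, user_id, amo.EDITOR_VIEWING_INTERVAL * 2)
    return cache.get(key) == user_id  # True while this reviewer holds the page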
@never_cache
@json_view
@addons_reviewer_required
def queue_viewing(request):
if 'addon_ids' not in request.POST:
return {}
viewing = {}
user_id = request.amo_user.id
for addon_id in request.POST['addon_ids'].split(','):
addon_id = addon_id.strip()
key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, addon_id)
currently_viewing = cache.get(key)
if currently_viewing and currently_viewing != user_id:
viewing[addon_id] = (UserProfile.objects
.get(id=currently_viewing)
.display_name)
return viewing
@json_view
@addons_reviewer_required
def queue_version_notes(request, addon_id):
addon = get_object_or_404(Addon, pk=addon_id)
version = addon.latest_version
return {'releasenotes': unicode(version.releasenotes),
'approvalnotes': version.approvalnotes}
@addons_reviewer_required
def reviewlog(request):
data = request.GET.copy()
if not data.get('start') and not data.get('end'):
today = date.today()
data['start'] = date(today.year, today.month, 1)
form = forms.ReviewLogForm(data)
approvals = ActivityLog.objects.review_queue()
if not acl.check_unlisted_addons_reviewer(request):
# Display logs related to unlisted add-ons only to senior reviewers.
approvals = approvals.filter(addonlog__addon__is_listed=True)
if form.is_valid():
data = form.cleaned_data
if data['start']:
approvals = approvals.filter(created__gte=data['start'])
if data['end']:
approvals = approvals.filter(created__lt=data['end'])
if data['search']:
term = data['search']
approvals = approvals.filter(
Q(commentlog__comments__icontains=term) |
Q(addonlog__addon__name__localized_string__icontains=term) |
Q(user__display_name__icontains=term) |
Q(user__username__icontains=term)).distinct()
pager = amo.utils.paginate(request, approvals, 50)
ad = {
amo.LOG.APPROVE_VERSION.id: _('was approved'),
amo.LOG.PRELIMINARY_VERSION.id: _('given preliminary review'),
amo.LOG.REJECT_VERSION.id: _('rejected'),
amo.LOG.ESCALATE_VERSION.id: _(
'escalated', 'editors_review_history_nominated_adminreview'),
amo.LOG.REQUEST_INFORMATION.id: _('needs more information'),
amo.LOG.REQUEST_SUPER_REVIEW.id: _('needs super review'),
amo.LOG.COMMENT_VERSION.id: _('commented'),
}
data = context(form=form, pager=pager, ACTION_DICT=ad)
return render(request, 'editors/reviewlog.html', data)
@addons_reviewer_required
@addon_view
def abuse_reports(request, addon):
reports = AbuseReport.objects.filter(addon=addon).order_by('-created')
total = reports.count()
reports = amo.utils.paginate(request, reports)
data = context(addon=addon, reports=reports, total=total)
return render(request, 'editors/abuse_reports.html', data)
@addons_reviewer_required
def leaderboard(request):
return render(request, 'editors/leaderboard.html', context(**{
'scores': ReviewerScore.all_users_by_score(),
}))
@addons_reviewer_required
@addon_view_factory(qs=Addon.with_unlisted.all)
def whiteboard(request, addon):
form = forms.WhiteboardForm(request.POST or None, instance=addon)
if form.is_valid():
addon = form.save()
return redirect('editors.review', addon.pk)
raise PermissionDenied
|
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
    # We first create an estimator, give it its own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
|
|
import json
from contextlib import contextmanager
from django.contrib import admin
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.views.autocomplete import AutocompleteJsonView
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.http import Http404
from django.test import RequestFactory, override_settings
from django.urls import reverse, reverse_lazy
from .admin import AnswerAdmin, QuestionAdmin
from .models import Answer, Author, Authorship, Book, Question
from .tests import AdminViewBasicTestCase
PAGINATOR_SIZE = AutocompleteJsonView.paginate_by
class AuthorAdmin(admin.ModelAdmin):
ordering = ['id']
search_fields = ['id']
class AuthorshipInline(admin.TabularInline):
model = Authorship
autocomplete_fields = ['author']
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorshipInline]
site = admin.AdminSite(name='autocomplete_admin')
site.register(Question, QuestionAdmin)
site.register(Answer, AnswerAdmin)
site.register(Author, AuthorAdmin)
site.register(Book, BookAdmin)
class AutocompleteJsonViewTests(AdminViewBasicTestCase):
as_view_args = {'model_admin': QuestionAdmin(Question, site)}
factory = RequestFactory()
url = reverse_lazy('autocomplete_admin:admin_views_question_autocomplete')
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(
username='user', password='secret',
email='[email protected]', is_staff=True,
)
super().setUpTestData()
def test_success(self):
q = Question.objects.create(question='Is this a question?')
request = self.factory.get(self.url, {'term': 'is'})
request.user = self.superuser
response = AutocompleteJsonView.as_view(**self.as_view_args)(request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(data, {
'results': [{'id': str(q.pk), 'text': q.question}],
'pagination': {'more': False},
})
def test_must_be_logged_in(self):
response = self.client.get(self.url, {'term': ''})
self.assertEqual(response.status_code, 200)
self.client.logout()
response = self.client.get(self.url, {'term': ''})
self.assertEqual(response.status_code, 302)
def test_has_view_or_change_permission_required(self):
"""
        Users require the view or change permission for the related model in
        order to access the autocomplete view for it.
"""
request = self.factory.get(self.url, {'term': 'is'})
self.user.is_staff = True
self.user.save()
request.user = self.user
response = AutocompleteJsonView.as_view(**self.as_view_args)(request)
self.assertEqual(response.status_code, 403)
self.assertJSONEqual(response.content.decode('utf-8'), {'error': '403 Forbidden'})
for permission in ('view', 'change'):
with self.subTest(permission=permission):
self.user.user_permissions.clear()
p = Permission.objects.get(
content_type=ContentType.objects.get_for_model(Question),
codename='%s_question' % permission,
)
self.user.user_permissions.add(p)
request.user = User.objects.get(pk=self.user.pk)
response = AutocompleteJsonView.as_view(**self.as_view_args)(request)
self.assertEqual(response.status_code, 200)
def test_search_use_distinct(self):
"""
        Searching across model relations uses QuerySet.distinct() to avoid
duplicates.
"""
q1 = Question.objects.create(question='question 1')
q2 = Question.objects.create(question='question 2')
q2.related_questions.add(q1)
q3 = Question.objects.create(question='question 3')
q3.related_questions.add(q1)
request = self.factory.get(self.url, {'term': 'question'})
request.user = self.superuser
class DistinctQuestionAdmin(QuestionAdmin):
search_fields = ['related_questions__question', 'question']
model_admin = DistinctQuestionAdmin(Question, site)
response = AutocompleteJsonView.as_view(model_admin=model_admin)(request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(data['results']), 3)
def test_missing_search_fields(self):
class EmptySearchAdmin(QuestionAdmin):
search_fields = []
model_admin = EmptySearchAdmin(Question, site)
msg = 'EmptySearchAdmin must have search_fields for the autocomplete_view.'
with self.assertRaisesMessage(Http404, msg):
model_admin.autocomplete_view(self.factory.get(self.url))
def test_get_paginator(self):
"""Search results are paginated."""
Question.objects.bulk_create(Question(question=str(i)) for i in range(PAGINATOR_SIZE + 10))
model_admin = QuestionAdmin(Question, site)
model_admin.ordering = ['pk']
# The first page of results.
request = self.factory.get(self.url, {'term': ''})
request.user = self.superuser
response = AutocompleteJsonView.as_view(model_admin=model_admin)(request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(data, {
'results': [{'id': str(q.pk), 'text': q.question} for q in Question.objects.all()[:PAGINATOR_SIZE]],
'pagination': {'more': True},
})
# The second page of results.
request = self.factory.get(self.url, {'term': '', 'page': '2'})
request.user = self.superuser
response = AutocompleteJsonView.as_view(model_admin=model_admin)(request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(data, {
'results': [{'id': str(q.pk), 'text': q.question} for q in Question.objects.all()[PAGINATOR_SIZE:]],
'pagination': {'more': False},
})
@override_settings(ROOT_URLCONF='admin_views.urls')
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ['admin_views'] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.superuser = User.objects.create_superuser(
username='super', password='secret', email='[email protected]',
)
self.admin_login(username='super', password='secret', login_url=reverse('autocomplete_admin:index'))
@contextmanager
def select2_ajax_wait(self, timeout=10):
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as ec
yield
with self.disable_implicit_wait():
try:
loading_element = self.selenium.find_element_by_css_selector(
'li.select2-results__option.loading-results'
)
except NoSuchElementException:
pass
else:
self.wait_until(ec.staleness_of(loading_element), timeout=timeout)
def test_select(self):
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
self.selenium.get(self.live_server_url + reverse('autocomplete_admin:admin_views_answer_add'))
elem = self.selenium.find_element_by_css_selector('.select2-selection')
elem.click() # Open the autocomplete dropdown.
results = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(results.is_displayed())
option = self.selenium.find_element_by_css_selector('.select2-results__option')
self.assertEqual(option.text, 'No results found')
elem.click() # Close the autocomplete dropdown.
q1 = Question.objects.create(question='Who am I?')
Question.objects.bulk_create(Question(question=str(i)) for i in range(PAGINATOR_SIZE + 10))
elem.click() # Reopen the dropdown now that some objects exist.
result_container = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
# PAGINATOR_SIZE results and "Loading more results".
self.assertEqual(len(results), PAGINATOR_SIZE + 1)
search = self.selenium.find_element_by_css_selector('.select2-search__field')
# Load next page of results by scrolling to the bottom of the list.
with self.select2_ajax_wait():
for _ in range(len(results)):
search.send_keys(Keys.ARROW_DOWN)
results = result_container.find_elements_by_css_selector('.select2-results__option')
# All objects are now loaded.
self.assertEqual(len(results), PAGINATOR_SIZE + 11)
# Limit the results with the search field.
with self.select2_ajax_wait():
search.send_keys('Who')
# Ajax request is delayed.
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), PAGINATOR_SIZE + 12)
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), 1)
# Select the result.
search.send_keys(Keys.RETURN)
select = Select(self.selenium.find_element_by_id('id_question'))
self.assertEqual(select.first_selected_option.get_attribute('value'), str(q1.pk))
def test_select_multiple(self):
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
self.selenium.get(self.live_server_url + reverse('autocomplete_admin:admin_views_question_add'))
elem = self.selenium.find_element_by_css_selector('.select2-selection')
elem.click() # Open the autocomplete dropdown.
results = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(results.is_displayed())
option = self.selenium.find_element_by_css_selector('.select2-results__option')
self.assertEqual(option.text, 'No results found')
elem.click() # Close the autocomplete dropdown.
Question.objects.create(question='Who am I?')
Question.objects.bulk_create(Question(question=str(i)) for i in range(PAGINATOR_SIZE + 10))
elem.click() # Reopen the dropdown now that some objects exist.
result_container = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), PAGINATOR_SIZE + 1)
search = self.selenium.find_element_by_css_selector('.select2-search__field')
# Load next page of results by scrolling to the bottom of the list.
with self.select2_ajax_wait():
for _ in range(len(results)):
search.send_keys(Keys.ARROW_DOWN)
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), 31)
# Limit the results with the search field.
with self.select2_ajax_wait():
search.send_keys('Who')
# Ajax request is delayed.
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), 32)
self.assertTrue(result_container.is_displayed())
results = result_container.find_elements_by_css_selector('.select2-results__option')
self.assertEqual(len(results), 1)
# Select the result.
search.send_keys(Keys.RETURN)
# Reopen the dropdown and add the first result to the selection.
elem.click()
search.send_keys(Keys.ARROW_DOWN)
search.send_keys(Keys.RETURN)
select = Select(self.selenium.find_element_by_id('id_related_questions'))
self.assertEqual(len(select.all_selected_options), 2)
def test_inline_add_another_widgets(self):
def assertNoResults(row):
elem = row.find_element_by_css_selector('.select2-selection')
elem.click() # Open the autocomplete dropdown.
results = self.selenium.find_element_by_css_selector('.select2-results')
self.assertTrue(results.is_displayed())
option = self.selenium.find_element_by_css_selector('.select2-results__option')
self.assertEqual(option.text, 'No results found')
# Autocomplete works in rows present when the page loads.
self.selenium.get(self.live_server_url + reverse('autocomplete_admin:admin_views_book_add'))
rows = self.selenium.find_elements_by_css_selector('.dynamic-authorship_set')
self.assertEqual(len(rows), 3)
assertNoResults(rows[0])
# Autocomplete works in rows added using the "Add another" button.
self.selenium.find_element_by_link_text('Add another Authorship').click()
rows = self.selenium.find_elements_by_css_selector('.dynamic-authorship_set')
self.assertEqual(len(rows), 4)
assertNoResults(rows[-1])
|
|
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
Sum, Count,
F, Value, Func,
IntegerField, BooleanField, CharField)
from django.test import TestCase
from django.utils import six
from .models import Author, Book, Store, DepartmentStore, Company, Employee
def cxOracle_513_py3_bug(func):
"""
cx_Oracle versions up to and including 5.1.3 have a bug with respect to
string handling under Python3 (essentially, they treat Python3 strings
as Python2 strings rather than unicode). This makes some tests here
fail under Python 3 -- so we mark them as expected failures.
See https://code.djangoproject.com/ticket/23843, in particular comment 6,
which points to https://bitbucket.org/anthony_tuininga/cx_oracle/issue/6/
"""
from unittest import expectedFailure
from django.db import connection
if connection.vendor == 'oracle' and six.PY3 and connection.Database.version <= '5.1.3':
return expectedFailure(func)
else:
return func
class NonAggregateAnnotationTestCase(TestCase):
fixtures = ["annotations.json"]
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword .*"):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=2)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=2)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=1).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name')).filter(
name='Practical Django Projects').order_by(
'store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=1)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
        with six.assertRaisesRegex(self, FieldDoesNotExist, r"\w has no field named u?'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_column_field_ordering(self):
"""
Test that columns are aligned in the correct order for
resolve_columns. This test will fail on mysql if column
ordering is out. Column fields should be aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_513_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
@cxOracle_513_py3_bug
def test_custom_functions_can_ref_other_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
class Lower(Func):
function = 'LOWER'
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).annotate(
tagline_lower=Lower(F('tagline'), output_field=CharField())
).order_by('name')
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'.lower()),
('Django Software Foundation', 'No Tag'.lower()),
('Google', 'Do No Evil'.lower()),
('Yahoo', 'Internet Company'.lower())
],
lambda c: (c.name, c.tagline_lower)
)
|
|
# -*- coding: utf-8 -*-
import logging
import itertools
import math
import httplib as http
from modularodm import Q
from flask import request
from framework import utils
from framework import sentry
from framework.auth.core import User
from framework.flask import redirect # VOL-aware redirect
from framework.routing import proxy_url
from framework.exceptions import HTTPError
from framework.auth.forms import SignInForm
from framework.forms import utils as form_utils
from framework.guid.model import GuidStoredObject
from framework.auth.forms import RegistrationForm
from framework.auth.forms import ResetPasswordForm
from framework.auth.forms import ForgotPasswordForm
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from website.models import Guid
from website.models import Node
from website.util import rubeus
from website.project import model
from website.util import web_url_for
from website.util import permissions
from website.project import new_dashboard
from website.settings import ALL_MY_PROJECTS_ID
from website.settings import ALL_MY_REGISTRATIONS_ID
logger = logging.getLogger(__name__)
def _rescale_ratio(auth, nodes):
"""Get scaling denominator for log lists across a sequence of nodes.
    :param auth: Auth object for the current request
    :param nodes: Iterable of nodes
    :return: Max number of logs across the nodes the user can view
"""
if not nodes:
return 0
counts = [
len(node.logs)
for node in nodes
if node.can_view(auth)
]
if counts:
return float(max(counts))
return 0.0
def _render_node(node, auth=None):
"""
    :param node: Node to serialize
    :param auth: Auth object for the current user, if any
    :return: Dictionary of node attributes used in project listings
"""
perm = None
# NOTE: auth.user may be None if viewing public project while not
# logged in
if auth and auth.user and node.get_permissions(auth.user):
perm_list = node.get_permissions(auth.user)
perm = permissions.reduce_permissions(perm_list)
return {
'title': node.title,
'id': node._primary_key,
'url': node.url,
'api_url': node.api_url,
'primary': node.primary,
'date_modified': utils.iso8601format(node.date_modified),
'category': node.category,
'permissions': perm, # A string, e.g. 'admin', or None
}
def _render_nodes(nodes, auth=None):
"""
    :param nodes: Iterable of nodes to serialize
    :param auth: Auth object for the current user, if any
    :return: Dictionary with the rendered nodes and their rescale ratio
"""
ret = {
'nodes': [
_render_node(node, auth)
for node in nodes
],
'rescale_ratio': _rescale_ratio(auth, nodes),
}
return ret
@collect_auth
def index(auth):
"""Redirect to dashboard if user is logged in, else show homepage.
"""
if auth.user:
return redirect(web_url_for('dashboard'))
return {}
def find_dashboard(user):
dashboard_folder = user.node__contributed.find(
Q('is_dashboard', 'eq', True)
)
if dashboard_folder.count() == 0:
new_dashboard(user)
dashboard_folder = user.node__contributed.find(
Q('is_dashboard', 'eq', True)
)
return dashboard_folder[0]
@must_be_logged_in
def get_dashboard(auth, nid=None, **kwargs):
user = auth.user
if nid is None:
node = find_dashboard(user)
dashboard_projects = [rubeus.to_project_root(node, auth, **kwargs)]
return_value = {'data': dashboard_projects}
elif nid == ALL_MY_PROJECTS_ID:
return_value = {'data': get_all_projects_smart_folder(**kwargs)}
elif nid == ALL_MY_REGISTRATIONS_ID:
return_value = {'data': get_all_registrations_smart_folder(**kwargs)}
else:
node = Node.load(nid)
dashboard_projects = rubeus.to_project_hgrid(node, auth, **kwargs)
return_value = {'data': dashboard_projects}
return_value['timezone'] = user.timezone
return_value['locale'] = user.locale
return return_value
@must_be_logged_in
def get_all_projects_smart_folder(auth, **kwargs):
# TODO: Unit tests
user = auth.user
contributed = user.node__contributed
nodes = contributed.find(
Q('category', 'eq', 'project') &
Q('is_deleted', 'eq', False) &
Q('is_registration', 'eq', False) &
Q('is_folder', 'eq', False) &
# parent is not in the nodes list
Q('__backrefs.parent.node.nodes', 'eq', None)
).sort('-title')
parents_to_exclude = contributed.find(
Q('category', 'eq', 'project') &
Q('is_deleted', 'eq', False) &
Q('is_registration', 'eq', False) &
Q('is_folder', 'eq', False)
)
comps = contributed.find(
Q('is_folder', 'eq', False) &
# parent is not in the nodes list
Q('__backrefs.parent.node.nodes', 'nin', parents_to_exclude.get_keys()) &
# is not in the nodes list
Q('_id', 'nin', nodes.get_keys()) &
# exclude deleted nodes
Q('is_deleted', 'eq', False) &
# exclude registrations
Q('is_registration', 'eq', False)
)
return_value = [rubeus.to_project_root(node, auth, **kwargs) for node in comps]
return_value.extend([rubeus.to_project_root(node, auth, **kwargs) for node in nodes])
return return_value
@must_be_logged_in
def get_all_registrations_smart_folder(auth, **kwargs):
# TODO: Unit tests
user = auth.user
contributed = user.node__contributed
nodes = contributed.find(
Q('category', 'eq', 'project') &
Q('is_deleted', 'eq', False) &
Q('is_registration', 'eq', True) &
Q('is_folder', 'eq', False) &
# parent is not in the nodes list
Q('__backrefs.parent.node.nodes', 'eq', None)
).sort('-title')
parents_to_exclude = contributed.find(
Q('category', 'eq', 'project') &
Q('is_deleted', 'eq', False) &
Q('is_registration', 'eq', True) &
Q('is_folder', 'eq', False)
)
comps = contributed.find(
Q('is_folder', 'eq', False) &
# parent is not in the nodes list
Q('__backrefs.parent.node.nodes', 'nin', parents_to_exclude.get_keys()) &
# is not in the nodes list
Q('_id', 'nin', nodes.get_keys()) &
# exclude deleted nodes
Q('is_deleted', 'eq', False) &
# exclude registrations
Q('is_registration', 'eq', True)
)
return_value = [rubeus.to_project_root(comp, auth, **kwargs) for comp in comps]
return_value.extend([rubeus.to_project_root(node, auth, **kwargs) for node in nodes])
return return_value
@must_be_logged_in
def get_dashboard_nodes(auth):
"""Get summary information about the current user's dashboard nodes.
:param-query no_components: Exclude components from response.
    NOTE: By default, components will only be shown if the current user
    is a contributor on a component but not its parent project. This query
    parameter forces ALL components to be excluded from the request.
:param-query permissions: Filter upon projects for which the current user
has the specified permissions. Examples: 'write', 'admin'
"""
user = auth.user
contributed = user.node__contributed # nodes user contributed to
nodes = contributed.find(
Q('category', 'eq', 'project') &
Q('is_deleted', 'eq', False) &
Q('is_registration', 'eq', False) &
Q('is_folder', 'eq', False)
)
# TODO: Store truthy values in a named constant available site-wide
if request.args.get('no_components') not in [True, 'true', 'True', '1', 1]:
comps = contributed.find(
# components only
Q('category', 'ne', 'project') &
# parent is not in the nodes list
Q('__backrefs.parent.node.nodes', 'nin', nodes.get_keys()) &
# exclude deleted nodes
Q('is_deleted', 'eq', False) &
# exclude registrations
Q('is_registration', 'eq', False)
)
else:
comps = []
nodes = list(nodes) + list(comps)
if request.args.get('permissions'):
perm = request.args['permissions'].strip().lower()
if perm not in permissions.PERMISSIONS:
raise HTTPError(http.BAD_REQUEST, dict(
message_short='Invalid query parameter',
message_long='{0} is not in {1}'.format(perm, permissions.PERMISSIONS)
))
response_nodes = [node for node in nodes if node.has_permission(user, permission=perm)]
else:
response_nodes = nodes
return _render_nodes(response_nodes, auth)
@must_be_logged_in
def dashboard(auth):
user = auth.user
dashboard_folder = find_dashboard(user)
dashboard_id = dashboard_folder._id
return {'addons_enabled': user.get_addon_names(),
'dashboard_id': dashboard_id,
}
def paginate(items, total, page, size):
start = page * size
paginated_items = itertools.islice(items, start, start + size)
pages = math.ceil(total / float(size))
return paginated_items, pages
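# Editorial worked example for the paginate helper above: with 25 items, page 1 and a
# page size of 10, islice yields items 10..19 and pages is ceil(25 / 10.0) == 3.0.
def _example_paginate():
    page_items, pages = paginate(range(25), total=25, page=1, size=10)
    return list(page_items), pages  # ([10, 11, ..., 19], 3.0)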
@must_be_logged_in
def watched_logs_get(**kwargs):
user = kwargs['auth'].user
try:
page = int(request.args.get('page', 0))
except ValueError:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "page".'
))
try:
size = int(request.args.get('size', 10))
except ValueError:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "size".'
))
total = sum(1 for x in user.get_recent_log_ids())
paginated_logs, pages = paginate(user.get_recent_log_ids(), total, page, size)
logs = (model.NodeLog.load(id) for id in paginated_logs)
return {
"logs": [serialize_log(log) for log in logs],
"total": total,
"pages": pages,
"page": page
}
def serialize_log(node_log, anonymous=False):
'''Return a dictionary representation of the log.'''
return {
'id': str(node_log._primary_key),
'user': node_log.user.serialize()
if isinstance(node_log.user, User)
else {'fullname': node_log.foreign_user},
'contributors': [node_log._render_log_contributor(c) for c in node_log.params.get("contributors", [])],
'api_key': node_log.api_key.label if node_log.api_key else '',
'action': node_log.action,
'params': node_log.params,
'date': utils.iso8601format(node_log.date),
'node': node_log.node.serialize() if node_log.node else None,
'anonymous': anonymous
}
def reproducibility():
return redirect('/ezcuj/wiki')
def registration_form():
return form_utils.jsonify(RegistrationForm(prefix='register'))
def signin_form():
return form_utils.jsonify(SignInForm())
def forgot_password_form():
return form_utils.jsonify(ForgotPasswordForm(prefix='forgot_password'))
def reset_password_form():
return form_utils.jsonify(ResetPasswordForm())
# GUID ###
def _build_guid_url(base, suffix=None):
url = '/'.join([
each.strip('/') for each in [base, suffix]
if each
])
return u'/{0}/'.format(url)
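# Editorial sketch of _build_guid_url above: stray slashes are stripped from each piece
# and the result always gains leading and trailing slashes. The GUID and suffix used
# here are hypothetical.
def _example_build_guid_url():
    assert _build_guid_url('/abc12/') == u'/abc12/'
    assert _build_guid_url('abc12', 'files/osfstorage/') == u'/abc12/files/osfstorage/'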
def resolve_guid(guid, suffix=None):
"""Load GUID by primary key, look up the corresponding view function in the
routing table, and return the return value of the view function without
changing the URL.
:param str guid: GUID primary key
:param str suffix: Remainder of URL after the GUID
:return: Return value of proxied view function
"""
# Look up GUID
guid_object = Guid.load(guid)
if guid_object:
# verify that the object is a GuidStoredObject descendant. If a model
# was once a descendant but that relationship has changed, it's
# possible to have referents that are instances of classes that don't
# have a redirect_mode attribute or otherwise don't behave as
# expected.
if not isinstance(guid_object.referent, GuidStoredObject):
sentry.log_message(
'Guid `{}` resolved to non-guid object'.format(guid)
)
raise HTTPError(http.NOT_FOUND)
referent = guid_object.referent
if referent is None:
logger.error('Referent of GUID {0} not found'.format(guid))
raise HTTPError(http.NOT_FOUND)
if not referent.deep_url:
raise HTTPError(http.NOT_FOUND)
url = _build_guid_url(referent.deep_url, suffix)
return proxy_url(url)
# GUID not found; try lower-cased and redirect if exists
guid_object_lower = Guid.load(guid.lower())
if guid_object_lower:
return redirect(
_build_guid_url(guid.lower(), suffix)
)
# GUID not found
raise HTTPError(http.NOT_FOUND)
|
|
"""
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: Simplified BSD
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
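# Editorial sketch: because Bunch aliases its own __dict__ to itself, key access and
# attribute access are interchangeable.
def _example_bunch():
    b = Bunch(data=[1, 2, 3], target_names=['a', 'b'])
    assert b.data is b['data']
    assert b.target_names == b['target_names']
    return b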
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
    variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
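# Editorial usage sketch for get_data_home above: an explicit argument wins, then the
# SCIKIT_LEARN_DATA environment variable, then ~/scikit_learn_data. The paths below are
# hypothetical; the chosen folder is created on first use.
def _example_get_data_home():
    environ['SCIKIT_LEARN_DATA'] = '/tmp/sklearn_cache'
    from_env = get_data_home()               # -> '/tmp/sklearn_cache'
    explicit = get_data_home('~/elsewhere')  # '~' is expanded; the env var is ignored
    return from_env, explicit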
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
    The folder names are used as supervised signal label names. The individual
file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use utf-8 text files in a scikit-learn classification or clustering
    algorithm you will first need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
    Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
        Whether or not to load the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = [open(filename).read() for filename in filenames]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
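# Editorial usage sketch for load_files above; '/path/to/container' is a hypothetical
# folder laid out with one subfolder per category as described in the docstring.
def _example_load_files():
    bunch = load_files('/path/to/container', categories=['neg', 'pos'],
                       load_content=True, shuffle=True, random_state=42)
    # bunch.data[i] is the raw text of bunch.filenames[i], and bunch.target[i] is an
    # integer index into bunch.target_names.
    return bunch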
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
data_file = csv.reader(open(join(module_path, 'data', 'iris.csv')))
fdescr = open(join(module_path, 'descr', 'iris.rst'))
temp = data_file.next()
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr.read(),
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> digits.data.shape
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
descr = open(join(module_path, 'descr', 'digits.rst')).read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
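# Illustrative sketch (not part of the original scikit-learn module): a minimal
# check of the Bunch returned by load_diabetes(), using the shapes stated in the
# docstring above.
def _example_load_diabetes_shapes():
    """Illustrative only: load the diabetes data and verify its documented shape."""
    diabetes = load_diabetes()
    assert diabetes.data.shape == (442, 10)
    assert diabetes.target.shape == (442,)
    return diabetes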
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
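# Illustrative sketch (not part of the original scikit-learn module):
# load_linnerud() pairs a 20x3 exercise matrix ('data') with a 20x3
# physiological matrix ('target'), each carrying its own header names.
def _example_load_linnerud_names():
    """Illustrative only: return the documented column names of both matrices."""
    linnerud = load_linnerud()
    assert linnerud.data.shape == (20, 3)
    assert linnerud.target.shape == (20, 3)
    return list(zip(linnerud.feature_names, linnerud.target_names))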
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
'target_names', the meaning of the labels, and 'DESCR', the
full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> boston.data.shape
(506, 13)
"""
module_path = dirname(__file__)
data_file = csv.reader(open(join(module_path, 'data',
'boston_house_prices.csv')))
fdescr = open(join(module_path, 'descr', 'boston_house_prices.rst'))
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file)  # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=fdescr.read())
def load_sample_images():
"""Load sample images for image manipulation.
Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
|
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2019 Stefano Gottardo - @CastagnaIT (original implementation module)
Sharable database access and functions
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from datetime import datetime
import resources.lib.common as common
import resources.lib.database.db_base_mysql as db_base_mysql
import resources.lib.database.db_base_sqlite as db_base_sqlite
import resources.lib.database.db_utils as db_utils
def get_shareddb_class(use_mysql=False):
# Dynamically sets the inherit class
base_class = db_base_mysql.MySQLDatabase if use_mysql else db_base_sqlite.SQLiteDatabase
class NFSharedDatabase(base_class):
def __init__(self):
if use_mysql:
super(NFSharedDatabase, self).__init__() # pylint: disable=no-value-for-parameter
else:
super(NFSharedDatabase, self).__init__(db_utils.SHARED_DB_FILENAME)
def get_value(self, key, default_value=None, table=db_utils.TABLE_SHARED_APP_CONF, data_type=None): # pylint: disable=useless-super-delegation
return super(NFSharedDatabase, self).get_value(key, default_value, table, data_type)
def get_values(self, key, default_value=None, table=db_utils.TABLE_SHARED_APP_CONF): # pylint: disable=useless-super-delegation
return super(NFSharedDatabase, self).get_values(key, default_value, table)
def set_value(self, key, value, table=db_utils.TABLE_SHARED_APP_CONF): # pylint: disable=useless-super-delegation
super(NFSharedDatabase, self).set_value(key, value, table)
def delete_key(self, key, table=db_utils.TABLE_SHARED_APP_CONF): # pylint: disable=useless-super-delegation
super(NFSharedDatabase, self).delete_key(key, table)
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def set_profile(self, guid, sort_order):
"""Update or Insert a profile"""
# Update or insert approach,
# if there is no updated row then insert new one (no id changes)
if self.is_mysql_database:
query = db_utils.mysql_insert_or_update('profiles', ['Guid'], ['SortOrder'])
self._execute_non_query(query, (guid, sort_order), multi=True)
else:
data = db_utils.sql_filtered_update('profiles',
['SortOrder'],
['Guid'],
[sort_order, guid])
cur = self._execute_query(data[0], data[1])
if cur.rowcount == 0:
data = db_utils.sql_filtered_insert('profiles',
['Guid', 'SortOrder'],
[guid, sort_order])
self._execute_non_query(data[0], data[1])
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def delete_profile(self, guid):
query = 'DELETE FROM profiles WHERE Guid = ?'
self._execute_non_query(query, (guid,))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_movie_filepath(self, movieid, default_value=None):
"""Get movie filepath for given id"""
query = 'SELECT FilePath FROM video_lib_movies WHERE MovieID = ?'
cur = self._execute_query(query, (movieid,))
result = cur.fetchone()
return result[0] if result else default_value
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_episode_filepath(self, tvshowid, seasonid, episodeid, default_value=None):
"""Get movie filepath for given id"""
query =\
('SELECT FilePath FROM video_lib_episodes '
'INNER JOIN video_lib_seasons '
'ON video_lib_episodes.SeasonID = video_lib_seasons.SeasonID '
'WHERE video_lib_seasons.TvShowID = ? AND '
'video_lib_seasons.SeasonID = ? AND '
'video_lib_episodes.EpisodeID = ?')
cur = self._execute_query(query, (tvshowid, seasonid, episodeid))
result = cur.fetchone()
return result[0] if result is not None else default_value
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_all_episodes_ids_and_filepath_from_tvshow(self, tvshowid):
"""Get all episodes IDs and filepaths for given id"""
cur = self.get_cursor_for_dict_results()
query =\
('SELECT video_lib_episodes.FilePath, video_lib_seasons.TvShowID, '
'video_lib_episodes.SeasonID, video_lib_episodes.EpisodeID '
'FROM video_lib_episodes '
'INNER JOIN video_lib_seasons '
'ON video_lib_episodes.SeasonID = video_lib_seasons.SeasonID '
'WHERE video_lib_seasons.TvShowID = ?')
cur = self._execute_query(query, (tvshowid,), cur)
return cur.fetchall()
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_all_episodes_ids_and_filepath_from_season(self, tvshowid, seasonid):
"""Get all episodes IDs and filepaths for given id"""
cur = self.get_cursor_for_dict_results()
query =\
('SELECT video_lib_episodes.FilePath, video_lib_seasons.TvShowID, '
'video_lib_episodes.SeasonID, video_lib_episodes.EpisodeID '
'FROM video_lib_episodes '
'INNER JOIN video_lib_seasons '
'ON video_lib_episodes.SeasonID = video_lib_seasons.SeasonID '
'WHERE video_lib_seasons.TvShowID = ? AND '
'video_lib_seasons.SeasonID = ?')
cur = self._execute_query(query, (tvshowid, seasonid), cur)
return cur.fetchall()
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_random_episode_filepath_from_tvshow(self, tvshowid, default_value=None):
"""Get random episode filepath of a show of a given id"""
rand_func_name = 'RAND()' if self.is_mysql_database else 'RANDOM()'
query =\
('SELECT FilePath FROM video_lib_episodes '
'INNER JOIN video_lib_seasons '
'ON video_lib_episodes.SeasonID = video_lib_seasons.SeasonID '
'WHERE video_lib_seasons.TvShowID = ? '
'ORDER BY {} LIMIT 1').format(rand_func_name)
cur = self._execute_query(query, (tvshowid,))
result = cur.fetchone()
return result[0] if result is not None else default_value
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_random_episode_filepath_from_season(self, tvshowid, seasonid, default_value=None):
"""Get random episode filepath of a show of a given id"""
rand_func_name = 'RAND()' if self.is_mysql_database else 'RANDOM()'
query =\
('SELECT FilePath FROM video_lib_episodes '
'INNER JOIN video_lib_seasons '
'ON video_lib_episodes.SeasonID = video_lib_seasons.SeasonID '
'WHERE video_lib_seasons.TvShowID = ? AND video_lib_seasons.SeasonID = ? '
'ORDER BY {} LIMIT 1').format(rand_func_name)
cur = self._execute_query(query, (tvshowid, seasonid))
result = cur.fetchone()
return result[0] if result is not None else default_value
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_all_video_id_list(self):
"""Get all the ids of movies and tvshows contained in the library"""
cur = self.get_cursor_for_list_results()
query = ('SELECT MovieID FROM video_lib_movies '
'UNION '
'SELECT TvShowID FROM video_lib_tvshows')
cur = self._execute_query(query, cursor=cur)
return self.return_rows_as_list(cur)
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_tvshows_id_list(self, enum_vid_prop=None, prop_value=None):
"""
Get all the ids of tvshows contained in the library
:param enum_vid_prop: Optional: use db_utils.VidLibProp
:param prop_value: Optional: value as filter
:return: list of tvshows ids
"""
cur = self.get_cursor_for_list_results()
if enum_vid_prop and prop_value:
query = ('SELECT TvShowID FROM video_lib_tvshows '
'WHERE ' + enum_vid_prop + ' = ?')
cur = self._execute_query(query, (str(prop_value),), cur)
else:
query = 'SELECT TvShowID FROM video_lib_tvshows'
cur = self._execute_query(query, cursor=cur)
return self.return_rows_as_list(cur)
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_movies_id_list(self):
"""Get all the ids of movies contained in the library"""
cur = self.get_cursor_for_list_results()
query = 'SELECT MovieID FROM video_lib_movies'
cur = self._execute_query(query, cursor=cur)
return self.return_rows_as_list(cur)
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def movie_id_exists(self, movieid):
"""Return True if a movie id exists"""
query = 'SELECT EXISTS(SELECT 1 FROM video_lib_movies WHERE MovieID = ?)'
cur = self._execute_query(query, (movieid,))
return bool(cur.fetchone()[0])
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def tvshow_id_exists(self, tvshowid):
"""Return True if a tvshow id exists"""
query = 'SELECT EXISTS(SELECT 1 FROM video_lib_tvshows WHERE TvShowID = ?)'
cur = self._execute_query(query, (tvshowid,))
return bool(cur.fetchone()[0])
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def season_id_exists(self, tvshowid, seasonid):
"""Return True if a tvshow season id exists"""
query =\
('SELECT EXISTS('
'SELECT 1 FROM video_lib_seasons '
'INNER JOIN video_lib_tvshows '
'ON video_lib_seasons.TvShowID = video_lib_tvshows.TvShowID '
'WHERE video_lib_tvshows.TvShowID = ? AND video_lib_seasons.SeasonID = ?)')
cur = self._execute_query(query, (tvshowid, seasonid))
return bool(cur.fetchone()[0])
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def episode_id_exists(self, tvshowid, seasonid, episodeid):
"""Return True if a tvshow episode id exists"""
query =\
('SELECT EXISTS('
'SELECT 1 FROM video_lib_episodes '
'INNER JOIN video_lib_seasons '
'ON video_lib_episodes.SeasonID = video_lib_seasons.SeasonID '
'INNER JOIN video_lib_tvshows '
'ON video_lib_seasons.TvShowID = video_lib_tvshows.TvShowID '
'WHERE video_lib_tvshows.TvShowID = ? AND '
'video_lib_seasons.SeasonID = ? AND '
'video_lib_episodes.EpisodeID = ?)')
cur = self._execute_query(query, (tvshowid, seasonid, episodeid))
return bool(cur.fetchone()[0])
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def set_movie(self, movieid, file_path, nfo_export):
"""Update or insert a movie"""
# Update or insert approach, if there is no updated row then insert new one
if self.is_mysql_database:
query = db_utils.mysql_insert_or_update('video_lib_movies', ['MovieID'],
['FilePath', 'NfoExport'])
self._execute_non_query(query, (movieid, file_path, str(nfo_export)), multi=True)
else:
update_query = ('UPDATE video_lib_movies SET FilePath = ?, NfoExport = ? '
'WHERE MovieID = ?')
cur = self._execute_query(update_query, (file_path, str(nfo_export), movieid))
if cur.rowcount == 0:
insert_query = ('INSERT INTO video_lib_movies (MovieID, FilePath, NfoExport) '
'VALUES (?, ?, ?)')
self._execute_non_query(insert_query, (movieid, file_path, str(nfo_export)))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def set_tvshow(self, tvshowid, nfo_export, exclude_update):
"""Update or insert a tvshow"""
# Update or insert approach, if there is no updated row then insert new one
if self.is_mysql_database:
query = db_utils.mysql_insert_or_update('video_lib_tvshows', ['TvShowID'],
['ExcludeUpdate', 'NfoExport'])
self._execute_non_query(query, (tvshowid, str(exclude_update), str(nfo_export)),
multi=True)
else:
update_query = ('UPDATE video_lib_tvshows SET NfoExport = ?, ExcludeUpdate = ? '
'WHERE TvShowID = ?')
cur = self._execute_query(update_query, (str(nfo_export),
str(exclude_update), tvshowid))
if cur.rowcount == 0:
insert_query = \
('INSERT INTO video_lib_tvshows (TvShowID, NfoExport, ExcludeUpdate) '
'VALUES (?, ?, ?)')
self._execute_non_query(insert_query, (tvshowid,
str(nfo_export),
str(exclude_update)))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def insert_season(self, tvshowid, seasonid):
"""Insert a season if not exists"""
if not self.season_id_exists(tvshowid, seasonid):
insert_query = ('INSERT INTO video_lib_seasons (TvShowID, SeasonID) '
'VALUES (?, ?)')
self._execute_non_query(insert_query, (tvshowid, seasonid))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def insert_episode(self, tvshowid, seasonid, episodeid, file_path):
"""Insert a episode if not exists"""
if not self.episode_id_exists(tvshowid, seasonid, episodeid):
insert_query = ('INSERT INTO video_lib_episodes (SeasonID, EpisodeID, FilePath) '
'VALUES (?, ?, ?)')
self._execute_non_query(insert_query, (seasonid, episodeid, file_path))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def delete_movie(self, movieid):
"""Delete a movie from database"""
query = 'DELETE FROM video_lib_movies WHERE MovieID = ?'
self._execute_query(query, (movieid,))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def delete_tvshow(self, tvshowid):
"""Delete a tvshow from database"""
query = 'DELETE FROM video_lib_tvshows WHERE TvShowID = ?'
self._execute_query(query, (tvshowid,))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def delete_season(self, tvshowid, seasonid):
"""Delete a season from database"""
query = 'DELETE FROM video_lib_seasons WHERE TvShowID = ? AND SeasonID = ?'
self._execute_query(query, (tvshowid, seasonid))
# if there are no other seasons, delete the tvshow
query = 'SELECT EXISTS(SELECT 1 FROM video_lib_seasons WHERE TvShowID = ?)'
cur = self._execute_query(query, (tvshowid,))
if not bool(cur.fetchone()[0]):
self.delete_tvshow(tvshowid)
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def delete_episode(self, tvshowid, seasonid, episodeid):
"""Delete a episode from database"""
query = 'DELETE FROM video_lib_episodes WHERE SeasonID = ? AND EpisodeID = ?'
self._execute_query(query, (seasonid, episodeid))
# if there are no other episodes, delete the season
query = 'SELECT EXISTS(SELECT 1 FROM video_lib_episodes WHERE SeasonID = ?)'
cur = self._execute_query(query, (seasonid,))
if not bool(cur.fetchone()[0]):
self.delete_season(tvshowid, seasonid)
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_tvshow_property(self, tvshowid, enum_vid_prop, default_value=None, data_type=None):
"""
Read the value of the specified property
:param tvshowid: id of tvshow
:param enum_vid_prop: Use an enum value of db_utils.VidLibProp
:param default_value: When the key does not exist return this default value
:param data_type: OPTIONAL Used to set data type conversion only when default_value is None
:return: the property value
"""
query = 'SELECT ' + enum_vid_prop + ' FROM video_lib_tvshows WHERE TvShowID = ?'
cur = self._execute_query(query, (tvshowid,))
result = cur.fetchone()
if default_value is not None:
data_type = type(default_value)
elif data_type is None:
data_type = str
return common.convert_from_string(result[0], data_type) \
if result is not None else default_value
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def set_tvshow_property(self, tvshowid, enum_vid_prop, value):
update_query = ('UPDATE video_lib_tvshows '
'SET ' + enum_vid_prop + ' = ? WHERE TvShowID = ?')
value = common.convert_to_string(value)
self._execute_query(update_query, (value, tvshowid))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_watched_status(self, profile_guid, videoid, default_value=None, data_type=None):
"""Get override watched status value of a given id stored to current profile"""
query = 'SELECT Value FROM watched_status_override WHERE ProfileGuid = ? AND VideoID = ?'
cur = self._execute_query(query, (profile_guid, videoid))
result = cur.fetchone()
if default_value is not None:
data_type = type(default_value)
elif data_type is None:
data_type = str
return common.convert_from_string(result[0], data_type) \
if result is not None else default_value
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def set_watched_status(self, profile_guid, videoid, value):
"""Update or insert the watched status override value to current profile"""
# Update or insert approach, if there is no updated row then insert new one
value = common.convert_to_string(value)
if self.is_mysql_database:
query = db_utils.mysql_insert_or_update('watched_status_override',
['ProfileGuid', 'VideoID'],
['Value'])
self._execute_non_query(query, (profile_guid, videoid, value),
multi=True)
else:
update_query = ('UPDATE watched_status_override '
'SET Value = ? '
'WHERE ProfileGuid = ? AND VideoID = ?')
cur = self._execute_query(update_query, (value, profile_guid, videoid))
if cur.rowcount == 0:
insert_query = ('INSERT INTO watched_status_override '
'(ProfileGuid, VideoID, Value) '
'VALUES (?, ?, ?)')
self._execute_non_query(insert_query, (profile_guid, videoid, value))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def delete_watched_status(self, profile_guid, videoid):
"""Delete a watched status override from database"""
query = 'DELETE FROM watched_status_override WHERE ProfileGuid = ? AND VideoID = ?'
self._execute_query(query, (profile_guid, videoid))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def get_stream_continuity(self, profile_guid, videoid, default_value=None, data_type=None):
"""Get stream continuity value of a given id stored to current profile"""
query = 'SELECT Value FROM stream_continuity WHERE ProfileGuid = ? AND VideoID = ?'
cur = self._execute_query(query, (profile_guid, videoid))
result = cur.fetchone()
if default_value is not None:
data_type = type(default_value)
elif data_type is None:
data_type = str
return common.convert_from_string(result[0], data_type) \
if result is not None else default_value
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def set_stream_continuity(self, profile_guid, videoid, value):
"""Update or insert a stream continuity value to current profile"""
# Update or insert approach, if there is no updated row then insert new one
value = common.convert_to_string(value)
date_last_modified = common.convert_to_string(datetime.now())
if self.is_mysql_database:
query = db_utils.mysql_insert_or_update('stream_continuity',
['ProfileGuid', 'VideoID'],
['Value', 'DateLastModified'])
self._execute_non_query(query, (profile_guid, videoid, value, date_last_modified),
multi=True)
else:
update_query = ('UPDATE stream_continuity '
'SET Value = ?, DateLastModified = ? '
'WHERE ProfileGuid = ? AND VideoID = ?')
cur = self._execute_query(update_query, (value, date_last_modified,
profile_guid, videoid))
if cur.rowcount == 0:
insert_query = ('INSERT INTO stream_continuity '
'(ProfileGuid, VideoID, Value, DateLastModified) '
'VALUES (?, ?, ?, ?)')
self._execute_non_query(insert_query, (profile_guid, videoid,
value, date_last_modified))
@db_base_mysql.handle_connection
@db_base_sqlite.handle_connection
def purge_library(self):
"""Delete all records from library tables"""
query = 'DELETE FROM video_lib_movies'
self._execute_non_query(query)
query = 'DELETE FROM video_lib_episodes'
self._execute_non_query(query)
query = 'DELETE FROM video_lib_seasons'
self._execute_non_query(query)
query = 'DELETE FROM video_lib_tvshows'
self._execute_non_query(query)
return NFSharedDatabase
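# Illustrative sketch (not part of the add-on): get_shareddb_class() returns a
# class whose base depends on the configured backend; callers instantiate it and
# use the get/set helpers above. The key name 'lib_auto_upd_mode' is hypothetical
# and used purely for illustration.
def _example_open_shared_db(use_mysql=False):
    """Illustrative only: build the shared DB class and read a value with a fallback."""
    shared_db_cls = get_shareddb_class(use_mysql=use_mysql)
    shared_db = shared_db_cls()
    return shared_db.get_value('lib_auto_upd_mode', default_value=0)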
|
|
"""
Various bayesian regression
"""
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from scipy.linalg import pinvh
from .base import LinearModel, _rescale_data
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression.
Fit a Bayesian ridge model. See the Notes section for details on this
implementation and the optimization of the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300. Should be greater than
or equal to 1.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
alpha_init : float
Initial value for alpha (precision of the noise).
If not set, alpha_init is 1/Var(y).
.. versionadded:: 0.22
lambda_init : float
Initial value for lambda (precision of the weights).
If not set, lambda_init is 1.
.. versionadded:: 0.22
compute_score : boolean, optional
If True, compute the log marginal likelihood at each iteration of the
optimization. Default is False.
fit_intercept : boolean, optional, default True
Whether to calculate the intercept for this model.
The intercept is not treated as a probabilistic parameter
and thus has no associated variance. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features,)
Coefficients of the regression model (mean of distribution).
intercept_ : float
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated precision of the noise.
lambda_ : float
Estimated precision of the weights.
sigma_ : array, shape = (n_features, n_features)
Estimated variance-covariance matrix of the weights.
scores_ : array, shape = (n_iter_ + 1,)
If compute_score is True, value of the log marginal likelihood (to be
maximized) at each iteration of the optimization. The array starts
with the value of the log marginal likelihood obtained for the initial
values of alpha and lambda and ends with the value obtained for the
estimated alpha and lambda.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
BayesianRidge()
>>> clf.predict([[1, 1]])
array([1.])
Notes
-----
There exist several strategies to perform Bayesian ridge regression. This
implementation is based on the algorithm described in Appendix A of
(Tipping, 2001) where updates of the regularization parameters are done as
suggested in (MacKay, 1992). Note that according to A New
View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these
update rules do not guarantee that the marginal likelihood is increasing
between two consecutive iterations of the optimization.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,
Journal of Machine Learning Research, Vol. 1, 2001.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, alpha_init=None,
lambda_init=None, compute_score=False, fit_intercept=True,
normalize=False, copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.alpha_init = alpha_init
self.lambda_init = lambda_init
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : returns an instance of self.
"""
if self.n_iter < 1:
raise ValueError('n_iter should be greater than or equal to 1.'
' Got {!r}.'.format(self.n_iter))
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to avoid division by zero when `np.var(y)`
# is zero
alpha_ = self.alpha_init
lambda_ = self.lambda_init
if alpha_ is None:
alpha_ = 1. / (np.var(y) + eps)
if lambda_ is None:
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# update posterior mean coef_ based on alpha_ and lambda_ and
# compute corresponding rmse
coef_, rmse_ = self._update_coef_(X, y, n_samples, n_features,
XT_y, U, Vh, eigen_vals_,
alpha_, lambda_)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(n_samples, n_features,
eigen_vals_,
alpha_, lambda_,
coef_, rmse_)
self.scores_.append(s)
# Update alpha and lambda according to (MacKay, 1992)
gamma_ = np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.n_iter_ = iter_ + 1
# return regularization parameters and corresponding posterior mean,
# log marginal likelihood and posterior covariance
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_, rmse_ = self._update_coef_(X, y, n_samples, n_features,
XT_y, U, Vh, eigen_vals_,
alpha_, lambda_)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(n_samples, n_features,
eigen_vals_,
alpha_, lambda_,
coef_, rmse_)
self.scores_.append(s)
self.scores_ = np.array(self.scores_)
# posterior covariance is given by 1/alpha_ * scaled_sigma_
scaled_sigma_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = (1. / alpha_) * scaled_sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
def _update_coef_(self, X, y, n_samples, n_features, XT_y, U, Vh,
eigen_vals_, alpha_, lambda_):
"""Update posterior mean and compute corresponding rmse.
Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where
scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features)
+ np.dot(X.T, X))^-1
"""
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
coef_ = np.dot(coef_, XT_y)
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
return coef_, rmse_
def _log_marginal_likelihood(self, n_samples, n_features, eigen_vals,
alpha_, lambda_, coef, rmse):
"""Log marginal likelihood."""
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
# compute the log of the determinant of the posterior covariance.
# posterior covariance is given by
# sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1
if n_samples > n_features:
logdet_sigma = - np.sum(np.log(lambda_ + alpha_ * eigen_vals))
else:
logdet_sigma = np.full(n_features, lambda_,
dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals
logdet_sigma = - np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse -
lambda_ * np.sum(coef ** 2) +
logdet_sigma -
n_samples * log(2 * np.pi))
return score
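# Illustrative sketch (not part of scikit-learn): besides the point prediction,
# predict(return_std=True) exposes the posterior standard deviation derived from
# sigma_ and alpha_. The toy data below is made up for illustration.
def _example_bayesian_ridge_std():
    """Illustrative only: fit on synthetic data and return mean/std predictions."""
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.dot(X, np.array([1.0, 2.0, 0.5])) + 0.1 * rng.randn(20)
    reg = BayesianRidge(compute_score=True)
    reg.fit(X, y)
    y_mean, y_std = reg.predict(X, return_std=True)
    return y_mean, y_std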
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
ARDRegression()
>>> clf.predict([[1, 1]])
array([1.])
Notes
-----
For an example, see :ref:`examples/linear_model/plot_ard.py
<sphx_glr_auto_examples_linear_model_plot_ard.py>`.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our ``self.alpha_``
Their alpha is our ``self.lambda_``
ARD is a little different than the slide: only dimensions/features for
which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are
discarded.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values. Will be cast to X's dtype if necessary
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True,
ensure_min_samples=2)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to avoid division by zero when `np.var(y)`
# is zero
alpha_ = 1. / (np.var(y) + eps)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Compute sigma and mu (using Woodbury matrix identity)
def update_sigma(X, alpha_, lambda_, keep_lambda, n_samples):
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
return sigma_
def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
return coef_
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda, n_samples)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
# update sigma and mu using updated parameters from the last iteration
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda, n_samples)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
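# Illustrative sketch (not part of scikit-learn): with mostly irrelevant
# features, ARD tends to drive their precisions above threshold_lambda, and the
# pruning step in fit() zeroes the corresponding coefficients. Synthetic data only.
def _example_ard_pruning():
    """Illustrative only: fit ARD on data where just two features matter."""
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    y = np.dot(X[:, :2], np.array([2.0, -1.0])) + 0.05 * rng.randn(50)
    clf = ARDRegression()
    clf.fit(X, y)
    # Features pruned during fitting (if any) end up with exactly-zero coefficients.
    return clf.coef_, clf.lambda_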
|
|
import time
import logging
import torch
import numpy as np
from eight_mile.progress import create_progress_bar
from eight_mile.utils import listify
from baseline.utils import get_model_file, get_metric_cmp, convert_seq2seq_golds, convert_seq2seq_preds
from baseline.train import Trainer, create_trainer, register_trainer, register_training_func
from eight_mile.pytorch.optz import OptimizerManager
from eight_mile.bleu import bleu
from baseline.model import create_model_for
from torch.utils.data import DataLoader
logger = logging.getLogger('baseline')
@register_trainer(task='seq2seq', name='default')
class Seq2SeqTrainerPyTorch(Trainer):
def __init__(self, model, **kwargs):
super().__init__()
if type(model) is dict:
checkpoint = kwargs.get('checkpoint')
if checkpoint:
model['checkpoint'] = checkpoint
model = create_model_for('seq2seq', **model)
self.clip = float(kwargs.get('clip', 5))
self.model = model
self.optimizer = OptimizerManager(self.model, **kwargs)
self._input = model.make_input
self._predict = model.predict
self.tgt_rlut = kwargs['tgt_rlut']
self.gpus = kwargs.get('gpus', 1)
self.bleu_n_grams = int(kwargs.get("bleu_n_grams", 4))
self.label_smoothing = kwargs.get("label_smoothing")
if self.gpus > 0:
self.crit = model.create_loss(label_smooth=self.label_smoothing).cuda()
if self.gpus > 1:
self.model = torch.nn.DataParallel(model).cuda()
else:
self.model.cuda()
else:
logger.warning("Requested training on CPU. This will be slow.")
self.crit = model.create_loss()
self.nsteps = kwargs.get('nsteps', 500)
@staticmethod
def _num_toks(tgt_lens):
return torch.sum(tgt_lens).item()
@staticmethod
def _acc(preds, golds):
"""Calculate the accuracy of exact matching between preds and golds. This metric is particularly useful when
using Seq2SeqModel for prediction."""
total = len(preds)
correct = 0
for pred, gold in zip(preds, golds):
if pred == gold[0]:
correct += 1
return float(correct)/total
def save(self, model_file):
self._get_pytorch_model().save(model_file)
def _get_pytorch_model(self):
return self.model.module if self.gpus > 1 else self.model
def calc_metrics(self, agg, norm):
metrics = super().calc_metrics(agg, norm)
metrics['perplexity'] = np.exp(metrics['avg_loss'])
return metrics
def test(self, vs, reporting_fns, phase, **kwargs):
if phase == 'Test':
return self._evaluate(vs, reporting_fns, **kwargs)
self.model.eval()
total_loss = total_toks = 0
steps = len(vs)
self.valid_epochs += 1
preds = []
golds = []
start = time.perf_counter()
pg = create_progress_bar(steps)
for batch_dict in pg(vs):
input_ = self._input(batch_dict)
tgt = input_['tgt']
tgt_lens = input_['tgt_len']
pred = self.model(input_)
loss = self.crit(pred, tgt)
toks = self._num_toks(tgt_lens)
total_loss += loss.item() * toks
total_toks += toks
greedy_preds = [p[0] for p in self._predict(input_, beam=1, make_input=False)[0]]
preds.extend(convert_seq2seq_preds(greedy_preds, self.tgt_rlut))
golds.extend(convert_seq2seq_golds(tgt.cpu().numpy(), tgt_lens, self.tgt_rlut))
metrics = self.calc_metrics(total_loss, total_toks)
metrics['bleu'] = bleu(preds, golds, self.bleu_n_grams)[0]
metrics['acc'] = self._acc(preds, golds)
self.report(
self.valid_epochs, metrics, start,
phase, 'EPOCH', reporting_fns
)
return metrics
def _evaluate(self, es, reporting_fns, **kwargs):
self.model.eval()
pg = create_progress_bar(len(es))
preds = []
golds = []
start = time.perf_counter()
for batch_dict in pg(es):
tgt = batch_dict['tgt']
tgt_lens = batch_dict['tgt_lengths']
pred = [p[0] for p in self._predict(batch_dict, numpy_to_tensor=False, **kwargs)[0]]
preds.extend(convert_seq2seq_preds(pred, self.tgt_rlut))
golds.extend(convert_seq2seq_golds(tgt, tgt_lens, self.tgt_rlut))
metrics = {'bleu': bleu(preds, golds, self.bleu_n_grams)[0]}
metrics['acc'] = self._acc(preds, golds)
self.report(
0, metrics, start, 'Test', 'EPOCH', reporting_fns
)
return metrics
def train(self, ts, reporting_fns):
self.model.train()
epoch_loss = 0
epoch_toks = 0
start = time.perf_counter()
self.nstep_start = start
for batch_dict in ts:
self.optimizer.zero_grad()
input_ = self._input(batch_dict)
tgt = input_['tgt']
pred = self.model(input_)
loss = self.crit(pred, tgt)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
self.optimizer.step()
tgt_lens = batch_dict['tgt_lengths']
tok_count = self._num_toks(tgt_lens)
reporting_loss = loss.item() * tok_count
epoch_loss += reporting_loss
epoch_toks += tok_count
self.nstep_agg += reporting_loss
self.nstep_div += tok_count
if (self.optimizer.global_step + 1) % self.nsteps == 0:
metrics = self.calc_metrics(self.nstep_agg, self.nstep_div)
metrics['lr'] = self.optimizer.current_lr
self.report(
self.optimizer.global_step + 1, metrics, self.nstep_start,
'Train', 'STEP', reporting_fns, self.nsteps
)
self.reset_nstep()
metrics = self.calc_metrics(epoch_loss, epoch_toks)
metrics['lr'] = self.optimizer.current_lr
self.train_epochs += 1
self.report(
self.train_epochs, metrics, start,
'Train', 'EPOCH', reporting_fns
)
return metrics
@register_training_func('seq2seq')
def fit(model_params, ts, vs, es=None, **kwargs):
do_early_stopping = bool(kwargs.get('do_early_stopping', True))
epochs = int(kwargs.get('epochs', 20))
model_file = get_model_file('seq2seq', 'pytorch', kwargs.get('basedir'))
num_loader_workers = int(kwargs.get('num_loader_workers', 0))
pin_memory = bool(kwargs.get('pin_memory', True))
if not isinstance(ts, DataLoader):
ts = DataLoader(ts, num_workers=num_loader_workers, batch_size=None, pin_memory=pin_memory)
if not isinstance(vs, DataLoader):
vs = DataLoader(vs, batch_size=None, pin_memory=pin_memory)
if es and not isinstance(es, DataLoader):
es = DataLoader(es, batch_size=None, pin_memory=pin_memory)
best_metric = 0
if do_early_stopping:
early_stopping_metric = kwargs.get('early_stopping_metric', 'perplexity')
early_stopping_cmp, best_metric = get_metric_cmp(early_stopping_metric, kwargs.get('early_stopping_cmp'))
patience = kwargs.get('patience', epochs)
logger.info('Doing early stopping on [%s] with patience [%d]', early_stopping_metric, patience)
reporting_fns = listify(kwargs.get('reporting', []))
logger.info('reporting %s', reporting_fns)
after_train_fn = kwargs.get('after_train_fn', None)
trainer = create_trainer(model_params, **kwargs)
last_improved = 0
for epoch in range(epochs):
trainer.train(ts, reporting_fns)
if after_train_fn is not None:
after_train_fn(trainer.model)
test_metrics = trainer.test(vs, reporting_fns, phase='Valid')
if do_early_stopping is False:
trainer.save(model_file)
elif early_stopping_cmp(test_metrics[early_stopping_metric], best_metric):
last_improved = epoch
best_metric = test_metrics[early_stopping_metric]
logger.info('New best %.3f', best_metric)
trainer.save(model_file)
elif (epoch - last_improved) > patience:
logger.info('Stopping due to persistent failures to improve')
break
if do_early_stopping is True:
logger.info('Best performance on %s: %.3f at epoch %d', early_stopping_metric, best_metric, last_improved)
if es is not None:
model = torch.load(model_file)
trainer = create_trainer(model, **kwargs)
test_metrics = trainer.test(es, reporting_fns, phase='Test')
return test_metrics
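# Illustrative sketch (not part of baseline): fit() is normally invoked through
# the mead driver; a direct call would look roughly like this. All five
# arguments are hypothetical objects produced elsewhere by the baseline data
# pipeline, and 'tgt_rlut' is the reverse lookup table the trainer requires.
def _example_fit_seq2seq(model_params, train_set, valid_set, test_set, tgt_rlut):
    """Illustrative only: train with early stopping on BLEU and evaluate on test_set."""
    return fit(model_params, train_set, valid_set, es=test_set,
               tgt_rlut=tgt_rlut, epochs=10, nsteps=100,
               do_early_stopping=True, early_stopping_metric='bleu')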
|
|
import datetime
import logging
from pyon.agent.simple_agent import SimpleResourceAgent
from pyon.core.exception import Unauthorized, NotFound
from pyon.public import log
from pyon.net.endpoint import Publisher
from interface.objects import AgentCommand
from ion.agents.cei.util import looping_call
try:
from eeagent.core import EEAgentCore
from eeagent.beatit import make_beat_msg
from eeagent.execute import get_exe_factory
from eeagent.eeagent_exceptions import EEAgentUnauthorizedException
from pidantic.pidantic_exceptions import PIDanticExecutionException
except ImportError:
EEAgentCore = None # noqa
"""
@package ion.agents.cei.execution_engine_agent
@file ion/agents/cei/execution_engine_agent.py
@author Patrick Armstrong
@brief Pyon port of EEAgent
"""
DEFAULT_HEARTBEAT = 5
class ExecutionEngineAgent(SimpleResourceAgent):
"""Agent to manage processes on a worker
"""
def __init__(self):
log.debug("ExecutionEngineAgent init")
SimpleResourceAgent.__init__(self)
def on_init(self):
if not EEAgentCore:
msg = "EEAgentCore isn't available. Use autolaunch.cfg buildout"
log.error(msg)
self.heartbeat_thread = None
return
log.debug("ExecutionEngineAgent Pyon on_init")
launch_type_name = self.CFG.eeagent.launch_type.name
if not launch_type_name:
# TODO: Fail fast here?
log.error("No launch_type.name specified")
self._factory = get_exe_factory(
launch_type_name, self.CFG, pyon_container=self.container, log=log)
# TODO: Allow other core class?
self.core = EEAgentCore(self.CFG, self._factory, log)
interval = float(self.CFG.eeagent.get('heartbeat', DEFAULT_HEARTBEAT))
if interval > 0:
self.heartbeater = HeartBeater(
self.CFG, self._factory, self.resource_id, self, log=log)
self.heartbeater.poll()
self.heartbeat_thread, self._heartbeat_thread_event = looping_call(0.1, self.heartbeater.poll)
else:
self.heartbeat_thread = None
self._heartbeat_thread_event = None
def on_quit(self):
if self._heartbeat_thread_event is not None:
self._heartbeat_thread_event.set()
self.heartbeat_thread.join()
self.heartbeat_thread.kill() # just in case
self._factory.terminate()
def rcmd_launch_process(self, u_pid, round, run_type, parameters):
try:
self.core.launch_process(u_pid, round, run_type, parameters)
except EEAgentUnauthorizedException as e:
raise Unauthorized(e.message)
except PIDanticExecutionException as e:
raise NotFound(e.message)
def rcmd_terminate_process(self, u_pid, round):
self.core.terminate_process(u_pid, round)
def rcmd_restart_process(self, u_pid, round):
self.core.restart_process(u_pid, round)
def rcmd_cleanup_process(self, u_pid, round):
self.core.cleanup(u_pid, round)
def rcmd_dump_state(self):
return make_beat_msg(self.core._process_managers_map, self.CFG)
class HeartBeater(object):
def __init__(self, CFG, factory, process_id, process, log=logging):
self._log = log
self._log.log(logging.DEBUG, "Starting the heartbeat thread")
self._CFG = CFG
self._res = None
self._interval = float(CFG.eeagent.heartbeat)
self._done = False
self._started = False
self._factory = factory
self.process = process
self.process_id = process_id
self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')
self._publisher = Publisher(to_name=self._pd_name)
self._factory.set_state_change_callback(
self._state_change_callback, None)
self._first_beat()
def _first_beat(self):
self._beat_time = datetime.datetime.now()
def _next_beat(self, now):
self._beat_time = now + datetime.timedelta(seconds=self._interval)
def _state_change_callback(self, user_arg):
# on state change set the beat time to now
self._beat_time = datetime.datetime.now()
@property
def _eea_started(self):
"""_eea_started
We must ensure that the eea is listening before heartbeating to the PD.
If the eea isn't listening, the PD's reply will be lost.
So we must ensure that the Pyon process's listeners are created, and are ready
"""
if self._started:
return True
if len(self.process._process.listeners) > 0 and all(self.process._process.heartbeat()):
self._log.debug(
"eeagent heartbeat started because len(self.process._process.listeners) > 0 (%s) "
"and all(self.process._process.heartbeat()) == True (%s)" % (
len(self.process._process.listeners), str(self.process._process.heartbeat())))
self._started = True
return True
else:
return False
def poll(self):
if not self._eea_started:
return
now = datetime.datetime.now()
if now > self._beat_time:
self._next_beat(now)
self.beat()
def beat(self):
try:
beat = make_beat_msg(self._factory, self._CFG)
message = dict(
beat=beat, eeagent_id=self.process_id,
resource_id=self._CFG.agent.resource_id)
if self._log.isEnabledFor(logging.DEBUG):
processes = beat.get('processes')
if processes is not None:
processes_str = "processes=%d" % len(processes)
else:
processes_str = ""
self._log.debug("Sending heartbeat to %s %s",
self._pd_name, processes_str)
self._publisher.publish(message)
except Exception:
self._log.exception("beat failed")
class ExecutionEngineAgentClient(object):
def __init__(self, agent_client, timeout=30):
self.client = agent_client
self.timeout = timeout
def launch_process(self, u_pid, round, run_type, parameters):
args = [u_pid, round, run_type, parameters]
cmd = AgentCommand(command='launch_process', args=args)
return self.client.execute(cmd, timeout=self.timeout)
def terminate_process(self, u_pid, round):
args = [u_pid, round]
cmd = AgentCommand(command='terminate_process', args=args)
return self.client.execute(cmd, timeout=self.timeout)
def restart_process(self, u_pid, round):
args = [u_pid, round]
cmd = AgentCommand(command='restart_process', args=args)
return self.client.execute(cmd, timeout=self.timeout)
def cleanup_process(self, u_pid, round):
args = [u_pid, round]
cmd = AgentCommand(command='cleanup_process', args=args)
return self.client.execute(cmd, timeout=self.timeout)
def dump_state(self):
cmd = AgentCommand(command='dump_state', args=[])
return self.client.execute(cmd, timeout=self.timeout)
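# Illustrative sketch (not part of the original agent): a round trip through the
# client wrapper above. The u_pid, round and run_type values are hypothetical and
# depend entirely on how the execution engine agent was deployed.
def _example_client_roundtrip(agent_client):
    """Illustrative only: launch, inspect and terminate a process via the client."""
    eea = ExecutionEngineAgentClient(agent_client, timeout=60)
    eea.launch_process(u_pid='example-proc', round=0, run_type='pyon', parameters={})
    state = eea.dump_state()
    eea.terminate_process(u_pid='example-proc', round=0)
    return state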
|
|
"""SCons.Script.SConscript
This module defines the Python API provided to SConscript and SConstruct
files.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Environment
import SCons.Errors
import SCons.Node
import SCons.Node.Alias
import SCons.Node.FS
import SCons.Platform
import SCons.SConf
import SCons.Script.Main
import SCons.Tool
import SCons.Util
import collections
import os
import os.path
import re
import sys
import traceback
# The following variables used to live in this module. Some
# SConscript files out there may have referred to them directly as
# SCons.Script.SConscript.*. This is now supported by some special
# handling towards the bottom of the SConscript.__init__.py module.
#Arguments = {}
#ArgList = []
#BuildTargets = TargetList()
#CommandLineTargets = []
#DefaultTargets = []
class SConscriptReturn(Exception):
pass
launch_dir = os.path.abspath(os.curdir)
GlobalDict = None
# global exports set by Export():
global_exports = {}
# chdir flag
sconscript_chdir = 1
def get_calling_namespaces():
"""Return the locals and globals for the function that called
into this module in the current call stack."""
try: 1//0
except ZeroDivisionError:
# Don't start iterating with the current stack-frame to
# prevent creating reference cycles (f_back is safe).
frame = sys.exc_info()[2].tb_frame.f_back
# Find the first frame that *isn't* from this file. This means
# that we expect all of the SCons frames that implement an Export()
# or SConscript() call to be in this file, so that we can identify
# the first non-Script.SConscript frame as the user's local calling
# environment, and the locals and globals dictionaries from that
# frame as the calling namespaces. See the comment below preceding
# the DefaultEnvironmentCall block for even more explanation.
while frame.f_globals.get("__name__") == __name__:
frame = frame.f_back
return frame.f_locals, frame.f_globals
def compute_exports(exports):
"""Compute a dictionary of exports given one of the parameters
to the Export() function or the exports argument to SConscript()."""
loc, glob = get_calling_namespaces()
retval = {}
try:
for export in exports:
if SCons.Util.is_Dict(export):
retval.update(export)
else:
try:
retval[export] = loc[export]
except KeyError:
retval[export] = glob[export]
except KeyError, x:
raise SCons.Errors.UserError("Export of non-existent variable '%s'"%x)
return retval
class Frame(object):
"""A frame on the SConstruct/SConscript call stack"""
def __init__(self, fs, exports, sconscript):
self.globals = BuildDefaultGlobals()
self.retval = None
self.prev_dir = fs.getcwd()
self.exports = compute_exports(exports) # exports from the calling SConscript
# make sure the sconscript attr is a Node.
if isinstance(sconscript, SCons.Node.Node):
self.sconscript = sconscript
elif sconscript == '-':
self.sconscript = None
else:
self.sconscript = fs.File(str(sconscript))
# the SConstruct/SConscript call stack:
call_stack = []
# For documentation on the methods in this file, see the scons man-page
def Return(*vars, **kw):
retval = []
try:
fvars = SCons.Util.flatten(vars)
for var in fvars:
for v in var.split():
retval.append(call_stack[-1].globals[v])
except KeyError, x:
raise SCons.Errors.UserError("Return of non-existent variable '%s'"%x)
if len(retval) == 1:
call_stack[-1].retval = retval[0]
else:
call_stack[-1].retval = tuple(retval)
stop = kw.get('stop', True)
if stop:
raise SConscriptReturn
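# Editorial illustration (not part of the original source): inside an
# SConscript, Return('objs') makes the enclosing SConscript() call evaluate to
# the current value of `objs`, while Return('objs', 'libs') yields the tuple
# (objs, libs); passing stop=False records the value but keeps executing the
# rest of the SConscript.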
stack_bottom = '% Stack boTTom %' # hard to define a variable w/this name :)
def _SConscript(fs, *files, **kw):
top = fs.Top
sd = fs.SConstruct_dir.rdir()
exports = kw.get('exports', [])
# evaluate each SConscript file
results = []
for fn in files:
call_stack.append(Frame(fs, exports, fn))
old_sys_path = sys.path
try:
SCons.Script.sconscript_reading = SCons.Script.sconscript_reading + 1
if fn == "-":
exec sys.stdin in call_stack[-1].globals
else:
if isinstance(fn, SCons.Node.Node):
f = fn
else:
f = fs.File(str(fn))
_file_ = None
# Change directory to the top of the source
# tree to make sure the os's cwd and the cwd of
# fs match so we can open the SConscript.
fs.chdir(top, change_os_dir=1)
if f.rexists():
actual = f.rfile()
_file_ = open(actual.get_abspath(), "r")
elif f.srcnode().rexists():
actual = f.srcnode().rfile()
_file_ = open(actual.get_abspath(), "r")
elif f.has_src_builder():
# The SConscript file apparently exists in a source
# code management system. Build it, but then clear
# the builder so that it doesn't get built *again*
# during the actual build phase.
f.build()
f.built()
f.builder_set(None)
if f.exists():
_file_ = open(f.get_abspath(), "r")
if _file_:
# Chdir to the SConscript directory. Use a path
# name relative to the SConstruct file so that if
# we're using the -f option, we're essentially
# creating a parallel SConscript directory structure
# in our local directory tree.
#
# XXX This is broken for multiple-repository cases
# where the SConstruct and SConscript files might be
# in different Repositories. For now, cross that
# bridge when someone comes to it.
try:
src_dir = kw['src_dir']
except KeyError:
ldir = fs.Dir(f.dir.get_path(sd))
else:
ldir = fs.Dir(src_dir)
if not ldir.is_under(f.dir):
# They specified a source directory, but
# it's above the SConscript directory.
# Do the sensible thing and just use the
# SConscript directory.
ldir = fs.Dir(f.dir.get_path(sd))
try:
fs.chdir(ldir, change_os_dir=sconscript_chdir)
except OSError:
# There was no local directory, so we should be
# able to chdir to the Repository directory.
# Note that we do this directly, not through
# fs.chdir(), because we still need to
# interpret the stuff within the SConscript file
# relative to where we are logically.
fs.chdir(ldir, change_os_dir=0)
os.chdir(actual.dir.get_abspath())
# Append the SConscript directory to the beginning
# of sys.path so Python modules in the SConscript
# directory can be easily imported.
sys.path = [ f.dir.get_abspath() ] + sys.path
# This is the magic line that actually reads up
# and executes the stuff in the SConscript file.
# The locals for this frame contain the special
# bottom-of-the-stack marker so that any
# exceptions that occur when processing this
# SConscript can base the printed frames at this
# level and not show SCons internals as well.
call_stack[-1].globals.update({stack_bottom:1})
old_file = call_stack[-1].globals.get('__file__')
try:
del call_stack[-1].globals['__file__']
except KeyError:
pass
try:
try:
exec _file_ in call_stack[-1].globals
except SConscriptReturn:
pass
finally:
if old_file is not None:
call_stack[-1].globals.update({'__file__': old_file})
else:
SCons.Warnings.warn(SCons.Warnings.MissingSConscriptWarning,
"Ignoring missing SConscript '%s'" % f.path)
finally:
SCons.Script.sconscript_reading = SCons.Script.sconscript_reading - 1
sys.path = old_sys_path
frame = call_stack.pop()
try:
fs.chdir(frame.prev_dir, change_os_dir=sconscript_chdir)
except OSError:
# There was no local directory, so chdir to the
# Repository directory. Like above, we do this
# directly.
fs.chdir(frame.prev_dir, change_os_dir=0)
rdir = frame.prev_dir.rdir()
rdir._create() # Make sure there's a directory there.
try:
os.chdir(rdir.get_abspath())
except OSError, e:
# We still couldn't chdir there, so raise the error,
# but only if actions are being executed.
#
# If the -n option was used, the directory would *not*
# have been created and we should just carry on and
# let things muddle through. This isn't guaranteed
# to work if the SConscript files are reading things
# from disk (for example), but it should work well
# enough for most configurations.
if SCons.Action.execute_actions:
raise e
results.append(frame.retval)
# if we only have one script, don't return a tuple
if len(results) == 1:
return results[0]
else:
return tuple(results)
def SConscript_exception(file=sys.stderr):
"""Print an exception stack trace just for the SConscript file(s).
This will show users who have Python errors where the problem is,
without cluttering the output with all of the internal calls leading
up to where we exec the SConscript."""
exc_type, exc_value, exc_tb = sys.exc_info()
tb = exc_tb
while tb and stack_bottom not in tb.tb_frame.f_locals:
tb = tb.tb_next
if not tb:
# We did not find our exec statement, so this was actually a bug
# in SCons itself. Show the whole stack.
tb = exc_tb
stack = traceback.extract_tb(tb)
try:
type = exc_type.__name__
except AttributeError:
type = str(exc_type)
if type[:11] == "exceptions.":
type = type[11:]
file.write('%s: %s:\n' % (type, exc_value))
for fname, line, func, text in stack:
file.write(' File "%s", line %d:\n' % (fname, line))
file.write(' %s\n' % text)
def annotate(node):
"""Annotate a node with the stack frame describing the
SConscript file and line number that created it."""
tb = sys.exc_info()[2]
while tb and stack_bottom not in tb.tb_frame.f_locals:
tb = tb.tb_next
if not tb:
# We did not find any exec of an SConscript file: what?!
raise SCons.Errors.InternalError("could not find SConscript stack frame")
node.creator = traceback.extract_stack(tb)[0]
# The following line would cause each Node to be annotated using the
# above function. Unfortunately, this is a *huge* performance hit, so
# leave this disabled until we find a more efficient mechanism.
#SCons.Node.Annotate = annotate
class SConsEnvironment(SCons.Environment.Base):
"""An Environment subclass that contains all of the methods that
are particular to the wrapper SCons interface and which aren't
(or shouldn't be) part of the build engine itself.
Note that not all of the methods of this class have corresponding
global functions, there are some private methods.
"""
#
# Private methods of an SConsEnvironment.
#
def _exceeds_version(self, major, minor, v_major, v_minor):
"""Return 1 if 'major' and 'minor' are greater than the version
in 'v_major' and 'v_minor', and 0 otherwise."""
return (major > v_major or (major == v_major and minor > v_minor))
def _get_major_minor_revision(self, version_string):
"""Split a version string into major, minor and (optionally)
revision parts.
This is complicated by the fact that a version string can be
something like 3.2b1."""
version = version_string.split(' ')[0].split('.')
v_major = int(version[0])
v_minor = int(re.match(r'\d+', version[1]).group())
if len(version) >= 3:
v_revision = int(re.match(r'\d+', version[2]).group())
else:
v_revision = 0
return v_major, v_minor, v_revision
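# Editorial example (not in the original source): '2.3.0' yields (2, 3, 0),
# and a string such as '3.2b1' yields (3, 2, 0), because only the leading
# digits of the minor part are kept and no revision component is present.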
def _get_SConscript_filenames(self, ls, kw):
"""
Convert the parameters passed to SConscript() calls into a list
of files and export variables. If the parameters are invalid,
throws SCons.Errors.UserError. Returns a tuple (l, e) where l
is a list of SConscript filenames and e is a list of exports.
"""
exports = []
if len(ls) == 0:
try:
dirs = kw["dirs"]
except KeyError:
raise SCons.Errors.UserError("Invalid SConscript usage - no parameters")
if not SCons.Util.is_List(dirs):
dirs = [ dirs ]
dirs = list(map(str, dirs))
name = kw.get('name', 'SConscript')
files = [os.path.join(n, name) for n in dirs]
elif len(ls) == 1:
files = ls[0]
elif len(ls) == 2:
files = ls[0]
exports = self.Split(ls[1])
else:
raise SCons.Errors.UserError("Invalid SConscript() usage - too many arguments")
if not SCons.Util.is_List(files):
files = [ files ]
if kw.get('exports'):
exports.extend(self.Split(kw['exports']))
variant_dir = kw.get('variant_dir') or kw.get('build_dir')
if variant_dir:
if len(files) != 1:
raise SCons.Errors.UserError("Invalid SConscript() usage - can only specify one SConscript with a variant_dir")
duplicate = kw.get('duplicate', 1)
src_dir = kw.get('src_dir')
if not src_dir:
src_dir, fname = os.path.split(str(files[0]))
files = [os.path.join(str(variant_dir), fname)]
else:
if not isinstance(src_dir, SCons.Node.Node):
src_dir = self.fs.Dir(src_dir)
fn = files[0]
if not isinstance(fn, SCons.Node.Node):
fn = self.fs.File(fn)
if fn.is_under(src_dir):
# Get path relative to the source directory.
fname = fn.get_path(src_dir)
files = [os.path.join(str(variant_dir), fname)]
else:
files = [fn.abspath]
kw['src_dir'] = variant_dir
self.fs.VariantDir(variant_dir, src_dir, duplicate)
return (files, exports)
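# Editorial illustration (not part of the original source): a call such as
# SConscript(dirs=['src', 'lib']) resolves to the files
# ['src/SConscript', 'lib/SConscript'] with no exports, while
# SConscript('src/SConscript', 'env') returns (['src/SConscript'], ['env']).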
#
# Public methods of an SConsEnvironment. These get
# entry points in the global name space so they can be called
# as global functions.
#
def Configure(self, *args, **kw):
if not SCons.Script.sconscript_reading:
raise SCons.Errors.UserError("Calling Configure from Builders is not supported.")
kw['_depth'] = kw.get('_depth', 0) + 1
return SCons.Environment.Base.Configure(self, *args, **kw)
def Default(self, *targets):
SCons.Script._Set_Default_Targets(self, targets)
def EnsureSConsVersion(self, major, minor, revision=0):
"""Exit abnormally if the SCons version is not late enough."""
# split string to avoid replacement during build process
if SCons.__version__ == '__' + 'VERSION__':
SCons.Warnings.warn(SCons.Warnings.DevelopmentVersionWarning,
"EnsureSConsVersion is ignored for development version")
return
scons_ver = self._get_major_minor_revision(SCons.__version__)
if scons_ver < (major, minor, revision):
if revision:
scons_ver_string = '%d.%d.%d' % (major, minor, revision)
else:
scons_ver_string = '%d.%d' % (major, minor)
print "SCons %s or greater required, but you have SCons %s" % \
(scons_ver_string, SCons.__version__)
sys.exit(2)
def EnsurePythonVersion(self, major, minor):
"""Exit abnormally if the Python version is not late enough."""
if sys.version_info < (major, minor):
v = sys.version.split()[0]
print "Python %d.%d or greater required, but you have Python %s" %(major,minor,v)
sys.exit(2)
def Exit(self, value=0):
sys.exit(value)
def Export(self, *vars, **kw):
for var in vars:
global_exports.update(compute_exports(self.Split(var)))
global_exports.update(kw)
def GetLaunchDir(self):
global launch_dir
return launch_dir
def GetOption(self, name):
name = self.subst(name)
return SCons.Script.Main.GetOption(name)
def Help(self, text):
text = self.subst(text, raw=1)
SCons.Script.HelpFunction(text)
def Import(self, *vars):
try:
frame = call_stack[-1]
globals = frame.globals
exports = frame.exports
for var in vars:
var = self.Split(var)
for v in var:
if v == '*':
globals.update(global_exports)
globals.update(exports)
else:
if v in exports:
globals[v] = exports[v]
else:
globals[v] = global_exports[v]
except KeyError,x:
raise SCons.Errors.UserError("Import of non-existent variable '%s'"%x)
def SConscript(self, *ls, **kw):
if 'build_dir' in kw:
msg = """The build_dir keyword has been deprecated; use the variant_dir keyword instead."""
SCons.Warnings.warn(SCons.Warnings.DeprecatedBuildDirWarning, msg)
def subst_element(x, subst=self.subst):
if SCons.Util.is_List(x):
x = list(map(subst, x))
else:
x = subst(x)
return x
ls = list(map(subst_element, ls))
subst_kw = {}
for key, val in kw.items():
if SCons.Util.is_String(val):
val = self.subst(val)
elif SCons.Util.is_List(val):
result = []
for v in val:
if SCons.Util.is_String(v):
v = self.subst(v)
result.append(v)
val = result
subst_kw[key] = val
files, exports = self._get_SConscript_filenames(ls, subst_kw)
subst_kw['exports'] = exports
return _SConscript(self.fs, *files, **subst_kw)
def SConscriptChdir(self, flag):
global sconscript_chdir
sconscript_chdir = flag
def SetOption(self, name, value):
name = self.subst(name)
SCons.Script.Main.SetOption(name, value)
#
#
#
SCons.Environment.Environment = SConsEnvironment
def Configure(*args, **kw):
if not SCons.Script.sconscript_reading:
raise SCons.Errors.UserError("Calling Configure from Builders is not supported.")
kw['_depth'] = 1
return SCons.SConf.SConf(*args, **kw)
# It's very important that the DefaultEnvironmentCall() class stay in this
# file, with the get_calling_namespaces() function, the compute_exports()
# function, the Frame class and the SConsEnvironment.Export() method.
# These things make up the calling stack leading up to the actual global
# Export() or SConscript() call that the user issued. We want to allow
# users to export local variables that they define, like so:
#
# def func():
# x = 1
# Export('x')
#
# To support this, the get_calling_namespaces() function assumes that
# the *first* stack frame that's not from this file is the local frame
# for the Export() or SConscript() call.
_DefaultEnvironmentProxy = None
def get_DefaultEnvironmentProxy():
global _DefaultEnvironmentProxy
if not _DefaultEnvironmentProxy:
default_env = SCons.Defaults.DefaultEnvironment()
_DefaultEnvironmentProxy = SCons.Environment.NoSubstitutionProxy(default_env)
return _DefaultEnvironmentProxy
class DefaultEnvironmentCall(object):
"""A class that implements "global function" calls of
Environment methods by fetching the specified method from the
DefaultEnvironment's class. Note that this uses an intermediate
proxy class instead of calling the DefaultEnvironment method
directly so that the proxy can override the subst() method and
thereby prevent expansion of construction variables (since from
the user's point of view this was called as a global function,
with no associated construction environment)."""
def __init__(self, method_name, subst=0):
self.method_name = method_name
if subst:
self.factory = SCons.Defaults.DefaultEnvironment
else:
self.factory = get_DefaultEnvironmentProxy
def __call__(self, *args, **kw):
env = self.factory()
method = getattr(env, self.method_name)
return method(*args, **kw)
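# Editorial note (not part of the original source): SCons.Script exposes its
# global functions such as Export() and Import() by wrapping the corresponding
# SConsEnvironment methods in DefaultEnvironmentCall, roughly
# Export = DefaultEnvironmentCall('Export').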
def BuildDefaultGlobals():
"""
Create a dictionary containing all the default globals for
SConstruct and SConscript files.
"""
global GlobalDict
if GlobalDict is None:
GlobalDict = {}
import SCons.Script
d = SCons.Script.__dict__
def not_a_module(m, d=d, mtype=type(SCons.Script)):
return not isinstance(d[m], mtype)
for m in filter(not_a_module, dir(SCons.Script)):
GlobalDict[m] = d[m]
return GlobalDict.copy()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base constants and handlers."""
import base64
import Cookie
import datetime
import hmac
import json
import logging
import os
import sys
import time
import traceback
import urllib
import urlparse
import jinja2
import webapp2
from google.appengine.api import users
from core import counters
from core.domain import config_domain
from core.domain import config_services
from core.domain import obj_services
from core.domain import rights_manager
from core.domain import rte_component_registry
from core.domain import user_services
from core.platform import models
import feconf
import jinja_utils
import utils
current_user_services = models.Registry.import_current_user_services()
(user_models,) = models.Registry.import_models([models.NAMES.user])
ONE_DAY_AGO_IN_SECS = -24 * 60 * 60
DEFAULT_CSRF_SECRET = 'oppia csrf secret'
CSRF_SECRET = config_domain.ConfigProperty(
'oppia_csrf_secret', {'type': 'unicode'},
'Text used to encrypt CSRF tokens.', DEFAULT_CSRF_SECRET)
SITE_NAME = config_domain.ConfigProperty(
'site_name', {'type': 'unicode'}, 'The site name', 'SITE_NAME')
BEFORE_END_HEAD_TAG_HOOK = config_domain.ConfigProperty(
'before_end_head_tag_hook', {
'type': 'unicode',
'ui_config': {
'rows': 7,
},
},
'Code to insert just before the closing </head> tag in all pages.', '')
BEFORE_END_BODY_TAG_HOOK = config_domain.ConfigProperty(
'before_end_body_tag_hook', {
'type': 'unicode',
'ui_config': {
'rows': 7,
},
},
'Code to insert just before the closing </body> tag in all pages.', '')
SIDEBAR_MENU_ADDITIONAL_LINKS = config_domain.ConfigProperty(
'sidebar_menu_additional_links', {
'type': 'list',
'items': {
'type': 'dict',
'properties': [{
'name': 'name',
'description': 'Text of the menu item',
'schema': {'type': 'unicode'},
}, {
'name': 'link',
'description': 'The link to open in a new tab',
'schema': {'type': 'unicode'},
}, {
'name': 'icon_filename',
'description': (
'Filename of the menu icon (in /images/sidebar)'),
'schema': {'type': 'unicode'},
}]
}
},
'Additional links to show in the sidebar menu.',
default_value=[{
'name': 'Blog',
'link': 'http://site/blog/url',
'icon_filename': 'comment.png',
}])
SITE_FEEDBACK_FORM_URL = config_domain.ConfigProperty(
'site_feedback_form_url', {'type': 'unicode'},
'Site feedback form URL (leave blank if there is no such form)', '')
SHARING_OPTIONS = config_domain.ConfigProperty(
'sharing_options', {
'type': 'dict',
'properties': [{
'name': 'gplus',
'schema': {
'type': 'bool',
}
}, {
'name': 'facebook',
'schema': {
'type': 'bool',
}
}, {
'name': 'twitter',
'schema': {
'type': 'bool',
}
}]
},
'Sharing options to display in the editor view',
default_value={
'gplus': False,
'facebook': False,
'twitter': False,
})
SOCIAL_MEDIA_BUTTONS = config_domain.ConfigProperty(
'social_media_buttons', {
'type': 'list',
'items': {
'type': 'dict',
'properties': [{
'name': 'link',
'description': 'The link to open in a new tab',
'schema': {'type': 'unicode'},
}, {
'name': 'icon_filename',
'description': (
'Filename of the social media icon (in /images/social)'),
'schema': {'type': 'unicode'},
}]
}
},
'Links and icon filenames for the social media buttons in the sidebar.',
[])
DISABLED_EXPLORATIONS = config_domain.ConfigProperty(
'disabled_explorations', {
'type': 'list',
'items': {
'type': 'unicode'
}
},
'IDs of explorations which should not be displayable in either the '
'learner or editor views',
[])
def require_user(handler):
"""Decorator that checks if a user is associated to the current session."""
def test_login(self, **kwargs):
"""Checks if the user for the current session is logged in."""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
return handler(self, **kwargs)
return test_login
def require_moderator(handler):
"""Decorator that checks if the current user is a moderator."""
def test_is_moderator(self, **kwargs):
"""Check that the user is a moderator."""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
if not rights_manager.Actor(self.user_id).is_moderator():
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
return handler(self, **kwargs)
return test_is_moderator
def require_fully_signed_up(handler):
"""Decorator that checks if the user is logged in and has completed the
signup process. If any of these checks fail, an UnauthorizedUserException
is raised.
"""
def test_registered_as_editor(self, **kwargs):
"""Check that the user has registered as an editor."""
if (not self.user_id
or self.username in config_domain.BANNED_USERNAMES.value
or not user_services.has_fully_registered(self.user_id)):
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
return handler(self, **kwargs)
return test_registered_as_editor
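# Editorial illustration (not part of the original file): these decorators
# wrap handler methods on BaseHandler subclasses, e.g.
#
#     class MyPage(BaseHandler):
#         @require_user
#         def get(self):
#             ...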
def _clear_login_cookies(response_headers):
# AppEngine sets the ACSID cookie for http:// and the SACSID cookie
# for https:// . We just unset both below.
cookie = Cookie.SimpleCookie()
for cookie_name in ['ACSID', 'SACSID']:
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = ''
cookie[cookie_name]['expires'] = (
datetime.datetime.utcnow() +
datetime.timedelta(seconds=ONE_DAY_AGO_IN_SECS)
).strftime('%a, %d %b %Y %H:%M:%S GMT')
response_headers.add_header(*cookie.output().split(': ', 1))
class LogoutPage(webapp2.RequestHandler):
def get(self):
"""Logs the user out, and returns them to a specified page or the home
page.
"""
# The str conversion is needed, otherwise an InvalidResponseError
# asking for the 'Location' header value to be str instead of
# 'unicode' will result.
url_to_redirect_to = str(self.request.get('return_url') or '/')
_clear_login_cookies(self.response.headers)
if feconf.DEV_MODE:
self.redirect(users.create_logout_url(url_to_redirect_to))
else:
self.redirect(url_to_redirect_to)
class BaseHandler(webapp2.RequestHandler):
"""Base class for all Oppia handlers."""
# Whether to check POST and PUT payloads for CSRF tokens prior to
# processing them. Can be overridden by subclasses if this check is
# not necessary.
REQUIRE_PAYLOAD_CSRF_CHECK = True
# Specific page name to use as a key for generating CSRF tokens. This name
# must be overridden by subclasses. This represents both the source
# page name and the destination page name.
# TODO(sll): A weakness of the current approach is that the source and
# destination page names have to be the same. Consider fixing this.
PAGE_NAME_FOR_CSRF = ''
# Whether to redirect a logged-in user who has not completed signup to the
# signup page. This ensures that logged-in users have agreed to the latest
# terms.
REDIRECT_UNFINISHED_SIGNUPS = True
@webapp2.cached_property
def jinja2_env(self):
return jinja_utils.get_jinja_env(feconf.FRONTEND_TEMPLATES_DIR)
def __init__(self, request, response): # pylint: disable=super-init-not-called
# Set self.request, self.response and self.app.
self.initialize(request, response)
self.start_time = datetime.datetime.utcnow()
# Initializes the return dict for the handlers.
self.values = {}
self.user = current_user_services.get_current_user()
self.user_id = current_user_services.get_user_id(
self.user) if self.user else None
self.username = None
self.has_seen_editor_tutorial = False
self.partially_logged_in = False
self.values['profile_picture_data_url'] = None
if self.user_id:
email = current_user_services.get_user_email(self.user)
user_settings = user_services.get_or_create_user(
self.user_id, email)
self.values['user_email'] = user_settings.email
if (self.REDIRECT_UNFINISHED_SIGNUPS and not
user_services.has_fully_registered(self.user_id)):
_clear_login_cookies(self.response.headers)
self.partially_logged_in = True
self.user_id = None
else:
self.username = user_settings.username
self.values['username'] = self.username
self.values['profile_picture_data_url'] = (
user_settings.profile_picture_data_url)
if user_settings.last_started_state_editor_tutorial:
self.has_seen_editor_tutorial = True
self.is_moderator = rights_manager.Actor(self.user_id).is_moderator()
self.is_admin = rights_manager.Actor(self.user_id).is_admin()
self.is_super_admin = (
current_user_services.is_current_user_super_admin())
self.values['is_moderator'] = self.is_moderator
self.values['is_admin'] = self.is_admin
self.values['is_super_admin'] = self.is_super_admin
if self.request.get('payload'):
self.payload = json.loads(self.request.get('payload'))
else:
self.payload = None
def unescape_state_name(self, escaped_state_name):
"""Unescape a state name that is encoded with encodeURIComponent."""
return urllib.unquote(escaped_state_name).decode('utf-8')
def dispatch(self):
"""Overrides dispatch method in webapp2 superclass."""
# If the request is to the old demo server, redirect it permanently to
# the new demo server.
if self.request.uri.startswith('https://oppiaserver.appspot.com'):
self.redirect('https://oppiatestserver.appspot.com', True)
return
# In DEV_MODE, clearing cookies does not log the user out, so we log
# them out explicitly by redirecting to the logout URL.
if feconf.DEV_MODE and self.partially_logged_in:
self.redirect(users.create_logout_url(self.request.uri))
return
if self.payload and self.REQUIRE_PAYLOAD_CSRF_CHECK:
try:
if not self.PAGE_NAME_FOR_CSRF:
raise Exception('No CSRF page name specified for this '
'handler.')
csrf_token = self.request.get('csrf_token')
if not csrf_token:
raise Exception(
'Missing CSRF token. Changes were not saved. '
'Please report this bug.')
is_csrf_token_valid = CsrfTokenManager.is_csrf_token_valid(
self.user_id, self.PAGE_NAME_FOR_CSRF, csrf_token)
if not is_csrf_token_valid:
raise self.UnauthorizedUserException(
'Your session has expired, and unfortunately your '
'changes cannot be saved. Please refresh the page.')
except Exception as e:
logging.error(
'%s: page name %s, payload %s',
e, self.PAGE_NAME_FOR_CSRF, self.payload)
return self.handle_exception(e, self.app.debug)
super(BaseHandler, self).dispatch()
def get(self, *args, **kwargs): # pylint: disable=unused-argument
"""Base method to handle GET requests."""
raise self.PageNotFoundException
def post(self, *args): # pylint: disable=unused-argument
"""Base method to handle POST requests."""
raise self.PageNotFoundException
def put(self, *args): # pylint: disable=unused-argument
"""Base method to handle PUT requests."""
raise self.PageNotFoundException
def delete(self, *args): # pylint: disable=unused-argument
"""Base method to handle DELETE requests."""
raise self.PageNotFoundException
def render_json(self, values):
self.response.content_type = 'application/javascript; charset=utf-8'
self.response.headers['Content-Disposition'] = (
'attachment; filename="oppia-attachment.txt"')
self.response.headers['Strict-Transport-Security'] = (
'max-age=31536000; includeSubDomains')
self.response.headers['X-Content-Type-Options'] = 'nosniff'
json_output = json.dumps(values, cls=utils.JSONEncoderForHTML)
self.response.write('%s%s' % (feconf.XSSI_PREFIX, json_output))
# Calculate the processing time of this request.
duration = datetime.datetime.utcnow() - self.start_time
processing_time = duration.seconds + duration.microseconds / 1E6
counters.JSON_RESPONSE_TIME_SECS.inc(increment=processing_time)
counters.JSON_RESPONSE_COUNT.inc()
def render_template(
self, filename, values=None, iframe_restriction='DENY',
redirect_url_on_logout=None):
if values is None:
values = self.values
scheme, netloc, path, _, _ = urlparse.urlsplit(self.request.uri)
values.update({
'ALL_LANGUAGE_CODES': feconf.ALL_LANGUAGE_CODES,
'BEFORE_END_HEAD_TAG_HOOK': jinja2.utils.Markup(
BEFORE_END_HEAD_TAG_HOOK.value),
'BEFORE_END_BODY_TAG_HOOK': jinja2.utils.Markup(
BEFORE_END_BODY_TAG_HOOK.value),
'CAN_SEND_ANALYTICS_EVENTS': feconf.CAN_SEND_ANALYTICS_EVENTS,
'DEFAULT_LANGUAGE_CODE': feconf.ALL_LANGUAGE_CODES[0]['code'],
'DEV_MODE': feconf.DEV_MODE,
'DOMAIN_URL': '%s://%s' % (scheme, netloc),
'ACTIVITY_STATUS_PRIVATE': (
rights_manager.ACTIVITY_STATUS_PRIVATE),
'ACTIVITY_STATUS_PUBLIC': (
rights_manager.ACTIVITY_STATUS_PUBLIC),
'ACTIVITY_STATUS_PUBLICIZED': (
rights_manager.ACTIVITY_STATUS_PUBLICIZED),
'FULL_URL': '%s://%s/%s' % (scheme, netloc, path),
'INVALID_NAME_CHARS': feconf.INVALID_NAME_CHARS,
# TODO(sll): Consider including the obj_editor html directly as
# part of the base HTML template?
'OBJECT_EDITORS_JS': jinja2.utils.Markup(
obj_services.get_all_object_editor_js_templates()),
'RTE_COMPONENT_SPECS': (
rte_component_registry.Registry.get_all_specs()),
'SHOW_CUSTOM_PAGES': feconf.SHOW_CUSTOM_PAGES,
'SIDEBAR_MENU_ADDITIONAL_LINKS': (
SIDEBAR_MENU_ADDITIONAL_LINKS.value),
'SITE_FEEDBACK_FORM_URL': SITE_FEEDBACK_FORM_URL.value,
'SITE_NAME': SITE_NAME.value,
'SOCIAL_MEDIA_BUTTONS': SOCIAL_MEDIA_BUTTONS.value,
'SYSTEM_USERNAMES': feconf.SYSTEM_USERNAMES,
'user_is_logged_in': user_services.has_fully_registered(
self.user_id),
})
if 'meta_name' not in values:
values['meta_name'] = 'Personalized Online Learning from Oppia'
if 'meta_description' not in values:
values['meta_description'] = (
'Oppia is a free, open-source learning platform. Join the '
'community to create or try an exploration today!')
if redirect_url_on_logout is None:
redirect_url_on_logout = self.request.uri
if self.user_id:
values['logout_url'] = (
current_user_services.create_logout_url(
redirect_url_on_logout))
else:
values['login_url'] = (
current_user_services.create_login_url(self.request.uri))
# Create a new csrf token for inclusion in HTML responses. This assumes
# that tokens generated in one handler will be sent back to a handler
# with the same page name.
values['csrf_token'] = ''
if self.REQUIRE_PAYLOAD_CSRF_CHECK and self.PAGE_NAME_FOR_CSRF:
values['csrf_token'] = CsrfTokenManager.create_csrf_token(
self.user_id, self.PAGE_NAME_FOR_CSRF)
self.response.cache_control.no_cache = True
self.response.cache_control.must_revalidate = True
self.response.headers['Strict-Transport-Security'] = (
'max-age=31536000; includeSubDomains')
self.response.headers['X-Content-Type-Options'] = 'nosniff'
if iframe_restriction is not None:
if iframe_restriction in ['SAMEORIGIN', 'DENY']:
self.response.headers['X-Frame-Options'] = iframe_restriction
else:
raise Exception(
'Invalid X-Frame-Options: %s' % iframe_restriction)
self.response.expires = 'Mon, 01 Jan 1990 00:00:00 GMT'
self.response.pragma = 'no-cache'
self.response.write(self.jinja2_env.get_template(
filename).render(**values))
# Calculate the processing time of this request.
duration = datetime.datetime.utcnow() - self.start_time
processing_time = duration.seconds + duration.microseconds / 1E6
counters.HTML_RESPONSE_TIME_SECS.inc(increment=processing_time)
counters.HTML_RESPONSE_COUNT.inc()
def _render_exception(self, error_code, values):
assert error_code in [400, 401, 404, 500]
values['code'] = error_code
# This checks if the response should be JSON or HTML.
if self.payload is not None:
self.render_json(values)
else:
self.values.update(values)
self.render_template(
'error/error.html', iframe_restriction=None)
def handle_exception(self, exception, unused_debug_mode):
"""Overwrites the default exception handler."""
logging.info(''.join(traceback.format_exception(*sys.exc_info())))
logging.error('Exception raised: %s', exception)
if isinstance(exception, self.PageNotFoundException):
logging.error('Invalid URL requested: %s', self.request.uri)
self.error(404)
self._render_exception(404, {
'error': 'Could not find the page %s.' % self.request.uri})
return
if isinstance(exception, self.NotLoggedInException):
self.redirect(
current_user_services.create_login_url(self.request.uri))
return
if isinstance(exception, self.UnauthorizedUserException):
self.error(401)
self._render_exception(401, {'error': unicode(exception)})
return
if isinstance(exception, self.InvalidInputException):
self.error(400)
self._render_exception(400, {'error': unicode(exception)})
return
if isinstance(exception, self.InternalErrorException):
self.error(500)
self._render_exception(500, {'error': unicode(exception)})
return
self.error(500)
self._render_exception(500, {'error': unicode(exception)})
class UnauthorizedUserException(Exception):
"""Error class for unauthorized access."""
class NotLoggedInException(Exception):
"""Error class for users that are not logged in (error code 401)."""
class InvalidInputException(Exception):
"""Error class for invalid input on the user side (error code 400)."""
class PageNotFoundException(Exception):
"""Error class for a page not found error (error code 404)."""
class InternalErrorException(Exception):
"""Error class for an internal server side error (error code 500)."""
class Error404Handler(BaseHandler):
"""Handles 404 errors."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
class CsrfTokenManager(object):
"""Manages page/user tokens in memcache to protect against CSRF."""
# Max age of the token (48 hours).
_CSRF_TOKEN_AGE_SECS = 60 * 60 * 48
# Default user id for non-logged-in users.
_USER_ID_DEFAULT = 'non_logged_in_user'
@classmethod
def init_csrf_secret(cls):
"""Verify that non-default CSRF secret exists; creates one if not."""
# Any non-default value is fine.
if CSRF_SECRET.value and CSRF_SECRET.value != DEFAULT_CSRF_SECRET:
return
# Initialize to random value.
config_services.set_property(
feconf.SYSTEM_COMMITTER_ID, CSRF_SECRET.name,
base64.urlsafe_b64encode(os.urandom(20)))
@classmethod
def _create_token(cls, user_id, page_name, issued_on):
"""Creates a digest (string representation) of a token."""
cls.init_csrf_secret()
# The token is the plain-text issue time joined with an HMAC digest
# computed over the user id, the page name and the issue time.
if user_id is None:
user_id = cls._USER_ID_DEFAULT
# Round time to seconds.
issued_on = long(issued_on)
digester = hmac.new(str(CSRF_SECRET.value))
digester.update(str(user_id))
digester.update(':')
digester.update(str(page_name))
digester.update(':')
digester.update(str(issued_on))
digest = digester.digest()
token = '%s/%s' % (issued_on, base64.urlsafe_b64encode(digest))
return token
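# Editorial illustration (values are made up): the resulting token looks like
# '1408123456/aBcD...', i.e. the issue time, a '/', and the urlsafe-base64
# digest; is_csrf_token_valid() below splits on the '/' and recomputes the
# digest to verify it.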
@classmethod
def _get_current_time(cls):
return time.time()
@classmethod
def create_csrf_token(cls, user_id, page_name):
if not page_name:
raise Exception('Cannot create CSRF token if page name is empty.')
return cls._create_token(user_id, page_name, cls._get_current_time())
@classmethod
def is_csrf_token_valid(cls, user_id, page_name, token):
"""Validate a given CSRF token with the CSRF secret in memcache."""
try:
parts = token.split('/')
if len(parts) != 2:
return False
issued_on = long(parts[0])
age = cls._get_current_time() - issued_on
if age > cls._CSRF_TOKEN_AGE_SECS:
return False
authentic_token = cls._create_token(user_id, page_name, issued_on)
if authentic_token == token:
return True
return False
except Exception:
return False
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Netease Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for availability zones
"""
from oslo.config import cfg
from nova import availability_zones as az
from nova import context
from nova import db
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
CONF.import_opt('internal_service_availability_zone',
'nova.availability_zones')
CONF.import_opt('default_availability_zone',
'nova.availability_zones')
class AvailabilityZoneTestCases(test.TestCase):
"""Test case for aggregate based availability zone."""
def setUp(self):
super(AvailabilityZoneTestCases, self).setUp()
self.host = 'me'
self.availability_zone = 'nova-test'
self.default_az = CONF.default_availability_zone
self.default_in_az = CONF.internal_service_availability_zone
self.context = context.get_admin_context()
self.agg = self._create_az('az_agg', self.availability_zone)
def tearDown(self):
db.aggregate_delete(self.context, self.agg['id'])
super(AvailabilityZoneTestCases, self).tearDown()
def _create_az(self, agg_name, az_name):
agg_meta = {'name': agg_name}
agg = db.aggregate_create(self.context, agg_meta)
metadata = {'availability_zone': az_name}
db.aggregate_metadata_add(self.context, agg['id'], metadata)
return agg
def _update_az(self, aggregate, az_name):
metadata = {'availability_zone': az_name}
db.aggregate_update(self.context, aggregate['id'], metadata)
def _create_service_with_topic(self, topic, host, disabled=False):
values = {
'binary': 'bin',
'host': host,
'topic': topic,
'disabled': disabled,
}
return db.service_create(self.context, values)
def _destroy_service(self, service):
return db.service_destroy(self.context, service['id'])
def _add_to_aggregate(self, service, aggregate):
return db.aggregate_host_add(self.context,
aggregate['id'], service['host'])
def _delete_from_aggregate(self, service, aggregate):
return db.aggregate_host_delete(self.context,
aggregate['id'], service['host'])
def test_set_availability_zone_compute_service(self):
"""Test for compute service get right availability zone."""
service = self._create_service_with_topic('compute', self.host)
services = db.service_get_all(self.context)
# The service has not been added to an aggregate, so confirm it is in
# the default availability zone.
new_service = az.set_availability_zones(self.context, services)[0]
self.assertEquals(new_service['availability_zone'],
self.default_az)
# The service is added to the aggregate, so confirm that the aggregate
# availability zone is returned.
self._add_to_aggregate(service, self.agg)
new_service = az.set_availability_zones(self.context, services)[0]
self.assertEquals(new_service['availability_zone'],
self.availability_zone)
self._destroy_service(service)
def test_set_availability_zone_not_compute_service(self):
"""Test not compute service get right availability zone."""
service = self._create_service_with_topic('network', self.host)
services = db.service_get_all(self.context)
new_service = az.set_availability_zones(self.context, services)[0]
self.assertEquals(new_service['availability_zone'],
self.default_in_az)
self._destroy_service(service)
def test_get_host_availability_zone(self):
"""Test get right availability zone by given host."""
self.assertEquals(self.default_az,
az.get_host_availability_zone(self.context, self.host))
service = self._create_service_with_topic('compute', self.host)
self._add_to_aggregate(service, self.agg)
self.assertEquals(self.availability_zone,
az.get_host_availability_zone(self.context, self.host))
def test_update_host_availability_zone(self):
"""Test availability zone could be update by given host."""
service = self._create_service_with_topic('compute', self.host)
# Create a new aggregate with an AZ and add the host to the AZ
az_name = 'az1'
agg_az1 = self._create_az('agg-az1', az_name)
self._add_to_aggregate(service, agg_az1)
self.assertEquals(az_name,
az.get_host_availability_zone(self.context, self.host))
# Update AZ
new_az_name = 'az2'
self._update_az(agg_az1, new_az_name)
self.assertEquals(new_az_name,
az.get_host_availability_zone(self.context, self.host))
def test_delete_host_availability_zone(self):
"""Test availability zone could be deleted successfully."""
service = self._create_service_with_topic('compute', self.host)
# Create a new aggregate with an AZ and add the host to the AZ
az_name = 'az1'
agg_az1 = self._create_az('agg-az1', az_name)
self._add_to_aggregate(service, agg_az1)
self.assertEquals(az_name,
az.get_host_availability_zone(self.context, self.host))
# Delete the AZ via deleting the aggregate
self._delete_from_aggregate(service, agg_az1)
self.assertEquals(self.default_az,
az.get_host_availability_zone(self.context, self.host))
def test_get_availability_zones(self):
"""Test get_availability_zones."""
# get_availability_zones returns two lists: zones with at least one
# enabled service, and zones with no enabled services.
# Use the following test data:
#
# zone host enabled
# nova-test host1 Yes
# nova-test host2 No
# nova-test2 host3 Yes
# nova-test3 host4 No
# <default> host5 No
agg2 = self._create_az('agg-az2', 'nova-test2')
agg3 = self._create_az('agg-az3', 'nova-test3')
service1 = self._create_service_with_topic('compute', 'host1',
disabled=False)
service2 = self._create_service_with_topic('compute', 'host2',
disabled=True)
service3 = self._create_service_with_topic('compute', 'host3',
disabled=False)
service4 = self._create_service_with_topic('compute', 'host4',
disabled=True)
self._create_service_with_topic('compute', 'host5',
disabled=True)
self._add_to_aggregate(service1, self.agg)
self._add_to_aggregate(service2, self.agg)
self._add_to_aggregate(service3, agg2)
self._add_to_aggregate(service4, agg3)
zones, not_zones = az.get_availability_zones(self.context)
self.assertEquals(zones, ['nova-test', 'nova-test2'])
self.assertEquals(not_zones, ['nova-test3', 'nova'])
def test_get_instance_availability_zone_default_value(self):
"""Test get right availability zone by given an instance."""
fake_inst_id = 162
fake_inst = fakes.stub_instance(fake_inst_id, host=self.host)
self.assertEqual(self.default_az,
az.get_instance_availability_zone(self.context, fake_inst))
def test_get_instance_availability_zone_from_aggregate(self):
"""Test get availability zone from aggregate by given an instance."""
host = 'host170'
service = self._create_service_with_topic('compute', host)
self._add_to_aggregate(service, self.agg)
fake_inst_id = 174
fake_inst = fakes.stub_instance(fake_inst_id, host=host)
self.assertEqual(self.availability_zone,
az.get_instance_availability_zone(self.context, fake_inst))
|
|
__author__ = 'Kris & Christian'
from django.utils import unittest
from django.test.client import Client
from django.test.client import RequestFactory
from django.test import TestCase
from app.models import *
from app.views import *
from django.contrib.auth.models import User
import json
#class MyFuncTestCase(unittest.TestCase):
# def testBasic(self):
# a = ['larry', 'curly', 'moe']
# self.assertEqual(my_func(a, 0), 'larry')
# self.assertEqual(my_func(a, 1), 'curly')
class GetTests(unittest.TestCase):
c = Client()
# def testLoginOK(self):
# response = self.c.get('/mynode/')
# self.assertEqual(response.status_code, 200)
def testRegistrationOK(self):
response = self.c.get('/mynode/register/')
self.assertEqual(response.status_code, 200)
def testAuthOK(self):
self.username = 'test'
self.email = '[email protected]'
self.password = 'test'
user = User.objects.create_user(self.username, self.email, self.password)
Users.objects.create(user = user)
login = self.c.login(username=self.username, password=self.password)
self.assertEqual(login, True)
response = self.c.get('/mynode/profile/')
self.assertEqual(response.status_code, 200)
response = self.c.get('/mynode/stream/')
self.assertEqual(response.status_code, 200)
response = self.c.get('/mynode/friends/')
self.assertEqual(response.status_code, 200)
#Looks like we had a few of the same tests Christian
class testRunner(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = User.objects.create_user(
username="admin",
email="[email protected]",
password="password",
first_name="Admin",
last_name="Person")
self.app_user = Users.objects.create(user=self.user, git_url="topched")
self.post = Post.objects.create(author=self.user, content="My first post")
self.post_id = self.post.id
def test_github_feed(self):
url = "http://api.github.com/users/topched"
resp = self.client.get(url)
print resp
def test_match_friends_from_list(self):
tempUser1 = User.objects.create_user(
username="testPerson1",
email="[email protected]",
password="password",
first_name="Test",
last_name="Person")
temp_appUser1 = Users.objects.create(user=tempUser1, git_url="test.git")
tempUser2 = User.objects.create_user(
username="testPerson2",
email="[email protected]",
password="password",
first_name="Test",
last_name="Person")
temp_appUser2 = Users.objects.create(user=tempUser2, git_url="test.git")
tempUser3 = User.objects.create_user(
username="testPerson3",
email="[email protected]",
password="password",
first_name="Test",
last_name="Person")
temp_appUser3 = Users.objects.create(user=tempUser3, git_url="test.git")
tempUser4 = User.objects.create_user(
username="testPerson4",
email="[email protected]",
password="password",
first_name="Test",
last_name="Person")
temp_appUser4 = Users.objects.create(user=tempUser4, git_url="test.git")
url = '/mynode/friends/friend/create/'
#admin sends friend request to tempuser1
self.client.login(username='admin', password='password')
self.client.post(url,{'receiver_display_name': tempUser1.username})
self.assertEquals(len(Friend.objects.all()), 1)
#tempuser1 accepts the friend request
self.client.login(username=tempUser1.username, password='password')
url1 = '/mynode/friends/' + str(self.user.id) + '/confirm/'
self.client.post(url1)
self.assertEquals(len(Friend.objects.all()), 2)
#admin sends friend request to tempuser2
self.client.login(username='admin', password='password')
self.client.post(url, {'receiver_display_name': tempUser2.username})
self.assertEquals(len(Friend.objects.all()), 3)
#tempuser2 accepts friend request
self.client.login(username=tempUser2.username, password='password')
url2 = '/mynode/friends/' + str(self.user.id) + '/confirm/'
self.client.post(url2)
self.assertEquals(len(Friend.objects.all()), 4)
#Are they friends using the api
# tmpUrl = "/service/friends" + self.app_user.uuid + "/" + temp_appUser1.uuid
# resp = self.client.get(tmpUrl)
# print resp.content
#json request to send
send_json = {}
send_json['query'] = "friends"
send_json['author'] = self.app_user.uuid
send_json['authors'] = [temp_appUser1.uuid, temp_appUser2.uuid, temp_appUser3.uuid, temp_appUser4.uuid]
#send the post
url3 = "/service/friends/" + self.app_user.uuid
resp = self.client.post(url3, data=json.dumps(send_json), content_type="application/json")
expectedJson = {}
expectedJson['query'] = 'friends'
expectedJson['author'] = self.app_user.uuid
#TODO return proper values in api
expectedJson['friends'] = [temp_appUser1.uuid, temp_appUser2.uuid]
#compare response to expected
return_vals = json.loads(resp.content)
self.assertEquals(expectedJson, return_vals)
#send a request with no results
send_json = {}
send_json['query'] = 'friends'
send_json['author'] = temp_appUser4.uuid
send_json['authors'] = [self.app_user.uuid, temp_appUser1.uuid, temp_appUser2.uuid, temp_appUser3.uuid]
url4 = "/service/friends/" + temp_appUser4.uuid
resp = self.client.post(url4, json.dumps(send_json), content_type="application/json")
expectedJson = {}
expectedJson['query'] = 'friends'
expectedJson['author'] = temp_appUser4.uuid
expectedJson['friends'] = []
return_vals = json.loads(resp.content)
self.assertEquals(expectedJson, return_vals)
def test_create_and_confirm_friendship(self):
tempUser = User.objects.create_user(
username="testPerson",
email="[email protected]",
password="password",
first_name="Test",
last_name="Person")
temp_appUser = Users.objects.create(user=tempUser, git_url="test.git")
friends = Friend.objects.all()
start = len(friends)
url = '/mynode/friends/friend/create/'
self.client.login(username='admin', password='password')
#print tempUser.username
self.client.post(
url,
{'receiver_display_name': tempUser.username})
friends = Friend.objects.all()
end = len(friends)
#follow created
self.assertEqual(start, end-1)
#Admin sends friend request
self.assertEqual(friends[0].requester,self.user)
#TempUser receives request
self.assertEqual(friends[0].receiver,tempUser)
#friendship should not be accepted yet
self.assertEqual(friends[0].accepted,0)
url1 = '/mynode/friends/' + str(self.user.id) + '/confirm/'
#login and accept friend request
self.client.login(username='testPerson', password='password')
resp2 = self.client.post(url1)
friends = Friend.objects.all()
self.assertEquals(len(friends), 2)
self.assertEquals(friends[0].accepted, 1)
self.assertEquals(friends[1].accepted, 1)
def test_create_user(self):
resp = self.client.get('/mynode/register', follow=True)
self.assertEqual(resp.status_code, 200)
tmp = User.objects.all()
start = len(tmp)
resp = self.client.post(
'/mynode/register/',
{'username':'someUser',
'email':'[email protected]',
'pwd':'password',
'surname':'some',
'lastname':'user',
'git':'fake.git'})
tmp = User.objects.all()
end = len(tmp)
#Created exactly one new user
self.assertEqual(end, start+1)
#Check to make sure the user was created correctly
self.assertEqual(tmp[end-1].username,'someUser')
def test_create_post(self):
posts = Post.objects.all()
self.client.login(username='admin',password='password')
resp = self.client.post(
'/mynode/stream/post/create/',
{'content': 'My second post', 'title': 'My test title', 'content-type': 1, 'visibility':1}
)
tmp = Post.objects.all()
#Exactly 2 posts after creating a new one
self.assertEqual(len(tmp),2)
#Make sure the post contains the correct info
self.assertEqual(tmp[1].content, "My second post")
def test_delete_post(self):
PostToDelete = Post.objects.create(author=self.user,content="A post to delete")
self.client.login(username='admin',password='password')
posts = Post.objects.all()
start = len(posts)
url = '/mynode/stream/post/' + str(PostToDelete.id) + '/delete/'
resp = self.client.post(url)
tmp = Post.objects.all()
#Make sure exactly one post was deleted
self.assertEqual(len(tmp), start-1)
def test_create_comment(self):
self.client.login(username='admin',password='password')
tmp = Comment.objects.all()
start = len(tmp)
url = '/mynode/stream/post/' + str(self.post_id) + '/comments/'
resp = self.client.post(
url,
{'parent_post':self.post_id, 'author':self.user, 'content':'My first comment'})
tmp = Comment.objects.all()
end = len(tmp)
#Make sure exactly one comment was added
self.assertEqual(start+1,end)
val = tmp[end-1].content
#Make sure the comment contains the correct info
self.assertEqual(val, 'My first comment')
def test_get_stream(self):
#No logged in user - should redirect to login
resp = self.client.get('/mynode/stream/', follow=True)
self.assertRedirects(resp, '/mynode/login/')
#Logged user should return the stream page
self.client.login(username='admin', password='password')
resp = self.client.get('/mynode/stream/')
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, 'stream_page.html')
def test_get_profile(self):
#No logged in user - should redirect
resp = self.client.get('/mynode/profile/', follow=True)
self.assertRedirects(resp, '/mynode/login/')
#Logged in user should return profile page
self.client.login(username='admin', password='password')
resp = self.client.get('/mynode/profile/')
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, 'profile_page.html')
def test_get_friends(self):
#No logged in user - should redirect
resp = self.client.get('/mynode/friends/', follow=True)
self.assertRedirects(resp, '/mynode/login/')
#Logged in user should return friends page
self.client.login(username='admin', password='password')
resp = self.client.get('/mynode/friends/')
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, 'friend_page.html')
def test_get_admin(self):
#Logged in user
self.client.login(username='admin', password='password')
resp = self.client.get('/admin/')
self.assertEqual(resp.status_code, 200)
# def test_post_to_api(self):
# #Logged in user
# tempUser1 = User.objects.create_user(
# username="testPerson1",
# email="[email protected]",
# password="password",
# first_name="Test",
# last_name="Person")
# tempUser1.save()
# self.client.login(username='testPerson1', password='password')
# post = Post.objects.create(id=100,author=tempUser1)
# post.save()
# resp = self.client.put('/service/posts/' + str(post.id),
# json.dumps({'content': '1', 'title': 'My test title', 'content-type': 1, 'visibility':1}))
# print resp
# self.assertEqual(Post.objects.get(id=post.id).content, '1')
#not sure why this test doesn't work
# def test_root_redirect(self):
#
# #General root redirection
# resp = self.client.get('/')
# self.assertRedirects(resp, '/mynode/login/')
|
|
#!/usr/bin/env python
import sys
import os
import pmagpy.pmag as pmag
def main():
"""
NAME
make_magic_plots.py
DESCRIPTION
inspects magic directory for available plots.
SYNTAX
make_magic_plots.py [command line options]
INPUT
magic files
OPTIONS
-h prints help message and quits
-f FILE specifies input file name
-fmt [png,eps,svg,jpg,pdf] specify format, default is png
"""
dirlist = ['./']
dir_path = os.getcwd()
names = os.listdir(dir_path)
for n in names:
if 'Location' in n:
dirlist.append(n)
if '-fmt' in sys.argv:
ind = sys.argv.index("-fmt")
fmt = sys.argv[ind+1]
else:
fmt = 'png'
if '-f' in sys.argv:
ind = sys.argv.index("-f")
filelist = [sys.argv[ind+1]]
else:
filelist = os.listdir(dir_path)
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
for loc in dirlist:
print('working on: ', loc)
os.chdir(loc) # change working directories to each location
crd = 's'
if 'er_samples.txt' in filelist: # find coordinate systems
samps, file_type = pmag.magic_read(
'er_samples.txt') # read in data
# get all non-blank sample orientations
Srecs = pmag.get_dictitem(samps, 'sample_azimuth', '', 'F')
if len(Srecs) > 0:
crd = 'g'
if 'magic_measurements.txt' in filelist: # start with measurement data
print('working on measurements data')
data, file_type = pmag.magic_read(
'magic_measurements.txt') # read in data
if loc == './':
# get all the blank location names from data file
data = pmag.get_dictitem(data, 'er_location_name', '', 'T')
# looking for zeq_magic possibilities
# get all records with AF demagnetization (LT-AF-Z) method codes
AFZrecs = pmag.get_dictitem(
data, 'magic_method_codes', 'LT-AF-Z', 'has')
# get all records with thermal demagnetization (LT-T-Z) method codes
TZrecs = pmag.get_dictitem(
data, 'magic_method_codes', 'LT-T-Z', 'has')
# get all records with microwave demagnetization (LT-M-Z) method codes
MZrecs = pmag.get_dictitem(
data, 'magic_method_codes', 'LT-M-Z', 'has')
# get all dec measurements
Drecs = pmag.get_dictitem(data, 'measurement_dec', '', 'F')
# get all inc measurements
Irecs = pmag.get_dictitem(data, 'measurement_inc', '', 'F')
Mkeys = ['measurement_magnitude', 'measurement_magn_moment',
'measurement_magn_volume', 'measurement_magn_mass']
for key in Mkeys:
Mrecs = pmag.get_dictitem(
data, key, '', 'F') # get intensity data
if len(Mrecs) > 0:
break
# potential for stepwise demag curves
if (len(AFZrecs) > 0 or len(TZrecs) > 0 or len(MZrecs) > 0) and len(Drecs) > 0 and len(Irecs) > 0 and len(Mrecs) > 0:
print('zeq_magic.py -fsp pmag_specimens.txt -sav -fmt ' +
fmt+' -crd '+crd)
os.system('zeq_magic.py -sav -fmt '+fmt+' -crd '+crd)
# looking for thellier_magic possibilities
if len(pmag.get_dictitem(data, 'magic_method_codes', 'LP-PI-TRM', 'has')) > 0:
print('thellier_magic.py -fsp pmag_specimens.txt -sav -fmt '+fmt)
os.system('thellier_magic.py -sav -fmt '+fmt)
# looking for hysteresis possibilities
if len(pmag.get_dictitem(data, 'magic_method_codes', 'LP-HYS', 'has')) > 0: # find hyst experiments
print('quick_hyst.py -sav -fmt '+fmt)
os.system('quick_hyst.py -sav -fmt '+fmt)
        if 'pmag_results.txt' in filelist:  # now work on the results data
data, file_type = pmag.magic_read(
'pmag_results.txt') # read in data
print('number of datapoints: ', len(data))
if loc == './':
# get all the concatenated location names from data file
data = pmag.get_dictitem(data, 'er_location_names', ':', 'has')
print('number of datapoints: ', len(data), loc)
print('working on pmag_results directions')
SiteDIs = pmag.get_dictitem(
data, 'average_dec', "", 'F') # find decs
print('number of directions: ', len(SiteDIs))
SiteDIs = pmag.get_dictitem(
SiteDIs, 'average_inc', "", 'F') # find decs and incs
print('number of directions: ', len(SiteDIs))
# only individual results - not poles
SiteDIs = pmag.get_dictitem(SiteDIs, 'data_type', 'i', 'has')
print('number of directions: ', len(SiteDIs))
# tilt corrected coordinates
SiteDIs_t = pmag.get_dictitem(
SiteDIs, 'tilt_correction', '100', 'T')
            print('number of tilt-corrected directions: ', len(SiteDIs_t))
if len(SiteDIs_t) > 0:
print('eqarea_magic.py -sav -crd t -fmt '+fmt)
os.system('eqarea_magic.py -sav -crd t -fmt '+fmt)
elif len(SiteDIs) > 0 and 'tilt_correction' not in SiteDIs[0].keys():
print('eqarea_magic.py -sav -fmt '+fmt)
os.system('eqarea_magic.py -sav -fmt '+fmt)
else:
SiteDIs_g = pmag.get_dictitem(
SiteDIs, 'tilt_correction', '0', 'T') # geographic coordinates
if len(SiteDIs_g) > 0:
print('eqarea_magic.py -sav -crd g -fmt '+fmt)
os.system('eqarea_magic.py -sav -crd g -fmt '+fmt)
else:
SiteDIs_s = pmag.get_dictitem(
SiteDIs, 'tilt_correction', '-1', 'T') # sample coordinates
if len(SiteDIs_s) > 0:
print('eqarea_magic.py -sav -crd s -fmt '+fmt)
os.system('eqarea_magic.py -sav -crd s -fmt '+fmt)
else:
SiteDIs_x = pmag.get_dictitem(
SiteDIs, 'tilt_correction', '', 'T') # no coordinates
if len(SiteDIs_x) > 0:
print('eqarea_magic.py -sav -fmt '+fmt)
os.system('eqarea_magic.py -sav -fmt '+fmt)
print('working on pmag_results VGP map')
VGPs = pmag.get_dictitem(
SiteDIs, 'vgp_lat', "", 'F') # are there any VGPs?
if len(VGPs) > 0: # YES!
os.system(
'vgpmap_magic.py -prj moll -res c -sym ro 5 -sav -fmt png')
print('working on pmag_results intensities')
os.system(
'magic_select.py -f pmag_results.txt -key data_type i T -F tmp.txt')
os.system(
'magic_select.py -f tmp.txt -key average_int 0. has -F tmp1.txt')
os.system(
"grab_magic_key.py -f tmp1.txt -key average_int | awk '{print $1*1e6}' >tmp2.txt")
data, file_type = pmag.magic_read('tmp1.txt') # read in data
locations = pmag.get_dictkey(data, 'er_location_names', "")
histfile = 'LO:_'+locations[0]+'_intensities_histogram:_.'+fmt
os.system(
"histplot.py -b 1 -xlab 'Intensity (uT)' -sav -f tmp2.txt -F " + histfile)
print(
"histplot.py -b 1 -xlab 'Intensity (uT)' -sav -f tmp2.txt -F " + histfile)
os.system('rm tmp*.txt')
        if 'rmag_hysteresis.txt' in filelist:  # now work on hysteresis data
print('working on rmag_hysteresis')
data, file_type = pmag.magic_read(
'rmag_hysteresis.txt') # read in data
if loc == './':
# get all the blank location names from data file
data = pmag.get_dictitem(data, 'er_location_name', '', 'T')
hdata = pmag.get_dictitem(data, 'hysteresis_bcr', '', 'F')
hdata = pmag.get_dictitem(hdata, 'hysteresis_mr_moment', '', 'F')
hdata = pmag.get_dictitem(hdata, 'hysteresis_ms_moment', '', 'F')
# there are data for a dayplot
hdata = pmag.get_dictitem(hdata, 'hysteresis_bc', '', 'F')
if len(hdata) > 0:
print('dayplot_magic.py -sav -fmt '+fmt)
os.system('dayplot_magic.py -sav -fmt '+fmt)
# if 'er_sites.txt' in filelist: # start with measurement data
# print 'working on er_sites'
#os.system('basemap_magic.py -sav -fmt '+fmt)
if 'rmag_anisotropy.txt' in filelist: # do anisotropy plots if possible
print('working on rmag_anisotropy')
data, file_type = pmag.magic_read(
'rmag_anisotropy.txt') # read in data
if loc == './':
# get all the blank location names from data file
data = pmag.get_dictitem(data, 'er_location_name', '', 'T')
# get specimen coordinates
sdata = pmag.get_dictitem(
data, 'anisotropy_tilt_correction', '-1', 'T')
# get specimen coordinates
gdata = pmag.get_dictitem(
data, 'anisotropy_tilt_correction', '0', 'T')
# get specimen coordinates
tdata = pmag.get_dictitem(
data, 'anisotropy_tilt_correction', '100', 'T')
if len(sdata) > 3:
print('aniso_magic.py -x -B -crd s -sav -fmt '+fmt)
os.system('aniso_magic.py -x -B -crd s -sav -fmt '+fmt)
if len(gdata) > 3:
os.system('aniso_magic.py -x -B -crd g -sav -fmt '+fmt)
if len(tdata) > 3:
os.system('aniso_magic.py -x -B -crd t -sav -fmt '+fmt)
if loc != './':
            os.chdir('..')  # change back to the parent directory
if __name__ == "__main__":
main()
|
|
"""
This module tests the TssList class
"""
import unittest
from datetime import date, datetime
import json
import numpy as np
from thymus.timeseries import Timeseries
from thymus.tsslist import TssList
class TestTssList(unittest.TestCase):
""" This class tests the class TssList. """
def setUp(self):
# three timeseries
self.ts = Timeseries()
self.ts.key = "Test Key"
self.ts.columns = ["F1"]
start_date = datetime(2015, 12, 31).toordinal()
self.ts.dseries = start_date + np.arange(10)
self.ts.tseries = np.arange(10)
self.ts.make_arrays()
# longer timeseries
self.ts_long = Timeseries()
        self.ts_long.columns = ["F1"]
start_date = datetime(2015, 12, 31).toordinal()
self.ts_long.dseries = start_date + np.arange(20)
self.ts_long.tseries = np.arange(20)
self.ts_long.make_arrays()
# shorter timeseries with no columns
self.ts_short = Timeseries()
start_date = datetime(2015, 12, 31).toordinal()
self.ts_short.dseries = start_date + np.arange(5)
self.ts_short.tseries = np.arange(5)
self.ts_short.make_arrays()
self.tss = TssList([self.ts, self.ts_long, self.ts_short])
def test_class_init_(self):
"""Test class initialization."""
self.assertEqual(len(self.tss), 3)
tss = TssList()
self.assertEqual(len(tss), 0)
tss.append(Timeseries())
tss.append(Timeseries())
tss.append(Timeseries())
self.assertEqual(len(tss), 3)
# initialize with something other than a list
# could expand this to verifying the contents of the list
self.assertRaises(ValueError, TssList, 3)
# from tuple
tss = TssList((Timeseries(), Timeseries(), Timeseries()))
def test_tsslist_min_date(self):
"""Tests min date """
self.assertEqual(self.tss.min_date(), self.ts.start_date("datetime"))
tmp_ts0 = Timeseries()
tmp_ts0.dseries = datetime(2014, 12, 31).toordinal() + np.arange(10)
tmp_ts0.tseries = np.arange(10)
tmp_ts0.make_arrays()
self.tss.append(tmp_ts0)
self.assertEqual(self.tss.min_date(), date(2014, 12, 31))
tss = TssList()
self.assertIsNone(tss.min_date())
def test_tsslist_max_date(self):
"""Tests max date """
self.assertEqual(self.tss.max_date(), date(2016, 1, 19))
tmp_ts0 = Timeseries()
tmp_ts0.dseries = datetime(2018, 12, 31).toordinal() - np.arange(10)
tmp_ts0.tseries = np.arange(10)
tmp_ts0.make_arrays()
self.tss.append(tmp_ts0)
self.assertEqual(self.tss.max_date(), date(2018, 12, 31))
tss = TssList()
self.assertIsNone(tss.max_date())
def test_tsslist_combine(self):
"""
A batch of tests combining columns to one timeseries.
Tests check to see whether the parameters are passed down properly to
each timeseries.
"""
# combine(self, discard=True, pad=None)
ts_new = self.tss.combine(discard=True, pad=None)
# shape corresponds to the shortest length
self.assertEqual(
ts_new.tseries.shape[0], self.ts_short.tseries.shape[0]
)
self.assertEqual(ts_new.tseries.shape[1], 3)
# combine(self, discard=False, pad=0)
ts_new = self.tss.combine(discard=False, pad=0)
# shape corresponds to the longest length
self.assertEqual(
ts_new.tseries.shape[0], self.ts_long.tseries.shape[0]
)
self.assertEqual(ts_new.tseries.shape[1], 3)
# test instance of single timeseries in list, should return a clone
tsslist = TssList([self.ts])
ts_new = tsslist.combine()
self.assertNotEqual(ts_new, self.ts)
self.assertListEqual(ts_new.tseries.tolist(), self.ts.tseries.tolist())
self.assertListEqual(ts_new.dseries.tolist(), self.ts.dseries.tolist())
def test_tsslist_get_values(self):
""" Tests the ability to locate the correct row of data. """
date1 = datetime(2016, 1, 4) # existing date within date series
date2 = datetime(2016, 1, 16) # date falling on a weekend
# get data from existing date
self.assertTupleEqual(self.tss.get_values(date=date1), (4.0, 4.0, 4.0))
# attempt to get data from date not present, with notify
self.assertRaises(ValueError, self.tss.get_values, date2, notify=True)
# attempt to get data from date not present, no notify
self.assertTupleEqual(
self.tss.get_values(date=date2), (None, 16.0, None)
)
def test_clone(self):
"""Verifies that a copy is made."""
tss = self.tss.clone()
# is it a separate object
for i, ts_new in enumerate(tss):
ts_orig = self.tss[i]
self.assertIsInstance(ts_orig, Timeseries)
self.assertNotEqual(ts_new, ts_orig)
# do the characteristics match up?
self.assertEqual(len(tss), 3)
ts_orig = self.tss[0]
ts_copy = tss[0]
self.assertEqual(ts_copy.key, ts_orig.key)
self.assertEqual(ts_copy.frequency, ts_orig.frequency)
self.assertTrue(np.array_equal(ts_copy.tseries, ts_orig.tseries))
self.assertTrue(np.array_equal(ts_copy.dseries, ts_orig.dseries))
self.assertListEqual(ts_copy.columns, ts_orig.columns)
self.assertEqual(ts_copy.end_of_period, ts_orig.end_of_period)
ts_orig = self.tss[1]
ts_copy = tss[1]
self.assertEqual(ts_copy.key, ts_orig.key)
self.assertEqual(ts_copy.frequency, ts_orig.frequency)
self.assertTrue(np.array_equal(ts_copy.tseries, ts_orig.tseries))
self.assertTrue(np.array_equal(ts_copy.dseries, ts_orig.dseries))
self.assertEqual(ts_copy.columns, ts_orig.columns)
self.assertEqual(ts_copy.end_of_period, ts_orig.end_of_period)
ts_orig = self.tss[2]
ts_copy = tss[2]
self.assertEqual(ts_copy.key, ts_orig.key)
self.assertEqual(ts_copy.frequency, ts_orig.frequency)
self.assertTrue(np.array_equal(ts_copy.tseries, ts_orig.tseries))
self.assertTrue(np.array_equal(ts_copy.dseries, ts_orig.dseries))
self.assertEqual(ts_copy.columns, ts_orig.columns)
self.assertEqual(ts_copy.end_of_period, ts_orig.end_of_period)
def test_as_dict(self):
"Can it return a dict from the list?"
        self.assertRaises(ValueError, self.tss.as_dict)
test_dict = {}
for i in range(len(self.tss)):
ts = self.tss[i]
ts.key = "key_%i" % (i)
test_dict[ts.key] = ts
self.assertDictEqual(self.tss.as_dict(), test_dict)
tss = TssList()
# no key
ts = Timeseries()
ts.tseries = np.arange(5)
ts.dseries = [date.today().toordinal() + i for i in range(5)]
ts.make_arrays()
tss.append(ts)
self.assertRaises(ValueError, tss.as_dict)
def test_to_json(self):
"""
This function tests sending a TssList to a json format.
        It uses the cheap assumption that, since a TssList is simply a list,
        checking that each timeseries converts is enough to verify the list.
        More needs to be checked.
"""
json_str = self.tss.to_json()
self.assertIsInstance(json.loads(json_str), list)
def test_from_json(self):
"""
        This function tests rebuilding a TssList from a JSON-format string.
This relies heavily on the test for Timeseries.from_json.
"""
json_str = self.tss.to_json()
tsslist = TssList()
tsslist.from_json(json_str)
self.assertEqual(len(tsslist), 3)
# did it come back in the right order?
self.assertTupleEqual(tsslist[0].shape(), self.ts.shape())
self.assertTupleEqual(tsslist[1].shape(), self.ts_long.shape())
self.assertTupleEqual(tsslist[2].shape(), self.ts_short.shape())
def test_tsslist_do_func(self):
"""Placeholder for future function."""
pass
if __name__ == "__main__":
unittest.main()
|
|
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the album art fetchers."""
import os
import shutil
import responses
import _common
from _common import unittest
from beetsplug import fetchart
from beets.autotag import AlbumInfo, AlbumMatch
from beets import library
from beets import importer
from beets import config
class FetchImageTest(_common.TestCase):
@responses.activate
def run(self, *args, **kwargs):
super(FetchImageTest, self).run(*args, **kwargs)
def mock_response(self, content_type):
responses.add(responses.GET, 'http://example.com', content_type=content_type)
def test_invalid_type_returns_none(self):
self.mock_response('image/watercolour')
artpath = fetchart._fetch_image('http://example.com')
self.assertEqual(artpath, None)
def test_jpeg_type_returns_path(self):
self.mock_response('image/jpeg')
artpath = fetchart._fetch_image('http://example.com')
self.assertNotEqual(artpath, None)
class FSArtTest(_common.TestCase):
def setUp(self):
super(FSArtTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, 'arttest')
os.mkdir(self.dpath)
def test_finds_jpg_in_directory(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
fn = fetchart.art_in_path(self.dpath, ('art',), False)
self.assertEqual(fn, os.path.join(self.dpath, 'a.jpg'))
def test_appropriately_named_file_takes_precedence(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
_common.touch(os.path.join(self.dpath, 'art.jpg'))
fn = fetchart.art_in_path(self.dpath, ('art',), False)
self.assertEqual(fn, os.path.join(self.dpath, 'art.jpg'))
def test_non_image_file_not_identified(self):
_common.touch(os.path.join(self.dpath, 'a.txt'))
fn = fetchart.art_in_path(self.dpath, ('art',), False)
self.assertEqual(fn, None)
def test_cautious_skips_fallback(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
fn = fetchart.art_in_path(self.dpath, ('art',), True)
self.assertEqual(fn, None)
def test_empty_dir(self):
fn = fetchart.art_in_path(self.dpath, ('art',), True)
self.assertEqual(fn, None)
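# A minimal usage sketch of the helper exercised by FSArtTest above. The
# positional signature is inferred from these tests, not from beets docs:
# art_in_path(directory, cover_names, cautious). With the flag False it may
# fall back to any image file; with True it only accepts the named covers.
#
#   fn = fetchart.art_in_path('/tmp/arttest', ('art',), False)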
class CombinedTest(_common.TestCase):
ASIN = 'xxxx'
MBID = 'releaseid'
AMAZON_URL = 'http://images.amazon.com/images/P/{0}.01.LZZZZZZZ.jpg'.format(ASIN)
AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}'.format(ASIN)
CAA_URL = 'http://coverartarchive.org/release/{0}/front-500.jpg'.format(MBID)
def setUp(self):
super(CombinedTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, 'arttest')
os.mkdir(self.dpath)
# Set up configuration.
fetchart.FetchArtPlugin()
@responses.activate
def run(self, *args, **kwargs):
super(CombinedTest, self).run(*args, **kwargs)
def mock_response(self, url, content_type='image/jpeg'):
responses.add(responses.GET, url, content_type=content_type)
def test_main_interface_returns_amazon_art(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = fetchart.art_for_album(album, None)
self.assertNotEqual(artpath, None)
def test_main_interface_returns_none_for_missing_asin_and_path(self):
album = _common.Bag()
artpath = fetchart.art_for_album(album, None)
self.assertEqual(artpath, None)
def test_main_interface_gives_precedence_to_fs_art(self):
_common.touch(os.path.join(self.dpath, 'art.jpg'))
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = fetchart.art_for_album(album, [self.dpath])
self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))
def test_main_interface_falls_back_to_amazon(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = fetchart.art_for_album(album, [self.dpath])
self.assertNotEqual(artpath, None)
self.assertFalse(artpath.startswith(self.dpath))
def test_main_interface_tries_amazon_before_aao(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
fetchart.art_for_album(album, [self.dpath])
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.AMAZON_URL)
def test_main_interface_falls_back_to_aao(self):
self.mock_response(self.AMAZON_URL, content_type='text/html')
album = _common.Bag(asin=self.ASIN)
fetchart.art_for_album(album, [self.dpath])
self.assertEqual(responses.calls[-1].request.url, self.AAO_URL)
def test_main_interface_uses_caa_when_mbid_available(self):
self.mock_response(self.CAA_URL)
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = fetchart.art_for_album(album, None)
self.assertNotEqual(artpath, None)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.CAA_URL)
def test_local_only_does_not_access_network(self):
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = fetchart.art_for_album(album, [self.dpath], local_only=True)
self.assertEqual(artpath, None)
self.assertEqual(len(responses.calls), 0)
def test_local_only_gets_fs_image(self):
_common.touch(os.path.join(self.dpath, 'art.jpg'))
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = fetchart.art_for_album(album, [self.dpath], None, local_only=True)
self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))
self.assertEqual(len(responses.calls), 0)
class AAOTest(_common.TestCase):
ASIN = 'xxxx'
AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}'.format(ASIN)
@responses.activate
def run(self, *args, **kwargs):
super(AAOTest, self).run(*args, **kwargs)
def mock_response(self, url, body):
responses.add(responses.GET, url, body=body, content_type='text/html',
match_querystring=True)
def test_aao_scraper_finds_image(self):
body = """
<br />
<a href="TARGET_URL" title="View larger image" class="thickbox" style="color: #7E9DA2; text-decoration:none;">
<img src="http://www.albumart.org/images/zoom-icon.jpg" alt="View larger image" width="17" height="15" border="0"/></a>
"""
self.mock_response(self.AAO_URL, body)
res = fetchart.aao_art(self.ASIN)
self.assertEqual(res, 'TARGET_URL')
def test_aao_scraper_returns_none_when_no_image_present(self):
self.mock_response(self.AAO_URL, 'blah blah')
res = fetchart.aao_art(self.ASIN)
self.assertEqual(res, None)
class ArtImporterTest(_common.TestCase):
def setUp(self):
super(ArtImporterTest, self).setUp()
# Mock the album art fetcher to always return our test file.
self.art_file = os.path.join(self.temp_dir, 'tmpcover.jpg')
_common.touch(self.art_file)
self.old_afa = fetchart.art_for_album
self.afa_response = self.art_file
def art_for_album(i, p, maxwidth=None, local_only=False):
return self.afa_response
fetchart.art_for_album = art_for_album
# Test library.
self.libpath = os.path.join(self.temp_dir, 'tmplib.blb')
self.libdir = os.path.join(self.temp_dir, 'tmplib')
os.mkdir(self.libdir)
os.mkdir(os.path.join(self.libdir, 'album'))
itempath = os.path.join(self.libdir, 'album', 'test.mp3')
shutil.copyfile(os.path.join(_common.RSRC, 'full.mp3'), itempath)
self.lib = library.Library(self.libpath)
self.i = _common.item()
self.i.path = itempath
self.album = self.lib.add_album([self.i])
self.lib._connection().commit()
# The plugin and import configuration.
self.plugin = fetchart.FetchArtPlugin()
self.session = _common.import_session(self.lib)
# Import task for the coroutine.
self.task = importer.ImportTask(None, None, [self.i])
self.task.is_album = True
self.task.album = self.album
info = AlbumInfo(
album = 'some album',
album_id = 'albumid',
artist = 'some artist',
artist_id = 'artistid',
tracks = [],
)
self.task.set_choice(AlbumMatch(0, info, {}, set(), set()))
def tearDown(self):
self.lib._connection().close()
super(ArtImporterTest, self).tearDown()
fetchart.art_for_album = self.old_afa
def _fetch_art(self, should_exist):
"""Execute the fetch_art coroutine for the task and return the
album's resulting artpath. ``should_exist`` specifies whether to
        assert that the art path was set (to the correct value) or that
the path was not set.
"""
# Execute the two relevant parts of the importer.
self.plugin.fetch_art(self.session, self.task)
self.plugin.assign_art(self.session, self.task)
artpath = self.lib.albums()[0].artpath
if should_exist:
self.assertEqual(artpath,
os.path.join(os.path.dirname(self.i.path), 'cover.jpg'))
self.assertExists(artpath)
else:
self.assertEqual(artpath, None)
return artpath
def test_fetch_art(self):
assert not self.lib.albums()[0].artpath
self._fetch_art(True)
def test_art_not_found(self):
self.afa_response = None
self._fetch_art(False)
def test_no_art_for_singleton(self):
self.task.is_album = False
self._fetch_art(False)
def test_leave_original_file_in_place(self):
self._fetch_art(True)
self.assertExists(self.art_file)
def test_delete_original_file(self):
config['import']['delete'] = True
self._fetch_art(True)
self.assertNotExists(self.art_file)
def test_move_original_file(self):
config['import']['move'] = True
self._fetch_art(True)
self.assertNotExists(self.art_file)
def test_do_not_delete_original_if_already_in_place(self):
artdest = os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
shutil.copyfile(self.art_file, artdest)
self.afa_response = artdest
self._fetch_art(True)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
"""
Download interface for the data stored in coop database (alldata)
This is called from /request/coop/fe.phtml
"""
import datetime
import zipfile
from io import BytesIO, StringIO
import pandas as pd
import psycopg2.extras
from paste.request import parse_formvars
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn, utc, get_dbconnstr
from metpy.units import units
from sqlalchemy import text
DEGC = units.degC
DEGF = units.degF
EXL = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
def f2c(val):
"""Convert F to C."""
return (val * DEGF).to(DEGC).m
def get_scenario_period(ctx):
"""Compute the inclusive start and end dates to fetch scenario data for
Arguments:
ctx dictionary context this app was called with
"""
sts = datetime.date(ctx["scenario_year"], ctx["ets"].month, ctx["ets"].day)
ets = datetime.date(ctx["scenario_year"], 12, 31)
return sts, ets
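# A worked example of get_scenario_period, using hypothetical values: with
# ctx = {"scenario_year": 2020, "ets": datetime.date(2023, 5, 15)} the
# function returns (datetime.date(2020, 5, 15), datetime.date(2020, 12, 31)).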
def get_database():
"""Get database"""
return get_dbconn("coop")
def sane_date(year, month, day):
"""Attempt to account for usage of days outside of the bounds for
a given month"""
# Calculate the last date of the given month
nextmonth = datetime.date(year, month, 1) + datetime.timedelta(days=35)
lastday = nextmonth.replace(day=1) - datetime.timedelta(days=1)
return datetime.date(year, month, min(day, lastday.day))
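# For example (mirroring test_sane_date at the bottom of this module):
#   sane_date(2000, 9, 31) -> datetime.date(2000, 9, 30)
#   sane_date(2000, 2, 31) -> datetime.date(2000, 2, 29)  # leap year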
def get_cgi_dates(form):
"""Figure out which dates are requested via the form, we shall attempt
to account for invalid dates provided!"""
y1 = int(form.get("year1"))
m1 = int(form.get("month1"))
d1 = int(form.get("day1"))
y2 = int(form.get("year2"))
m2 = int(form.get("month2"))
d2 = int(form.get("day2"))
ets = sane_date(y2, m2, d2)
archive_end = datetime.date.today() - datetime.timedelta(days=1)
if ets > archive_end:
ets = archive_end
return [sane_date(y1, m1, d1), ets]
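# Example of the clamping above, with hypothetical form values: a request
# whose end date is today or later is trimmed back to
# datetime.date.today() - datetime.timedelta(days=1), i.e. yesterday.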
def get_cgi_stations(form):
"""Figure out which stations the user wants, return a list of them"""
reqlist = form.getall("station[]")
if not reqlist:
reqlist = form.getall("stations")
if not reqlist:
return []
if "_ALL" in reqlist:
network = form.get("network")
nt = NetworkTable(network, only_online=False)
        return list(nt.sts.keys())
return reqlist
def do_apsim(ctx):
"""
[weather.met.weather]
latitude = 42.1 (DECIMAL DEGREES)
tav = 9.325084 (oC) ! annual average ambient temperature
amp = 29.57153 (oC) ! annual amplitude in mean monthly temperature
year day radn maxt mint rain
() () (MJ/m^2) (oC) (oC) (mm)
1986 1 7.38585 0.8938889 -7.295556 0
"""
if len(ctx["stations"]) > 1:
return (
"ERROR: APSIM output is only "
"permitted for one station at a time."
).encode("ascii")
dbconn = get_database()
cursor = dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
station = ctx["stations"][0]
table = get_tablename(ctx["stations"])
network = f"{station[:2]}CLIMATE"
nt = NetworkTable(network, only_online=False)
thisyear = datetime.datetime.now().year
extra = {}
if ctx["scenario"] == "yes":
sts = datetime.datetime(int(ctx["scenario_year"]), 1, 1)
ets = datetime.datetime(int(ctx["scenario_year"]), 12, 31)
febtest = datetime.date(thisyear, 3, 1) - datetime.timedelta(days=1)
sdaylimit = ""
if febtest.day == 28:
sdaylimit = " and sday != '0229'"
cursor.execute(
f"""
SELECT day, high, low, precip, 1 as doy,
coalesce(narr_srad, merra_srad, hrrr_srad) as srad
from {table} WHERE station = %s
and day >= %s and day <= %s {sdaylimit}""",
(ctx["stations"][0], sts, ets),
)
for row in cursor:
ts = row[0].replace(year=thisyear)
extra[ts] = row
extra[ts]["doy"] = int(ts.strftime("%j"))
if febtest not in extra:
feb28 = datetime.date(thisyear, 2, 28)
extra[febtest] = extra[feb28]
sio = StringIO()
sio.write("! Iowa Environmental Mesonet -- NWS Cooperative Data\n")
sio.write(f"! Created: {utc():%d %b %Y %H:%M:%S} UTC\n")
sio.write("! Contact: daryl herzmann [email protected] 515-294-5978\n")
sio.write("! Station: %s %s\n" % (station, nt.sts[station]["name"]))
sio.write("! Data Period: %s - %s\n" % (ctx["sts"], ctx["ets"]))
if ctx["scenario"] == "yes":
sio.write(
"! !SCENARIO DATA! inserted after: %s replicating year: %s\n"
% (ctx["ets"], ctx["scenario_year"])
)
sio.write("[weather.met.weather]\n")
sio.write(
"latitude = %.1f (DECIMAL DEGREES)\n" % (nt.sts[station]["lat"],)
)
# Compute average temperature!
cursor.execute(
"SELECT avg((high+low)/2) as avgt from ncei_climate91 "
"WHERE station = %s",
(nt.sts[station]["ncei91"],),
)
row = cursor.fetchone()
sio.write(
"tav = %.3f (oC) ! annual average ambient temperature\n"
% (f2c(row["avgt"]),)
)
# Compute the annual amplitude in temperature
cursor.execute(
"""
select max(avg) as h, min(avg) as l from
(SELECT extract(month from valid) as month, avg((high+low)/2.)
from ncei_climate91
WHERE station = %s GROUP by month) as foo
""",
(nt.sts[station]["ncei91"],),
)
row = cursor.fetchone()
sio.write(
("amp = %.3f (oC) ! annual amplitude in mean monthly temperature\n")
% (f2c(row["h"]) - f2c(row["l"]),)
)
sio.write(
"""year day radn maxt mint rain
() () (MJ/m^2) (oC) (oC) (mm)\n"""
)
if ctx.get("hayhoe_model") is not None:
cursor.execute(
"""
SELECT day, high, low, precip,
extract(doy from day) as doy,
0 as srad
from hayhoe_daily WHERE station = %s
and day >= %s and scenario = %s and model = %s
ORDER by day ASC
""",
(
ctx["stations"][0],
ctx["sts"],
ctx["hayhoe_scenario"],
ctx["hayhoe_model"],
),
)
else:
cursor.execute(
f"""
SELECT day, high, low, precip,
extract(doy from day) as doy,
coalesce(narr_srad, merra_srad, hrrr_srad) as srad
from {table}
WHERE station = %s and
day >= %s and day <= %s and high is not null and
low is not null and precip is not null ORDER by day ASC
""",
(station, ctx["sts"], ctx["ets"]),
)
for row in cursor:
srad = -99 if row["srad"] is None else row["srad"]
sio.write(
("%4s %10.0f %10.3f %10.1f %10.1f %10.2f\n")
% (
row["day"].year,
int(row["doy"]),
srad,
f2c(row["high"]),
f2c(row["low"]),
row["precip"] * 25.4,
)
)
if extra:
dec31 = datetime.date(thisyear, 12, 31)
now = row["day"]
while now <= dec31:
row = extra[now]
srad = -99 if row["srad"] is None else row["srad"]
sio.write(
("%4s %10.0f %10.3f %10.1f %10.1f %10.2f\n")
% (
now.year,
int(row["doy"]),
srad,
f2c(row["high"]),
f2c(row["low"]),
row["precip"] * 25.4,
)
)
now += datetime.timedelta(days=1)
return sio.getvalue().encode("ascii")
def do_century(ctx):
"""Materialize the data in Century Format
* Century format (precip cm, avg high C, avg low C)
prec 1980 2.60 6.40 0.90 1.00 0.70 0.00
tmin 1980 14.66 12.10 7.33 -0.89 -5.45 -7.29
tmax 1980 33.24 30.50 27.00 18.37 11.35 9.90
prec 1981 12.00 7.20 0.60 4.90 1.10 0.30
tmin 1981 14.32 12.48 8.17 0.92 -3.25 -8.90
tmax 1981 30.84 28.71 27.02 16.84 12.88 6.82
"""
if len(ctx["stations"]) > 1:
return (
"ERROR: Century output is only "
"permitted for one station at a time."
).encode("ascii")
station = ctx["stations"][0]
network = "%sCLIMATE" % (station[:2],)
nt = NetworkTable(network, only_online=False)
dbconn = get_database()
cursor = dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Automatically set dates to start and end of year to make output clean
sts = datetime.date(ctx["sts"].year, 1, 1)
ets = datetime.date(ctx["ets"].year, 12, 31)
if ets >= datetime.date.today():
ets = datetime.date.today() - datetime.timedelta(days=1)
table = get_tablename(ctx["stations"])
thisyear = datetime.datetime.now().year
cursor.execute(
"""
WITH scenario as (
SELECT """
+ str(thisyear)
+ """::int as year, month, high, low, precip
from """
+ table
+ """
WHERE station = %s and day > %s and day <= %s and sday != '0229'
), obs as (
select year, month, high, low, precip from """
+ table
+ """
WHERE station = %s and day >= %s and day <= %s
), data as (
SELECT * from obs UNION select * from scenario
)
SELECT year, month, avg(high) as tmax, avg(low) as tmin,
sum(precip) as prec from data GROUP by year, month
""",
(station, ctx["scenario_sts"], ctx["scenario_ets"], station, sts, ets),
)
data = {}
for row in cursor:
if row["year"] not in data:
data[row["year"]] = {}
for mo in range(1, 13):
data[row["year"]][mo] = {"prec": -99, "tmin": -99, "tmax": -99}
data[row["year"]][row["month"]] = {
"prec": (row["prec"] * units("inch")).to(units("mm")).m,
"tmin": f2c(row["tmin"]),
"tmax": f2c(row["tmax"]),
}
sio = StringIO()
sio.write("# Iowa Environmental Mesonet -- NWS Cooperative Data\n")
sio.write(f"# Created: {utc():%d %b %Y %H:%M:%S} UTC\n")
sio.write("# Contact: daryl herzmann [email protected] 515-294-5978\n")
sio.write("# Station: %s %s\n" % (station, nt.sts[station]["name"]))
sio.write("# Data Period: %s - %s\n" % (sts, ets))
if ctx["scenario"] == "yes":
sio.write(
"# !SCENARIO DATA! inserted after: %s replicating year: %s\n"
% (ctx["ets"], ctx["scenario_year"])
)
idxs = ["prec", "tmin", "tmax"]
for year in range(sts.year, ets.year + 1):
for idx in idxs:
sio.write(
(
"%s %s%7.2f%7.2f%7.2f%7.2f%7.2f%7.2f%7.2f"
"%7.2f%7.2f%7.2f%7.2f%7.2f\n"
)
% (
idx,
year,
data[year][1][idx],
data[year][2][idx],
data[year][3][idx],
data[year][4][idx],
data[year][5][idx],
data[year][6][idx],
data[year][7][idx],
data[year][8][idx],
data[year][9][idx],
data[year][10][idx],
data[year][11][idx],
data[year][12][idx],
)
)
return sio.getvalue().encode("ascii")
def do_daycent(ctx):
"""Materialize data for daycent
Daily Weather Data File (use extra weather drivers = 0):
> 1 1 1990 1 7.040 -10.300 0.000
NOTES:
Column 1 - Day of month, 1-31
Column 2 - Month of year, 1-12
Column 3 - Year
Column 4 - Day of the year, 1-366
Column 5 - Maximum temperature for day, degrees C
Column 6 - Minimum temperature for day, degrees C
Column 7 - Precipitation for day, centimeters
"""
if len(ctx["stations"]) > 1:
return (
"ERROR: Daycent output is only "
"permitted for one station at a time."
).encode("ascii")
dbconn = get_database()
cursor = dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
table = get_tablename(ctx["stations"])
extra = {}
thisyear = datetime.datetime.now().year
if ctx["scenario"] == "yes":
sts = datetime.datetime(int(ctx["scenario_year"]), 1, 1)
ets = datetime.datetime(int(ctx["scenario_year"]), 12, 31)
febtest = datetime.date(thisyear, 3, 1) - datetime.timedelta(days=1)
sdaylimit = ""
if febtest.day == 28:
sdaylimit = " and sday != '0229'"
cursor.execute(
"""
SELECT day, high, low, precip
from """
+ table
+ """ WHERE station = %s
and day >= %s and day <= %s """
+ sdaylimit
+ """
""",
(ctx["stations"][0], sts, ets),
)
for row in cursor:
ts = row[0].replace(year=thisyear)
extra[ts] = row
if febtest not in extra:
feb28 = datetime.date(thisyear, 2, 28)
extra[febtest] = extra[feb28]
if ctx.get("hayhoe_model") is not None:
cursor.execute(
"""
SELECT day, high, low, precip,
extract(doy from day) as doy
from hayhoe_daily WHERE station = %s
and day >= %s and scenario = %s and model = %s
ORDER by day ASC
""",
(
ctx["stations"][0],
ctx["sts"],
ctx["hayhoe_scenario"],
ctx["hayhoe_model"],
),
)
else:
cursor.execute(
"""
SELECT day, high, low, precip,
extract(doy from day) as doy
from """
+ table
+ """ WHERE station = %s
and day >= %s and day <= %s ORDER by day ASC
""",
(ctx["stations"][0], ctx["sts"], ctx["ets"]),
)
sio = StringIO()
sio.write("Daily Weather Data File (use extra weather drivers = 0):\n\n")
for row in cursor:
sio.write(
("%s %s %s %s %.2f %.2f %.2f\n")
% (
row["day"].day,
row["day"].month,
row["day"].year,
int(row["doy"]),
f2c(row["high"]),
f2c(row["low"]),
(row["precip"] * units("inch")).to(units("cm")).m,
)
)
if extra:
dec31 = datetime.date(thisyear, 12, 31)
now = row["day"]
while now <= dec31:
row = extra[now]
sio.write(
("%s %s %s %s %.2f %.2f %.2f\n")
% (
now.day,
now.month,
now.year,
int(now.strftime("%j")),
f2c(row["high"]),
f2c(row["low"]),
(row["precip"] * units("inch")).to(units("cm")).m,
)
)
now += datetime.timedelta(days=1)
return sio.getvalue().encode("ascii")
def get_tablename(stations):
"""Figure out the table that has the data for these stations"""
states = []
for sid in stations:
if sid[:2] not in states:
states.append(sid[:2])
if len(states) == 1:
return "alldata_%s" % (states[0],)
return "alldata"
def get_stationtable(stations):
"""Figure out our station table!"""
states = []
networks = []
for sid in stations:
if sid[:2] not in states:
states.append(sid[:2])
networks.append("%sCLIMATE" % (sid[:2],))
return NetworkTable(networks, only_online=False)
def do_simple(ctx):
"""Generate Simple output"""
dbconn = get_database()
cursor = dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
table = get_tablename(ctx["stations"])
nt = get_stationtable(ctx["stations"])
thisyear = datetime.datetime.now().year
if len(ctx["stations"]) == 1:
ctx["stations"].append("X")
limitrowcount = "LIMIT 1048000" if ctx["what"] == "excel" else ""
sql = f"""
WITH scenario as (
SELECT station, high, low, precip, snow, snowd, narr_srad,
temp_estimated, precip_estimated,
merra_srad, merra_srad_cs, hrrr_srad,
to_char(('{thisyear}-'||month||'-'||extract(day from day))::date,
'YYYY/mm/dd') as day,
extract(doy from day) as doy,
gddxx(50, 86, high, low) as gdd_50_86,
gddxx(40, 86, high, low) as gdd_40_86,
round((5.0/9.0 * (high - 32.0))::numeric,1) as highc,
round((5.0/9.0 * (low - 32.0))::numeric,1) as lowc,
round((precip * 25.4)::numeric,1) as precipmm
from {table} WHERE
station IN {str(tuple(ctx["stations"]))} and
day >= %s and day <= %s
), obs as (
SELECT station, high, low, precip, snow, snowd, narr_srad,
temp_estimated, precip_estimated,
merra_srad, merra_srad_cs, hrrr_srad,
to_char(day, 'YYYY/mm/dd') as day,
extract(doy from day) as doy,
gddxx(50, 86, high, low) as gdd_50_86,
gddxx(40, 86, high, low) as gdd_40_86,
round((5.0/9.0 * (high - 32.0))::numeric,1) as highc,
round((5.0/9.0 * (low - 32.0))::numeric,1) as lowc,
round((precip * 25.4)::numeric,1) as precipmm
from {table} WHERE station IN {str(tuple(ctx["stations"]))} and
day >= %s and day <= %s
), total as (
SELECT * from obs UNION SELECT * from scenario
)
SELECT * from total ORDER by day ASC {limitrowcount}"""
args = (ctx["scenario_sts"], ctx["scenario_ets"], ctx["sts"], ctx["ets"])
cols = ["station", "station_name", "day", "doy"]
if ctx["inclatlon"] == "yes":
cols.insert(2, "lat")
cols.insert(3, "lon")
cols = cols + ctx["myvars"]
if ctx["what"] == "excel":
# Do the excel logic
df = pd.read_sql(sql, get_dbconnstr("coop"), params=args)
# Convert day into a python date type
df["day"] = pd.to_datetime(df["day"]).dt.date
def _gs(x, y):
return nt.sts[x][y]
df["station_name"] = [_gs(x, "name") for x in df["station"]]
if "lat" in cols:
df["lat"] = [_gs(x, "lat") for x in df["station"]]
df["lon"] = [_gs(x, "lon") for x in df["station"]]
bio = BytesIO()
df.to_excel(bio, columns=cols, index=False, engine="openpyxl")
return bio.getvalue()
cursor.execute(sql, args)
sio = StringIO()
sio.write("# Iowa Environmental Mesonet -- NWS Cooperative Data\n")
sio.write(f"# Created: {utc():%d %b %Y %H:%M:%S} UTC\n")
sio.write("# Contact: daryl herzmann [email protected] 515-294-5978\n")
sio.write("# Data Period: %s - %s\n" % (ctx["sts"], ctx["ets"]))
if ctx["scenario"] == "yes":
sio.write(
"# !SCENARIO DATA! inserted after: %s replicating year: %s\n"
% (ctx["ets"], ctx["scenario_year"])
)
p = {"comma": ",", "tab": "\t", "space": " "}
d = p[ctx["delim"]]
sio.write(d.join(cols) + "\r\n")
for row in cursor:
sid = row["station"]
dc = row.copy()
dc["station_name"] = nt.sts[sid]["name"]
dc["lat"] = "%.4f" % (nt.sts[sid]["lat"],)
dc["lon"] = "%.4f" % (nt.sts[sid]["lon"],)
dc["doy"] = "%.0f" % (dc["doy"],)
res = []
for n in cols:
res.append(str(dc[n]))
sio.write((d.join(res)).replace("None", "M") + "\r\n")
return sio.getvalue().encode("ascii")
def do_salus(ctx):
"""Generate SALUS
StationID, Year, DOY, SRAD, Tmax, Tmin, Rain, DewP, Wind, Par, dbnum
CTRL, 1981, 1, 5.62203, 2.79032, -3.53361, 5.43766, NaN, NaN, NaN, 2
CTRL, 1981, 2, 3.1898, 1.59032, -6.83361, 1.38607, NaN, NaN, NaN, 3
"""
if len(ctx["stations"]) > 1:
return (
"ERROR: SALUS output is only "
"permitted for one station at a time."
).encode("ascii")
dbconn = get_database()
cursor = dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
scenario_year = 2030
asts = datetime.date(2030, 1, 1)
if ctx["scenario"] == "yes":
# Tricky!
scenario_year = ctx["scenario_year"]
today = datetime.date.today()
asts = datetime.date(scenario_year, today.month, today.day)
table = get_tablename(ctx["stations"])
station = ctx["stations"][0]
thisyear = datetime.datetime.now().year
cursor.execute(
f"""
WITH scenario as (
SELECT
('{thisyear}-'||month||'-'||extract(day from day))::date
as day,
high, low, precip, station,
coalesce(narr_srad, merra_srad, hrrr_srad) as srad
from {table} WHERE station = %s and
day >= %s and year = %s
), obs as (
SELECT day,
high, low, precip, station,
coalesce(narr_srad, merra_srad, hrrr_srad) as srad
from {table} WHERE station = %s and
day >= %s and day <= %s ORDER by day ASC
), total as (
SELECT *, extract(doy from day) as doy from obs
UNION SELECT * from scenario
)
SELECT * from total ORDER by day ASC
""",
(station, asts, scenario_year, station, ctx["sts"], ctx["ets"]),
)
sio = StringIO()
sio.write(
(
"StationID, Year, DOY, SRAD, Tmax, Tmin, Rain, DewP, "
"Wind, Par, dbnum\n"
)
)
for i, row in enumerate(cursor):
srad = -99 if row["srad"] is None else row["srad"]
sio.write(
("%s, %s, %s, %.4f, %.2f, %.2f, %.2f, , , , %s\n")
% (
station[:4],
row["day"].year,
int(row["doy"]),
srad,
f2c(row["high"]),
f2c(row["low"]),
row["precip"] * 25.4,
i + 2,
)
)
return sio.getvalue().encode("ascii")
def do_dndc(ctx):
"""Process DNDC
* One file per year! named StationName / StationName_YYYY.txt
    * julian day, tmax C, tmin C, precip cm separated by spaces
"""
dbconn = get_database()
cursor = dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
table = get_tablename(ctx["stations"])
nt = get_stationtable(ctx["stations"])
if len(ctx["stations"]) == 1:
ctx["stations"].append("X")
scenario_year = 2030
asts = datetime.date(2030, 1, 1)
if ctx["scenario"] == "yes":
# Tricky!
scenario_year = ctx["scenario_year"]
today = datetime.date.today()
asts = datetime.date(scenario_year, today.month, today.day)
thisyear = datetime.datetime.now().year
cursor.execute(
f"""
WITH scenario as (
SELECT
('{thisyear}-'||month||'-'||extract(day from day))::date as day,
high, low, precip, station from {table}
WHERE station IN %s and day >= %s and year = %s),
obs as (
SELECT day, high, low, precip, station from {table}
WHERE station IN %s and day >= %s and day <= %s),
total as (
SELECT *, extract(doy from day) as doy from obs UNION
SELECT *, extract(doy from day) as doy from scenario
)
SELECT * from total ORDER by day ASC
""",
(
tuple(ctx["stations"]),
asts,
scenario_year,
tuple(ctx["stations"]),
ctx["sts"],
ctx["ets"],
),
)
zipfiles = {}
for row in cursor:
station = row["station"]
sname = nt.sts[station]["name"].replace(" ", "_")
fn = f"{sname}/{sname}_{row['day'].year}.txt"
if fn not in zipfiles:
zipfiles[fn] = ""
zipfiles[fn] += ("%s %.2f %.2f %.2f\n") % (
int(row["doy"]),
f2c(row["high"]),
f2c(row["low"]),
row["precip"] * 2.54,
)
sio = BytesIO()
with zipfile.ZipFile(sio, "a") as zf:
for fn, fp in zipfiles.items():
zf.writestr(fn, fp)
return sio.getvalue()
def do_swat(ctx):
"""SWAT
Two files, one for precip [mm] and one for hi and low temperature [C]
"""
table = get_tablename(ctx["stations"])
if len(ctx["stations"]) == 1:
ctx["stations"].append("X")
scenario_year = 2030
asts = datetime.date(2030, 1, 1)
if ctx["scenario"] == "yes":
# Tricky!
scenario_year = ctx["scenario_year"]
today = datetime.date.today()
asts = datetime.date(scenario_year, today.month, today.day)
thisyear = datetime.datetime.now().year
df = pd.read_sql(
text(
f"""
WITH scenario as (
SELECT
('{thisyear}-'||month||'-'||extract(day from day))::date as day,
high, low, precip, station from {table}
WHERE station IN :sids and day >= :asts and year = :scenario_year),
obs as (
SELECT day, high, low, precip, station from {table}
WHERE station IN :sids and day >= :sts and day <= :ets),
total as (
SELECT *, extract(doy from day) as doy from obs UNION
SELECT *, extract(doy from day) as doy from scenario
)
SELECT * from total ORDER by day ASC
"""
),
get_dbconnstr("coop"),
params={
"sids": tuple(ctx["stations"]),
"asts": asts,
"scenario_year": scenario_year,
"sts": ctx["sts"],
"ets": ctx["ets"],
},
index_col=None,
)
df["tmax"] = f2c(df["high"].values)
df["tmin"] = f2c(df["low"].values)
df["pcpn"] = (df["precip"].values * units("inch")).to(units("mm")).m
zipfiles = {}
for station, df2 in df.groupby(by="station"):
pcpfn = f"swatfiles/{station}.pcp"
tmpfn = f"swatfiles/{station}.tmp"
zipfiles[pcpfn] = "IEM COOP %s\n\n\n\n" % (station,)
zipfiles[tmpfn] = "IEM COOP %s\n\n\n\n" % (station,)
for _i, row in df2.iterrows():
zipfiles[pcpfn] += "%s%03i%5.1f\n" % (
row["day"].year,
row["doy"],
row["pcpn"],
)
zipfiles[tmpfn] += ("%s%03i%5.1f%5.1f\n") % (
row["day"].year,
row["doy"],
row["tmax"],
row["tmin"],
)
sio = BytesIO()
with zipfile.ZipFile(sio, "a") as zf:
for fn, fp in zipfiles.items():
zf.writestr(fn, fp)
return sio.getvalue()
def application(environ, start_response):
"""go main go"""
form = parse_formvars(environ)
ctx = {}
ctx["stations"] = get_cgi_stations(form)
if not ctx["stations"]:
start_response(
"500 Internal Server Error", [("Content-type", "text/plain")]
)
return [b"No stations were specified for the request."]
ctx["sts"], ctx["ets"] = get_cgi_dates(form)
ctx["myvars"] = form.getall("vars[]")
# Model specification trumps vars[]
if form.get("model") is not None:
ctx["myvars"] = [form.get("model")]
ctx["what"] = form.get("what", "view")
ctx["delim"] = form.get("delim", "comma")
ctx["inclatlon"] = form.get("gis", "no")
ctx["scenario"] = form.get("scenario", "no")
ctx["hayhoe_scenario"] = form.get("hayhoe_scenario")
ctx["hayhoe_model"] = form.get("hayhoe_model")
ctx["scenario_year"] = 2099
if ctx["scenario"] == "yes":
ctx["scenario_year"] = int(form.get("scenario_year", 2099))
ctx["scenario_sts"], ctx["scenario_ets"] = get_scenario_period(ctx)
# TODO: this code stinks and is likely buggy
headers = []
if (
"apsim" in ctx["myvars"]
or "daycent" in ctx["myvars"]
or "century" in ctx["myvars"]
):
headers.append(("Content-type", "text/plain"))
elif "dndc" not in ctx["myvars"] and ctx["what"] != "excel":
if ctx["what"] == "download":
headers.append(("Content-type", "application/octet-stream"))
dlfn = "changeme.txt"
if len(ctx["stations"]) < 10:
dlfn = "%s.txt" % ("_".join(ctx["stations"]),)
headers.append(
("Content-Disposition", "attachment; filename=%s" % (dlfn,))
)
else:
headers.append(("Content-type", "text/plain"))
elif "dndc" in ctx["myvars"]:
headers.append(("Content-type", "application/octet-stream"))
headers.append(
("Content-Disposition", "attachment; filename=dndc.zip")
)
elif "swat" in ctx["myvars"]:
headers.append(("Content-type", "application/octet-stream"))
headers.append(
("Content-Disposition", "attachment; filename=swatfiles.zip")
)
elif ctx["what"] == "excel":
headers.append(("Content-type", EXL))
headers.append(
("Content-Disposition", "attachment; filename=nwscoop.xlsx")
)
start_response("200 OK", headers)
# OK, now we fret
if "daycent" in ctx["myvars"]:
res = do_daycent(ctx)
elif "century" in ctx["myvars"]:
res = do_century(ctx)
elif "apsim" in ctx["myvars"]:
res = do_apsim(ctx)
elif "dndc" in ctx["myvars"]:
res = do_dndc(ctx)
elif "salus" in ctx["myvars"]:
res = do_salus(ctx)
elif "swat" in ctx["myvars"]:
res = do_swat(ctx)
else:
res = do_simple(ctx)
return [res]
def test_sane_date():
"""Test our sane_date() method"""
assert sane_date(2000, 9, 31) == datetime.date(2000, 9, 30)
assert sane_date(2000, 2, 31) == datetime.date(2000, 2, 29)
assert sane_date(2000, 1, 15) == datetime.date(2000, 1, 15)
|
|
from django.http import Http404
from django.utils.translation import gettext as _
from django.views.generic import DetailView, ListView
from django.views.generic.detail import SingleObjectMixin
from taggit.models import Tag
from ..core.views import PaginatedListView
from .models import Account, Photo, Photoset, User
class PhotosOrderMixin(object):
"""
For pages which list Photos and can change the order they're viewed in.
Can have 'order' in the GET string, with values of 'uploaded' or 'taken'.
Adds an 'order' key to the context_data, with value of 'uploaded'/'taken'.
"""
def get_ordering(self):
args = self.request.GET
if "order" in args and args["order"] == "taken":
return "-taken_time"
else:
return "-post_time"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["order"] = (
"uploaded" if self.get_ordering() == "-post_time" else "taken"
)
return context
def get_queryset(self):
"""Order by -taken_time or -post_time.
If ordering by taken_time, exclude Photos where taken_unknown = True.
"""
queryset = super().get_queryset()
# Not sure why we need to repeat some of this from
# ListView.get_queryset() here, but the UserDetail page, for one,
# wasn't ordering by taken_time without this.
ordering = self.get_ordering()
if ordering:
if ordering == "-taken_time":
# Exclude where we don't know the taken time.
queryset = queryset.filter(taken_unknown=False)
            if isinstance(ordering, str):
                ordering = (ordering,)
queryset = queryset.order_by(*ordering)
return queryset
class HomeView(PhotosOrderMixin, PaginatedListView):
template_name = "flickr/home.html"
paginate_by = 48
queryset = Photo.public_photo_objects
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["account_list"] = Account.objects.all()
return context
def get_queryset(self):
"""
Adding the prefetch_related() to self.queryset caused some tests to
fail for some reason.
"""
queryset = super().get_queryset()
return queryset.prefetch_related("user")
class PhotosetListView(ListView):
template_name = "flickr/photoset_list.html"
queryset = Photoset.objects.all().prefetch_related("primary_photo", "user")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["photoset_list"] = context["object_list"]
context["account_list"] = Account.objects.all()
return context
class SingleUserMixin(SingleObjectMixin):
"""Used for views that need data about a User based on nsid in
the URL, and its Account if it has one.
"""
slug_field = "nsid"
slug_url_kwarg = "nsid"
def get(self, request, *args, **kwargs):
self.object = self.get_object(queryset=User.objects.all())
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["flickr_user"] = self.object
try:
context["account"] = Account.objects.get(user=self.object)
except Account.DoesNotExist:
context["account"] = None
return context
class UserDetailView(PhotosOrderMixin, SingleUserMixin, PaginatedListView):
"""A single Flickr User and its Photos.
The user might have an Account associated with it, or might not.
"""
template_name = "flickr/user_detail.html"
paginate_by = 48
queryset = Photo.public_objects
def get_queryset(self):
"All public Photos from this Account."
queryset = super().get_queryset()
return queryset.filter(user=self.object).prefetch_related("user")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["photo_list"] = context["object_list"]
return context
class PhotoDetailView(DetailView):
"""Show a single Photo. It might be posted by one of the Accounts, or might
be a Photo by someone else, favorited.
"""
model = Photo
slug_field = "flickr_id"
slug_url_kwarg = "flickr_id"
def get_object(self, queryset=None):
"""Do standard DetailView.get_object(), but return 404 if the Photo is
private, OR if the URL's user NSID doesn't match the photo's."""
obj = super().get_object(queryset)
if obj.is_private or obj.user.nsid != self.kwargs["nsid"]:
raise Http404(
_("No %(verbose_name)s found matching the query")
% {"verbose_name": obj._meta.verbose_name}
)
return obj
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["flickr_user"] = context["photo"].user
# We can show favorited Photos; they won't have an associated Account.
try:
context["account"] = Account.objects.get(user=context["flickr_user"])
except Account.DoesNotExist:
context["account"] = None
return context
class TagListView(ListView):
template_name = "flickr/tag_list.html"
context_object_name = "tag_list"
def get_queryset(self):
return Photo.tags.most_common()[:100]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["account_list"] = Account.objects.all()
return context
class TagDetailView(PhotosOrderMixin, SingleObjectMixin, PaginatedListView):
"All Photos with a certain tag from all Accounts"
template_name = "flickr/tag_detail.html"
allow_empty = False
queryset = Photo.public_objects.prefetch_related("user")
def get(self, request, *args, **kwargs):
self.object = self.get_object(queryset=Tag.objects.all())
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["tag"] = self.object
context["account_list"] = Account.objects.all()
context["photo_list"] = context["object_list"]
return context
def get_queryset(self):
"""Show all the public Photos associated with this tag."""
queryset = super().get_queryset()
return queryset.filter(tags__slug__in=[self.object.slug])
class UserTagDetailView(PhotosOrderMixin, SingleUserMixin, PaginatedListView):
"All Photos with a certain Tag from one User"
template_name = "flickr/user_tag_detail.html"
allow_empty = False
queryset = Photo.public_objects.prefetch_related("user")
def get(self, request, *args, **kwargs):
self.tag_object = self.get_tag_object()
return super().get(request, *args, **kwargs)
def get_tag_object(self):
"""Custom method for fetching the Tag."""
try:
obj = Tag.objects.get(slug=self.kwargs["tag_slug"])
except Tag.DoesNotExist:
raise Http404(_("No Tags found matching the query"))
return obj
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["tag"] = self.tag_object
context["photo_list"] = context["object_list"]
return context
def get_queryset(self):
"""Show all the public Photos associated with this user."""
queryset = super().get_queryset()
return queryset.filter(
user=self.object, tags__slug__in=[self.kwargs["tag_slug"]]
)
class UserPhotosetListView(SingleUserMixin, ListView):
template_name = "flickr/user_photoset_list.html"
queryset = Photoset.objects.all().prefetch_related("primary_photo", "user")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["photoset_list"] = context["object_list"]
return context
def get_queryset(self):
"""Show all Photosets associated with this user."""
queryset = super().get_queryset()
return queryset.filter(user=self.object)
class PhotosetDetailView(PhotosOrderMixin, SingleUserMixin, PaginatedListView):
template_name = "flickr/photoset_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# NOTE: photoset.objects.all() will return PRIVATE photos too.
# You probably don't want to do that.
context["photoset"] = self.photoset_object
context["photo_list"] = context["object_list"]
return context
def get_queryset(self):
"""Show all the public Photos in this Photoset."""
self.photoset_object = self.get_photoset_object()
return self.photoset_object.public_photos()
def get_photoset_object(self):
"""Custom method for fetching the Photoset."""
try:
obj = Photoset.objects.get(
user=self.object, flickr_id=self.kwargs["flickr_id"]
)
except Photoset.DoesNotExist:
raise Http404(_("No Photosets found matching the query"))
return obj
|
|
from cloudify.decorators import workflow
from cloudify.workflows import ctx
from cloudify.workflows import tasks as workflow_tasks
from utils import set_state_task
from utils import operation_task
from utils import link_tasks
from utils import CustomContext
from utils import generate_native_node_workflows
from utils import _get_all_nodes
from utils import _get_all_nodes_instances
from utils import _get_all_modified_node_instances
from utils import is_host_node
from workflow import WfStartEvent
from workflow import build_pre_event
# subworkflow 'install' for host 'Compute'
def install_host_compute(ctx, graph, custom_context):
custom_context.register_native_delegate_wf_step('Compute', 'Compute_install')
generate_native_node_workflows(ctx, graph, custom_context, 'install')
# subworkflow 'uninstall' for host 'Compute'
def uninstall_host_compute(ctx, graph, custom_context):
custom_context.register_native_delegate_wf_step('Compute', 'Compute_uninstall')
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
def install_host(ctx, graph, custom_context, compute):
options = {}
options['Compute'] = install_host_compute
options[compute](ctx, graph, custom_context)
def uninstall_host(ctx, graph, custom_context, compute):
options = {}
options['Compute'] = uninstall_host_compute
options[compute](ctx, graph, custom_context)
@workflow
def a4c_install(**kwargs):
graph = ctx.graph_mode()
nodes = _get_all_nodes(ctx)
instances = _get_all_nodes_instances(ctx)
custom_context = CustomContext(ctx, instances, nodes)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started', message=build_pre_event(WfStartEvent('install')))
_a4c_install(ctx, graph, custom_context)
return graph.execute()
@workflow
def a4c_uninstall(**kwargs):
graph = ctx.graph_mode()
nodes = _get_all_nodes(ctx)
instances = _get_all_nodes_instances(ctx)
custom_context = CustomContext(ctx, instances, nodes)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started', message=build_pre_event(WfStartEvent('uninstall')))
_a4c_uninstall(ctx, graph, custom_context)
return graph.execute()
def _a4c_install(ctx, graph, custom_context):
# following code can be pasted in src/test/python/workflows/tasks.py for simulation
custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_install')
custom_context.register_native_delegate_wf_step('Compute', 'Compute_install')
generate_native_node_workflows(ctx, graph, custom_context, 'install')
def _a4c_uninstall(ctx, graph, custom_context):
# following code can be pasted in src/test/python/workflows/tasks.py for simulation
custom_context.register_native_delegate_wf_step('Compute', 'Compute_uninstall')
custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_uninstall')
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
def _get_scaling_group_name_from_node_id(ctx, node_id):
    scaling_groups = ctx.deployment.scaling_groups
    for group_name, scaling_group in scaling_groups.iteritems():
for member in scaling_group['members']:
if member == node_id:
ctx.logger.info("Node {} found in scaling group {}".format(node_id, group_name))
return group_name
return None
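# Worked example of the lookup above (names are hypothetical): with
# ctx.deployment.scaling_groups == {'compute_group': {'members': ['Compute']}}
# a call with node_id='Compute' returns 'compute_group'; a node that is not a
# member of any group returns None.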
@workflow
def a4c_scale(ctx, node_id, delta, scale_compute, **kwargs):
delta = int(delta)
scalable_entity_name = _get_scaling_group_name_from_node_id(ctx, node_id)
scaling_group = ctx.deployment.scaling_groups.get(scalable_entity_name)
if scalable_entity_name:
curr_num_instances = scaling_group['properties']['current_instances']
planned_num_instances = curr_num_instances + delta
scale_id = scalable_entity_name
    else:
        # No scaling group contains this node, so scale the node itself.
        scalable_entity_name = node_id
        scaled_node = ctx.get_node(scalable_entity_name)
if not scaled_node:
raise ValueError("Node {0} doesn't exist".format(scalable_entity_name))
if not is_host_node(scaled_node):
raise ValueError("Node {0} is not a host. This workflow can only scale hosts".format(scalable_entity_name))
if delta == 0:
ctx.logger.info('delta parameter is 0, so no scaling will take place.')
return
curr_num_instances = scaled_node.number_of_instances
planned_num_instances = curr_num_instances + delta
scale_id = scaled_node.id
if planned_num_instances < 1:
        raise ValueError('Provided delta: {0} is illegal. Current number of '
                         'instances of node/group {1} is {2}'
                         .format(delta, scalable_entity_name, curr_num_instances))
modification = ctx.deployment.start_modification({
scale_id: {
'instances': planned_num_instances
}
})
    ctx.logger.info('Deployment modification started. [modification_id={0}]'.format(modification.id))
try:
if delta > 0:
ctx.logger.info('Scaling host/group {0} adding {1} instances'.format(scalable_entity_name, delta))
added_and_related = _get_all_nodes(modification.added)
added = _get_all_modified_node_instances(added_and_related, 'added')
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('scale', 'install')))
custom_context = CustomContext(ctx, added, added_and_related)
install_host(ctx, graph, custom_context, node_id)
try:
graph.execute()
except:
ctx.logger.error('Scale failed. Uninstalling node/group {0}'.format(scalable_entity_name))
graph = ctx.internal.task_graph
for task in graph.tasks_iter():
graph.remove_task(task)
try:
custom_context = CustomContext(ctx, added, added_and_related)
uninstall_host(ctx, graph, custom_context, scalable_entity_name)
graph.execute()
except:
ctx.logger.error('Node {0} uninstallation following scale failure has failed'.format(scalable_entity_name))
raise
else:
ctx.logger.info('Unscaling host/group {0} removing {1} instances'.format(scalable_entity_name, delta))
removed_and_related = _get_all_nodes(modification.removed)
removed = _get_all_modified_node_instances(removed_and_related, 'removed')
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('scale', 'uninstall')))
custom_context = CustomContext(ctx, removed, removed_and_related)
uninstall_host(ctx, graph, custom_context, node_id)
try:
graph.execute()
except:
ctx.logger.error('Unscale failed.')
raise
except:
ctx.logger.warn('Rolling back deployment modification. [modification_id={0}]'.format(modification.id))
try:
modification.rollback()
except:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted '
                            'state. '
                            '[modification_id={0}]'.format(modification.id))
raise
raise
else:
try:
modification.finish()
except:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted '
                            'state. '
                            '[modification_id={0}]'.format(modification.id))
raise
@workflow
def a4c_heal(
ctx,
node_instance_id,
diagnose_value='Not provided',
**kwargs):
"""Reinstalls the whole subgraph of the system topology
The subgraph consists of all the nodes that are hosted in the
failing node's compute and the compute itself.
Additionally it unlinks and establishes appropriate relationships
:param ctx: cloudify context
:param node_id: failing node's id
:param diagnose_value: diagnosed reason of failure
"""
ctx.logger.info("Starting 'heal' workflow on {0}, Diagnosis: {1}"
.format(node_instance_id, diagnose_value))
failing_node = ctx.get_node_instance(node_instance_id)
host_instance_id = failing_node._node_instance.host_id
failing_node_host = ctx.get_node_instance(host_instance_id)
node_id = failing_node_host.node_id
subgraph_node_instances = failing_node_host.get_contained_subgraph()
added_and_related = _get_all_nodes(ctx)
try:
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('heal', 'uninstall')))
custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
uninstall_host(ctx, graph, custom_context, node_id)
graph.execute()
except:
ctx.logger.error('Uninstall while healing failed.')
graph = ctx.internal.task_graph
for task in graph.tasks_iter():
graph.remove_task(task)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('heal', 'install')))
custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
install_host(ctx, graph, custom_context, node_id)
graph.execute()
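# Editorial sketch, not part of the generated workflow: the a4c_heal docstring
# above says the healed subgraph is every node hosted on the failing node's
# compute plus the compute itself. The helper below illustrates that
# containment walk on a plain dict mapping instance id -> parent instance id;
# the function name and the dict shape are hypothetical and only mirror what
# `get_contained_subgraph()` is expected to return.
def _example_contained_subgraph(host_id, contained_in):
    """Return `host_id` plus every instance transitively contained in it.
    `contained_in` is a hypothetical dict: {instance_id: parent_instance_id}.
    """
    subgraph = {host_id}
    changed = True
    while changed:
        changed = False
        for instance_id, parent in contained_in.items():
            if parent in subgraph and instance_id not in subgraph:
                subgraph.add(instance_id)
                changed = True
    return subgraph
# e.g. _example_contained_subgraph('Compute_1',
#                                  {'Tomcat_1': 'Compute_1', 'War_1': 'Tomcat_1'})
# returns set(['Compute_1', 'Tomcat_1', 'War_1'])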
#following code can be pasted in src/test/python/workflows/context.py for simulation
#def _build_nodes(ctx):
#types = []
#types.append('alien.cloudify.aws.nodes.Compute')
#types.append('tosca.nodes.Compute')
#types.append('tosca.nodes.Root')
#node_Compute = _build_node(ctx, 'Compute', types, 1)
#types = []
#types.append('alien.nodes.aws.PublicNetwork')
#types.append('alien.nodes.PublicNetwork')
#types.append('tosca.nodes.Network')
#types.append('tosca.nodes.Root')
#node_NetPub = _build_node(ctx, 'NetPub', types, 1)
#_add_relationship(node_Compute, node_NetPub)
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import time
import os
import cProfile, pstats, StringIO
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.profiler as profiler
from recordio_converter import imagenet_train, imagenet_test
def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'):
conv1 = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=False)
return fluid.layers.batch_norm(input=conv1, act=act)
def shortcut(input, ch_out, stride):
ch_in = input.shape[1] # if args.data_format == 'NCHW' else input.shape[-1]
if ch_in != ch_out:
return conv_bn_layer(input, ch_out, 1, stride, 0, None)
else:
return input
def basicblock(input, ch_out, stride):
short = shortcut(input, ch_out, stride)
conv1 = conv_bn_layer(input, ch_out, 3, stride, 1)
conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, act=None)
return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
def bottleneck(input, ch_out, stride):
short = shortcut(input, ch_out * 4, stride)
conv1 = conv_bn_layer(input, ch_out, 1, stride, 0)
conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1)
conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0, act=None)
return fluid.layers.elementwise_add(x=short, y=conv3, act='relu')
def layer_warp(block_func, input, ch_out, count, stride):
res_out = block_func(input, ch_out, stride)
for i in range(1, count):
res_out = block_func(res_out, ch_out, 1)
return res_out
def resnet_imagenet(input, class_dim, depth=50, data_format='NCHW'):
cfg = {
        18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}
stages, block_func = cfg[depth]
conv1 = conv_bn_layer(input, ch_out=64, filter_size=7, stride=2, padding=3)
pool1 = fluid.layers.pool2d(
input=conv1, pool_type='avg', pool_size=3, pool_stride=2)
res1 = layer_warp(block_func, pool1, 64, stages[0], 1)
res2 = layer_warp(block_func, res1, 128, stages[1], 2)
res3 = layer_warp(block_func, res2, 256, stages[2], 2)
res4 = layer_warp(block_func, res3, 512, stages[3], 2)
pool2 = fluid.layers.pool2d(
input=res4,
pool_size=7,
pool_type='avg',
pool_stride=1,
global_pooling=True)
out = fluid.layers.fc(input=pool2, size=class_dim, act='softmax')
return out
def resnet_cifar10(input, class_dim, depth=32, data_format='NCHW'):
assert (depth - 2) % 6 == 0
n = (depth - 2) // 6
conv1 = conv_bn_layer(
input=input, ch_out=16, filter_size=3, stride=1, padding=1)
res1 = layer_warp(basicblock, conv1, 16, n, 1)
res2 = layer_warp(basicblock, res1, 32, n, 2)
res3 = layer_warp(basicblock, res2, 64, n, 2)
pool = fluid.layers.pool2d(
input=res3, pool_size=8, pool_type='avg', pool_stride=1)
out = fluid.layers.fc(input=pool, size=class_dim, act='softmax')
return out
def get_model(args):
model = resnet_cifar10
if args.data_set == "cifar10":
class_dim = 10
if args.data_format == 'NCHW':
dshape = [3, 32, 32]
else:
dshape = [32, 32, 3]
model = resnet_cifar10
train_reader = paddle.dataset.cifar.train10()
test_reader = paddle.dataset.cifar.test10()
elif args.data_set == "flowers":
class_dim = 102
if args.data_format == 'NCHW':
dshape = [3, 224, 224]
else:
dshape = [224, 224, 3]
model = resnet_imagenet
train_reader = paddle.dataset.flowers.train()
test_reader = paddle.dataset.flowers.test()
elif args.data_set == "imagenet":
class_dim = 1000
if args.data_format == 'NCHW':
dshape = [3, 224, 224]
else:
dshape = [224, 224, 3]
model = resnet_imagenet
if not args.data_path:
raise Exception(
"Must specify --data_path when training with imagenet")
train_reader = imagenet_train(args.data_path)
test_reader = imagenet_test(args.data_path)
if args.use_reader_op:
filelist = [
os.path.join(args.data_path, f) for f in os.listdir(args.data_path)
]
data_file = fluid.layers.open_files(
filenames=filelist,
shapes=[[-1] + dshape, (-1, 1)],
lod_levels=[0, 0],
dtypes=["float32", "int64"],
thread_num=args.gpus,
pass_num=args.pass_num)
data_file = fluid.layers.double_buffer(
fluid.layers.batch(
data_file, batch_size=args.batch_size))
input, label = fluid.layers.read_file(data_file)
else:
input = fluid.layers.data(name='data', shape=dshape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
if args.device == 'CPU' and args.cpus > 1:
places = fluid.layers.get_places(args.cpus)
pd = fluid.layers.ParallelDo(places)
with pd.do():
predict = model(pd.read_input(input), class_dim)
label = pd.read_input(label)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
batch_acc = fluid.layers.accuracy(input=predict, label=label)
pd.write_output(avg_cost)
pd.write_output(batch_acc)
avg_cost, batch_acc = pd()
avg_cost = fluid.layers.mean(avg_cost)
batch_acc = fluid.layers.mean(batch_acc)
else:
predict = model(input, class_dim)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
batch_acc = fluid.layers.accuracy(input=predict, label=label)
inference_program = fluid.default_main_program().clone()
with fluid.program_guard(inference_program):
inference_program = fluid.io.get_inference_program(
target_vars=[batch_acc])
optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
batched_train_reader = paddle.batch(
train_reader if args.no_random else paddle.reader.shuffle(
train_reader, buf_size=5120),
batch_size=args.batch_size * args.gpus,
drop_last=True)
batched_test_reader = paddle.batch(
test_reader, batch_size=args.batch_size, drop_last=True)
return avg_cost, inference_program, optimizer, batched_train_reader,\
batched_test_reader, batch_acc
|
|
"""Blocking and non-blocking HTTP client interfaces.
This module defines a common interface shared by two implementations,
``simple_httpclient`` and ``curl_httpclient``. Applications may either
instantiate their chosen implementation class directly or use the
`AsyncHTTPClient` class from this module, which selects an implementation
that can be overridden with the `AsyncHTTPClient.configure` method.
The default implementation is ``simple_httpclient``, and this is expected
to be suitable for most users' needs. However, some applications may wish
to switch to ``curl_httpclient`` for reasons such as the following:
* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
including support for HTTP proxies and the ability to use a specified
network interface.
* ``curl_httpclient`` is more likely to be compatible with sites that are
not-quite-compliant with the HTTP spec, or sites that use little-exercised
features of HTTP.
* ``curl_httpclient`` is faster.
* ``curl_httpclient`` was the default prior to Tornado 2.0.
Note that if you are using ``curl_httpclient``, it is highly recommended that
you use a recent version of ``libcurl`` and ``pycurl``. Currently the minimum
supported version is 7.18.2, and the recommended version is 7.21.1 or newer.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import time
import weakref
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8
from tornado import httputil, stack_context
from tornado.ioloop import IOLoop
from tornado.util import Configurable
class HTTPClient(object):
"""A blocking HTTP client.
This interface is provided for convenience and testing; most applications
that are running an IOLoop will want to use `AsyncHTTPClient` instead.
Typical usage looks like this::
http_client = httpclient.HTTPClient()
try:
response = http_client.fetch("http://www.google.com/")
print response.body
except httpclient.HTTPError as e:
print "Error:", e
http_client.close()
"""
def __init__(self, async_client_class=None, **kwargs):
self._io_loop = IOLoop()
if async_client_class is None:
async_client_class = AsyncHTTPClient
self._async_client = async_client_class(self._io_loop, **kwargs)
self._closed = False
def __del__(self):
self.close()
def close(self):
"""Closes the HTTPClient, freeing any resources used."""
if not self._closed:
self._async_client.close()
self._io_loop.close()
self._closed = True
def fetch(self, request, **kwargs):
"""Executes a request, returning an `HTTPResponse`.
The request may be either a string URL or an `HTTPRequest` object.
If it is a string, we construct an `HTTPRequest` using any additional
kwargs: ``HTTPRequest(request, **kwargs)``
If an error occurs during the fetch, we raise an `HTTPError`.
"""
response = self._io_loop.run_sync(functools.partial(
self._async_client.fetch, request, **kwargs))
response.rethrow()
return response
class AsyncHTTPClient(Configurable):
"""An non-blocking HTTP client.
Example usage::
def handle_request(response):
if response.error:
print "Error:", response.error
else:
print response.body
http_client = AsyncHTTPClient()
http_client.fetch("http://www.google.com/", handle_request)
The constructor for this class is magic in several respects: It
actually creates an instance of an implementation-specific
subclass, and instances are reused as a kind of pseudo-singleton
(one per `.IOLoop`). The keyword argument ``force_instance=True``
can be used to suppress this singleton behavior. Constructor
arguments other than ``io_loop`` and ``force_instance`` are
deprecated. The implementation subclass as well as arguments to
    its constructor can be set with the static method `configure()`.
"""
@classmethod
def configurable_base(cls):
return AsyncHTTPClient
@classmethod
def configurable_default(cls):
from tornado.simple_httpclient import SimpleAsyncHTTPClient
return SimpleAsyncHTTPClient
@classmethod
def _async_clients(cls):
attr_name = '_async_client_dict_' + cls.__name__
if not hasattr(cls, attr_name):
setattr(cls, attr_name, weakref.WeakKeyDictionary())
return getattr(cls, attr_name)
def __new__(cls, io_loop=None, force_instance=False, **kwargs):
io_loop = io_loop or IOLoop.current()
if io_loop in cls._async_clients() and not force_instance:
return cls._async_clients()[io_loop]
instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
**kwargs)
if not force_instance:
cls._async_clients()[io_loop] = instance
return instance
def initialize(self, io_loop, defaults=None):
self.io_loop = io_loop
self.defaults = dict(HTTPRequest._DEFAULTS)
if defaults is not None:
self.defaults.update(defaults)
def close(self):
"""Destroys this HTTP client, freeing any file descriptors used.
This method is **not needed in normal use** due to the way
that `AsyncHTTPClient` objects are transparently reused.
``close()`` is generally only necessary when either the
`.IOLoop` is also being closed, or the ``force_instance=True``
argument was used when creating the `AsyncHTTPClient`.
No other methods may be called on the `AsyncHTTPClient` after
``close()``.
"""
if self._async_clients().get(self.io_loop) is self:
del self._async_clients()[self.io_loop]
def fetch(self, request, callback=None, **kwargs):
"""Executes a request, asynchronously returning an `HTTPResponse`.
The request may be either a string URL or an `HTTPRequest` object.
If it is a string, we construct an `HTTPRequest` using any additional
kwargs: ``HTTPRequest(request, **kwargs)``
This method returns a `.Future` whose result is an
        `HTTPResponse`. The ``Future`` will raise an `HTTPError` if
the request returned a non-200 response code.
If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
In the callback interface, `HTTPError` is not automatically raised.
Instead, you must check the response's ``error`` attribute or
call its `~HTTPResponse.rethrow` method.
"""
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
# We may modify this (to add Host, Accept-Encoding, etc),
# so make sure we don't modify the caller's object. This is also
# where normal dicts get converted to HTTPHeaders objects.
request.headers = httputil.HTTPHeaders(request.headers)
request = _RequestProxy(request, self.defaults)
future = TracebackFuture()
if callback is not None:
callback = stack_context.wrap(callback)
def handle_future(future):
exc = future.exception()
if isinstance(exc, HTTPError) and exc.response is not None:
response = exc.response
elif exc is not None:
response = HTTPResponse(
request, 599, error=exc,
request_time=time.time() - request.start_time)
else:
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
def handle_response(response):
if response.error:
future.set_exception(response.error)
else:
future.set_result(response)
self.fetch_impl(request, handle_response)
return future
def fetch_impl(self, request, callback):
raise NotImplementedError()
@classmethod
def configure(cls, impl, **kwargs):
"""Configures the `AsyncHTTPClient` subclass to use.
``AsyncHTTPClient()`` actually creates an instance of a subclass.
This method may be called with either a class object or the
fully-qualified name of such a class (or ``None`` to use the default,
        ``SimpleAsyncHTTPClient``).
If additional keyword arguments are given, they will be passed
to the constructor of each subclass instance created. The
keyword argument ``max_clients`` determines the maximum number
of simultaneous `~AsyncHTTPClient.fetch()` operations that can
execute in parallel on each `.IOLoop`. Additional arguments
may be supported depending on the implementation class in use.
Example::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
super(AsyncHTTPClient, cls).configure(impl, **kwargs)
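# A minimal usage sketch of the configuration mechanism documented above: pick
# an implementation once per process with `configure()`, then fetch with the
# callback interface and drive the IOLoop. This is editorial illustration, not
# part of the module; the function name and URL are placeholders, and selecting
# ``curl_httpclient`` assumes ``pycurl`` is installed (omit the configure()
# call to keep the default ``simple_httpclient``).
def _example_async_fetch(url="http://www.google.com/"):
    AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
    io_loop = IOLoop.current()
    def handle_request(response):
        if response.error:
            print("Error:", response.error)
        else:
            print(len(response.body), "bytes fetched")
        io_loop.stop()
    AsyncHTTPClient().fetch(url, handle_request)
    io_loop.start()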
class HTTPRequest(object):
"""HTTP client request object."""
# Default values for HTTPRequest parameters.
# Merged with the values on the request object by AsyncHTTPClient
# implementations.
_DEFAULTS = dict(
connect_timeout=20.0,
request_timeout=20.0,
follow_redirects=True,
max_redirects=5,
use_gzip=True,
proxy_password='',
allow_nonstandard_methods=False,
validate_cert=True)
def __init__(self, url, method="GET", headers=None, body=None,
auth_username=None, auth_password=None, auth_mode=None,
connect_timeout=None, request_timeout=None,
if_modified_since=None, follow_redirects=None,
max_redirects=None, user_agent=None, use_gzip=None,
network_interface=None, streaming_callback=None,
header_callback=None, prepare_curl_callback=None,
proxy_host=None, proxy_port=None, proxy_type=None,
proxy_username=None, proxy_password=None,
allow_nonstandard_methods=None,
validate_cert=None, ca_certs=None,
allow_ipv6=None,
client_key=None, client_cert=None):
r"""All parameters except ``url`` are optional.
:arg string url: URL to fetch
:arg string method: HTTP method, e.g. "GET" or "POST"
:arg headers: Additional HTTP headers to pass on the request
:arg body: HTTP body to pass on the request
:type headers: `~tornado.httputil.HTTPHeaders` or `dict`
:arg string auth_username: Username for HTTP authentication
:arg string auth_password: Password for HTTP authentication
:arg string auth_mode: Authentication mode; default is "basic".
Allowed values are implementation-defined; ``curl_httpclient``
supports "basic" and "digest"; ``simple_httpclient`` only supports
"basic"
:arg float connect_timeout: Timeout for initial connection in seconds
:arg float request_timeout: Timeout for entire request in seconds
:arg if_modified_since: Timestamp for ``If-Modified-Since`` header
:type if_modified_since: `datetime` or `float`
:arg bool follow_redirects: Should redirects be followed automatically
or return the 3xx response?
:arg int max_redirects: Limit for ``follow_redirects``
:arg string user_agent: String to send as ``User-Agent`` header
:arg bool use_gzip: Request gzip encoding from the server
:arg string network_interface: Network interface to use for request.
``curl_httpclient`` only; see note below.
:arg callable streaming_callback: If set, ``streaming_callback`` will
be run with each chunk of data as it is received, and
``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
the final response.
:arg callable header_callback: If set, ``header_callback`` will
be run with each header line as it is received (including the
first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
containing only ``\r\n``. All lines include the trailing newline
characters). ``HTTPResponse.headers`` will be empty in the final
response. This is most useful in conjunction with
``streaming_callback``, because it's the only way to get access to
header data while the request is in progress.
:arg callable prepare_curl_callback: If set, will be called with
a ``pycurl.Curl`` object to allow the application to make additional
``setopt`` calls.
:arg string proxy_host: proxy hostname. To use proxies,
``proxy_host`` and ``proxy_port`` must be set; ``proxy_username`` and
           ``proxy_password`` are optional. Proxies are currently only supported
with ``curl_httpclient``.
:arg int proxy_port: proxy port
:arg string proxy_type: Available options for this are
CURLPROXY_HTTP, CURLPROXY_HTTP_1_0, CURLPROXY_SOCKS4,
CURLPROXY_SOCKS5, CURLPROXY_SOCKS4A and CURLPROXY_SOCKS5_HOSTNAME.
The HTTP type is default.
:arg string proxy_username: HTTP proxy username
:arg string proxy_password: HTTP proxy password
:arg bool allow_nonstandard_methods: Allow unknown values for ``method``
argument?
:arg bool validate_cert: For HTTPS requests, validate the server's
certificate?
:arg string ca_certs: filename of CA certificates in PEM format,
or None to use defaults. See note below when used with
``curl_httpclient``.
:arg bool allow_ipv6: Use IPv6 when available? Default is false in
``simple_httpclient`` and true in ``curl_httpclient``
:arg string client_key: Filename for client SSL key, if any. See
note below when used with ``curl_httpclient``.
:arg string client_cert: Filename for client SSL certificate, if any.
See note below when used with ``curl_httpclient``.
.. note::
When using ``curl_httpclient`` certain options may be
inherited by subsequent fetches because ``pycurl`` does
not allow them to be cleanly reset. This applies to the
``ca_certs``, ``client_key``, ``client_cert``, and
``network_interface`` arguments. If you use these
options, you should pass them on every request (you don't
have to always use the same values, but it's not possible
to mix requests that specify these options with ones that
use the defaults).
.. versionadded:: 3.1
The ``auth_mode`` argument.
"""
# Note that some of these attributes go through property setters
# defined below.
self.headers = headers
if if_modified_since:
self.headers["If-Modified-Since"] = httputil.format_timestamp(
if_modified_since)
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_type = proxy_type
self.proxy_username = proxy_username
self.proxy_password = proxy_password
self.url = url
self.method = method
self.body = body
self.auth_username = auth_username
self.auth_password = auth_password
self.auth_mode = auth_mode
self.connect_timeout = connect_timeout
self.request_timeout = request_timeout
self.follow_redirects = follow_redirects
self.max_redirects = max_redirects
self.user_agent = user_agent
self.use_gzip = use_gzip
self.network_interface = network_interface
self.streaming_callback = streaming_callback
self.header_callback = header_callback
self.prepare_curl_callback = prepare_curl_callback
self.allow_nonstandard_methods = allow_nonstandard_methods
self.validate_cert = validate_cert
self.ca_certs = ca_certs
self.allow_ipv6 = allow_ipv6
self.client_key = client_key
self.client_cert = client_cert
self.start_time = time.time()
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, value):
if value is None:
self._headers = httputil.HTTPHeaders()
else:
self._headers = value
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = utf8(value)
@property
def streaming_callback(self):
return self._streaming_callback
@streaming_callback.setter
def streaming_callback(self, value):
self._streaming_callback = stack_context.wrap(value)
@property
def header_callback(self):
return self._header_callback
@header_callback.setter
def header_callback(self, value):
self._header_callback = stack_context.wrap(value)
@property
def prepare_curl_callback(self):
return self._prepare_curl_callback
@prepare_curl_callback.setter
def prepare_curl_callback(self, value):
self._prepare_curl_callback = stack_context.wrap(value)
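# Editorial sketch of building a request with a few of the options documented
# in `HTTPRequest.__init__` above (headers, timeouts, a streaming_callback).
# The function name, URL, and chunk-recording callback are placeholders; note
# that with a ``streaming_callback`` set, ``HTTPResponse.body`` will be empty
# and the data only arrives through the callback.
def _example_build_request(url="http://www.google.com/"):
    chunk_sizes = []
    def on_chunk(chunk):
        chunk_sizes.append(len(chunk))  # record each chunk size as it arrives
    return HTTPRequest(
        url,
        method="GET",
        headers={"Accept": "text/html"},
        connect_timeout=5.0,
        request_timeout=30.0,
        follow_redirects=True,
        streaming_callback=on_chunk,
    )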
class HTTPResponse(object):
"""HTTP Response object.
Attributes:
* request: HTTPRequest object
* code: numeric HTTP status code, e.g. 200 or 404
* reason: human-readable reason phrase describing the status code
(with curl_httpclient, this is a default value rather than the
server's actual response)
* headers: `tornado.httputil.HTTPHeaders` object
* effective_url: final location of the resource after following any
redirects
* buffer: ``cStringIO`` object for response body
* body: response body as string (created on demand from ``self.buffer``)
* error: Exception object, if any
* request_time: seconds from request start to finish
* time_info: dictionary of diagnostic timing information from the request.
Available data are subject to change, but currently uses timings
available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
plus ``queue``, which is the delay (if any) introduced by waiting for
a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
"""
def __init__(self, request, code, headers=None, buffer=None,
effective_url=None, error=None, request_time=None,
time_info=None, reason=None):
if isinstance(request, _RequestProxy):
self.request = request.request
else:
self.request = request
self.code = code
self.reason = reason or httputil.responses.get(code, "Unknown")
if headers is not None:
self.headers = headers
else:
self.headers = httputil.HTTPHeaders()
self.buffer = buffer
self._body = None
if effective_url is None:
self.effective_url = request.url
else:
self.effective_url = effective_url
if error is None:
if self.code < 200 or self.code >= 300:
self.error = HTTPError(self.code, response=self)
else:
self.error = None
else:
self.error = error
self.request_time = request_time
self.time_info = time_info or {}
def _get_body(self):
if self.buffer is None:
return None
elif self._body is None:
self._body = self.buffer.getvalue()
return self._body
body = property(_get_body)
def rethrow(self):
"""If there was an error on the request, raise an `HTTPError`."""
if self.error:
raise self.error
def __repr__(self):
args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
return "%s(%s)" % (self.__class__.__name__, args)
class HTTPError(Exception):
"""Exception thrown for an unsuccessful HTTP request.
Attributes:
    * ``code`` - HTTP integer error code, e.g. 404. Error code 599 is
used when no HTTP response was received, e.g. for a timeout.
* ``response`` - `HTTPResponse` object, if any.
Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
and you can look at ``error.response.headers['Location']`` to see the
destination of the redirect.
"""
def __init__(self, code, message=None, response=None):
self.code = code
message = message or httputil.responses.get(code, "Unknown")
self.response = response
Exception.__init__(self, "HTTP %d: %s" % (self.code, message))
class _RequestProxy(object):
"""Combines an object with a dictionary of defaults.
Used internally by AsyncHTTPClient implementations.
"""
def __init__(self, request, defaults):
self.request = request
self.defaults = defaults
def __getattr__(self, name):
request_attr = getattr(self.request, name)
if request_attr is not None:
return request_attr
elif self.defaults is not None:
return self.defaults.get(name, None)
else:
return None
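# Editorial sketch of the fallthrough behaviour `_RequestProxy` describes:
# attributes set on the request win, anything left as ``None`` falls back to
# the defaults dict (here a hypothetical subset of ``HTTPRequest._DEFAULTS``).
def _example_request_proxy():
    request = HTTPRequest("http://example.com/", request_timeout=60.0)
    proxy = _RequestProxy(request, dict(connect_timeout=20.0,
                                        request_timeout=20.0))
    assert proxy.request_timeout == 60.0  # explicitly set on the request
    assert proxy.connect_timeout == 20.0  # filled in from the defaults
    return proxy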
def main():
from tornado.options import define, options, parse_command_line
define("print_headers", type=bool, default=False)
define("print_body", type=bool, default=True)
define("follow_redirects", type=bool, default=True)
define("validate_cert", type=bool, default=True)
args = parse_command_line()
client = HTTPClient()
for arg in args:
try:
response = client.fetch(arg,
follow_redirects=options.follow_redirects,
validate_cert=options.validate_cert,
)
except HTTPError as e:
if e.response is not None:
response = e.response
else:
raise
if options.print_headers:
print(response.headers)
if options.print_body:
print(response.body)
client.close()
if __name__ == "__main__":
main()
|
|
"""
Tests for the blaze interface to the pipeline api.
"""
from __future__ import division
from collections import OrderedDict
from datetime import timedelta, time
from itertools import product, chain
import warnings
import blaze as bz
from datashape import dshape, var, Record
from nose_parameterized import parameterized
import numpy as np
from numpy.testing.utils import assert_array_almost_equal
from odo import odo
import pandas as pd
from pandas.util.testing import assert_frame_equal
from toolz import keymap, valmap, concatv
from toolz.curried import operator as op
from zipline.assets.synthetic import make_simple_equity_info
from zipline.errors import UnsupportedPipelineOutput
from zipline.pipeline import Pipeline, CustomFactor
from zipline.pipeline.data import DataSet, BoundColumn, Column
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.loaders.blaze import (
from_blaze,
BlazeLoader,
NoMetaDataWarning,
)
from zipline.pipeline.loaders.blaze.core import (
ExprData,
NonPipelineField,
)
from zipline.testing import (
ZiplineTestCase,
parameter_space,
tmp_asset_finder,
)
from zipline.testing.fixtures import WithAssetFinder
from zipline.testing.predicates import assert_equal, assert_isidentical
from zipline.utils.numpy_utils import float64_dtype, int64_dtype
nameof = op.attrgetter('name')
dtypeof = op.attrgetter('dtype')
asset_infos = (
(make_simple_equity_info(
tuple(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
(make_simple_equity_info(
tuple(map(ord, 'ABCD')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
)
simple_asset_info = asset_infos[0][0]
with_extra_sid = parameterized.expand(asset_infos)
with_ignore_sid = parameterized.expand(
product(chain.from_iterable(asset_infos), [True, False])
)
def _utc_localize_index_level_0(df):
"""``tz_localize`` the first level of a multiindexed dataframe to utc.
Mutates df in place.
"""
idx = df.index
df.index = pd.MultiIndex.from_product(
(idx.levels[0].tz_localize('utc'), idx.levels[1]),
names=idx.names,
)
return df
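# Small self-contained illustration of the helper above (editorial, not used by
# the tests): level 0 of the MultiIndex picks up a UTC timezone while the sid
# level is left untouched. The frame built here is a throwaway example.
def _example_utc_localize():
    idx = pd.MultiIndex.from_product(
        (pd.date_range('2014-01-01', '2014-01-02'), [65, 66]),
        names=['timestamp', 'sid'],
    )
    df = pd.DataFrame({'value': range(4)}, index=idx)
    localized = _utc_localize_index_level_0(df)
    assert str(localized.index.levels[0].tz) == 'UTC'
    return localized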
class BlazeToPipelineTestCase(WithAssetFinder, ZiplineTestCase):
START_DATE = pd.Timestamp(0)
END_DATE = pd.Timestamp('2015')
@classmethod
def init_class_fixtures(cls):
super(BlazeToPipelineTestCase, cls).init_class_fixtures()
cls.dates = dates = pd.date_range('2014-01-01', '2014-01-03')
dates = cls.dates.repeat(3)
cls.df = df = pd.DataFrame({
'sid': cls.ASSET_FINDER_EQUITY_SIDS * 3,
'value': (0., 1., 2., 1., 2., 3., 2., 3., 4.),
'int_value': (0, 1, 2, 1, 2, 3, 2, 3, 4),
'asof_date': dates,
'timestamp': dates,
})
cls.dshape = dshape("""
var * {
sid: ?int64,
value: ?float64,
int_value: ?int64,
asof_date: datetime,
timestamp: datetime
}
""")
cls.macro_df = df[df.sid == 65].drop('sid', axis=1)
dshape_ = OrderedDict(cls.dshape.measure.fields)
del dshape_['sid']
cls.macro_dshape = var * Record(dshape_)
cls.garbage_loader = BlazeLoader()
cls.missing_values = {'int_value': 0}
cls.value_dshape = dshape("""var * {
sid: ?int64,
value: float64,
asof_date: datetime,
timestamp: datetime,
}""")
def test_tabular(self):
name = 'expr'
expr = bz.data(self.df, name=name, dshape=self.dshape)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
self.assertEqual(ds.__name__, name)
self.assertTrue(issubclass(ds, DataSet))
self.assertIs(ds.value.dtype, float64_dtype)
self.assertIs(ds.int_value.dtype, int64_dtype)
self.assertTrue(np.isnan(ds.value.missing_value))
self.assertEqual(ds.int_value.missing_value, 0)
# test memoization
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
),
ds,
)
def test_column(self):
exprname = 'expr'
expr = bz.data(self.df, name=exprname, dshape=self.dshape)
value = from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
self.assertEqual(value.name, 'value')
self.assertIsInstance(value, BoundColumn)
self.assertIs(value.dtype, float64_dtype)
# test memoization
self.assertIs(
from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
),
value,
)
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
).value,
value,
)
# test the walk back up the tree
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
),
value.dataset,
)
self.assertEqual(value.dataset.__name__, exprname)
def test_missing_asof(self):
expr = bz.data(
self.df.loc[:, ['sid', 'value', 'timestamp']],
name='expr',
dshape="""var * {
sid: int64,
value: float64,
timestamp: datetime,
}""",
)
with self.assertRaises(TypeError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
)
self.assertIn("'asof_date'", str(e.exception))
self.assertIn(repr(str(expr.dshape.measure)), str(e.exception))
def test_missing_timestamp(self):
expr = bz.data(
self.df.loc[:, ['sid', 'value', 'asof_date']],
name='expr',
dshape="""var * {
sid: int64,
value: float64,
asof_date: datetime,
}""",
)
loader = BlazeLoader()
from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
)
self.assertEqual(len(loader), 1)
exprdata, = loader.values()
assert_isidentical(
exprdata.expr,
bz.transform(expr, timestamp=expr.asof_date),
)
def test_from_blaze_no_resources_dataset_expr(self):
expr = bz.symbol('expr', self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
assert_equal(
str(e.exception),
'no resources provided to compute expr',
)
@parameter_space(metadata={'deltas', 'checkpoints'})
def test_from_blaze_no_resources_metadata_expr(self, metadata):
expr = bz.data(self.df, name='expr', dshape=self.dshape)
metadata_expr = bz.symbol('metadata', self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
**{metadata: metadata_expr}
)
assert_equal(
str(e.exception),
'no resources provided to compute %s' % metadata,
)
def test_from_blaze_mixed_resources_dataset_expr(self):
expr = bz.data(self.df, name='expr', dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
resources={expr: self.df},
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
assert_equal(
str(e.exception),
'explicit and implicit resources provided to compute expr',
)
@parameter_space(metadata={'deltas', 'checkpoints'})
def test_from_blaze_mixed_resources_metadata_expr(self, metadata):
expr = bz.symbol('expr', self.dshape)
metadata_expr = bz.data(self.df, name=metadata, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
resources={metadata_expr: self.df},
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
**{metadata: metadata_expr}
)
assert_equal(
str(e.exception),
'explicit and implicit resources provided to compute %s' %
metadata,
)
@parameter_space(deltas={True, False}, checkpoints={True, False})
def test_auto_metadata(self, deltas, checkpoints):
select_level = op.getitem(('ignore', 'raise'))
m = {'ds': self.df}
if deltas:
            m['ds_deltas'] = pd.DataFrame(columns=self.df.columns)
if checkpoints:
            m['ds_checkpoints'] = pd.DataFrame(columns=self.df.columns)
expr = bz.data(
m,
dshape=var * Record((k, self.dshape.measure) for k in m),
)
loader = BlazeLoader()
ds = from_blaze(
expr.ds,
loader=loader,
missing_values=self.missing_values,
no_deltas_rule=select_level(deltas),
no_checkpoints_rule=select_level(checkpoints),
)
self.assertEqual(len(loader), 1)
exprdata = loader[ds]
self.assertTrue(exprdata.expr.isidentical(expr.ds))
if deltas:
self.assertTrue(exprdata.deltas.isidentical(expr.ds_deltas))
else:
self.assertIsNone(exprdata.deltas)
if checkpoints:
self.assertTrue(
exprdata.checkpoints.isidentical(expr.ds_checkpoints),
)
else:
self.assertIsNone(exprdata.checkpoints)
@parameter_space(deltas={True, False}, checkpoints={True, False})
def test_auto_metadata_fail_warn(self, deltas, checkpoints):
select_level = op.getitem(('ignore', 'warn'))
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
loader = BlazeLoader()
expr = bz.data(self.df, dshape=self.dshape)
from_blaze(
expr,
loader=loader,
no_deltas_rule=select_level(deltas),
no_checkpoints_rule=select_level(checkpoints),
missing_values=self.missing_values,
)
self.assertEqual(len(ws), deltas + checkpoints)
for w in ws:
w = w.message
self.assertIsInstance(w, NoMetaDataWarning)
self.assertIn(str(expr), str(w))
@parameter_space(deltas={True, False}, checkpoints={True, False})
def test_auto_metadata_fail_raise(self, deltas, checkpoints):
if not (deltas or checkpoints):
# not a real case
return
select_level = op.getitem(('ignore', 'raise'))
loader = BlazeLoader()
expr = bz.data(self.df, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=loader,
no_deltas_rule=select_level(deltas),
no_checkpoints_rule=select_level(checkpoints),
)
self.assertIn(str(expr), str(e.exception))
def test_non_pipeline_field(self):
expr = bz.data(
[],
dshape="""
var * {
a: complex,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(
object.__getattribute__(ds, 'a'),
NonPipelineField,
)
def test_cols_with_all_missing_vals(self):
"""
Tests that when there is no known data, we get output where the
columns have the right dtypes and the right missing values filled in.
input (self.df):
Empty DataFrame
Columns: [sid, float_value, str_value, int_value, bool_value, dt_value,
asof_date, timestamp]
Index: []
output (expected)
str_value float_value int_value
2014-01-01 Equity(65 [A]) None NaN 0
Equity(66 [B]) None NaN 0
Equity(67 [C]) None NaN 0
2014-01-02 Equity(65 [A]) None NaN 0
Equity(66 [B]) None NaN 0
Equity(67 [C]) None NaN 0
2014-01-03 Equity(65 [A]) None NaN 0
Equity(66 [B]) None NaN 0
Equity(67 [C]) None NaN 0
dt_value bool_value
2014-01-01 Equity(65 [A]) NaT False
Equity(66 [B]) NaT False
Equity(67 [C]) NaT False
2014-01-02 Equity(65 [A]) NaT False
Equity(66 [B]) NaT False
Equity(67 [C]) NaT False
2014-01-03 Equity(65 [A]) NaT False
Equity(66 [B]) NaT False
Equity(67 [C]) NaT False
"""
df = pd.DataFrame(columns=['sid', 'float_value', 'str_value',
'int_value', 'bool_value', 'dt_value',
'asof_date', 'timestamp'])
expr = bz.data(
df,
dshape="""
var * {
sid: int64,
float_value: float64,
str_value: string,
int_value: int64,
bool_value: bool,
dt_value: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
fields = OrderedDict(expr.dshape.measure.fields)
expected = pd.DataFrame({
"str_value": np.array([None,
None,
None,
None,
None,
None,
None,
None,
None],
dtype='object'),
"float_value": np.array([np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN],
dtype='float64'),
"int_value": np.array([0,
0,
0,
0,
0,
0,
0,
0,
0],
dtype='int64'),
"bool_value": np.array([False,
False,
False,
False,
False,
False,
False,
False,
False],
dtype='bool'),
"dt_value": [pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT],
},
columns=['str_value', 'float_value', 'int_value', 'bool_value',
'dt_value'],
index=pd.MultiIndex.from_product(
(self.dates, self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
))
)
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('float_value', 'str_value', 'int_value', 'bool_value',
'dt_value'),
)
def test_cols_with_some_missing_vals(self):
"""
Tests the following:
1) Forward filling replaces missing values correctly for the data
types supported in pipeline.
2) We don't forward fill when the missing value is the actual value
we got for a date in the case of int/bool columns.
3) We get the correct type of missing value in the output.
input (self.df):
asof_date bool_value dt_value float_value int_value sid
0 2014-01-01 True 2011-01-01 0 1 65
1 2014-01-03 True 2011-01-02 1 2 66
2 2014-01-01 True 2011-01-03 2 3 67
3 2014-01-02 False NaT NaN 0 67
str_value timestamp
0 a 2014-01-01
1 b 2014-01-03
2 c 2014-01-01
3 None 2014-01-02
output (expected)
str_value float_value int_value bool_value
2014-01-01 Equity(65 [A]) a 0 1 True
Equity(66 [B]) None NaN 0 False
Equity(67 [C]) c 2 3 True
2014-01-02 Equity(65 [A]) a 0 1 True
Equity(66 [B]) None NaN 0 False
Equity(67 [C]) c 2 0 False
2014-01-03 Equity(65 [A]) a 0 1 True
Equity(66 [B]) b 1 2 True
Equity(67 [C]) c 2 0 False
dt_value
2014-01-01 Equity(65 [A]) 2011-01-01
Equity(66 [B]) NaT
Equity(67 [C]) 2011-01-03
2014-01-02 Equity(65 [A]) 2011-01-01
Equity(66 [B]) NaT
Equity(67 [C]) 2011-01-03
2014-01-03 Equity(65 [A]) 2011-01-01
Equity(66 [B]) 2011-01-02
Equity(67 [C]) 2011-01-03
"""
dates = (self.dates[0], self.dates[-1], self.dates[0], self.dates[1])
df = pd.DataFrame({
'sid': self.ASSET_FINDER_EQUITY_SIDS[:-1] +
(self.ASSET_FINDER_EQUITY_SIDS[-1],)*2,
'float_value': (0., 1., 2., np.NaN),
'str_value': ("a", "b", "c", None),
'int_value': (1, 2, 3, 0),
'bool_value': (True, True, True, False),
'dt_value': (pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.NaT),
'asof_date': dates,
'timestamp': dates,
})
expr = bz.data(
df,
dshape="""
var * {
sid: int64,
float_value: float64,
str_value: string,
int_value: int64,
bool_value: bool,
dt_value: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
fields = OrderedDict(expr.dshape.measure.fields)
expected = pd.DataFrame({
"str_value": np.array(["a",
None,
"c",
"a",
None,
"c",
"a",
"b",
"c"],
dtype='object'),
"float_value": np.array([0,
np.NaN,
2,
0,
np.NaN,
2,
0,
1,
2],
dtype='float64'),
"int_value": np.array([1,
0,
3,
1,
0,
0,
1,
2,
0],
dtype='int64'),
"bool_value": np.array([True,
False,
True,
True,
False,
False,
True,
True,
False],
dtype='bool'),
"dt_value": [pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
},
columns=['str_value', 'float_value', 'int_value', 'bool_value',
'dt_value'],
index=pd.MultiIndex.from_product(
(self.dates, self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
))
)
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('float_value', 'str_value', 'int_value', 'bool_value',
'dt_value'),
)
def test_complex_expr(self):
expr = bz.data(self.df, dshape=self.dshape, name='expr')
# put an Add in the table
expr_with_add = bz.transform(expr, value=expr.value + 1)
# test that we can have complex expressions with no metadata
from_blaze(
expr_with_add,
deltas=None,
checkpoints=None,
loader=self.garbage_loader,
missing_values=self.missing_values,
no_checkpoints_rule='ignore',
)
with self.assertRaises(TypeError) as e:
# test that we cannot create a single column from a non field
from_blaze(
expr.value + 1, # put an Add in the column
deltas=None,
checkpoints=None,
loader=self.garbage_loader,
missing_values=self.missing_values,
no_checkpoints_rule='ignore',
)
assert_equal(
str(e.exception),
"expression 'expr.value + 1' was array-like but not a simple field"
" of some larger table",
)
deltas = bz.data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
name='deltas',
)
checkpoints = bz.data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
name='checkpoints',
)
# test that we can have complex expressions with explicit metadata
from_blaze(
expr_with_add,
deltas=deltas,
checkpoints=checkpoints,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
with self.assertRaises(TypeError) as e:
# test that we cannot create a single column from a non field
# even with explicit metadata
from_blaze(
expr.value + 1,
deltas=deltas,
checkpoints=checkpoints,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
assert_equal(
str(e.exception),
"expression 'expr.value + 1' was array-like but not a simple field"
" of some larger table",
)
def _test_id(self, df, dshape, expected, finder, add):
expr = bz.data(df, name='expr', dshape=dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
p = Pipeline()
for a in add:
p.add(getattr(ds, a).latest, a)
dates = self.dates
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
assert_frame_equal(
result.sort_index(axis=1),
_utc_localize_index_level_0(expected.sort_index(axis=1)),
check_dtype=False,
)
def _test_id_macro(self, df, dshape, expected, finder, add, dates=None):
if dates is None:
dates = self.dates
expr = bz.data(df, name='expr', dshape=dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
p = Pipeline()
macro_inputs = []
for column_name in add:
column = getattr(ds, column_name)
macro_inputs.append(column)
with self.assertRaises(UnsupportedPipelineOutput):
# Single column output terms cannot be added to a pipeline.
p.add(column.latest, column_name)
class UsesMacroInputs(CustomFactor):
inputs = macro_inputs
window_length = 1
def compute(self, today, assets, out, *inputs):
e = expected.loc[today]
for i, input_ in enumerate(inputs):
# Each macro input should only have one column.
assert input_.shape == (self.window_length, 1)
assert_equal(input_[0, 0], e[i])
# Run the pipeline with our custom factor. Assertions about the
# expected macro data are made in the `compute` function of our custom
# factor above.
p.add(UsesMacroInputs(), 'uses_macro_inputs')
engine = SimplePipelineEngine(loader, dates, finder)
engine.run_pipeline(p, dates[0], dates[-1])
def test_custom_query_time_tz(self):
df = self.df.copy()
df['timestamp'] = (
pd.DatetimeIndex(df['timestamp'], tz='EST') +
timedelta(hours=8, minutes=44)
).tz_convert('utc').tz_localize(None)
df.ix[3:5, 'timestamp'] = pd.Timestamp('2014-01-01 13:45')
expr = bz.data(df, name='expr', dshape=self.dshape)
loader = BlazeLoader(data_query_time=time(8, 45), data_query_tz='EST')
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
p = Pipeline()
p.add(ds.value.latest, 'value')
p.add(ds.int_value.latest, 'int_value')
dates = self.dates
result = SimplePipelineEngine(
loader,
dates,
self.asset_finder,
).run_pipeline(p, dates[0], dates[-1])
expected = df.drop('asof_date', axis=1)
expected['timestamp'] = expected['timestamp'].dt.normalize().astype(
'datetime64[ns]',
).dt.tz_localize('utc')
expected.ix[3:5, 'timestamp'] += timedelta(days=1)
expected.set_index(['timestamp', 'sid'], inplace=True)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
self.asset_finder.retrieve_all(expected.index.levels[1]),
))
assert_frame_equal(result, expected, check_dtype=False)
def test_id(self):
"""
input (self.df):
asof_date sid timestamp int_value value
0 2014-01-01 65 2014-01-01 0 0
1 2014-01-01 66 2014-01-01 1 1
2 2014-01-01 67 2014-01-01 2 2
3 2014-01-02 65 2014-01-02 1 1
4 2014-01-02 66 2014-01-02 2 2
5 2014-01-02 67 2014-01-02 3 3
6 2014-01-03 65 2014-01-03 2 2
7 2014-01-03 66 2014-01-03 3 3
8 2014-01-03 67 2014-01-03 4 4
output (expected)
int_value value
2014-01-01 Equity(65 [A]) 0 0
Equity(66 [B]) 1 1
Equity(67 [C]) 2 2
2014-01-02 Equity(65 [A]) 1 1
Equity(66 [B]) 2 2
Equity(67 [C]) 3 3
2014-01-03 Equity(65 [A]) 2 2
Equity(66 [B]) 3 3
Equity(67 [C]) 4 4
"""
expected = self.df.drop('asof_date', axis=1).set_index(
['timestamp', 'sid'],
)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
self.asset_finder.retrieve_all(expected.index.levels[1]),
))
self._test_id(
self.df, self.dshape, expected, self.asset_finder,
('int_value', 'value',)
)
def test_id_with_asof_date(self):
"""
input (self.df):
asof_date sid timestamp int_value value
0 2014-01-01 65 2014-01-01 0 0
1 2014-01-01 66 2014-01-01 1 1
2 2014-01-01 67 2014-01-01 2 2
3 2014-01-02 65 2014-01-02 1 1
4 2014-01-02 66 2014-01-02 2 2
5 2014-01-02 67 2014-01-02 3 3
6 2014-01-03 65 2014-01-03 2 2
7 2014-01-03 66 2014-01-03 3 3
8 2014-01-03 67 2014-01-03 4 4
output (expected)
asof_date
2014-01-01 Equity(65 [A]) 2014-01-01
Equity(66 [B]) 2014-01-01
Equity(67 [C]) 2014-01-01
2014-01-02 Equity(65 [A]) 2014-01-02
Equity(66 [B]) 2014-01-02
Equity(67 [C]) 2014-01-02
2014-01-03 Equity(65 [A]) 2014-01-03
Equity(66 [B]) 2014-01-03
Equity(67 [C]) 2014-01-03
"""
expected = self.df.drop(['value', 'int_value'], axis=1).set_index(
['timestamp', 'sid'],
)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
self.asset_finder.retrieve_all(expected.index.levels[1]),
))
self._test_id(
self.df, self.dshape, expected, self.asset_finder,
('asof_date',)
)
def test_id_ffill_out_of_window(self):
"""
input (df):
asof_date timestamp sid other value
0 2013-12-22 2013-12-22 65 0 0
1 2013-12-22 2013-12-22 66 NaN 1
2 2013-12-22 2013-12-22 67 2 NaN
3 2013-12-23 2013-12-23 65 NaN 1
4 2013-12-23 2013-12-23 66 2 NaN
5 2013-12-23 2013-12-23 67 3 3
6 2013-12-24 2013-12-24 65 2 NaN
7 2013-12-24 2013-12-24 66 3 3
8 2013-12-24 2013-12-24 67 NaN 4
output (expected):
other value
2014-01-01 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
2014-01-02 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
2014-01-03 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
"""
dates = self.dates.repeat(3) - timedelta(days=10)
df = pd.DataFrame({
'sid': self.ASSET_FINDER_EQUITY_SIDS * 3,
'value': (0, 1, np.nan, 1, np.nan, 3, np.nan, 3, 4),
'other': (0, np.nan, 2, np.nan, 2, 3, 2, 3, np.nan),
'asof_date': dates,
'timestamp': dates,
})
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
np.array([[2, 1],
[3, 3],
[3, 4],
[2, 1],
[3, 3],
[3, 4],
[2, 1],
[3, 3],
[3, 4]]),
columns=['other', 'value'],
index=pd.MultiIndex.from_product(
(self.dates, self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
)),
),
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'other'),
)
def test_id_multiple_columns(self):
"""
input (df):
asof_date sid timestamp value other
0 2014-01-01 65 2014-01-01 0 1
1 2014-01-01 66 2014-01-01 1 2
2 2014-01-01 67 2014-01-01 2 3
3 2014-01-02 65 2014-01-02 1 2
4 2014-01-02 66 2014-01-02 2 3
5 2014-01-02 67 2014-01-02 3 4
6 2014-01-03 65 2014-01-03 2 3
7 2014-01-03 66 2014-01-03 3 4
8 2014-01-03 67 2014-01-03 4 5
output (expected):
value other
2014-01-01 Equity(65 [A]) 0 1
Equity(66 [B]) 1 2
Equity(67 [C]) 2 3
2014-01-02 Equity(65 [A]) 1 2
Equity(66 [B]) 2 3
Equity(67 [C]) 3 4
2014-01-03 Equity(65 [A]) 2 3
Equity(66 [B]) 3 4
Equity(67 [C]) 4 5
"""
df = self.df.copy()
df['other'] = df.value + 1
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
expected = df.drop('asof_date', axis=1).set_index(
['timestamp', 'sid'],
).sort_index(axis=1)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
self.asset_finder.retrieve_all(expected.index.levels[1]),
))
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'int_value', 'other'),
)
def test_id_macro_dataset(self):
"""
input (self.macro_df)
asof_date timestamp value
0 2014-01-01 2014-01-01 0
3 2014-01-02 2014-01-02 1
6 2014-01-03 2014-01-03 2
output (expected):
value
2014-01-01 0
2014-01-02 1
2014-01-03 2
"""
expected = pd.DataFrame(
data=[[0],
[1],
[2]],
columns=['value'],
index=self.dates,
)
self._test_id_macro(
self.macro_df,
self.macro_dshape,
expected,
self.asset_finder,
('value',),
)
def test_id_ffill_out_of_window_macro_dataset(self):
"""
input (df):
asof_date timestamp other value
0 2013-12-22 2013-12-22 NaN 0
1 2013-12-23 2013-12-23 1 NaN
2 2013-12-24 2013-12-24 NaN NaN
output (expected):
other value
2014-01-01 1 0
2014-01-02 1 0
2014-01-03 1 0
"""
dates = self.dates - timedelta(days=10)
df = pd.DataFrame({
'value': (0, np.nan, np.nan),
'other': (np.nan, 1, np.nan),
'asof_date': dates,
'timestamp': dates,
})
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
data=[[0, 1],
[0, 1],
[0, 1]],
columns=['other', 'value'],
index=self.dates,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'other'),
)
def test_id_macro_dataset_multiple_columns(self):
"""
input (df):
asof_date timestamp other value
0 2014-01-01 2014-01-01 1 0
3 2014-01-02 2014-01-02 2 1
6 2014-01-03 2014-01-03 3 2
output (expected):
other value
2014-01-01 1 0
2014-01-02 2 1
2014-01-03 3 2
"""
df = self.macro_df.copy()
df['other'] = df.value + 1
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected = pd.DataFrame(
data=[[0, 1],
[1, 2],
[2, 3]],
columns=['value', 'other'],
index=self.dates,
dtype=np.float64,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
finder,
('value', 'other'),
)
def test_id_take_last_in_group(self):
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'sid', 'other', 'value'],
data=[
[T('2014-01-01'), T('2014-01-01 00'), 65, 0, 0],
[T('2014-01-01'), T('2014-01-01 01'), 65, 1, np.nan],
[T('2014-01-01'), T('2014-01-01 00'), 66, np.nan, np.nan],
[T('2014-01-01'), T('2014-01-01 01'), 66, np.nan, 1],
[T('2014-01-01'), T('2014-01-01 00'), 67, 2, np.nan],
[T('2014-01-01'), T('2014-01-01 01'), 67, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 00'), 65, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 01'), 65, np.nan, 1],
[T('2014-01-02'), T('2014-01-02 00'), 66, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 01'), 66, 2, np.nan],
[T('2014-01-02'), T('2014-01-02 00'), 67, 3, 3],
[T('2014-01-02'), T('2014-01-02 01'), 67, 3, 3],
[T('2014-01-03'), T('2014-01-03 00'), 65, 2, np.nan],
[T('2014-01-03'), T('2014-01-03 01'), 65, 2, np.nan],
[T('2014-01-03'), T('2014-01-03 00'), 66, 3, 3],
[T('2014-01-03'), T('2014-01-03 01'), 66, np.nan, np.nan],
[T('2014-01-03'), T('2014-01-03 00'), 67, np.nan, np.nan],
[T('2014-01-03'), T('2014-01-03 01'), 67, np.nan, 4],
],
)
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
columns=['other', 'value'],
data=[
[1, 0], # 2014-01-01 Equity(65 [A])
[np.nan, 1], # Equity(66 [B])
[2, np.nan], # Equity(67 [C])
[1, 1], # 2014-01-02 Equity(65 [A])
[2, 1], # Equity(66 [B])
[3, 3], # Equity(67 [C])
[2, 1], # 2014-01-03 Equity(65 [A])
[3, 3], # Equity(66 [B])
[3, 3], # Equity(67 [C])
],
index=pd.MultiIndex.from_product(
(self.dates, self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
)),
),
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'other'),
)
def test_id_take_last_in_group_macro(self):
"""
output (expected):
other value
2014-01-01 NaN 1
2014-01-02 1 2
2014-01-03 2 2
"""
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'other', 'value'],
data=[
[T('2014-01-01'), T('2014-01-01 00'), np.nan, 1],
[T('2014-01-01'), T('2014-01-01 01'), np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 00'), 1, np.nan],
[T('2014-01-02'), T('2014-01-02 01'), np.nan, 2],
[T('2014-01-03'), T('2014-01-03 00'), 2, np.nan],
[T('2014-01-03'), T('2014-01-03 01'), 3, 3],
],
)
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
data=[[np.nan, 1], # 2014-01-01
[1, 2], # 2014-01-02
[2, 2]], # 2014-01-03
columns=['other', 'value'],
index=self.dates,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
self.asset_finder,
('other', 'value'),
)
def _run_pipeline(self,
expr,
deltas,
checkpoints,
expected_views,
expected_output,
finder,
calendar,
start,
end,
window_length,
compute_fn,
apply_deltas_adjustments=True):
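        # Shared driver for the deltas/checkpoints tests below: build a blaze
        # dataset with from_blaze, run it through a single-column CustomFactor
        # whose compute() asserts that each window equals expected_views[today],
        # then compare the pipeline result against expected_output.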
loader = BlazeLoader()
ds = from_blaze(
expr,
deltas,
checkpoints,
apply_deltas_adjustments=apply_deltas_adjustments,
loader=loader,
no_deltas_rule='raise',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
p = Pipeline()
# prevent unbound locals issue in the inner class
window_length_ = window_length
class TestFactor(CustomFactor):
inputs = ds.value,
window_length = window_length_
def compute(self, today, assets, out, data):
assert_array_almost_equal(
data,
expected_views[today],
err_msg=str(today),
)
out[:] = compute_fn(data)
p.add(TestFactor(), 'value')
result = SimplePipelineEngine(
loader,
calendar,
finder,
).run_pipeline(p, start, end)
assert_frame_equal(
result,
_utc_localize_index_level_0(expected_output),
check_dtype=False,
)
@with_ignore_sid
def test_deltas(self, asset_info, add_extra_sid):
df = self.df.copy()
if add_extra_sid:
extra_sid_df = pd.DataFrame({
'asof_date': self.dates,
'timestamp': self.dates,
'sid': (ord('E'),) * 3,
'value': (3., 4., 5.,),
'int_value': (3, 4, 5),
})
df = df.append(extra_sid_df, ignore_index=True)
expr = bz.data(df, name='expr', dshape=self.dshape)
deltas = bz.data(df, dshape=self.dshape)
deltas = bz.data(
odo(
bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
),
pd.DataFrame,
),
name='delta',
dshape=self.dshape,
)
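        # Keyed by simulation date; each value is the two-row window (oldest
        # row first) the factor should see that day, with the +10 delta applied
        # to any row whose delta timestamp has already been observed.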
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[10.0, 11.0, 12.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[11.0, 12.0, 13.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[12.0, 13.0, 14.0],
[12.0, 13.0, 14.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_extra_sid
def test_deltas_only_one_delta_in_universe(self, asset_info):
expr = bz.data(self.df, name='expr', dshape=self.dshape)
deltas = pd.DataFrame({
'sid': [65, 66],
'asof_date': [self.dates[1], self.dates[0]],
'timestamp': [self.dates[2], self.dates[1]],
'value': [10, 11],
})
deltas = bz.data(deltas, name='deltas', dshape=self.dshape)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[0.0, 11.0, 2.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[10.0, 2.0, 3.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[2.0, 3.0, 4.0],
[2.0, 3.0, 4.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
columns=[
'value',
],
data=np.array([11, 10, 4]).repeat(len(asset_info.index)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
def test_deltas_macro(self):
expr = bz.data(self.macro_df, name='expr', dshape=self.macro_dshape)
deltas = bz.data(
self.macro_df.iloc[:-1],
name='deltas',
dshape=self.macro_dshape,
)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(simple_asset_info)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[10.0],
[1.0]]),
'2014-01-03': np.array([[11.0],
[2.0]]),
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([10] * nassets, [11] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
dates = self.dates
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_extra_sid
def test_novel_deltas(self, asset_info):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-04')
])
repeated_dates = base_dates.repeat(3)
baseline = pd.DataFrame({
'sid': self.ASSET_FINDER_EQUITY_SIDS * 2,
'value': (0., 1., 2., 1., 2., 3.),
'int_value': (0, 1, 2, 1, 2, 3),
'asof_date': repeated_dates,
'timestamp': repeated_dates,
})
expr = bz.data(baseline, name='expr', dshape=self.dshape)
deltas = bz.data(
odo(
bz.transform(
expr,
value=expr.value + 10,
timestamp=expr.timestamp + timedelta(days=1),
),
pd.DataFrame,
),
name='delta',
dshape=self.dshape,
)
expected_views_all_deltas = keymap(pd.Timestamp, {
'2014-01-03': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0]]),
'2014-01-06': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[11.0, 12.0, 13.0]]),
})
# The only novel delta is on 2014-01-05, because it modifies a
# baseline data point that occurred on 2014-01-04, which is on a
# Saturday. The other delta, occurring on 2014-01-02, is seen after
# we already see the baseline data it modifies, and so it is a
# non-novel delta. Thus, the only delta seen in the expected view for
# novel deltas is on 2014-01-06 at (2, 0), (2, 1), and (2, 2).
expected_views_novel_deltas = keymap(pd.Timestamp, {
'2014-01-03': np.array([[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0]]),
'2014-01-06': np.array([[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[11.0, 12.0, 13.0]]),
})
def get_fourth_asset_view(expected_views, window_length):
return valmap(
lambda view: np.c_[view, [np.nan] * window_length],
expected_views,
)
if len(asset_info) == 4:
expected_views_all_deltas = get_fourth_asset_view(
expected_views_all_deltas, window_length=3
)
expected_views_novel_deltas = get_fourth_asset_view(
expected_views_novel_deltas, window_length=3
)
expected_output_buffer_all_deltas = [
10, 11, 12, np.nan, 11, 12, 13, np.nan
]
expected_output_buffer_novel_deltas = [
0, 1, 2, np.nan, 11, 12, 13, np.nan
]
else:
expected_output_buffer_all_deltas = [
10, 11, 12, 11, 12, 13
]
expected_output_buffer_novel_deltas = [
0, 1, 2, 11, 12, 13
]
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
# omitting the 4th and 5th to simulate a weekend
pd.Timestamp('2014-01-06'),
])
with tmp_asset_finder(equities=asset_info) as finder:
expected_output_all_deltas = pd.DataFrame(
expected_output_buffer_all_deltas,
index=pd.MultiIndex.from_product((
sorted(expected_views_all_deltas.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
expected_output_novel_deltas = pd.DataFrame(
expected_output_buffer_novel_deltas,
index=pd.MultiIndex.from_product((
sorted(expected_views_novel_deltas.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
it = (
(
True,
expected_views_all_deltas,
expected_output_all_deltas
),
(
False,
expected_views_novel_deltas,
expected_output_novel_deltas
)
)
for apply_deltas_adjs, expected_views, expected_output in it:
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=cal,
start=cal[2],
end=cal[-1],
window_length=3,
compute_fn=op.itemgetter(-1),
apply_deltas_adjustments=apply_deltas_adjs,
)
def test_novel_deltas_macro(self):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-04')
])
baseline = pd.DataFrame({
'value': (0., 1.),
'asof_date': base_dates,
'timestamp': base_dates,
})
expr = bz.data(baseline, name='expr', dshape=self.macro_dshape)
deltas = bz.data(baseline, name='deltas', dshape=self.macro_dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(simple_asset_info)
expected_views_all_deltas = keymap(pd.Timestamp, {
'2014-01-03': np.array([[10.0],
[10.0],
[10.0]]),
'2014-01-06': np.array([[10.0],
[10.0],
[11.0]]),
})
# The only novel delta is on 2014-01-05, because it modifies a
# baseline data point that occurred on 2014-01-04, which is on a
# Saturday. The other delta, occurring on 2014-01-02, is seen after
# we already see the baseline data it modifies, and so it is a
# non-novel delta. Thus, the only delta seen in the expected view for
# novel deltas is on 2014-01-06 at (2, 0).
expected_views_novel_deltas = keymap(pd.Timestamp, {
'2014-01-03': np.array([[0.0],
[0.0],
[0.0]]),
'2014-01-06': np.array([[0.0],
[0.0],
[11.0]]),
})
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
# omitting the 4th and 5th to simulate a weekend
pd.Timestamp('2014-01-06'),
])
def get_expected_output(expected_views, values, asset_info):
return pd.DataFrame(
list(concatv(*([value] * nassets for value in values))),
index=pd.MultiIndex.from_product(
(sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),)
), columns=('value',),
)
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output_all_deltas = get_expected_output(
expected_views_all_deltas,
[10, 11],
simple_asset_info,
)
expected_output_novel_deltas = get_expected_output(
expected_views_novel_deltas,
[0, 11],
simple_asset_info,
)
it = (
(
True,
expected_views_all_deltas,
expected_output_all_deltas
),
(
False,
expected_views_novel_deltas,
expected_output_novel_deltas
)
)
for apply_deltas_adjs, expected_views, expected_output in it:
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=cal,
start=cal[2],
end=cal[-1],
window_length=3,
compute_fn=op.itemgetter(-1),
apply_deltas_adjustments=apply_deltas_adjs,
)
def _test_checkpoints_macro(self, checkpoints, ffilled_value=-1.0):
"""Simple checkpoints test that accepts a checkpoints dataframe and
the expected value for 2014-01-03 for macro datasets.
The underlying data has value -1.0 on 2014-01-01 and 1.0 on 2014-01-04.
Parameters
----------
checkpoints : pd.DataFrame
The checkpoints data.
ffilled_value : float, optional
            The value expected to be read on the third; if not provided, it
            defaults to the value in the base data that is naturally ffilled
            there.
"""
dates = pd.Timestamp('2014-01-01'), pd.Timestamp('2014-01-04')
baseline = pd.DataFrame({
'value': [-1.0, 1.0],
'asof_date': dates,
'timestamp': dates,
})
nassets = len(simple_asset_info)
expected_views = keymap(pd.Timestamp, {
'2014-01-03': np.array([[ffilled_value]]),
'2014-01-04': np.array([[1.0]]),
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([ffilled_value] * nassets, [1.0] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
bz.data(baseline, name='expr', dshape=self.macro_dshape),
None,
bz.data(
checkpoints,
name='expr_checkpoints',
dshape=self.macro_dshape,
),
expected_views,
expected_output,
finder,
calendar=pd.date_range('2014-01-01', '2014-01-04'),
start=pd.Timestamp('2014-01-03'),
end=dates[-1],
window_length=1,
compute_fn=op.itemgetter(-1),
)
def test_checkpoints_macro(self):
ffilled_value = 0.0
checkpoints_ts = pd.Timestamp('2014-01-02')
checkpoints = pd.DataFrame({
'value': [ffilled_value],
'asof_date': checkpoints_ts,
'timestamp': checkpoints_ts,
})
self._test_checkpoints_macro(checkpoints, ffilled_value)
def test_empty_checkpoints_macro(self):
empty_checkpoints = pd.DataFrame({
'value': [],
'asof_date': [],
'timestamp': [],
})
self._test_checkpoints_macro(empty_checkpoints)
def test_checkpoints_out_of_bounds_macro(self):
# provide two checkpoints, one before the data in the base table
# and one after, these should not affect the value on the third
dates = pd.to_datetime(['2013-12-31', '2014-01-05'])
checkpoints = pd.DataFrame({
'value': [-2, 2],
'asof_date': dates,
'timestamp': dates,
})
self._test_checkpoints_macro(checkpoints)
def _test_checkpoints(self, checkpoints, ffilled_values=None):
"""Simple checkpoints test that accepts a checkpoints dataframe and
the expected value for 2014-01-03.
The underlying data has value -1.0 on 2014-01-01 and 1.0 on 2014-01-04.
Parameters
----------
checkpoints : pd.DataFrame
The checkpoints data.
ffilled_value : float, optional
The value to be read on the third, if not provided, it will be the
value in the base data that will be naturally ffilled there.
"""
nassets = len(simple_asset_info)
dates = pd.to_datetime(['2014-01-01', '2014-01-04'])
dates_repeated = np.tile(dates, nassets)
values = np.arange(nassets) + 1
values = np.hstack((values[::-1], values))
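        # two rows per sid spread across the two dates; the first nassets
        # values (the reversed sequence) are what should be ffilled on the
        # third, the last nassets (the ascending sequence) are the updated
        # values on the fourth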
baseline = pd.DataFrame({
'sid': np.tile(simple_asset_info.index, 2),
'value': values,
'asof_date': dates_repeated,
'timestamp': dates_repeated,
})
if ffilled_values is None:
ffilled_values = baseline.value.iloc[:nassets]
updated_values = baseline.value.iloc[nassets:]
expected_views = keymap(pd.Timestamp, {
'2014-01-03': [ffilled_values],
'2014-01-04': [updated_values],
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv(ffilled_values, updated_values)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
bz.data(baseline, name='expr', dshape=self.value_dshape),
None,
bz.data(
checkpoints,
name='expr_checkpoints',
dshape=self.value_dshape,
),
expected_views,
expected_output,
finder,
calendar=pd.date_range('2014-01-01', '2014-01-04'),
start=pd.Timestamp('2014-01-03'),
end=dates[-1],
window_length=1,
compute_fn=op.itemgetter(-1),
)
def test_checkpoints(self):
nassets = len(simple_asset_info)
ffilled_values = (np.arange(nassets, dtype=np.float64) + 1) * 10
dates = [pd.Timestamp('2014-01-02')] * nassets
checkpoints = pd.DataFrame({
'sid': simple_asset_info.index,
'value': ffilled_values,
'asof_date': dates,
'timestamp': dates,
})
self._test_checkpoints(checkpoints, ffilled_values)
def test_empty_checkpoints(self):
checkpoints = pd.DataFrame({
'sid': [],
'value': [],
'asof_date': [],
'timestamp': [],
})
self._test_checkpoints(checkpoints)
def test_checkpoints_out_of_bounds(self):
nassets = len(simple_asset_info)
# provide two sets of checkpoints, one before the data in the base
# table and one after, these should not affect the value on the third
dates = pd.to_datetime(['2013-12-31', '2014-01-05'])
dates_repeated = np.tile(dates, nassets)
ffilled_values = (np.arange(nassets) + 2) * 10
ffilled_values = np.hstack((ffilled_values[::-1], ffilled_values))
checkpoints = pd.DataFrame({
'sid': np.tile(simple_asset_info.index, 2),
'value': ffilled_values,
'asof_date': dates_repeated,
'timestamp': dates_repeated,
})
self._test_checkpoints(checkpoints)
def test_id_take_last_in_group_sorted(self):
"""
input
asof_date timestamp other value
2014-01-03 2014-01-04 00 3 3
2014-01-02 2014-01-04 00 2 2
output (expected):
other value
2014-01-02 NaN NaN
2014-01-03 NaN NaN
2014-01-06 3 3
"""
dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
pd.Timestamp('2014-01-06'),
])
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'other', 'value'],
data=[
# asof-dates are flipped in terms of order so that if we
# don't sort on asof-date before getting the last in group,
# we will get the wrong result.
[T('2014-01-03'), T('2014-01-04 00'), 3, 3],
[T('2014-01-02'), T('2014-01-04 00'), 2, 2],
],
)
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
data=[[np.nan, np.nan], # 2014-01-02
[np.nan, np.nan], # 2014-01-03
[3, 3]], # 2014-01-06
columns=['other', 'value'],
index=dates,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
self.asset_finder,
('other', 'value'),
dates=dates,
)
class MiscTestCase(ZiplineTestCase):
def test_exprdata_repr(self):
strd = set()
class BadRepr(object):
"""A class which cannot be repr'd.
"""
def __init__(self, name):
self._name = name
def __repr__(self): # pragma: no cover
raise AssertionError('ayy')
def __str__(self):
strd.add(self)
return self._name
assert_equal(
repr(ExprData(
expr=BadRepr('expr'),
deltas=BadRepr('deltas'),
checkpoints=BadRepr('checkpoints'),
odo_kwargs={'a': 'b'},
)),
"ExprData(expr='expr', deltas='deltas',"
" checkpoints='checkpoints', odo_kwargs={'a': 'b'}, "
"apply_deltas_adjustments=True)",
)
def test_blaze_loader_repr(self):
assert_equal(repr(BlazeLoader()), '<BlazeLoader: {}>')
def test_blaze_loader_lookup_failure(self):
class D(DataSet):
c = Column(dtype='float64')
with self.assertRaises(KeyError) as e:
BlazeLoader()(D.c)
assert_equal(str(e.exception), 'D.c::float64')
|
|
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr
from rpython.rtyper import rclass
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.llinterp import LLInterpreter
from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.rlib.objectmodel import we_are_translated, specialize
from rpython.jit.metainterp import history, compile
from rpython.jit.codewriter import heaptracker, longlong
from rpython.jit.backend.model import AbstractCPU
from rpython.jit.backend.llsupport import symbolic, jitframe
from rpython.jit.backend.llsupport.symbolic import WORD, unroll_basic_sizes
from rpython.jit.backend.llsupport.descr import (
get_size_descr, get_field_descr, get_array_descr,
get_call_descr, get_interiorfield_descr,
FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr,
FLAG_POINTER, FLAG_FLOAT)
from rpython.jit.backend.llsupport.memcpy import memset_fn
from rpython.jit.backend.llsupport import asmmemmgr, codemap
from rpython.rlib.unroll import unrolling_iterable
class AbstractLLCPU(AbstractCPU):
from rpython.jit.metainterp.typesystem import llhelper as ts
HAS_CODEMAP = False
done_with_this_frame_descr_int = None # overridden by pyjitpl.py
done_with_this_frame_descr_float = None
done_with_this_frame_descr_ref = None
done_with_this_frame_descr_void = None
exit_frame_with_exception_descr_ref = None
vector_extension = False
vector_register_size = 0 # in bytes
vector_horizontal_operations = False
vector_pack_slots = False
def __init__(self, rtyper, stats, opts, translate_support_code=False,
gcdescr=None):
assert type(opts) is not bool
self.opts = opts
from rpython.jit.backend.llsupport.gc import get_ll_description
AbstractCPU.__init__(self)
self.rtyper = rtyper
self.stats = stats
self.translate_support_code = translate_support_code
if translate_support_code and rtyper is not None:
translator = rtyper.annotator.translator
self.remove_gctypeptr = translator.config.translation.gcremovetypeptr
else:
translator = None
self.gc_ll_descr = get_ll_description(gcdescr, translator, rtyper)
        # supports_guard_gc_type indicates whether the gc type of an object can
        # be read. In some configurations (boehm, or x86 untranslated) the type
        # is not known just yet, because there are cases where it is not
        # guarded; the precise place where it is not guarded is while inlining
        # the short preamble.
self.supports_guard_gc_type = self.gc_ll_descr.supports_guard_gc_type
if translator and translator.config.translation.gcremovetypeptr:
self.vtable_offset = None
else:
self.vtable_offset, _ = symbolic.get_field_token(rclass.OBJECT,
'typeptr',
translate_support_code)
self.subclassrange_min_offset, _ = symbolic.get_field_token(
rclass.OBJECT_VTABLE, 'subclassrange_min', translate_support_code)
if translate_support_code:
self._setup_exception_handling_translated()
else:
self._setup_exception_handling_untranslated()
self.asmmemmgr = asmmemmgr.AsmMemoryManager()
if self.HAS_CODEMAP:
self.codemap = codemap.CodemapStorage()
self._setup_frame_realloc(translate_support_code)
ad = self.gc_ll_descr.getframedescrs(self).arraydescr
self.signedarraydescr = ad
# the same as normal JITFRAME, however with an array of pointers
self.refarraydescr = ArrayDescr(ad.basesize, ad.itemsize, ad.lendescr,
FLAG_POINTER)
if WORD == 4:
self.floatarraydescr = ArrayDescr(ad.basesize, ad.itemsize * 2,
ad.lendescr, FLAG_FLOAT)
else:
self.floatarraydescr = ArrayDescr(ad.basesize, ad.itemsize,
ad.lendescr, FLAG_FLOAT)
self.setup()
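        # raw scratch area standing in for the real thread-local block when
        # running untranslated; execute_token passes its address as
        # threadlocal_addr for errno-related tests (see make_execute_token)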
self._debug_errno_container = lltype.malloc(
rffi.CArray(lltype.Signed), 7, flavor='raw', zero=True,
track_allocation=False)
def getarraydescr_for_frame(self, type):
if type == history.FLOAT:
descr = self.floatarraydescr
elif type == history.REF:
descr = self.refarraydescr
else:
descr = self.signedarraydescr
return descr
def setup(self):
pass
def finish_once(self):
if self.HAS_CODEMAP:
self.codemap.finish_once()
def compile_loop(self, inputargs, operations, looptoken, jd_id=0,
unique_id=0, log=True, name='', logger=None):
return self.assembler.assemble_loop(jd_id, unique_id, logger, name,
inputargs, operations,
looptoken, log=log)
def stitch_bridge(self, faildescr, target):
self.assembler.stitch_bridge(faildescr, target)
def _setup_frame_realloc(self, translate_support_code):
FUNC_TP = lltype.Ptr(lltype.FuncType([llmemory.GCREF, lltype.Signed],
llmemory.GCREF))
base_ofs = self.get_baseofs_of_frame_field()
def realloc_frame(frame, size):
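            # used through self.realloc_frame to grow a frame that turned out
            # too small: allocate a larger JITFRAME, copy the contents over,
            # and link the old frame to the new one through jf_forward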
try:
if not we_are_translated():
assert not self._exception_emulator[0]
frame = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, frame)
if size > frame.jf_frame_info.jfi_frame_depth:
# update the frame_info size, which is for whatever reason
# not up to date
frame.jf_frame_info.update_frame_depth(base_ofs, size)
new_frame = jitframe.JITFRAME.allocate(frame.jf_frame_info)
frame.jf_forward = new_frame
i = 0
while i < len(frame.jf_frame):
new_frame.jf_frame[i] = frame.jf_frame[i]
frame.jf_frame[i] = 0
i += 1
new_frame.jf_savedata = frame.jf_savedata
new_frame.jf_guard_exc = frame.jf_guard_exc
# all other fields are empty
llop.gc_writebarrier(lltype.Void, new_frame)
return lltype.cast_opaque_ptr(llmemory.GCREF, new_frame)
except Exception, e:
print "Unhandled exception", e, "in realloc_frame"
return lltype.nullptr(llmemory.GCREF.TO)
def realloc_frame_crash(frame, size):
print "frame", frame, "size", size
return lltype.nullptr(llmemory.GCREF.TO)
if not translate_support_code:
fptr = llhelper(FUNC_TP, realloc_frame)
else:
FUNC = FUNC_TP.TO
args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS]
s_result = lltype_to_annotation(FUNC.RESULT)
mixlevelann = MixLevelHelperAnnotator(self.rtyper)
graph = mixlevelann.getgraph(realloc_frame, args_s, s_result)
fptr = mixlevelann.graph2delayed(graph, FUNC)
mixlevelann.finish()
self.realloc_frame = heaptracker.adr2int(llmemory.cast_ptr_to_adr(fptr))
if not translate_support_code:
fptr = llhelper(FUNC_TP, realloc_frame_crash)
else:
FUNC = FUNC_TP.TO
args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS]
s_result = lltype_to_annotation(FUNC.RESULT)
mixlevelann = MixLevelHelperAnnotator(self.rtyper)
graph = mixlevelann.getgraph(realloc_frame_crash, args_s, s_result)
fptr = mixlevelann.graph2delayed(graph, FUNC)
mixlevelann.finish()
self.realloc_frame_crash = heaptracker.adr2int(llmemory.cast_ptr_to_adr(fptr))
def _setup_exception_handling_untranslated(self):
# for running un-translated only, all exceptions occurring in the
# llinterpreter are stored in '_exception_emulator', which is then
# read back by the machine code reading at the address given by
# pos_exception() and pos_exc_value().
_exception_emulator = lltype.malloc(rffi.CArray(lltype.Signed), 2,
zero=True, flavor='raw',
immortal=True)
self._exception_emulator = _exception_emulator
def _store_exception(lle):
self._last_exception = lle # keepalive
tp_i = rffi.cast(lltype.Signed, lle.args[0])
v_i = rffi.cast(lltype.Signed, lle.args[1])
_exception_emulator[0] = tp_i
_exception_emulator[1] = v_i
self.debug_ll_interpreter = LLInterpreter(self.rtyper)
self.debug_ll_interpreter._store_exception = _store_exception
def pos_exception():
return rffi.cast(lltype.Signed, _exception_emulator)
def pos_exc_value():
return (rffi.cast(lltype.Signed, _exception_emulator) +
rffi.sizeof(lltype.Signed))
self.pos_exception = pos_exception
self.pos_exc_value = pos_exc_value
self.insert_stack_check = lambda: (0, 0, 0)
def _setup_exception_handling_translated(self):
def pos_exception():
addr = llop.get_exception_addr(llmemory.Address)
return heaptracker.adr2int(addr)
def pos_exc_value():
addr = llop.get_exc_value_addr(llmemory.Address)
return heaptracker.adr2int(addr)
from rpython.rlib import rstack
STACK_CHECK_SLOWPATH = lltype.Ptr(lltype.FuncType([lltype.Signed],
lltype.Void))
def insert_stack_check():
endaddr = rstack._stack_get_end_adr()
lengthaddr = rstack._stack_get_length_adr()
f = llhelper(STACK_CHECK_SLOWPATH, rstack.stack_check_slowpath)
slowpathaddr = rffi.cast(lltype.Signed, f)
return endaddr, lengthaddr, slowpathaddr
self.pos_exception = pos_exception
self.pos_exc_value = pos_exc_value
self.insert_stack_check = insert_stack_check
def grab_exc_value(self, deadframe):
deadframe = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, deadframe)
return deadframe.jf_guard_exc
def set_savedata_ref(self, deadframe, data):
deadframe = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, deadframe)
deadframe.jf_savedata = data
def get_savedata_ref(self, deadframe):
deadframe = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, deadframe)
return deadframe.jf_savedata
def free_loop_and_bridges(self, compiled_loop_token):
AbstractCPU.free_loop_and_bridges(self, compiled_loop_token)
blocks = compiled_loop_token.asmmemmgr_blocks
if blocks is not None:
compiled_loop_token.asmmemmgr_blocks = None
for rawstart, rawstop in blocks:
self.gc_ll_descr.freeing_block(rawstart, rawstop)
self.asmmemmgr.free(rawstart, rawstop)
if self.HAS_CODEMAP:
self.codemap.free_asm_block(rawstart, rawstop)
def force(self, addr_of_force_token):
frame = rffi.cast(jitframe.JITFRAMEPTR, addr_of_force_token)
frame = frame.resolve()
frame.jf_descr = frame.jf_force_descr
return lltype.cast_opaque_ptr(llmemory.GCREF, frame)
def make_execute_token(self, *ARGS):
# The JIT backend must generate functions with the following
# signature: it takes the jitframe and the threadlocal_addr
# as arguments, and it returns the (possibly reallocated) jitframe.
# The backend can optimize OS_THREADLOCALREF_GET calls to return a
# field of this threadlocal_addr, but only if 'translate_support_code':
# in untranslated tests, threadlocal_addr is a dummy container
# for errno tests only.
FUNCPTR = lltype.Ptr(lltype.FuncType([llmemory.GCREF, llmemory.Address],
llmemory.GCREF))
lst = [(i, history.getkind(ARG)[0]) for i, ARG in enumerate(ARGS)]
kinds = unrolling_iterable(lst)
def execute_token(executable_token, *args):
clt = executable_token.compiled_loop_token
assert len(args) == clt._debug_nbargs
#
addr = executable_token._ll_function_addr
func = rffi.cast(FUNCPTR, addr)
#llop.debug_print(lltype.Void, ">>>> Entering", addr)
frame_info = clt.frame_info
frame = self.gc_ll_descr.malloc_jitframe(frame_info)
ll_frame = lltype.cast_opaque_ptr(llmemory.GCREF, frame)
locs = executable_token.compiled_loop_token._ll_initial_locs
prev_interpreter = None # help flow space
if not self.translate_support_code:
prev_interpreter = LLInterpreter.current_interpreter
LLInterpreter.current_interpreter = self.debug_ll_interpreter
try:
for i, kind in kinds:
arg = args[i]
num = locs[i]
if kind == history.INT:
self.set_int_value(ll_frame, num, arg)
elif kind == history.FLOAT:
self.set_float_value(ll_frame, num, arg)
else:
assert kind == history.REF
self.set_ref_value(ll_frame, num, arg)
if self.translate_support_code:
ll_threadlocal_addr = llop.threadlocalref_addr(
llmemory.Address)
else:
ll_threadlocal_addr = rffi.cast(llmemory.Address,
self._debug_errno_container)
llop.gc_writebarrier(lltype.Void, ll_frame)
ll_frame = func(ll_frame, ll_threadlocal_addr)
finally:
if not self.translate_support_code:
LLInterpreter.current_interpreter = prev_interpreter
#llop.debug_print(lltype.Void, "<<<< Back")
return ll_frame
return execute_token
# ------------------- helpers and descriptions --------------------
@staticmethod
def _cast_int_to_gcref(x):
# dangerous! only use if you are sure no collection could occur
# between reading the integer and casting it to a pointer
return rffi.cast(llmemory.GCREF, x)
@staticmethod
def cast_gcref_to_int(x):
return rffi.cast(lltype.Signed, x)
@staticmethod
def cast_int_to_adr(x):
return rffi.cast(llmemory.Address, x)
@staticmethod
def cast_adr_to_int(x):
return rffi.cast(lltype.Signed, x)
@specialize.arg(2)
def cast_int_to_ptr(self, x, TYPE):
return rffi.cast(TYPE, x)
def sizeof(self, S, vtable=lltype.nullptr(rclass.OBJECT_VTABLE)):
return get_size_descr(self.gc_ll_descr, S, vtable)
def fielddescrof(self, STRUCT, fieldname):
return get_field_descr(self.gc_ll_descr, STRUCT, fieldname)
def unpack_fielddescr(self, fielddescr):
assert isinstance(fielddescr, FieldDescr)
return fielddescr.offset
unpack_fielddescr._always_inline_ = True
def unpack_fielddescr_size(self, fielddescr):
assert isinstance(fielddescr, FieldDescr)
ofs = fielddescr.offset
size = fielddescr.field_size
sign = fielddescr.is_field_signed()
return ofs, size, sign
unpack_fielddescr_size._always_inline_ = True
@specialize.memo()
def arraydescrof(self, A):
return get_array_descr(self.gc_ll_descr, A)
def interiorfielddescrof(self, A, fieldname, arrayfieldname=None):
return get_interiorfield_descr(self.gc_ll_descr, A, fieldname,
arrayfieldname)
def unpack_arraydescr(self, arraydescr):
assert isinstance(arraydescr, ArrayDescr)
return arraydescr.basesize
unpack_arraydescr._always_inline_ = True
def unpack_arraydescr_size(self, arraydescr):
assert isinstance(arraydescr, ArrayDescr)
ofs = arraydescr.basesize
size = arraydescr.itemsize
sign = arraydescr.is_item_signed()
return ofs, size, sign
unpack_arraydescr_size._always_inline_ = True
def calldescrof(self, FUNC, ARGS, RESULT, extrainfo):
return get_call_descr(self.gc_ll_descr, ARGS, RESULT, extrainfo)
def calldescrof_dynamic(self, cif_description, extrainfo):
from rpython.jit.backend.llsupport import ffisupport
return ffisupport.get_call_descr_dynamic(self, cif_description,
extrainfo)
def _calldescr_dynamic_for_tests(self, atypes, rtype,
abiname='FFI_DEFAULT_ABI'):
from rpython.jit.backend.llsupport import ffisupport
return ffisupport.calldescr_dynamic_for_tests(self, atypes, rtype,
abiname)
def get_latest_descr(self, deadframe):
deadframe = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, deadframe)
descr = deadframe.jf_descr
res = history.AbstractDescr.show(self, descr)
assert isinstance(res, history.AbstractFailDescr)
return res
def _decode_pos(self, deadframe, index):
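        # rd_locs holds, per fail-arg index, the slot number inside the
        # jitframe; multiplying by WORD turns it into a byte offset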
descr = self.get_latest_descr(deadframe)
return rffi.cast(lltype.Signed, descr.rd_locs[index]) * WORD
@specialize.arg(2)
def get_value_direct(self, deadframe, tp, index):
if tp == 'i':
return self.get_int_value_direct(deadframe, index * WORD)
elif tp == 'r':
return self.get_ref_value_direct(deadframe, index * WORD)
elif tp == 'f':
return self.get_float_value_direct(deadframe, index * WORD)
else:
assert False
def get_int_value(self, deadframe, index):
pos = self._decode_pos(deadframe, index)
return self.get_int_value_direct(deadframe, pos)
def get_int_value_direct(self, deadframe, pos):
descr = self.gc_ll_descr.getframedescrs(self).arraydescr
ofs = self.unpack_arraydescr(descr)
return self.read_int_at_mem(deadframe, pos + ofs, WORD, 1)
def get_ref_value(self, deadframe, index):
pos = self._decode_pos(deadframe, index)
return self.get_ref_value_direct(deadframe, pos)
def get_ref_value_direct(self, deadframe, pos):
descr = self.gc_ll_descr.getframedescrs(self).arraydescr
ofs = self.unpack_arraydescr(descr)
return self.read_ref_at_mem(deadframe, pos + ofs)
def get_float_value(self, deadframe, index):
pos = self._decode_pos(deadframe, index)
return self.get_float_value_direct(deadframe, pos)
def get_float_value_direct(self, deadframe, pos):
descr = self.gc_ll_descr.getframedescrs(self).arraydescr
ofs = self.unpack_arraydescr(descr)
return self.read_float_at_mem(deadframe, pos + ofs)
# ____________________ RAW PRIMITIVES ________________________
@specialize.argtype(1)
def read_int_at_mem(self, gcref, ofs, size, sign):
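        # unroll_basic_sizes (from llsupport.symbolic) is meant to be unrolled
        # at translation time, so this becomes a dispatch over the supported
        # integer widths; the for/else raises for any other size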
for STYPE, UTYPE, itemsize in unroll_basic_sizes:
if size == itemsize:
if sign:
val = llop.raw_load(STYPE, gcref, ofs)
val = rffi.cast(lltype.Signed, val)
else:
val = llop.raw_load(UTYPE, gcref, ofs)
val = rffi.cast(lltype.Signed, val)
return val
else:
raise NotImplementedError("size = %d" % size)
@specialize.argtype(1)
def write_int_at_mem(self, gcref, ofs, size, newvalue):
for TYPE, _, itemsize in unroll_basic_sizes:
if size == itemsize:
newvalue = rffi.cast(TYPE, newvalue)
llop.raw_store(lltype.Void, gcref, ofs, newvalue)
return
else:
raise NotImplementedError("size = %d" % size)
@specialize.argtype(1)
def read_ref_at_mem(self, gcref, ofs):
return llop.raw_load(llmemory.GCREF, gcref, ofs)
# non-@specialized: must only be called with llmemory.GCREF
def write_ref_at_mem(self, gcref, ofs, newvalue):
llop.raw_store(lltype.Void, gcref, ofs, newvalue)
# the write barrier is implied above
@specialize.argtype(1)
def read_float_at_mem(self, gcref, ofs):
return llop.raw_load(longlong.FLOATSTORAGE, gcref, ofs)
@specialize.argtype(1)
def write_float_at_mem(self, gcref, ofs, newvalue):
llop.raw_store(lltype.Void, gcref, ofs, newvalue)
# ____________________________________________________________
def set_int_value(self, newframe, index, value):
""" Note that we keep index multiplied by WORD here mostly
for completeness with get_int_value and friends
"""
descr = self.gc_ll_descr.getframedescrs(self).arraydescr
ofs = self.unpack_arraydescr(descr)
self.write_int_at_mem(newframe, ofs + index, WORD, value)
def set_ref_value(self, newframe, index, value):
descr = self.gc_ll_descr.getframedescrs(self).arraydescr
ofs = self.unpack_arraydescr(descr)
self.write_ref_at_mem(newframe, ofs + index, value)
def set_float_value(self, newframe, index, value):
descr = self.gc_ll_descr.getframedescrs(self).arraydescr
ofs = self.unpack_arraydescr(descr)
self.write_float_at_mem(newframe, ofs + index, value)
@specialize.arg(1)
def get_ofs_of_frame_field(self, name):
descrs = self.gc_ll_descr.getframedescrs(self)
ofs = self.unpack_fielddescr(getattr(descrs, name))
return ofs
def get_baseofs_of_frame_field(self):
descrs = self.gc_ll_descr.getframedescrs(self)
base_ofs = self.unpack_arraydescr(descrs.arraydescr)
return base_ofs
# ____________________________________________________________
def check_is_object(self, gcptr):
"""Check if the given, non-null gcptr refers to an rclass.OBJECT
or not at all (an unrelated GcStruct or a GcArray). Only usable
in the llgraph backend, or after translation of a real backend."""
assert self.supports_guard_gc_type
return self.gc_ll_descr.check_is_object(gcptr)
def get_actual_typeid(self, gcptr):
"""Fetch the actual typeid of the given gcptr, as an integer.
Only usable in the llgraph backend, or after translation of a
real backend."""
assert self.supports_guard_gc_type
return self.gc_ll_descr.get_actual_typeid(gcptr)
# ____________________________________________________________
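    # The bh_* methods execute single operations directly on lltype objects
    # (e.g. for the blackhole interpreter and direct tests) instead of going
    # through generated machine code.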
def bh_arraylen_gc(self, array, arraydescr):
assert isinstance(arraydescr, ArrayDescr)
ofs = arraydescr.lendescr.offset
return self.read_int_at_mem(array, ofs, WORD, 1)
@specialize.argtype(1)
def bh_getarrayitem_gc_i(self, gcref, itemindex, arraydescr):
ofs, size, sign = self.unpack_arraydescr_size(arraydescr)
return self.read_int_at_mem(gcref, ofs + itemindex * size, size,
sign)
def bh_getarrayitem_gc_r(self, gcref, itemindex, arraydescr):
ofs = self.unpack_arraydescr(arraydescr)
return self.read_ref_at_mem(gcref, itemindex * WORD + ofs)
@specialize.argtype(1)
def bh_getarrayitem_gc_f(self, gcref, itemindex, arraydescr):
ofs = self.unpack_arraydescr(arraydescr)
fsize = rffi.sizeof(longlong.FLOATSTORAGE)
return self.read_float_at_mem(gcref, itemindex * fsize + ofs)
@specialize.argtype(1)
def bh_setarrayitem_gc_i(self, gcref, itemindex, newvalue, arraydescr):
ofs, size, sign = self.unpack_arraydescr_size(arraydescr)
self.write_int_at_mem(gcref, ofs + itemindex * size, size, newvalue)
def bh_setarrayitem_gc_r(self, gcref, itemindex, newvalue, arraydescr):
ofs = self.unpack_arraydescr(arraydescr)
self.write_ref_at_mem(gcref, itemindex * WORD + ofs, newvalue)
@specialize.argtype(1)
def bh_setarrayitem_gc_f(self, gcref, itemindex, newvalue, arraydescr):
ofs = self.unpack_arraydescr(arraydescr)
fsize = rffi.sizeof(longlong.FLOATSTORAGE)
self.write_float_at_mem(gcref, ofs + itemindex * fsize, newvalue)
bh_setarrayitem_raw_i = bh_setarrayitem_gc_i
bh_setarrayitem_raw_f = bh_setarrayitem_gc_f
bh_getarrayitem_raw_i = bh_getarrayitem_gc_i
bh_getarrayitem_raw_f = bh_getarrayitem_gc_f
def bh_getinteriorfield_gc_i(self, gcref, itemindex, descr):
assert isinstance(descr, InteriorFieldDescr)
ofs, size, _ = self.unpack_arraydescr_size(descr.arraydescr)
fldofs, fldsize, sign = self.unpack_fielddescr_size(descr.fielddescr)
ofs += itemindex * size + fldofs
return self.read_int_at_mem(gcref, ofs, fldsize, sign)
def bh_getinteriorfield_gc_r(self, gcref, itemindex, descr):
assert isinstance(descr, InteriorFieldDescr)
ofs, size, _ = self.unpack_arraydescr_size(descr.arraydescr)
ofs += descr.fielddescr.offset
fullofs = itemindex * size + ofs
return self.read_ref_at_mem(gcref, fullofs)
def bh_getinteriorfield_gc_f(self, gcref, itemindex, descr):
assert isinstance(descr, InteriorFieldDescr)
ofs, size, _ = self.unpack_arraydescr_size(descr.arraydescr)
ofs += descr.fielddescr.offset
fullofs = itemindex * size + ofs
return self.read_float_at_mem(gcref, fullofs)
def bh_setinteriorfield_gc_i(self, gcref, itemindex, newvalue, descr):
assert isinstance(descr, InteriorFieldDescr)
ofs, size, _ = self.unpack_arraydescr_size(descr.arraydescr)
fldofs, fldsize, _ = self.unpack_fielddescr_size(descr.fielddescr)
ofs += itemindex * size + fldofs
self.write_int_at_mem(gcref, ofs, fldsize, newvalue)
def bh_setinteriorfield_gc_r(self, gcref, itemindex, newvalue, descr):
assert isinstance(descr, InteriorFieldDescr)
ofs, size, _ = self.unpack_arraydescr_size(descr.arraydescr)
ofs += itemindex * size + descr.fielddescr.offset
self.write_ref_at_mem(gcref, ofs, newvalue)
def bh_setinteriorfield_gc_f(self, gcref, itemindex, newvalue, descr):
assert isinstance(descr, InteriorFieldDescr)
ofs, size, _ = self.unpack_arraydescr_size(descr.arraydescr)
ofs += itemindex * size + descr.fielddescr.offset
self.write_float_at_mem(gcref, ofs, newvalue)
def bh_strlen(self, string):
s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string)
return len(s.chars)
def bh_unicodelen(self, string):
u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string)
return len(u.chars)
def bh_strgetitem(self, string, index):
s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string)
return ord(s.chars[index])
def bh_unicodegetitem(self, string, index):
u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string)
return ord(u.chars[index])
@specialize.argtype(1)
def bh_getfield_gc_i(self, struct, fielddescr):
ofs, size, sign = self.unpack_fielddescr_size(fielddescr)
if isinstance(lltype.typeOf(struct), lltype.Ptr):
fielddescr.check_correct_type(struct)
return self.read_int_at_mem(struct, ofs, size, sign)
@specialize.argtype(1)
def bh_getfield_gc_r(self, struct, fielddescr):
ofs = self.unpack_fielddescr(fielddescr)
if isinstance(lltype.typeOf(struct), lltype.Ptr):
fielddescr.check_correct_type(struct)
return self.read_ref_at_mem(struct, ofs)
@specialize.argtype(1)
def bh_getfield_gc_f(self, struct, fielddescr):
ofs = self.unpack_fielddescr(fielddescr)
if isinstance(lltype.typeOf(struct), lltype.Ptr):
fielddescr.check_correct_type(struct)
return self.read_float_at_mem(struct, ofs)
bh_getfield_raw_i = bh_getfield_gc_i
bh_getfield_raw_r = bh_getfield_gc_r
bh_getfield_raw_f = bh_getfield_gc_f
@specialize.argtype(1)
def bh_setfield_gc_i(self, struct, newvalue, fielddescr):
ofs, size, _ = self.unpack_fielddescr_size(fielddescr)
if isinstance(lltype.typeOf(struct), lltype.Ptr):
fielddescr.check_correct_type(struct)
self.write_int_at_mem(struct, ofs, size, newvalue)
def bh_setfield_gc_r(self, struct, newvalue, fielddescr):
ofs = self.unpack_fielddescr(fielddescr)
if isinstance(lltype.typeOf(struct), lltype.Ptr):
fielddescr.check_correct_type(struct)
self.write_ref_at_mem(struct, ofs, newvalue)
@specialize.argtype(1)
def bh_setfield_gc_f(self, struct, newvalue, fielddescr):
ofs = self.unpack_fielddescr(fielddescr)
if isinstance(lltype.typeOf(struct), lltype.Ptr):
fielddescr.check_correct_type(struct)
self.write_float_at_mem(struct, ofs, newvalue)
bh_setfield_raw_i = bh_setfield_gc_i
bh_setfield_raw_f = bh_setfield_gc_f
def bh_raw_store_i(self, addr, offset, newvalue, descr):
ofs, size, _ = self.unpack_arraydescr_size(descr)
assert ofs == 0 # otherwise, 'descr' is not a raw length-less array
self.write_int_at_mem(addr, offset, size, newvalue)
def bh_raw_store_f(self, addr, offset, newvalue, descr):
self.write_float_at_mem(addr, offset, newvalue)
def bh_raw_load_i(self, addr, offset, descr):
ofs, size, sign = self.unpack_arraydescr_size(descr)
assert ofs == 0 # otherwise, 'descr' is not a raw length-less array
return self.read_int_at_mem(addr, offset, size, sign)
def bh_raw_load_f(self, addr, offset, descr):
return self.read_float_at_mem(addr, offset)
def bh_new(self, sizedescr):
return self.gc_ll_descr.gc_malloc(sizedescr)
def bh_new_with_vtable(self, sizedescr):
res = self.gc_ll_descr.gc_malloc(sizedescr)
if self.vtable_offset is not None:
self.write_int_at_mem(res, self.vtable_offset, WORD, sizedescr.get_vtable())
return res
def bh_new_raw_buffer(self, size):
return lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
def bh_classof(self, struct):
struct = lltype.cast_opaque_ptr(rclass.OBJECTPTR, struct)
result_adr = llmemory.cast_ptr_to_adr(struct.typeptr)
return heaptracker.adr2int(result_adr)
def bh_new_array(self, length, arraydescr):
return self.gc_ll_descr.gc_malloc_array(length, arraydescr)
bh_new_array_clear = bh_new_array
def bh_newstr(self, length):
return self.gc_ll_descr.gc_malloc_str(length)
def bh_newunicode(self, length):
return self.gc_ll_descr.gc_malloc_unicode(length)
def bh_strsetitem(self, string, index, newvalue):
s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string)
s.chars[index] = chr(newvalue)
def bh_unicodesetitem(self, string, index, newvalue):
u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string)
u.chars[index] = unichr(newvalue)
def bh_copystrcontent(self, src, dst, srcstart, dststart, length):
src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src)
dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst)
rstr.copy_string_contents(src, dst, srcstart, dststart, length)
def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length):
src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src)
dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst)
rstr.copy_unicode_contents(src, dst, srcstart, dststart, length)
def bh_call_i(self, func, args_i, args_r, args_f, calldescr):
assert isinstance(calldescr, CallDescr)
if not we_are_translated():
calldescr.verify_types(args_i, args_r, args_f, history.INT + 'S')
return calldescr.call_stub_i(func, args_i, args_r, args_f)
def bh_call_r(self, func, args_i, args_r, args_f, calldescr):
assert isinstance(calldescr, CallDescr)
if not we_are_translated():
calldescr.verify_types(args_i, args_r, args_f, history.REF)
return calldescr.call_stub_r(func, args_i, args_r, args_f)
def bh_call_f(self, func, args_i, args_r, args_f, calldescr):
assert isinstance(calldescr, CallDescr)
if not we_are_translated():
calldescr.verify_types(args_i, args_r, args_f, history.FLOAT + 'L')
return calldescr.call_stub_f(func, args_i, args_r, args_f)
def bh_call_v(self, func, args_i, args_r, args_f, calldescr):
assert isinstance(calldescr, CallDescr)
if not we_are_translated():
calldescr.verify_types(args_i, args_r, args_f, history.VOID)
# the 'i' return value is ignored (and nonsense anyway)
calldescr.call_stub_i(func, args_i, args_r, args_f)
final_descr_rd_locs = [rffi.cast(rffi.USHORT, 0)]
history.BasicFinalDescr.rd_locs = final_descr_rd_locs
compile._DoneWithThisFrameDescr.rd_locs = final_descr_rd_locs
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from unittest.mock import Mock
from autobahn.util import wildcards2patterns
from autobahn.twisted.websocket import WebSocketServerFactory
from autobahn.twisted.websocket import WebSocketServerProtocol
from autobahn.twisted.websocket import WebSocketClientProtocol
from autobahn.websocket.types import TransportDetails
from autobahn.websocket.types import ConnectingRequest
from twisted.python.failure import Failure
from twisted.internet.error import ConnectionDone, ConnectionAborted, \
ConnectionLost
from twisted.trial import unittest
try:
from twisted.internet.testing import StringTransport
except ImportError:
from twisted.test.proto_helpers import StringTransport
from autobahn.testutil import FakeTransport
class ExceptionHandlingTests(unittest.TestCase):
"""
Tests that we format various exception variations properly during
connectionLost
"""
def setUp(self):
self.factory = WebSocketServerFactory()
self.proto = WebSocketServerProtocol()
self.proto.factory = self.factory
self.proto.log = Mock()
def tearDown(self):
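        # cancel any Autobahn timers still pending so trial does not complain
        # about a dirty reactor after the test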
for call in [
self.proto.autoPingPendingCall,
self.proto.autoPingTimeoutCall,
self.proto.openHandshakeTimeoutCall,
self.proto.closeHandshakeTimeoutCall,
]:
if call is not None:
call.cancel()
def test_connection_done(self):
# pretend we connected
self.proto._connectionMade()
self.proto.connectionLost(Failure(ConnectionDone()))
messages = ' '.join([str(x[1]) for x in self.proto.log.mock_calls])
self.assertTrue('closed cleanly' in messages)
def test_connection_aborted(self):
# pretend we connected
self.proto._connectionMade()
self.proto.connectionLost(Failure(ConnectionAborted()))
messages = ' '.join([str(x[1]) for x in self.proto.log.mock_calls])
self.assertTrue(' aborted ' in messages)
def test_connection_lost(self):
# pretend we connected
self.proto._connectionMade()
self.proto.connectionLost(Failure(ConnectionLost()))
messages = ' '.join([str(x[1]) for x in self.proto.log.mock_calls])
self.assertTrue(' was lost ' in messages)
def test_connection_lost_arg(self):
# pretend we connected
self.proto._connectionMade()
self.proto.connectionLost(Failure(ConnectionLost("greetings")))
messages = ' '.join([str(x[1]) + str(x[2]) for x in self.proto.log.mock_calls])
self.assertTrue(' was lost ' in messages)
self.assertTrue('greetings' in messages)
class Hixie76RejectionTests(unittest.TestCase):
"""
Hixie-76 should not be accepted by an Autobahn server.
"""
def test_handshake_fails(self):
"""
A handshake from a client only supporting Hixie-76 will fail.
"""
t = FakeTransport()
f = WebSocketServerFactory()
p = WebSocketServerProtocol()
p.factory = f
p.transport = t
# from http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
http_request = b"GET /demo HTTP/1.1\r\nHost: example.com\r\nConnection: Upgrade\r\nSec-WebSocket-Key2: 12998 5 Y3 1 .P00\r\nSec-WebSocket-Protocol: sample\r\nUpgrade: WebSocket\r\nSec-WebSocket-Key1: 4 @1 46546xW%0l 1 5\r\nOrigin: http://example.com\r\n\r\n^n:ds[4U"
p.openHandshakeTimeout = 0
p._connectionMade()
p.data = http_request
p.processHandshake()
self.assertIn(b"HTTP/1.1 400", t._written)
self.assertIn(b"Hixie76 protocol not supported", t._written)
class WebSocketOriginMatching(unittest.TestCase):
"""
Test that we match Origin: headers properly, when asked to
"""
def setUp(self):
self.factory = WebSocketServerFactory()
self.factory.setProtocolOptions(
allowedOrigins=['127.0.0.1:*', '*.example.com:*']
)
self.proto = WebSocketServerProtocol()
self.proto.transport = StringTransport()
self.proto.factory = self.factory
self.proto.failHandshake = Mock()
self.proto._connectionMade()
def tearDown(self):
for call in [
self.proto.autoPingPendingCall,
self.proto.autoPingTimeoutCall,
self.proto.openHandshakeTimeoutCall,
self.proto.closeHandshakeTimeoutCall,
]:
if call is not None:
call.cancel()
def test_match_full_origin(self):
self.proto.data = b"\r\n".join([
b'GET /ws HTTP/1.1',
b'Host: www.example.com',
b'Sec-WebSocket-Version: 13',
b'Origin: http://www.example.com.malicious.com',
b'Sec-WebSocket-Extensions: permessage-deflate',
b'Sec-WebSocket-Key: tXAxWFUqnhi86Ajj7dRY5g==',
b'Connection: keep-alive, Upgrade',
b'Upgrade: websocket',
b'\r\n', # last string doesn't get a \r\n from join()
])
self.proto.consumeData()
self.assertTrue(self.proto.failHandshake.called, "Handshake should have failed")
arg = self.proto.failHandshake.mock_calls[0][1][0]
self.assertTrue('not allowed' in arg)
def test_match_wrong_scheme_origin(self):
# some monkey-business since we already did this in setUp, but
# we want a different set of matching origins
self.factory.setProtocolOptions(
allowedOrigins=['http://*.example.com:*']
)
self.proto.allowedOriginsPatterns = self.factory.allowedOriginsPatterns
self.proto.allowedOrigins = self.factory.allowedOrigins
# the actual test
self.factory.isSecure = False
self.proto.data = b"\r\n".join([
b'GET /ws HTTP/1.1',
b'Host: www.example.com',
b'Sec-WebSocket-Version: 13',
b'Origin: https://www.example.com',
b'Sec-WebSocket-Extensions: permessage-deflate',
b'Sec-WebSocket-Key: tXAxWFUqnhi86Ajj7dRY5g==',
b'Connection: keep-alive, Upgrade',
b'Upgrade: websocket',
b'\r\n', # last string doesn't get a \r\n from join()
])
self.proto.consumeData()
self.assertTrue(self.proto.failHandshake.called, "Handshake should have failed")
arg = self.proto.failHandshake.mock_calls[0][1][0]
self.assertTrue('not allowed' in arg)
def test_match_origin_secure_scheme(self):
self.factory.isSecure = True
self.factory.port = 443
self.proto.data = b"\r\n".join([
b'GET /ws HTTP/1.1',
b'Host: www.example.com',
b'Sec-WebSocket-Version: 13',
b'Origin: https://www.example.com',
b'Sec-WebSocket-Extensions: permessage-deflate',
b'Sec-WebSocket-Key: tXAxWFUqnhi86Ajj7dRY5g==',
b'Connection: keep-alive, Upgrade',
b'Upgrade: websocket',
b'\r\n', # last string doesn't get a \r\n from join()
])
self.proto.consumeData()
self.assertFalse(self.proto.failHandshake.called, "Handshake should have succeeded")
def test_match_origin_documentation_example(self):
"""
Test the examples from the docs
"""
self.factory.setProtocolOptions(
allowedOrigins=['*://*.example.com:*']
)
self.factory.isSecure = True
self.factory.port = 443
self.proto.data = b"\r\n".join([
b'GET /ws HTTP/1.1',
b'Host: www.example.com',
b'Sec-WebSocket-Version: 13',
b'Origin: http://www.example.com',
b'Sec-WebSocket-Extensions: permessage-deflate',
b'Sec-WebSocket-Key: tXAxWFUqnhi86Ajj7dRY5g==',
b'Connection: keep-alive, Upgrade',
b'Upgrade: websocket',
b'\r\n', # last string doesn't get a \r\n from join()
])
self.proto.consumeData()
self.assertFalse(self.proto.failHandshake.called, "Handshake should have succeeded")
def test_match_origin_examples(self):
"""
All the example origins from RFC6454 (3.2.1)
"""
# we're just testing the low-level function here...
from autobahn.websocket.protocol import _is_same_origin, _url_to_origin
policy = wildcards2patterns(['*example.com:*'])
# should parametrize test ...
for url in ['http://example.com/', 'http://example.com:80/',
'http://example.com/path/file',
'http://example.com/;semi=true',
# 'http://example.com./',
'//example.com/',
'http://@example.com']:
self.assertTrue(_is_same_origin(_url_to_origin(url), 'http', 80, policy), url)
def test_match_origin_counter_examples(self):
"""
All the example 'not-same' origins from RFC6454 (3.2.1)
"""
# we're just testing the low-level function here...
from autobahn.websocket.protocol import _is_same_origin, _url_to_origin
policy = wildcards2patterns(['example.com'])
for url in ['http://ietf.org/', 'http://example.org/',
'https://example.com/', 'http://example.com:8080/',
'http://www.example.com/']:
self.assertFalse(_is_same_origin(_url_to_origin(url), 'http', 80, policy))
def test_match_origin_edge(self):
# we're just testing the low-level function here...
from autobahn.websocket.protocol import _is_same_origin, _url_to_origin
policy = wildcards2patterns(['http://*example.com:80'])
self.assertTrue(
_is_same_origin(_url_to_origin('http://example.com:80'), 'http', 80, policy)
)
self.assertFalse(
_is_same_origin(_url_to_origin('http://example.com:81'), 'http', 81, policy)
)
self.assertFalse(
_is_same_origin(_url_to_origin('https://example.com:80'), 'http', 80, policy)
)
def test_origin_from_url(self):
from autobahn.websocket.protocol import _url_to_origin
# basic function
self.assertEqual(
_url_to_origin('http://example.com'),
('http', 'example.com', 80)
)
# should lower-case scheme
self.assertEqual(
_url_to_origin('hTTp://example.com'),
('http', 'example.com', 80)
)
def test_origin_file(self):
from autobahn.websocket.protocol import _url_to_origin
self.assertEqual('null', _url_to_origin('file:///etc/passwd'))
def test_origin_null(self):
from autobahn.websocket.protocol import _is_same_origin, _url_to_origin
self.assertEqual('null', _url_to_origin('null'))
self.assertFalse(
_is_same_origin(_url_to_origin('null'), 'http', 80, [])
)
self.assertFalse(
_is_same_origin(_url_to_origin('null'), 'https', 80, [])
)
self.assertFalse(
_is_same_origin(_url_to_origin('null'), '', 80, [])
)
self.assertFalse(
_is_same_origin(_url_to_origin('null'), None, 80, [])
)
class WebSocketXForwardedFor(unittest.TestCase):
"""
Test that (only) a trusted X-Forwarded-For can replace the peer address.
"""
def setUp(self):
self.factory = WebSocketServerFactory()
self.factory.setProtocolOptions(
trustXForwardedFor=2
)
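        # with trustXForwardedFor=2 the peer address is derived from the
        # X-Forwarded-For header; test_trusted_addresses below asserts that
        # 2.3.4.5 is picked from the example header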
self.proto = WebSocketServerProtocol()
self.proto.transport = StringTransport()
self.proto.factory = self.factory
self.proto.failHandshake = Mock()
self.proto._connectionMade()
def tearDown(self):
for call in [
self.proto.autoPingPendingCall,
self.proto.autoPingTimeoutCall,
self.proto.openHandshakeTimeoutCall,
self.proto.closeHandshakeTimeoutCall,
]:
if call is not None:
call.cancel()
def test_trusted_addresses(self):
self.proto.data = b"\r\n".join([
b'GET /ws HTTP/1.1',
b'Host: www.example.com',
b'Origin: http://www.example.com',
b'Sec-WebSocket-Version: 13',
b'Sec-WebSocket-Extensions: permessage-deflate',
b'Sec-WebSocket-Key: tXAxWFUqnhi86Ajj7dRY5g==',
b'Connection: keep-alive, Upgrade',
b'Upgrade: websocket',
b'X-Forwarded-For: 1.2.3.4, 2.3.4.5, 111.222.33.44',
b'\r\n', # last string doesn't get a \r\n from join()
])
self.proto.consumeData()
        self.assertEqual(
self.proto.peer, "2.3.4.5",
"The second address in X-Forwarded-For should have been picked as the peer address")
class OnConnectingTests(unittest.TestCase):
"""
    Tests related to the onConnecting callback.
    These tests cover generic behavior, but the callback is somewhat tied to
    'a framework', so they are exercised here through the Twisted-specific
    classes.
"""
def test_on_connecting_client_fails(self):
class TestProto(WebSocketClientProtocol):
state = None
wasClean = True
log = Mock()
def onConnecting(self, transport_details):
raise RuntimeError("bad stuff")
from autobahn.testutil import FakeTransport
proto = TestProto()
proto.transport = FakeTransport()
d = proto.startHandshake()
self.successResultOf(d) # error is ignored
# ... but error should be logged
self.assertTrue(len(proto.log.mock_calls) > 0)
self.assertIn(
"bad stuff",
str(proto.log.mock_calls[0]),
)
def test_on_connecting_client_success(self):
class TestProto(WebSocketClientProtocol):
state = None
wasClean = True
perMessageCompressionOffers = []
version = 18
openHandshakeTimeout = 5
log = Mock()
def onConnecting(self, transport_details):
return ConnectingRequest(
host="example.com",
port=443,
resource="/ws",
)
                from autobahn.testutil import FakeTransport
proto = TestProto()
proto.transport = FakeTransport()
proto.factory = Mock()
proto._connectionMade()
d = proto.startHandshake()
req = self.successResultOf(d)
self.assertEqual("example.com", req.host)
self.assertEqual(443, req.port)
self.assertEqual("/ws", req.resource)
def test_str_transport(self):
details = TransportDetails(
peer="example.com",
is_secure=False,
secure_channel_id={},
)
# we can str() this and it doesn't fail
str(details)
def test_str_connecting(self):
req = ConnectingRequest(host="example.com", port="1234", resource="/ws")
# we can str() this and it doesn't fail
str(req)
|