| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M |
# -*- coding: utf-8 -*-
# Copyright 2012 Antoine Bertin <[email protected]>
#
# This file is part of pyextdirect.
#
# pyextdirect is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyextdirect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pyextdirect. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['BASIC', 'LOAD', 'SUBMIT', 'STORE_READ', 'STORE_CUD', 'ConfigurationMeta', 'create_configuration', 'expose']
#: Basic method
BASIC = 0
#: DirectLoad method
LOAD = 1
#: DirectSubmit method
SUBMIT = 2
#: DirectStore read method
STORE_READ = 3
#: DirectStore create-update-destroy methods
STORE_CUD = 4
class ConfigurationMeta(type):
"""Each class created with this metaclass will have its exposed methods registered
A method can be exposed with the :func:`expose` decorator
The registration is done by calling :meth:`~Base.register`
"""
def __init__(cls, name, bases, attrs):
for attrname, attrvalue in attrs.iteritems():
if not getattr(attrvalue, 'exposed', False):
continue
cls.register((cls, attrname), getattr(attrvalue, 'exposed_action') or name, getattr(attrvalue, 'exposed_method') or attrname)
return super(ConfigurationMeta, cls).__init__(name, bases, attrs)
def create_configuration(name='Base'):
"""Create a configuration base class
It is built using :class:`ConfigurationMeta`. Subclassing such a base class
will register exposed methods
.. class:: Base
.. attribute:: configuration
Configuration dict that can be used by a Router or the API
.. classmethod:: register(element, action, method)
Register an element in the :attr:`configuration`
:param element: the element to register
:type element: tuple of (class, method name) or function
:param string action: name of the exposed action that will hold the method
:param string method: name of the exposed method
"""
@classmethod
def register(cls, element, action, method):
        if action not in cls.configuration:
cls.configuration[action] = {}
if method in cls.configuration[action]:
raise ValueError('Method %s already defined for action %s' % (method, action))
cls.configuration[action][method] = element
return ConfigurationMeta(name, (object,), {'configuration': {}, 'register': register})
def expose(f=None, base=None, action=None, method=None, kind=BASIC):
"""Decorator to expose a function
.. note::
A module function can be decorated but ``base`` parameter has to be specified
:param f: function to expose
:type f: function or None
:param base: base class that can register the function
:param string action: name of the exposed action that will hold the method
:param string method: name of the exposed method
:param kind: kind of the method
    :type kind: :data:`BASIC`, :data:`LOAD`, :data:`SUBMIT`, :data:`STORE_READ` or :data:`STORE_CUD`
"""
def expose_f(f):
f.exposed = True
f.exposed_action = action
f.exposed_method = method
f.exposed_kind = kind
return f
def register_f(f):
f = expose_f(f)
base.register(f, action or f.__module__, method or f.__name__)
return f
if f is not None: # @expose case (no parameters)
return expose_f(f)
if base is not None: # module-level function case
return register_f
return expose_f
def merge_configurations(configurations):
"""Merge configurations together and raise error if a conflict is detected
:param configurations: configurations to merge together
:type configurations: list of :attr:`~pyextdirect.configuration.Base.configuration` dicts
:return: merged configurations as a single one
:rtype: dict
"""
configuration = {}
for c in configurations:
for k, v in c.iteritems():
if k in configuration:
raise ValueError('%s already in a previous base configuration' % k)
configuration[k] = v
return configuration
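# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal, hypothetical example of how the pieces above fit together;
# the class and function names below are invented for illustration:
#
#     Base = create_configuration()
#
#     class Calculator(Base):
#         @expose
#         def add(self, a, b):
#             return a + b
#
#     @expose(base=Base, action='utils', method='echo')
#     def echo(value):
#         return value
#
#     # Base.configuration now maps action names to {method: element} dicts,
#     # e.g. {'Calculator': {'add': (Calculator, 'add')}, 'utils': {'echo': echo}},
#     # and several configurations can be combined with
#     # merge_configurations([Base.configuration, other.configuration])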
| Diaoul/pyextdirect | pyextdirect/configuration.py | Python | gpl-3.0 | 4,570 |
#!/usr/bin/env python
# coding=utf-8
import sys
import argparse
parser = argparse.ArgumentParser(
    description='convert a non-standard hostname like xx-xx-[1-3] to its '
                'expanded form',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Sample:
$ ./converter.py xxx-xxx-\[1-3\]
xxx-xxx-1
xxx-xxx-2
xxx-xxx-3
Tips: You can pass multiple arguments after the command, and don't forget to
escape the [ and ] characters
""")
parser.add_argument(
'hostname_pattern',
    help='hostname pattern(s) such as xxx-xxx-[1-3]',
type=str,
nargs='+')
args = parser.parse_args()
if __name__ == '__main__':
for arg in args.hostname_pattern:
        basestr = arg.split('-')
        prefix = '-'.join(basestr[:-2])
        range_li = basestr[-2:]
        start_num = int(range_li[0][1:])
        end_num = int(range_li[1][:-1])
        for i in range(start_num, end_num + 1):
            print prefix + '-' + str(i)
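# Worked example of the parsing above: for 'xxx-xxx-[1-3]',
# arg.split('-') gives ['xxx', 'xxx', '[1', '3]'], so
# prefix = 'xxx-xxx', start_num = 1 and end_num = 3,
# and the loop prints xxx-xxx-1, xxx-xxx-2 and xxx-xxx-3.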
| supersu097/Mydailytools | converter.py | Python | gpl-3.0 | 905 |
#!/usr/bin/env python
#
# Generate report in Excel format (from xml input)
#
import sys,os,shelve
import re,dfxml,fiwalk
from bc_utils import filename_from_path, is_special_file  # is_special_file assumed to live in bc_utils too
from openpyxl.workbook import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter
def bc_generate_feature_xlsx(PdfReport, data, feature_file):
wb = Workbook()
dest_filename = PdfReport.featuredir +'/'+ (filename_from_path(feature_file))[10:-3] + "xlsx"
row_idx = [2]
ws = wb.worksheets[0]
ws.title = "File Feature Information"
ws.cell('%s%s'%('A', '1')).value = '%s' % "Filename"
    ws.cell('%s%s'%('B', '1')).value = '%s' % "Feature"
    ws.cell('%s%s'%('C', '1')).value = '%s' % "Position"
linenum=0
for row in data:
# Skip the lines with known text lines to be eliminated
if (re.match("Total features",str(row))):
continue
filename = "Unknown"
feature = "Unknown"
position = "Unknown"
# Some lines in the annotated_xxx.txt have less than three
# columns where filename or feature may be missing.
if len(row) > 3:
filename = row[3]
else:
filename = "Unknown"
if len(row) > 1:
feature = row[1]
else:
feature = "Unknown"
position = row[0]
        # If it is a special file, check if the user wants it to
        # be reported. If not, exclude this from the table.
if (PdfReport.bc_config_report_special_files == False) and \
(is_special_file(filename)):
## print("D: File %s is special. So skipping" %(filename))
continue
ws.cell('%s%s'%('A', row_idx[0])).value = '%s' % filename
ws.cell('%s%s'%('B', row_idx[0])).value = '%s' % feature
ws.cell('%s%s'%('C', row_idx[0])).value = '%s' % position
row_idx[0] += 1
wb.save(filename=dest_filename)
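# --- Usage sketch (illustrative; the report object and the row layout
# [position, feature, ..., filename] are assumptions based on how the
# function reads them above) ---
#
#     class DummyReport(object):
#         featuredir = "/tmp/features"
#         bc_config_report_special_files = True
#
#     rows = [["0x1A2B", "[email protected]", None, "/evidence/mail.txt"]]
#     bc_generate_feature_xlsx(DummyReport(), rows, "annotated_email_features.txt")
#     # -> writes /tmp/features/email_features.xlsx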
| sesuncedu/bitcurator | python/bc_gen_feature_rep_xls.py | Python | gpl-3.0 | 1,974 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015, 2016 Adam.Dybbroe
# Author(s):
# Adam.Dybbroe <[email protected]>
# Janne Kotro fmi.fi
# Trygve Aspenes
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""AAPP Level-1 processing on NOAA and Metop HRPT Direct Readout data. Listens
for pytroll messages from Nimbus (NOAA/Metop file dispatch) and triggers
processing on direct readout HRPT level 0 files (full swaths - no granules at
the moment)
"""
from ConfigParser import RawConfigParser
import os
import sys
import logging
from logging import handlers
from trollsift.parser import compose
sys.path.insert(0, "trollduction/")
sys.path.insert(0, "/home/trygveas/git/trollduction-test/aapp_runner")
from read_aapp_config import read_config_file_options
from tle_satpos_prepare import do_tleing
from tle_satpos_prepare import do_tle_satpos
from do_commutation import do_decommutation
import socket
import netifaces
from helper_functions import run_shell_command
LOG = logging.getLogger(__name__)
# ----------------------------
# Default settings for logging
# ----------------------------
_DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
_DEFAULT_LOG_FORMAT = '[%(levelname)s: %(asctime)s : %(name)s] %(message)s'
# -------------------------------
# Default settings for satellites
# -------------------------------
SUPPORTED_NOAA_SATELLITES = ['NOAA-19', 'NOAA-18', 'NOAA-16', 'NOAA-15']
SUPPORTED_METOP_SATELLITES = ['Metop-B', 'Metop-A', 'Metop-C']
SUPPORTED_SATELLITES = SUPPORTED_NOAA_SATELLITES + SUPPORTED_METOP_SATELLITES
TLE_SATNAME = {'NOAA-19': 'NOAA 19', 'NOAA-18': 'NOAA 18',
'NOAA-15': 'NOAA 15',
'Metop-A': 'METOP-A', 'Metop-B': 'METOP-B',
'Metop-C': 'METOP-C'}
METOP_NAME = {'metop01': 'Metop-B', 'metop02': 'Metop-A'}
METOP_NAME_INV = {'metopb': 'metop01', 'metopa': 'metop02'}
SATELLITE_NAME = {'NOAA-19': 'noaa19', 'NOAA-18': 'noaa18',
                  'NOAA-16': 'noaa16', 'NOAA-15': 'noaa15', 'NOAA-14': 'noaa14',
'Metop-A': 'metop02', 'Metop-B': 'metop01',
'Metop-C': 'metop03'}
SENSOR_NAMES = ['amsu-a', 'amsu-b', 'mhs', 'avhrr/3', 'hirs/4']
SENSOR_NAME_CONVERTER = {
'amsua': 'amsu-a', 'amsub': 'amsu-b', 'hirs': 'hirs/4',
    'mhs': 'mhs', 'avhrr': 'avhrr/3'}
METOP_NUMBER = {'b': '01', 'a': '02'}
"""
These are the standard names used by the various AAPP decommutation scripts.
If you change these, you will also have to change the decommutation scripts.
"""
STD_AAPP_OUTPUT_FILESNAMES = {'amsua_file':'aman.l1b',
'amsub_file':'ambn.l1b',
'hirs_file':'hrsn.l1b',
'avhrr_file':'hrpt.l1b'
}
# FIXME! This variable should be put in the config file:
SATS_ONLY_AVHRR = []
from urlparse import urlparse
import posttroll.subscriber
from posttroll.publisher import Publish
from posttroll.message import Message
from trollduction.helper_functions import overlapping_timeinterval
import tempfile
from glob import glob
# import os
import shutil
# import aapp_stat
import threading
from subprocess import Popen, PIPE
import shlex
# import subrocess
from datetime import timedelta, datetime
from time import time as _time
def get_local_ips():
inet_addrs = [netifaces.ifaddresses(iface).get(netifaces.AF_INET)
for iface in netifaces.interfaces()]
ips = []
for addr in inet_addrs:
if addr is not None:
for add in addr:
ips.append(add['addr'])
return ips
def nonblock_read(output):
"""An attempt to catch any hangup in reading the output (stderr/stdout)
from subprocess"""
import fcntl
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.readline()
    except (IOError, OSError):
return ''
def reset_job_registry(objdict, key, start_end_times):
"""Remove job key from registry"""
LOG.debug("Register: " + str(objdict))
starttime, endtime = start_end_times
if key in objdict:
if objdict[key] and len(objdict[key]) > 0:
objdict[key].remove(start_end_times)
LOG.debug("Release/reset job-key " + str(key) + " " +
str(starttime) + " " + str(endtime) +
" from job registry")
LOG.debug("Register: " + str(objdict))
return
LOG.warning("Nothing to reset/release - " +
"Register didn't contain any entry matching: " +
str(key))
return
class AappLvl1Processor(object):
"""
Container for the Metop/NOAA level-1 processing based on AAPP
"""
def __init__(self, runner_config):
"""
Init with config file options
"""
self.noaa_data_out_dir = runner_config['noaa_data_out_dir']
self.metop_data_out_dir = runner_config['metop_data_out_dir']
self.noaa_run_script = runner_config['aapp_run_noaa_script']
self.metop_run_script = runner_config['aapp_run_metop_script']
self.tle_indir = runner_config['tle_indir']
self.tle_outdir = runner_config['tle_outdir']
self.tle_script = runner_config['tle_script']
self.pps_out_dir = runner_config['pps_out_dir']
self.pps_out_dir_format = runner_config['pps_out_dir_format']
self.aapp_prefix = runner_config['aapp_prefix']
self.aapp_workdir = runner_config['aapp_workdir']
self.aapp_outdir = runner_config['aapp_outdir']
self.aapp_outdir_format = runner_config['aapp_outdir_format']
self.copy_data_directories = runner_config['copy_data_directories']
self.move_data_directory = runner_config['move_data_directory']
self.use_dyn_work_dir = runner_config['use_dyn_work_dir']
self.subscribe_topics = runner_config['subscribe_topics']
self.publish_pps_format = runner_config['publish_pps_format']
self.publish_l1_format = runner_config['publish_l1_format']
self.publish_sift_format = runner_config['publish_sift_format']
self.aapp_log_files_dir = runner_config['aapp_log_files_dir']
self.aapp_log_files_backup = runner_config['aapp_log_files_backup']
self.servername = runner_config['servername']
self.dataserver = runner_config['dataserver']
self.station = runner_config['station']
self.environment = runner_config['environment']
self.locktime_before_rerun = int(
runner_config.get('locktime_before_rerun', 10))
self.passlength_threshold = int(runner_config['passlength_threshold'])
self.fullswath = True # Always a full swath (never HRPT granules)
self.working_dir = None
self.level0_filename = None
self.starttime = None
self.endtime = None
self.platform_name = "Unknown"
self.satnum = "0"
self.orbit = "00000"
self.result_files = None
self.level0files = None
self.lvl1_home = self.pps_out_dir
self.job_register = {}
self.my_env = os.environ.copy()
self.check_and_set_correct_orbit_number = False if runner_config['check_and_set_correct_orbit_number'] == 'False' else True
self.do_ana_correction = False if runner_config['do_ana_correction'] == 'False' else True
self.initialise()
def initialise(self):
"""Initialise the processor """
self.working_dir = None
self.level0_filename = None
self.starttime = None
self.endtime = None
self.platform_name = "Unknown"
self.satnum = "0"
self.orbit = "00000"
self.result_files = []
self.level0files = {}
self.out_dir_config_data = []
def cleanup_aapp_workdir(self):
"""Clean up the AAPP working dir after processing"""
filelist = glob('%s/*' % self.working_dir)
dummy = [os.remove(s) for s in filelist if os.path.isfile(s)]
filelist = glob('%s/*' % self.working_dir)
LOG.info("Number of items left after cleaning working dir = " +
str(len(filelist)))
LOG.debug("Files: " + str(filelist))
shutil.rmtree(self.working_dir)
return
def spack_aapplvl1_files(self, subd):
return spack_aapplvl1_files(self.result_files, self.lvl1_home, subd,
self.satnum)
def pack_aapplvl1_files(self, subd):
""" Copy AAPP lvl1 files to PPS source directory
from input pps sub-directory name generated by crete_pps_subdirname()
Return a dictionary with destination full path filename, sensor name and
data processing level"""
return pack_aapplvl1_files(self.result_files, self.pps_out_dir, subd,
self.satnum)
# def delete_old_log_files(self):
# """
# Clean old AAPP log files
# """
# #older_than = int(self.aapp_log_files_backup)*60*60*24
# LOG.debug("continue...")
# delete_old_dirs(self.aapp_log_files_dir,
# self.aapp_log_files_backup)
# return
def copy_aapplvl1_files(self, subd, out_dir_config_data):
"""Copy AAPP lvl1 files in to data processing level sub-directory
e.g. metop/level1b
Input directory is defined in config file metop_data_out_dir and
noaa_data_out_dir
Return a dictionary with destination full path filename, sensor name and
data processing level
"""
return copy_aapplvl1_files(self.result_files, subd, self.satnum, out_dir_config_data)
def smove_lvl1dir(self):
if len(self.result_files) == 0:
LOG.warning("No files in directory to move!")
return {}
# Get the subdirname:
path = os.path.dirname(self.result_files[0])
subd = os.path.basename(path)
LOG.debug("path = " + str(path))
LOG.debug("lvl1_home = " + str(self.lvl1_home))
try:
shutil.move(path, self.lvl1_home)
except shutil.Error:
LOG.warning("Directory already exists: " + str(subd))
        if self.orbit == '00000' or self.orbit is None:
# Extract the orbit number from the sub-dir name:
dummy, dummy, dummy, self.orbit = subd.split('_')
# Return a dict with sensor and level for each filename:
filenames = glob(os.path.join(self.lvl1_home, subd, '*'))
LOG.info(filenames)
retv = {}
for fname in filenames:
mstr = os.path.basename(fname).split('_')[0]
if mstr == 'hrpt':
lvl = '1B'
instr = 'avhrr/3'
else:
lvl = mstr[-2:].upper()
try:
instr = SENSOR_NAME_CONVERTER[mstr[0:-3]]
except KeyError:
LOG.warning("Sensor name will not be converted %s" %
str(mstr[0:-3]))
LOG.debug("mstr = " + str(mstr))
instr = mstr[0:-3]
retv[fname] = {'level': lvl, 'sensor': instr}
LOG.info(str(retv))
return retv
def move_lvl1dir(self, out_dir):
"""Move sub-directory with AAPP level-1b|c|d files
Return a dictionary with sensor and data processing level
for each filename """
if len(self.result_files) == 0:
LOG.warning("No files in directory to move!")
return {}
# Get the subdirname:
path = os.path.dirname(self.result_files[0])
subd = os.path.basename(path)
LOG.debug("path = " + str(path))
LOG.debug("out_dir = " + out_dir)
try:
shutil.move(path, out_dir)
except shutil.Error:
LOG.warning("Directory already exists: " + str(subd))
        if self.orbit == '00000' or self.orbit is None:
# Extract the orbit number from the sub-dir name:
dummy, dummy, dummy, self.orbit = subd.split('_')
filenames = glob(os.path.join(out_dir, subd, '*l1*'))
LOG.info(filenames)
retv = {}
for fname in filenames:
mstr = os.path.basename(fname).split('_')[0]
if mstr == 'hrpt':
lvl = '1b'
instr = 'avhrr/3'
else:
lvl = mstr[-2:]
try:
instr = SENSOR_NAME_CONVERTER[mstr[0:-3]]
except KeyError:
LOG.warning("Sensor name will not be converted %s",
str(mstr[0:-3]))
LOG.debug("mstr = " + str(mstr))
instr = mstr[0:-3]
retv[fname] = {'level': lvl, 'sensor': instr}
LOG.info(str(retv))
return retv
def move_aapp_log_files(self):
""" Move AAPP processing log files from AAPP working directory
in to sub-directory (PPS format).
The directory path is defined in config file (aapp_log_files)
"""
try:
filelist = glob('%s/*.log' % self.working_dir)
subd = create_pps_subdirname(self.starttime,
self.platform_name,
self.orbit)
destination = os.path.join(self.aapp_log_files_dir, subd)
LOG.debug("move_aapp_log_files destination: " + destination)
if not os.path.exists(destination):
try:
os.makedirs(destination)
except OSError:
LOG.warning("Can't create directory!")
return False # FIXME: Check!
LOG.debug(
"Created new directory for AAPP log files:" + destination)
for file_name in filelist:
LOG.debug("File_name: " + file_name)
base_filename = os.path.basename(file_name)
dst = os.path.join(destination, base_filename)
LOG.debug("dst: " + dst)
shutil.move(file_name, dst)
except OSError as err:
LOG.error("Moving AAPP log files to " +
destination + " failed ", err)
LOG.info("AAPP log files saved in to " + destination)
return
# def get_old_dirs(self, dir_path, older_than_days):
# """
# return a list of all subfolders under dirPath older than olderThanDays
# """
# older_than_days *= 86400 # convert days to seconds
# present = time.time()
# directories = []
# for root, dirs, files in os.walk(dir_path, topdown=False):
# for name in dirs:
# sub_dir_path = os.path.join(root, name)
# if (present - os.path.getmtime(sub_dir_path)) > older_than_days:
# directories.append(sub_dir_path)
# return directories
def create_scene_id(self, keyname):
# Use sat id, start and end time as the unique identifier of the scene!
if keyname in self.job_register and len(self.job_register[keyname]) > 0:
# Go through list of start,end time tuples and see if the current
# scene overlaps with any:
status = overlapping_timeinterval((self.starttime, self.endtime),
self.job_register[keyname])
if status:
LOG.warning("Processing of scene " + keyname +
" " + str(status[0]) + " " + str(status[1]) +
" with overlapping time has been"
" launched previously")
LOG.info("Skip it...")
return True
else:
LOG.debug(
"No overlap with any recently processed scenes...")
scene_id = (str(self.platform_name) + '_' +
self.starttime.strftime('%Y%m%d%H%M%S') +
'_' + self.endtime.strftime('%Y%m%d%H%M%S'))
LOG.debug("scene_id = " + str(scene_id))
return scene_id
def check_scene_id(self, scene_id):
# Check for keys representing the same scene (slightly different
# start/end times):
LOG.debug("Level-0files = " + str(self.level0files))
        time_thr = timedelta(seconds=30)  # FIXME: make this configurable
for key in self.level0files:
pltrfn, startt, endt = key.split('_')
if not self.platform_name == pltrfn:
continue
t1_ = datetime.strptime(startt, '%Y%m%d%H%M%S')
t2_ = datetime.strptime(endt, '%Y%m%d%H%M%S')
# Get the relative time overlap:
sec_inside = (
min(t2_, self.endtime) - max(t1_, self.starttime)).total_seconds()
dsec = (t2_ - t1_).total_seconds()
if dsec < 0.01:
LOG.warning(
"Something awkward with this scene: start_time = end_time!")
break
elif float(sec_inside / dsec) > 0.85:
# It is the same scene!
LOG.debug(
"It is the same scene,"
" though the file times may deviate a bit...")
scene_id = key
break
elif float(sec_inside / dsec) > 0.01:
LOG.warning("There was an overlap but probably not the " +
"same scene: Time interval = " +
"(%s, %s)",
t1_.strftime('%Y-%m-%d %H:%M:%S'),
t2_.strftime('%Y-%m-%d %H:%M:%S'))
return scene_id
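    # Worked example of the overlap test above (illustrative values): a
    # registered key 'NOAA-19_20160101120000_20160101121400' spans 840 s;
    # an incoming 12:00:30-12:14:10 pass overlaps it for 810 s, and
    # 810 / 840 = 0.96 > 0.85, so both are treated as the same scene.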
def sensors_to_process(self, msg, sensors):
LOG.debug("Sensor = " + str(msg.data['sensor']))
LOG.debug("type: " + str(type(msg.data['sensor'])))
if isinstance(msg.data['sensor'], (str, unicode)):
sensors.append(msg.data['sensor'])
elif isinstance(msg.data['sensor'], (list, set, tuple)):
sensors.extend(msg.data['sensor'])
else:
sensors = []
LOG.warning('Failed interpreting sensor(s)!')
LOG.info("Sensor(s): " + str(sensors))
sensor_ok = False
for sensor in sensors:
if sensor in SENSOR_NAMES:
sensor_ok = True
break
if not sensor_ok:
LOG.info("No required sensors....")
return False
return True
def available_sensors(self, msg, sensors, scene_id):
if scene_id not in self.level0files:
LOG.debug("Reset level-0 files: scene_id = " + str(scene_id))
self.level0files[scene_id] = []
for sensor in sensors:
item = (self.level0_filename, sensor)
if item not in self.level0files[scene_id]:
self.level0files[scene_id].append(item)
LOG.debug("Appending item to list: " + str(item))
else:
LOG.debug("item already in list: " + str(item))
if len(self.level0files[scene_id]) < 4 and msg.data.get("variant") != "EARS":
LOG.info("Not enough sensor data available yet. " +
"Level-0 files = " +
str(self.level0files[scene_id]))
return False
else:
LOG.info("Level 0 files ready: " + str(self.level0files[scene_id]))
return True
def run(self, msg):
"""Start the AAPP level 1 processing on either a NOAA HRPT file or a
set of Metop HRPT files"""
try:
# Avoid 'collections' and other stuff:
if msg is None or msg.type != 'file':
return True
LOG.debug("Received message: " + str(msg))
# msg.data['platform_name'] = "NOAA-19"
LOG.debug(
"Supported Metop satellites: " + str(SUPPORTED_METOP_SATELLITES))
LOG.debug(
"Supported NOAA satellites: " + str(SUPPORTED_NOAA_SATELLITES))
try:
if (msg.data['platform_name'] not in
SUPPORTED_NOAA_SATELLITES and
msg.data['platform_name'] not in
SUPPORTED_METOP_SATELLITES):
LOG.info("Not a NOAA/Metop scene. Continue...")
return True
# FIXME:
except Exception, err:
LOG.warning(str(err))
return True
self.platform_name = msg.data['platform_name']
LOG.debug("Satellite = " + str(self.platform_name))
LOG.debug("")
LOG.debug("\tMessage:")
LOG.debug(str(msg))
urlobj = urlparse(msg.data['uri'])
url_ip = socket.gethostbyname(urlobj.netloc)
if urlobj.netloc and (url_ip not in get_local_ips()):
LOG.warning("Server %s not the current one: %s",
str(urlobj.netloc),
socket.gethostname())
return True
LOG.info("Ok... " + str(urlobj.netloc))
self.servername = urlobj.netloc
LOG.info("Sat and Sensor: " + str(msg.data['platform_name'])
+ " " + str(msg.data['sensor']))
self.starttime = msg.data['start_time']
try:
self.endtime = msg.data['end_time']
except KeyError:
LOG.warning(
"No end_time in message! Guessing start_time + 14 minutes...")
self.endtime = msg.data[
'start_time'] + timedelta(seconds=60 * 14)
# Test if the scene is longer than minimum required:
pass_length = self.endtime - self.starttime
if pass_length < timedelta(seconds=60 * self.passlength_threshold):
LOG.info("Pass is too short: Length in minutes = %6.1f",
pass_length.seconds / 60.0)
return True
            # Due to the different ways of starting the orbit counting, it might be
            # necessary to correct the orbit number.
            #
            # Default is to check and correct if necessary.
            # Add configuration to turn it off.
start_orbnum = None
if self.check_and_set_correct_orbit_number:
try:
import pyorbital.orbital as orb
sat = orb.Orbital(
TLE_SATNAME.get(self.platform_name, self.platform_name), tle_file='')
start_orbnum = sat.get_orbit_number(self.starttime)
except ImportError:
LOG.warning("Failed importing pyorbital, " +
"cannot calculate orbit number")
except AttributeError:
LOG.warning("Failed calculating orbit number using pyorbital")
LOG.warning("platform name = " +
str(TLE_SATNAME.get(self.platform_name,
self.platform_name)) +
" " + str(self.platform_name))
LOG.info(
"Orbit number determined from pyorbital = " + str(start_orbnum))
try:
self.orbit = int(msg.data['orbit_number'])
except KeyError:
LOG.warning("No orbit_number in message! Set to none...")
self.orbit = None
if self.check_and_set_correct_orbit_number:
if start_orbnum and self.orbit != start_orbnum:
LOG.warning("Correcting orbit number: Orbit now = " +
str(start_orbnum) + " Before = " + str(self.orbit))
self.orbit = start_orbnum
else:
LOG.debug("Orbit number in message determined " +
"to be okay and not changed...")
if self.platform_name in SUPPORTED_METOP_SATELLITES:
metop_id = SATELLITE_NAME[self.platform_name].split('metop')[1]
self.satnum = METOP_NUMBER.get(metop_id, metop_id)
else:
self.satnum = SATELLITE_NAME[self.platform_name].strip('noaa')
year = self.starttime.year
keyname = str(self.platform_name)
LOG.debug("Keyname = " + str(keyname))
LOG.debug("Start: job register = " + str(self.job_register))
scene_id = self.create_scene_id(keyname)
            # This means (from create_scene_id) skipping this scene_id, as a scene
            # with overlapping time was already processed within a configured interval.
            # See create_scene_id for detailed info.
            if scene_id is True:
return True
scene_id = self.check_scene_id(scene_id)
LOG.debug("scene_id = " + str(scene_id))
if scene_id in self.level0files:
LOG.debug("Level-0 files = " + str(self.level0files[scene_id]))
else:
LOG.debug("scene_id = %s: No level-0 files yet...", str(scene_id))
self.level0_filename = urlobj.path
dummy, fname = os.path.split(self.level0_filename)
sensors = []
if not self.sensors_to_process(msg, sensors):
return True
if not self.available_sensors(msg, sensors, scene_id):
return True
            # Need to do this here to add up all sensors for Metop
            for (_fname, instr) in self.level0files[scene_id]:
                if instr not in sensors:
                    LOG.debug("Adding instrument to sensors list: {}".format(instr))
                    sensors.append(str(instr))
if not self.working_dir and self.use_dyn_work_dir:
try:
self.working_dir = tempfile.mkdtemp(dir=self.aapp_workdir)
except OSError:
self.working_dir = tempfile.mkdtemp()
finally:
LOG.info("Create new working dir...")
elif not self.working_dir:
self.working_dir = self.aapp_workdir
LOG.info("Working dir = " + str(self.working_dir))
# AAPP requires ENV variables
#my_env = os.environ.copy()
#my_env['AAPP_PREFIX'] = self.aapp_prefix
if self.use_dyn_work_dir:
self.my_env['DYN_WRK_DIR'] = self.working_dir
LOG.info("working dir: self.working_dir = " + str(self.working_dir))
LOG.info("Using AAPP_PREFIX:" + str(self.aapp_prefix))
for envkey in self.my_env:
LOG.debug("ENV: " + str(envkey) + " " + str(self.my_env[envkey]))
aapp_outdir_config_format = ""
if self.platform_name in SUPPORTED_SATELLITES:
LOG.info("This is a supported scene. Start the AAPP processing!")
# FIXME: LOG.info("Process the scene " +
# self.platform_name + self.orbit)
# TypeError: coercing to Unicode: need string or buffer, int
# found
LOG.info("Process the file " + str(self.level0_filename))
"""
COnfiguration for the various AAPP processing
This dict is passed to each module doing the actual processing.
The processing of each level is overridden by the available sensors retrived from the message
Meaning if processing of avhrr is set to True in the configuration but is not a mandatory sensor,
nor contained in the sensor list, then the processing av avhrr is overridden and set to False.
"""
process_config = {}
try:
process_config['platform'] = SATELLITE_NAME.get(self.platform_name,self.platform_name)
process_config['orbit_number'] = int(msg.data['orbit_number'])
process_config['working_directory'] = self.working_dir
process_config['process_amsua'] = False
process_config['process_amsub'] = False
process_config['process_hirs'] = False
process_config['process_avhrr'] = False
process_config['process_msu'] = False
process_config['process_dcs'] = False
process_config['process_ana'] = self.do_ana_correction
process_config['a_tovs'] = list("ATOVS")
process_config['hirs_file'] = STD_AAPP_OUTPUT_FILESNAMES['hirs_file']
process_config['amsua_file'] = STD_AAPP_OUTPUT_FILESNAMES['amsua_file']
process_config['amsub_file'] = STD_AAPP_OUTPUT_FILESNAMES['amsub_file']
process_config['avhrr_file'] = STD_AAPP_OUTPUT_FILESNAMES['avhrr_file']
process_config['calibration_location'] = "-c -l"
except KeyError as ke:
LOG.error("Could not initialize one or more process config parameters: {}.".format(ke))
return True #Meaning: can not process this.
                LOG.debug(str(self.level0files[scene_id]))
if 'metop' in process_config['platform']:
sensor_filename = {}
for (fname, instr) in self.level0files[scene_id]:
sensor_filename[instr] = fname
for instr in sensor_filename.keys():
print "instr: ",instr
if instr not in SENSOR_NAMES:
LOG.error("Sensor name mismatch! name = " + str(instr))
return True
if "avhrr/3" in sensor_filename:
process_config['input_avhrr_file'] = sensor_filename['avhrr/3']
if "amsu-a" in sensor_filename:
process_config['input_amsua_file'] = sensor_filename['amsu-a']
if "mhs" in sensor_filename:
process_config['input_amsub_file'] = sensor_filename['mhs']
if "hirs/4" in sensor_filename:
process_config['input_hirs_file'] = sensor_filename['hirs/4']
_platform = SATELLITE_NAME.get(self.platform_name,self.platform_name)
#DO tle
tle_proc_ok = True
if not do_tleing(self.starttime, _platform, self.working_dir, self.tle_indir):
LOG.warning("Tleing failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
tle_proc_ok = False
#DO tle satpos
satpos_proc_ok = True
if not do_tle_satpos(self.starttime, _platform, self.tle_indir):
LOG.warning("Tle satpos failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
satpos_proc_ok = False
#DO decom
decom_proc_ok = True
if not do_decommutation(process_config, sensors, self.starttime, self.level0_filename):
LOG.warning("The decommutaion failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
decom_proc_ok = False
return True #Meaning can not complete this and skip the rest of the processing
#DO HIRS
hirs_proc_ok = True
from do_hirs_calibration import do_hirs_calibration
if not do_hirs_calibration(process_config, self.starttime):
LOG.warning("Tle hirs calibration and location failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
hirs_proc_ok = False
#DO ATOVS
atovs_proc_ok = True
from do_atovs_calibration import do_atovs_calibration
if not do_atovs_calibration(process_config, self.starttime):
LOG.warning("The (A)TOVS calibration and location failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
atovs_proc_ok = False
#DO AVHRR
avhrr_proc_ok = True
from do_avhrr_calibration import do_avhrr_calibration
if not do_avhrr_calibration(process_config, self.starttime):
LOG.warning("The avhrr calibration and location failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
avhrr_proc_ok = False
#Do Preprocessing
atovpp_proc_ok = True
from do_atovpp_and_avh2hirs_processing import do_atovpp_and_avh2hirs_processing
if not do_atovpp_and_avh2hirs_processing(process_config, self.starttime):
LOG.warning("The preprocessing atovin, atopp and/or avh2hirs failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
atovpp_proc_ok = False
#DO IASI
iasi_proc_ok = True
from do_iasi_calibration import do_iasi_calibration
if not do_iasi_calibration(process_config, self.starttime):
LOG.warning("The iasi calibration and location failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
iasi_proc_ok = False
#DO ANA
ana_proc_ok = True
from do_ana_correction import do_ana_correction
if not do_ana_correction(process_config, self.starttime):
LOG.warning("The ana attitude correction failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
ana_proc_ok = False
#FIXME
#Need a general check to fail run of some of the AAPP scripts fails fatal.
#This is fallback choice if configured dir format fails
aapp_outdir_pps_format = os.path.join(self.aapp_outdir,"{0:}_{1:%Y%m%d}_{1:%H%M}_{2:05d}"\
.format(SATELLITE_NAME.get(self.platform_name, self.platform_name),
self.starttime,
int(msg.data['orbit_number'])))
                # Make a copy of msg.data so new variables can be added to it as needed
                self.out_dir_config_data = msg.data.copy()
self.out_dir_config_data['satellite_name'] = SATELLITE_NAME.get(self.platform_name, self.platform_name)
self.out_dir_config_data['orbit_number'] = int(msg.data['orbit_number'])
try:
aapp_outdir_config_format = compose(self.aapp_outdir_format,self.out_dir_config_data)
except KeyError as ke:
LOG.warning("Unknown Key used in format: {}. Check spelling and/or availability.".format(self.aapp_outdir_format))
LOG.warning("Available keys are:")
for key in self.out_dir_config_data:
LOG.warning("{} = {}".format(key,self.out_dir_config_data[key]))
LOG.warning("Will continue with directory name format as used by SAFNWC PPS...")
aapp_outdir_config_format = aapp_outdir_pps_format
except ValueError as ve:
LOG.warning("value error : {}".format(ve))
LOG.warning("aapp_outdir_format : {}".format(self.aapp_outdir_format))
LOG.warning("out_dir_config_data: {}".format(self.out_dir_config_data))
aapp_outdir_config_format = os.path.join(self.aapp_outdir,aapp_outdir_config_format)
LOG.info("aapp outdir config format: " + aapp_outdir_config_format)
if not os.path.exists(aapp_outdir_config_format):
LOG.info("Create selected aapp_outdir: {}".format(aapp_outdir_config_format))
try:
os.mkdir(aapp_outdir_config_format)
except OSError as oe:
LOG.error("Could not create directory: {} with {}".format(aapp_outdir_config_format,oe))
else:
#FIXME Should we delete this directory if exists?
LOG.warning("The selected AAPP outdir for this processing exists already: " + aapp_outdir_config_format +". This can cause problems ....")
                # Rename the standard AAPP output file names to useful ones
                # and move the files to their final location.
from rename_aapp_filenames import rename_aapp_filenames
if not rename_aapp_filenames(process_config, self.starttime, aapp_outdir_config_format):
LOG.warning("The rename of standard aapp filenames to practical ones failed for some reason. It might be that the processing can continue")
LOG.warning("Please check the previous log carefully to see if this is an error you can accept.")
else:
LOG.warning("This satellite: {}, is not supported.".format(self.platform_name))
LOG.warning("Must be one of: {}".format("".join(SUPPORTED_SATELLITES)))
# Add to job register to avoid this to be run again
if keyname not in self.job_register.keys():
self.job_register[keyname] = []
self.job_register[keyname].append((self.starttime, self.endtime))
LOG.debug("End: job register = " + str(self.job_register))
# Block any future run on this scene for time_to_block_before_rerun
# (e.g. 10) minutes from now:
t__ = threading.Timer(self.locktime_before_rerun,
reset_job_registry, args=(self.job_register,
str(self.platform_name),
(self.starttime,
self.endtime)))
t__.start()
LOG.debug("After timer call: job register = " + str(self.job_register))
LOG.info("Ready with AAPP level-1 processing on NOAA scene: " + str(fname))
LOG.info("working dir: self.working_dir = " + str(self.working_dir))
            globstr = aapp_outdir_config_format
LOG.debug("Glob string = " + str(globstr))
dirlist = glob(globstr)
if len(dirlist) != 1:
LOG.error("Cannot find output files in working dir!")
self.result_files = []
else:
self.result_files = get_aapp_lvl1_files(dirlist[0], msg.data['platform_name'])
LOG.info("Output files: " + str(self.result_files))
except:
LOG.exception("Failed in run...")
raise
return False
def aapp_rolling_runner(runner_config):
"""The AAPP runner. Listens and triggers processing on Metop/NOAA HRPT
level 0 files dispatched from reception."""
LOG.info("*** Start the NOAA/Metop HRPT AAPP runner:")
LOG.info("-" * 50)
os.environ["AAPP_PREFIX"] = runner_config['aapp_prefix']
aapp_atovs_conf = runner_config['aapp_prefix'] + "/ATOVS_ENV7"
    status, returncode, out, err = run_shell_command("bash -c \"source {}; env\"".format(aapp_atovs_conf))
if not status:
print "Command failed"
else:
for line in out.splitlines():
if line:
(key,_,value) = line.partition("=")
os.environ[key]=value
# init
aapp_proc = AappLvl1Processor(runner_config)
with posttroll.subscriber.Subscribe('',
aapp_proc.subscribe_topics,
True) as subscr:
with Publish('aapp_runner', 0) as publisher:
while True:
skip_rest = False
aapp_proc.initialise()
for msg in subscr.recv(timeout=90):
status = aapp_proc.run(msg)
if not status:
#skip_rest = True
break # end the loop and reinitialize!
if skip_rest:
skip_rest = False
continue
tobj = aapp_proc.starttime
LOG.info("Time used in sub-dir name: " +
str(tobj.strftime("%Y-%m-%d %H:%M")))
#Start internal distribution of data
#Copy data to destinations if configured
if runner_config['copy_data_directories']:
for dest_dir in runner_config['copy_data_directories'].split(','):
level1_files = aapp_proc.copy_aapplvl1_files(dest_dir, aapp_proc.out_dir_config_data)
publish_level1(publisher,
aapp_proc.servername,
aapp_proc.station,
aapp_proc.environment,
aapp_proc.publish_pps_format,
level1_files,
aapp_proc.orbit,
aapp_proc.starttime,
aapp_proc.endtime,
msg.data,
aapp_proc.publish_sift_format)
#move data to last destination if configured
if runner_config['move_data_directory']:
try:
move_dir = compose(runner_config['move_data_directory'],aapp_proc.out_dir_config_data)
except KeyError as ke:
LOG.warning("Unknown Key used in format: {}. Check spelling and/or availability.".format(runner_config['move_data_directory']))
LOG.warning("Available keys are:")
                        for key in aapp_proc.out_dir_config_data:
LOG.warning("{} = {}".format(key,aapp_proc.out_dir_config_data[key]))
LOG.error("Skipping this directory ... ")
continue
except TypeError as te:
LOG.error("Type Error: {}".format(te))
LOG.debug("Move into directory: {}".format(runner_config['move_data_directory']))
level1_files = aapp_proc.move_lvl1dir(runner_config['move_data_directory'])
publish_level1(publisher,
aapp_proc.servername,
aapp_proc.station,
aapp_proc.environment,
aapp_proc.publish_pps_format,
level1_files,
aapp_proc.orbit,
aapp_proc.starttime,
aapp_proc.endtime,
msg.data,
aapp_proc.publish_sift_format)
if False:
# Site specific processing
LOG.info("Station = " + str(aapp_proc.station))
if ('norrkoping' in aapp_proc.station or
'nkp' in aapp_proc.station):
if aapp_proc.platform_name.startswith('Metop'):
subd = create_pps_subdirname(tobj, aapp_proc.platform_name,
aapp_proc.orbit)
LOG.info("Create sub-directory for level-1 files: " +
str(subd))
level1_files = aapp_proc.smove_lvl1dir()
# level1_files = aapp_proc.spack_aapplvl1_files(subd)
else:
LOG.info("Move sub-directory with NOAA level-1 files")
LOG.debug(
"Orbit BEFORE call to move_lvl1dir: " + str(aapp_proc.orbit))
level1_files = aapp_proc.smove_lvl1dir()
LOG.debug(
"Orbit AFTER call to smove_lvl1dir: " + str(aapp_proc.orbit))
publish_level1(publisher,
aapp_proc.servername,
aapp_proc.station,
aapp_proc.environment,
aapp_proc.publish_pps_format,
level1_files,
aapp_proc.orbit,
aapp_proc.starttime,
aapp_proc.endtime,
msg.data)
elif (aapp_proc.station == 'helsinki' or
aapp_proc.station == 'kumpula'):
data_out_dir = ""
LOG.debug("aapp_proc.platform_name" +
aapp_proc.platform_name)
if (aapp_proc.platform_name.startswith('Metop') and
aapp_proc.metop_data_out_dir):
data_out_dir = aapp_proc.metop_data_out_dir
if (aapp_proc.platform_name.startswith('NOAA') and
aapp_proc.noaa_data_out_dir):
data_out_dir = aapp_proc.noaa_data_out_dir
LOG.debug("DATA_OUT_DIR:" + data_out_dir)
if aapp_proc.pps_out_dir:
subd = create_pps_subdirname(tobj,
aapp_proc.platform_name,
aapp_proc.orbit)
LOG.info("Created PPS sub-directory "
"for level-1 files: " + str(subd))
level1_files = aapp_proc.pack_aapplvl1_files(subd)
if level1_files is not None:
LOG.debug("PPS_OUT_DIR: level1_files: ")
for file_line in level1_files:
LOG.debug(str(file_line))
publish_level1(publisher,
aapp_proc.servername,
aapp_proc.station,
aapp_proc.environment,
aapp_proc.publish_pps_format,
level1_files,
aapp_proc.orbit,
aapp_proc.starttime,
aapp_proc.endtime,
msg.data)
else:
LOG.error("No files copied to " + subd)
# FIXED: If 'NoneType' object is not iterable
# = no files to publish!
if data_out_dir:
LOG.info("Copying level-1 files to " + data_out_dir)
level1_files = aapp_proc.copy_aapplvl1_files(
data_out_dir)
if level1_files is not None:
LOG.debug("aapp_proc.publish_l1_format:" +
aapp_proc.publish_l1_format)
LOG.debug("level1_files: ")
publish_level1(publisher,
aapp_proc.servername,
aapp_proc.station,
aapp_proc.environment,
aapp_proc.publish_l1_format,
level1_files,
aapp_proc.orbit,
aapp_proc.starttime,
aapp_proc.endtime,
msg.data)
else:
LOG.error("Nofile copied to " + data_out_dir)
#End site specific part.
if (aapp_proc.working_dir and
not aapp_proc.aapp_log_files_dir == ""):
LOG.info("Move AAPP log files")
aapp_proc.move_aapp_log_files()
LOG.info("Cleaning old log files...")
path_to_clean = aapp_proc.aapp_log_files_dir
older_than_days = int(aapp_proc.aapp_log_files_backup)
cleanup(older_than_days, path_to_clean)
LOG.info("Cleaning up directory " +
str(aapp_proc.working_dir))
# aapp_proc.cleanup_aapp_workdir()
elif aapp_proc.working_dir:
LOG.info("NOT Cleaning up directory %s",
aapp_proc.working_dir)
# aapp_proc.cleanup_aapp_workdir()
#LOG.info("Do the tleing now that aapp has finished...")
#do_tleing(aapp_proc.aapp_prefix,
# aapp_proc.tle_indir, aapp_proc.tle_outdir,
# aapp_proc.tle_script)
#LOG.info("...tleing done")
return
def publish_level1(publisher,
                   server,
                   station,
                   env,
                   publish_format,
                   result_files,
                   orbit, start_t, end_t, mda, publish_sift_format):
"""Publish the messages that AAPP lvl1 files are ready
"""
# Now publish:
for key in result_files:
resultfile = key
LOG.debug("File: " + str(os.path.basename(resultfile)))
filename = os.path.split(resultfile)[1]
to_send = mda.copy()
to_send['uri'] = ('ssh://%s%s' % (server, resultfile))
to_send['filename'] = filename
to_send['uid'] = filename
to_send['sensor'] = result_files[key]['sensor']
to_send['orbit_number'] = int(orbit)
to_send['format'] = publish_format
to_send['type'] = 'Binary'
to_send['data_processing_level'] = result_files[key]['level'].upper()
LOG.debug('level in message: ' + str(to_send['data_processing_level']))
to_send['start_time'], to_send['end_time'] = start_t, end_t
to_send['station'] = station
to_send['env'] = env
try:
publish_to = compose(publish_sift_format,to_send)
except KeyError as ke:
LOG.warning("Unknown Key used in format: {}. Check spelling and/or availability.".format(publish_sift_format))
LOG.warning("Available keys are:")
for key in to_send:
LOG.warning("{} = {}".format(key,to_send[key]))
LOG.error("Can not publish these data!")
return
except ValueError as ve:
LOG.error("Value Error: {}".format(ve))
return
LOG.debug("Publish to:{}".format(publish_to))
msg = Message(publish_to, "file", to_send).encode()
#msg = Message('/' + str(to_send['format']) + '/' +
# str(to_send['data_processing_level']) +
# '/' + station + '/' + env +
# '/polar/direct_readout/',
# "file", to_send).encode()
LOG.debug("sending: " + str(msg))
publisher.send(msg)
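# Illustrative example of the compose() step above, mirroring the legacy topic
# in the commented-out Message call (the actual publish_sift_format comes from
# the runner configuration):
#
#     publish_sift_format = '/{format}/{data_processing_level}/{station}/{env}/polar/direct_readout/'
#     # with format='PPS', data_processing_level='1B', station='norrkoping'
#     # and env='oper', compose(publish_sift_format, to_send) yields
#     # '/PPS/1B/norrkoping/oper/polar/direct_readout/'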
def get_aapp_lvl1_files(level1_dir, satid):
"""Get the aapp level-1 filenames for the NOAA/Metop direct readout
swath"""
if satid in SUPPORTED_METOP_SATELLITES:
lvl1_files = (glob(os.path.join(level1_dir, '*.l1b')) +
glob(os.path.join(level1_dir, '*.l1c')) +
glob(os.path.join(level1_dir, '*.l1d')))
else:
lvl1_files = (glob(os.path.join(level1_dir, "*%s*.l1b"
% (SATELLITE_NAME.get(satid, satid)))) +
glob(os.path.join(level1_dir, "*%s*.l1c"
% (SATELLITE_NAME.get(satid, satid)))) +
glob(os.path.join(level1_dir, "*%s*.l1d"
% (SATELLITE_NAME.get(satid, satid)))))
return lvl1_files
# FIXME:
# if MODE == 'SMHI_MODE':
# if satid in SUPPORTED_METOP_SATELLITES:
# # Level 1b/c data:
# lvl1_files = (glob(os.path.join(level1_dir, '*.l1b')) +
# glob(os.path.join(level1_dir, '*.l1c')) +
# glob(os.path.join(level1_dir, '*.l1d')))
# else:
# # SUBDIR example: noaa18_20140826_1108_47748
# LOG.debug(
# 'level1_dir = ' + str(level1_dir) + ' satid = ' + str(satid))
# # /home/users/satman/tmp/hrpt_noaa18_20150421_1425_51109.l1b
# matchstr = os.path.join(
# level1_dir, + '*' + SATELLITE_NAME.get(satid, satid) + '_????????_????_?????/') + '*'
# LOG.debug(matchstr)
# lvl1_files = glob(matchstr)
# LOG.debug('get_aapp_lvl1_files: ' + str(lvl1_files))
# if MODE == 'test':
# # AAPP convention
#         LOG.debug('get_aapp_lvl1_files: ' + str(lvl1_files))
def create_pps_subdirname(obstime, satid, orbnum):
"""Generate the pps subdirectory name from the start observation time, ex.:
'noaa19_20120405_0037_02270'"""
return (SATELLITE_NAME.get(satid, satid) +
obstime.strftime('_%Y%m%d_%H%M_') +
'%.5d' % orbnum)
def spack_aapplvl1_files(aappfiles, base_dir, subdir, satnum):
"""Copy the AAPP lvl1 files to the sub-directory under the pps directory
structure"""
# aman => amsua
# ambn => amsub (satnum <= 17)
# ambn => mhs (satnum > 17)
# hrsn => hirs
# msun => msu
# Store the sensor name and the level corresponding to the file:
sensor_and_level = {}
name_converter = {'avhr': 'avhrr',
'aman': 'amsua',
'hrsn': 'hirs',
'msun': 'msu',
'hrpt': 'hrpt'
}
not_considered = ['dcsn', 'msun']
path = os.path.join(base_dir, subdir)
if not os.path.exists(path):
os.mkdir(path)
LOG.info("Number of AAPP lvl1 files: " + str(len(aappfiles)))
# retvl = []
for aapp_file in aappfiles:
fname = os.path.basename(aapp_file)
in_name, ext = fname.split('.')
if in_name in not_considered:
continue
if in_name == 'ambn':
instr = 'mhs'
try:
if int(satnum) <= 17:
instr = 'amsub'
except ValueError:
pass
firstname = instr + ext
level = ext.strip('l').upper()
elif in_name == 'hrpt':
firstname = name_converter.get(in_name)
instr = 'avhrr/3'
# Could also be 'avhrr'. Will anyhow be converted below...
level = '1B'
else:
instr = name_converter.get(in_name, in_name)
LOG.debug("Sensor = " + str(instr) + " from " + str(in_name))
firstname = instr + ext
level = ext.strip('l').upper()
newfilename = os.path.join(path, "%s_%s.%s" % (firstname,
subdir, ext))
LOG.info("Copy aapp-file to destination: " + newfilename)
shutil.copy(aapp_file, newfilename)
# retvl.append(newfilename)
sensor_and_level[newfilename] = {
'sensor': SENSOR_NAME_CONVERTER.get(instr, instr),
'level': level}
return sensor_and_level
# return retvl
def pack_aapplvl1_files(aappfiles, base_dir, subdir, satnum):
"""
Copy the AAPP lvl1 files to the sub-directory under the pps directory
structure
"""
# aman => amsua
# ambn => amsub (satnum <= 17)
# ambn => mhs (satnum > 17)
# hrsn => hirs
# msun => msu
# Store the sensor name and the level corresponding to the file:
sensor_and_level = {}
# name_converter = {'avhr': 'avhrr',
# 'aman': 'amsua',
# 'hrsn': 'hirs',
# 'msun': 'msu',
# 'hrpt': 'hrpt'
# }
# not_considered = ['dcsn', 'msun']
LOG.debug(" pack_aapplvl1_files subdir: " + subdir)
path = os.path.join(base_dir, subdir)
LOG.debug("path: " + path)
if not os.path.exists(path):
LOG.debug("mkdir")
os.makedirs(path)
# FIXME: OSError: [Errno 2] No such file or directory:
LOG.info("Number of AAPP lvl1 files: " + str(len(aappfiles)))
for aapp_file in aappfiles:
LOG.debug("Processing aapp_file: " + aapp_file)
# fname = os.path.basename(aapp_file)
filename = os.path.basename(aapp_file)
in_name, ext = filename.split('.')
#
# if in_name in not_considered:
# LOG.debug("File NOT consired: " + in_name)
# continue
if in_name.startswith('mhs'):
instr = 'mhs'
try:
                if 15 <= int(satnum) <= 17:
instr = 'amsub'
except ValueError:
pass
# firstname = instr + ext
# level = ext.strip('l')
elif in_name.startswith('hrpt'):
# firstname = name_converter.get(in_name)
instr = 'avhrr/3'
# Could also be 'avhrr'. Will anyhow be converted below...
# level = '1b'
elif in_name.startswith('hirs'):
instr = 'hirs'
elif in_name.startswith('amsua'):
instr = 'amsua'
elif in_name.startswith('amsub'):
instr = 'amsub'
else:
LOG.debug("File not consired: " + filename)
continue
# instr = name_converter.get(in_name, in_name)
# LOG.debug("Sensor = " + str(instr) + " from " + str(in_name))
# firstname = instr + ext
# level = ext.strip('l')
level = ext.strip('l')
# LOG.debug("Firstname " + firstname)
# newfilename = os.path.join(path, "%s_%s.%s" % (firstname,
# subdir, ext))
newfilename = os.path.join(path, filename)
LOG.info("Copy aapp-file to destination: " + newfilename)
shutil.copy(aapp_file, newfilename)
sensor_and_level[newfilename] = {
'sensor': SENSOR_NAME_CONVERTER.get(instr, instr),
'level': level}
return sensor_and_level
# AAPP output:
# METOP:
# hrpt_M01_20150428_1857_13540.l1b amsual1b_M01_20150428_1857_13540.l1b
# NOAA:
# hrpt_noaa18_20150428_1445_51208.l1b hirsl1b_noaa18_20150428_1445_51208.l1b
#
def copy_aapplvl1_files(aappfiles, output_data_basepath, satnum, out_dir_config_data):
"""
Copy AAPP lvl1 files to the sub-directories (level1b,
level1c, level1d)
Metop data under the metop_data_out
and in case of Noaa data under the directory noaa_data_out
Output format is defined in scripts AAPP_RUN_NOAA and AAPP_RUN_METOP
"""
LOG.info("Start copy level1 files to directory")
# Store the sensor name and the level corresponding to the file:
sensor_and_level = {}
# name_converter = {'avhr': 'avhrr',
# 'aman': 'amsua',
# 'hrsn': 'hirs',
# 'msun': 'msu',
# 'hrpt': 'hrpt'
# }
dir_name_converter = {'l1b': 'level1b',
'l1c': 'level1c',
'l1d': 'level1d'
}
# not_considered = ['dcsn', 'msun']
if len(aappfiles) == 0:
LOG.warning("No files in input directory to copy!")
return
errors = []
for aapp_file in aappfiles:
filename = os.path.basename(aapp_file)
in_name, ext = filename.split('.')
LOG.debug("in_name: " + in_name)
# if in_name in not_considered:
# LOG.debug("File NOT consired:" + in_name)
# continue
if in_name.startswith('mhs'):
instr = 'mhs'
try:
                if 15 <= int(satnum) <= 17:
instr = 'amsub'
except ValueError:
pass
# firstname = instr + ext
# level = ext.strip('l')
elif in_name.startswith('hrpt'):
# firstname = name_converter.get(in_name)
instr = 'avhrr/3'
# Could also be 'avhrr'. Will anyhow be converted below...
# level = '1b'
elif in_name.startswith('hirs'):
instr = 'hirs'
elif in_name.startswith('amsua'):
instr = 'amsua'
elif in_name.startswith('amsub'):
instr = 'amsub'
else:
LOG.debug("File not consired:" + filename)
continue
# level = '1c'
# instr = name_converter.get(in_name, in_name)
LOG.debug("Sensor = " + str(instr) + " from " + str(in_name))
# firstname = instr + ext
# level = ext.strip('l')
level = ext.strip('l')
# LOG.debug("Firstname " + firstname)
out_dir_config_data['level_of_data'] = dir_name_converter.get(ext)
try:
directory = compose(output_data_basepath, out_dir_config_data)
except KeyError as ke:
LOG.warning("Unknown Key used in format: {}. Check spelling and/or availability.".format(output_data_basepath))
LOG.warning("Available keys are:")
for key in out_dir_config_data:
LOG.warning("{} = {}".format(key,out_dir_config_data[key]))
LOG.error("Skipping this directory ... ")
return
LOG.debug("Copy into directory: {}".format(directory))
if not os.path.exists(directory):
LOG.info("Create new directory:" + directory)
try:
os.makedirs(directory)
except OSError as err:
# FIXME: error or fatal?
LOG.error("Couldn't make new directory " + directory + err)
return
else:
LOG.info("Directory already exists.")
destination_file = os.path.join(directory, filename)
LOG.debug("Destination_file: " + destination_file)
try:
shutil.copy(aapp_file, destination_file)
except (IOError, os.error) as err:
errors.append((aapp_file, destination_file, str(err)))
            LOG.error(in_name + " copy failed: %s", err.strerror)
# except Error as err:
# errors.extend(err.args[0])
if errors:
LOG.error("Too many errors!")
sensor_and_level[destination_file] = {
'sensor': SENSOR_NAME_CONVERTER.get(instr, instr),
'level': level}
LOG.debug("--------------------------")
for key in sensor_and_level:
LOG.debug("Filename: " + key)
LOG.info("All files copied.")
return sensor_and_level
def read_arguments():
"""
Read command line arguments
Return
name of the station, environment, config file and log file
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_file',
type=str,
dest='config_file',
default='',
help="The file containing " +
"configuration parameters e.g. aapp_runner.cfg")
parser.add_argument("-s", "--station",
help="Name of the station",
dest="station",
type=str,
default="unknown")
parser.add_argument("-e", "--environment",
dest="environment",
type=str,
help="Name of the environment (e.g. dev, test, oper)")
parser.add_argument("-v", "--verbose",
help="print debug messages too",
action="store_true")
parser.add_argument("-l", "--log", help="File to log to",
type=str,
default=None)
args = parser.parse_args()
if args.config_file == '':
print "Configuration file required! aapp_runner.py <file>"
sys.exit()
if args.station == '':
print "Station required! Use command-line switch -s <station>"
sys.exit()
else:
station = args.station.lower()
if not args.environment:
print ("Environment required! " +
"Use command-line switch -e <environment> e.g. de, test")
sys.exit()
else:
env = args.environment.lower()
if 'template' in args.config_file:
print "Template file given as master config, aborting!"
sys.exit()
return station, env, args.config_file, args.log
def remove(path):
"""
Remove the file or directory
"""
if os.path.isdir(path):
try:
os.rmdir(path)
LOG.debug("Removing dir: " + path)
except OSError:
LOG.warning("Unable to remove folder: " + path)
else:
try:
if os.path.exists(path):
LOG.debug("Removing file:" + path)
os.remove(path)
except OSError:
LOG.debug("Unable to remove file: " + path)
def cleanup(number_of_days, path):
"""
Removes files from the passed in path that are older than or equal
to number_of_days
"""
time_in_secs = _time() - number_of_days * 24 * 60 * 60
for root, dirs, files in os.walk(path, topdown=False):
LOG.debug("root dirs files: " + root)
for file_ in files:
full_path = os.path.join(root, file_)
stat = os.stat(full_path)
if stat.st_mtime <= time_in_secs:
LOG.debug("Removing: " + full_path)
remove(full_path)
if not os.listdir(root):
LOG.debug("Removing root: " + root)
remove(root)
def delete_old_dirs(dir_path, older_than_days):
"""
Delete old directories
"""
LOG.debug("delete_old_dirs in progress..." + older_than_days)
older_than = older_than_days * 86400 # convert days to seconds
time_now = _time()
LOG.debug("after: " + dir_path)
for path, folders, files in os.walk(dir_path):
LOG.debug("path, folders, files:" + path + folders + files)
for folder in folders[:]:
folder_path = os.path.join(path, folder)
if (time_now - os.path.getmtime(folder_path)) > older_than:
yield folder_path
LOG.debug("Deleting folder " + folder)
# folders.remove(folder)
if __name__ == "__main__":
# Read config file
#
# pylint: disable=C0103
# C0103: Invalid name "%s" (should match %s)
# Used when the name doesn't match the regular expression
# associated to its type (constant, variable, class...).
config = RawConfigParser()
(station_name, environment, config_filename, log_file) = read_arguments()
if not os.path.isfile(config_filename):
# config.read(config_filename)
# else:
print "ERROR: ", config_filename, ": No such config file."
sys.exit()
run_options = read_config_file_options(config_filename,
station_name, environment)
if not isinstance(run_options, dict):
print "Reading config file failed: ", config_filename
sys.exit()
# Logging
config.read(config_filename)
logging_cfg = dict(config.items("logging"))
print "----------------------------------------\n"
print logging_cfg
if log_file is not None:
try:
ndays = int(logging_cfg["log_rotation_days"])
ncount = int(logging_cfg["log_rotation_backup"])
except KeyError as err:
            print err.args, \
                "is missing. Please check your config file", \
                config_filename
            raise IOError("Log file was given but the log rotation " +
                          "settings are missing")
handler = handlers.TimedRotatingFileHandler(log_file,
when='midnight',
interval=ndays,
backupCount=ncount,
encoding=None,
delay=False,
utc=True)
handler.doRollover()
else:
handler = logging.StreamHandler(sys.stderr)
    if logging_cfg.get("logging_mode") == "DEBUG":
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
handler.setLevel(loglevel)
logging.getLogger('').setLevel(loglevel)
logging.getLogger('').addHandler(handler)
formatter = logging.Formatter(fmt=_DEFAULT_LOG_FORMAT,
datefmt=_DEFAULT_TIME_FORMAT)
handler.setFormatter(formatter)
logging.getLogger('posttroll').setLevel(logging.INFO)
LOG = logging.getLogger('aapp_runner')
if run_options['pps_out_dir'] == '':
LOG.warning("No pps_out_dir specified.")
for key in run_options:
print key, "=", run_options[key]
aapp_rolling_runner(run_options)
| TAlonglong/trollduction-test | aapp_runner/aapp_dr_runner.py | Python | gpl-3.0 | 72,713 |
# coding: utf-8
import time
import config_mqtt
class Asynch_result:
def __init__(self, correlation_id, requests, yield_to):
self.correlation_id = correlation_id
self._requests_need_result = requests
self.yield_to = yield_to
    def get(self, timeout=config_mqtt.ASYNCH_RESULT_TIMEOUT):
# time.sleep(config_mqtt.ASYNCH_RESULT_WAIT_BEFORE_GET)
start_time = time.time()
request = self._requests_need_result.get(self.correlation_id)
if request:
while True:
current_time = time.time()
if request.get('is_replied'):
result = request.get('result')
# self._requests_need_result.pop(self.correlation_id)
return result
else:
if current_time - start_time > timeout: # timeout
# self._requests_need_result.pop(self.correlation_id)
raise Exception('Timeout: no result returned for request with correlation_id {}'.format(self.correlation_id))
else:
self.yield_to()
else:
raise Exception('No such request for request with correlation_id {}'.format(self.correlation_id))
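# Minimal usage sketch (illustrative names; the real request store and the
# yield_to scheduler hook are supplied by the node code that creates this):
#
#   requests = {'abc-123': {'is_replied': False, 'result': None}}
#   res = Asynch_result('abc-123', requests, yield_to=lambda: time.sleep(0.05))
#   # ... the reply handler later mutates the same entry in place:
#   # requests['abc-123']['is_replied'] = True
#   # requests['abc-123']['result'] = 42
#   print(res.get(timeout=5))  # -> 42, or raises after the timeout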
| Wei1234c/Elastic_Network_of_Things_with_MQTT_and_MicroPython | codes/node/asynch_result.py | Python | gpl-3.0 | 1,312 |
"""Namegame, where you try to remember a team number starting with the last number of the previous played team"""
import asyncio
import gzip
import pickle
import traceback
from collections import OrderedDict
from functools import wraps
import discord
import tbapi
from discord.ext.commands import has_permissions
from fuzzywuzzy import fuzz
from dozer.bot import DOZER_LOGGER
from ._utils import *
from .. import db
SUPPORTED_MODES = ["frc", "ftc"]
def keep_alive(func):
"""Keeps the wrapped async function alive; functions must have self and ctx as args"""
@wraps(func)
async def wrapper(self, ctx, *args, **kwargs):
"""Wraps namegame"""
while True:
try:
return await func(self, ctx, *args, **kwargs)
except Exception as e:
# CancelledErrors are normal part of operation, ignore them
if isinstance(e, asyncio.CancelledError):
return
# panic to the console, and to chat
DOZER_LOGGER.error(traceback.format_exc())
await ctx.send(f"```Error in game loop:\n{e.__class__.__name__}: {e}```")
return wrapper
def game_is_running(func):
"""Check if there's an active game in a channel"""
@wraps(func)
async def wrapper(self, ctx, *args, **kwargs):
"""Wraps the checker"""
if ctx.channel.id not in self.games:
await ctx.send(f"There's not a game going on! Start one with `{ctx.prefix}ng startround`")
return
return await func(self, ctx, *args, **kwargs)
return wrapper
class NameGameSession():
"""NameGame session object"""
def __init__(self, mode):
self.running = True
self.pings_enabled = False
self.players = OrderedDict()
self.removed_players = []
self.picked = []
self.mode = mode
self.time = 60
self.vote_time = -1
self.number = 0
self.current_player = None
self.last_name = ""
self.last_team = 0
self.state_lock = None
self.turn_msg = None
self.turn_embed = None
self.turn_task = None
self.turn_count = 0
self.pass_tally = 0
self.fail_tally = 0
self.vote_correct = False
self.vote_player = None
self.vote_msg = None
self.vote_embed = None
self.vote_task = None
def create_embed(self, title="", description="", color=discord.Color.blurple(), extra_fields=[], start=False):
"""Creates an embed."""
v = "Starting " if start else "Current "
embed = discord.Embed()
embed.title = title
embed.description = description
embed.color = color
embed.add_field(name="Players", value=", ".join([p.display_name for p in self.players.keys()]) or "n/a")
embed.add_field(name=v + "Player", value=self.current_player)
embed.add_field(name=v + "Number", value=self.number or "Wildcard")
embed.add_field(name="Time Left", value=self.time)
for name, value in extra_fields:
embed.add_field(name=name, value=value)
return embed
def check_name(self, ctx, team, name):
"""Checks the name of the team"""
tba_parser = ctx.cog.tba_parser
ftc_teams = ctx.cog.ftc_teams
actual_name = ""
if self.mode == "frc":
# check for existence
team_data = tba_parser.get_team(team)
try:
getattr(team_data, "Errors")
except tbapi.InvalidKeyError:
"""There is no error, so do nothing"""
else:
return -1
actual_name = team_data.nickname
elif self.mode == "ftc":
if team not in ftc_teams:
return -1
actual_name = ftc_teams[team]
self.last_name = actual_name
self.last_team = team
return fuzz.ratio(actual_name.lower(), name.lower())
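    # check_name() returns -1 when the team does not exist in the chosen mode,
    # otherwise a 0-100 fuzzywuzzy similarity score between the guessed name
    # and the official one (an exact, case-insensitive match scores 100).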
def next_turn(self):
"""Processes the next turn transition"""
self.turn_count += 1
self.pass_tally = 0
self.fail_tally = 0
self.time = 60
players = list(self.players.keys())
# set the current player to the next handle in the list
self.current_player = players[(players.index(self.current_player) + 1) % len(players)]
# self._idx = (self._idx + 1) % len(self.players)
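    # For example, with players [A, B, C] and current_player B, next_turn()
    # advances to C, and from C it wraps around to A.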
def strike(self, player):
"""Gives players strikes"""
self.players[player] += 1
if self.players[player] >= 3 or len(self.players) == 1:
self.removed_players.append(player)
self.players.pop(player)
return True
return False
def check_win(self):
"""Checks if someone won the game"""
return len(self.players) == 1 and self.turn_count > 6
def get_picked(self):
"""Gets the picked teams"""
return ", ".join(map(str, sorted(self.picked))) or "No Picked Teams"
class NameGame(Cog):
"""Namegame commands"""
def __init__(self, bot):
super().__init__(bot)
with gzip.open("ftc_teams.pickle.gz") as f:
raw_teams = pickle.load(f)
self.ftc_teams = {team: data['seasons'][0]['name'] for (team, data) in raw_teams.items()}
self.games = {}
tba_config = bot.config['tba']
self.tba_parser = tbapi.TBAParser(tba_config['key'], cache=False)
@group(invoke_without_command=True)
async def ng(self, ctx):
"""Show info about and participate in a robotics team namegame.
Run the help command on each of the subcommands for more detailed help.
List of subcommands:
ng info
ng startround
ng addplayer
ng pick
ng drop
ng skip
ng gameinfo
"""
await self.info.callback(self, ctx)
ng.example_usage = """
`{prefix}ng` - show a description on how the robotics team namegame works.
"""
@ng.command()
@bot_has_permissions(embed_links=True)
async def info(self, ctx):
"""Show a description of the robotics team name game and how to play."""
game_embed = discord.Embed()
game_embed.color = discord.Color.magenta()
game_embed.title = "How to play"
game_embed.description = "This is a very simple little game where players will name a team number and name that " \
"starts with the last digit of the last named team. Some more specific rules are below:"
game_embed.add_field(name="No Double Picking", value="Only pick teams once.")
game_embed.add_field(name="Three Strikes, You're Out!",
value="You are only allowed three strikes, which are given by picking out of turn, "
"getting the team name wrong, picking a non existant team, being voted that your "
"pick is incorrect, not picking in time, or picking a already picked team.")
game_embed.add_field(name="No Cheatsy Doodles",
value="No looking up teams on TBA, TOA, VexDB, or other methods, that's just unfair.")
game_embed.add_field(name="Times up!",
value="You have 60 seconds to make a pick, or you get skipped and get a strike.")
game_embed.add_field(name="Shaking Things Up",
value="Any team number that ends in a 0 mean that the next player has a wildcard, "
"and can pick any legal team.")
game_embed.add_field(name="Pesky Commands", value=(f"To start a game, type `{ctx.prefix}ng startround` and "
f"mention the players you want to play with. "
f"You can add people with `{ctx.prefix}ng addplayer <user_pings>`. "
f"When it's your turn, type `{ctx.prefix}ng pick <team> "
f"<teamname>` to execute your pick. "
f"If you need to skip, typing `{ctx.prefix}ng skip` gives you"
f" a strike and skips your turn."
f"You can always do `{ctx.prefix}ng gameinfo` to get the "
f"current game status. "
f"If you ever need to quit, running `{ctx.prefix}ng drop` "
f"removes you from the game. "
f"For more detailed command help, run `{ctx.prefix}help ng.`"))
game_embed.add_field(name="Different Game Modes",
value=f"You can play the name game with FTC teams too! To start a game playing with "
f"FTC teams, run `{ctx.prefix}ng startround ftc`")
await ctx.send(embed=game_embed)
info.example_usage = """
`{prefix}ng help` - show a description on how the robotics team namegame works
"""
@ng.group(invoke_without_command=True)
async def config(self, ctx):
"""Configuration for namegame"""
await ctx.send(f"""`{ctx.prefix}ng config` reference:
`{ctx.prefix}ng config defaultmode [mode]` - set the default game mode used when startround is used with no arguments
`{ctx.prefix}ng config setchannel [channel_mention]` - set the channel that games are allowed to be run in
`{ctx.prefix}ng config clearsetchannel` - clear the set channel for games""")
@config.command()
@has_permissions(manage_guild=True)
async def defaultmode(self, ctx, mode: str = None):
"""Configuration of the default game mode (FRC, FTC, etc.)"""
query = await NameGameConfig.get_by(guild_id=ctx.guild.id)
config = query[0] if len(query) == 1 else None
if mode is None:
mode = SUPPORTED_MODES[0] if config is None else config.mode
await ctx.send(f"The current default game mode for this server is `{mode}`")
else:
if mode not in SUPPORTED_MODES:
await ctx.send(
f"Game mode `{mode}` not supported! Please pick a mode that is one of: `{', '.join(SUPPORTED_MODES)}`")
return
if config is None:
config = NameGameConfig(guild_id=ctx.guild.id, channel_id=None, mode=mode, pings_enabled=False)
await config.update_or_add()
else:
config.mode = mode
await config.update_or_add()
await ctx.send(f"Default game mode updated to `{mode}`")
@config.command()
@has_permissions(manage_guild=True)
async def setchannel(self, ctx, channel: discord.TextChannel = None):
"""Sets the namegame channel"""
query = await NameGameConfig.get_by(guild_id=ctx.guild.id)
config = query[0] if len(query) == 1 else None
if channel is None:
if config is None or config.channel_id is None:
await ctx.send(
f"There is no currently set namegame channel.\nTo set a channel, run `{ctx.prefix}ng config "
f"setchannel [channel_mention]`")
else:
await ctx.send(
f"The currently set namegame channel is {ctx.guild.get_channel(config.channel_id).mention}.\n"
f"To clear this, run `{ctx.prefix}ng config clearsetchannel`")
else:
if config is None:
config = NameGameConfig(guild_id=ctx.guild.id, channel_id=channel.id, mode=SUPPORTED_MODES[0],
pings_enabled=False)
else:
config.channel_id = channel.id
await config.update_or_add()
await ctx.send(f"Namegame channel set to {channel.mention}!")
@config.command()
@has_permissions(manage_guild=True)
async def clearsetchannel(self, ctx):
"""Clears the set namegame channel"""
query = await NameGameConfig.get_by(guild_id=ctx.guild.id)
config = query[0] if len(query) == 1 else None
if config is not None:
# update_or_add ignores attributes set to None. To set the column to None, we delete the record and insert
# a new one with channel set to None.
new_namegame_config = NameGameConfig(channel_id=None, guild_id=ctx.guild.id, pings_enabled=config.pings_enabled,
mode=config.mode)
await NameGameConfig.delete(guild_id=ctx.guild.id)
await new_namegame_config.update_or_add()
await ctx.send("Namegame channel cleared!")
@config.command()
@has_permissions(manage_guild=True)
async def setpings(self, ctx, enabled: bool):
"""Sets whether or not pings are enabled"""
query = await NameGameConfig.get_by(guild_id=ctx.guild.id)
config = query[0] if len(query) == 1 else None
if config is None:
config = NameGameConfig(guild_id=ctx.guild.id, channel_id=None, mode=SUPPORTED_MODES[0],
pings_enabled=int(enabled))
else:
config.pings_enabled = int(enabled)
await config.update_or_add()
await ctx.send(f"Pings enabled set to `{enabled}`!")
@config.command()
@has_permissions(manage_guild=True)
async def leaderboardedit(self, ctx, mode: str, user: discord.User, wins: int):
"""Edits the leaderboard"""
if mode not in SUPPORTED_MODES:
await ctx.send(
f"Game mode `{mode}` not supported! Please pick a mode that is one of: `{', '.join(SUPPORTED_MODES)}`")
return
query = await NameGameLeaderboard.get_by(user_id=user.id, game_mode=mode)
if not query:
await ctx.send("User not on leaderboard!")
return
record = query[0]
record.wins = wins
await record.update_or_add()
await ctx.send(f"{user.display_name}'s wins now set to: **{wins}**")
@config.command()
@has_permissions(manage_guild=True)
async def leaderboardclear(self, ctx, mode: str):
"""Clears the leaderboard"""
if mode not in SUPPORTED_MODES:
await ctx.send(
f"Game mode `{mode}` not supported! Please pick a mode that is one of: `{', '.join(SUPPORTED_MODES)}`")
return
await NameGameLeaderboard.delete(game_mode=mode)
await ctx.send(f"Cleared leaderboard for mode {mode}")
# TODO: configurable time limits, ping on event, etc
# MORE TODO:
"""
fix %ng help (done)
fix %ng startround (done)
    fix the wrong team dialogue (????)
add pings
i hate bots
make %ng addplayer be rhetorical question (done)
figure out these stupid turn issues
"""
@ng.command()
@game_is_running
async def unheck(self, ctx):
"""
Emergency removal of a haywire session.
"""
game = self.games[ctx.channel.id]
game.running = False
try:
game.vote_task.cancel()
except Exception:
pass
try:
game.turn_task.cancel()
except Exception:
pass
        self.games.pop(ctx.channel.id)
@ng.command()
async def modes(self, ctx):
"""Returns a list of supported modes"""
await ctx.send(f"Supported game modes: `{', '.join(SUPPORTED_MODES)}`")
@ng.command()
async def startround(self, ctx, mode: str = None):
"""
Starts a namegame session.
One can select the robotics program by specifying one of "FRC" or "FTC".
"""
if mode is None or mode.lower() not in SUPPORTED_MODES:
config = await NameGameConfig.get_by(guild_id=ctx.guild.id)
mode = SUPPORTED_MODES[0] if len(config) == 0 else config[0].mode
await ctx.send(
f"Unspecified or invalid game mode, assuming game mode `{mode}`. For a full list of game modes, run "
f"`{ctx.prefix}ng modes`")
pings_enabled = False
config_query = await NameGameConfig.get_by(guild_id=ctx.guild.id)
if len(config_query) == 0:
config = None
else:
config = config_query[0]
if config is not None and config.channel_id is not None and config.channel_id != ctx.channel.id:
await ctx.send("Games cannot be started in this channel!")
return
pings_enabled = (config is not None and config.pings_enabled)
if ctx.channel.id in self.games:
await ctx.send("A game is currently going on! Wait till the players finish up to start again.")
return
game = NameGameSession(mode.lower())
game.state_lock = asyncio.Lock(loop=self.bot.loop)
game.pings_enabled = pings_enabled
game.players[ctx.author] = 0
game.current_player = ctx.author
for player in ctx.message.mentions:
if player == ctx.author:
continue
if player.bot:
await ctx.send(f"You can't invite bot users like {player.mention}!")
continue
game.players[player] = 0
await self.send_turn_embed(ctx, game,
title=f"{mode.upper()} Name Game",
description="A game has been started! The info about the game is as follows:",
color=discord.Color.green())
await self.notify(ctx, game, f"{game.current_player.mention}, start us off!")
# await ctx.send(f"{game.current_player.mention}, start us off!")
self.games[ctx.channel.id] = game
game.turn_task = self.bot.loop.create_task(self.game_turn_countdown(ctx, game))
startround.example_usage = """
`{prefix}ng startround frc` - start an FRC namegame session.
"""
@ng.command()
@game_is_running
async def addplayer(self, ctx):
"""Add players to the current game.
Only works if the user is currently playing."""
if ctx.channel.id not in self.games:
await ctx.send(f"There's not a game going on! Start one with `{ctx.prefix}ng startround`")
return
game = self.games[ctx.channel.id]
async with game.state_lock:
added = False
players = ctx.message.mentions or [ctx.author]
            for player in players:
if player.bot:
await ctx.send(f"You can't invite bot users like {player.mention}!")
continue
if player in game.removed_players:
await ctx.send(f"{player.mention} is already out of the game and can't be added back in.")
                elif player in game.players:
                    await ctx.send(f"{player.mention} is already in the game!")
                else:
                    game.players[player] = 0
                    added = True
if not added:
return
await ctx.send(embed=game.create_embed(
title="Players have been added to the game.",
description="See below for an updated player list.",
color=discord.Color.blurple()
))
addplayer.example_usage = """
`{prefix}ng addplayer @user1, @user2` - add user1 and user2 to the game.
"""
@ng.command()
@game_is_running
async def pick(self, ctx, team: int, *, name):
"""Attempt to pick a team in a game."""
game = self.games[ctx.channel.id]
async with game.state_lock:
if ctx.author != game.current_player:
if ctx.author in game.players:
await ctx.send(
"It's not your turn! You've been given a strike for this behaviour! Don't let it happen again...")
await self.strike(ctx, game, ctx.author)
else:
await ctx.send(
f"Let the people playing play! If you want to join, ask one of the people currently playing to "
f"run `{ctx.prefix}ng addplayer {ctx.author.display_name}`")
return
if game.time < 0:
await ctx.send("Vote on the current team before picking the next!")
return
if game.number != 0 and str(game.number) != str(team)[0]:
await self.skip_player(ctx, game, ctx.author,
"Your team doesn't start with the correct digit! Strike given, moving onto the next player!")
return
if team in game.picked:
await self.skip_player(ctx, game, ctx.author,
"That team has already been picked! You have been skipped and given a strike.")
return
ratio = game.check_name(ctx, team, name)
if ratio == -1:
# nonexistant team
await self.skip_player(ctx, game, ctx.author,
f"Team {team} doesn't exist! Strike given, moving onto the next player!")
return
if ratio > 60:
game.picked.append(team)
game.number = game.last_team % 10
game.next_turn()
game.vote_correct = True
game.vote_time = 20
game.vote_player = ctx.author
await self.send_turn_embed(ctx, game,
title="Team correct!",
description=f"Team {team} ({game.last_name}) was {ratio}% correct! Moving "
f"onto the next player as follows. Click the red X to override "
f"this decision.",
color=discord.Color.green(),
extra_fields=[("Voting Time", game.vote_time)])
await game.turn_msg.add_reaction('❌')
await self.notify(ctx, game, f"{game.current_player.mention}, you're up! Current number: {game.number}")
game.vote_msg = game.turn_msg
game.vote_embed = game.turn_embed
# EXTREMELY INCOMPLETE LOL
# (not anymore)
else:
game.time = -1
game.vote_time = 60
game.vote_player = ctx.author
game.vote_correct = False
vote_embed = discord.Embed()
vote_embed.color = discord.Color.gold()
vote_embed.title = "A vote is needed!"
                vote_embed.description = "A player has made a choice with less than 60% similarity. The details of the " \
                                         "pick are below. Click on the two emoji to vote if this is correct or not. A" \
                                         " 50% majority of players is required to accept it, otherwise the player will " \
                                         "get a strike."
vote_embed.add_field(name="Player", value=game.current_player.mention)
vote_embed.add_field(name="Team", value=team)
vote_embed.add_field(name="Said Name", value=name)
vote_embed.add_field(name="Actual Name", value=game.last_name)
vote_embed.add_field(name="Similarity", value=f"{ratio}%")
vote_embed.add_field(name="Voting Time", value=game.vote_time)
game.vote_embed = vote_embed
game.vote_msg = await ctx.send(embed=vote_embed)
await game.vote_msg.add_reaction('✅')
await game.vote_msg.add_reaction('❌')
game.vote_task = self.bot.loop.create_task(self.game_vote_countdown(ctx, game))
pick.example_usage = """
`{prefix}ng pick 254 poofy cheeses` - attempt to guess team 254 with a specified name of "poofy cheeses".
"""
@ng.command()
@game_is_running
async def drop(self, ctx):
"""Drops a player from the current game by eliminating them. Once dropped, they can no longer rejoin."""
game = self.games[ctx.channel.id]
async with game.state_lock:
if ctx.author not in game.players:
await ctx.send("You can't leave a game you're not in!")
return
game.players[ctx.author] = 2
if ctx.author == game.current_player:
await self.skip_player(ctx, game, ctx.author)
else:
await self.strike(ctx, game, ctx.author)
if game.running:
await self.display_info(ctx, game)
drop.example_usage = """
`{prefix}ng drop` - remove the initiator of the command from the current game
"""
@ng.command()
@game_is_running
async def skip(self, ctx):
"""Skips the current player if the player wishes to forfeit their turn."""
game = self.games[ctx.channel.id]
async with game.state_lock:
if ctx.author != game.current_player:
await ctx.send("It's not your turn! Only the current player can skip their turn!")
else:
await self.skip_player(ctx, game, ctx.author)
skip.example_usage = """
`{prefix}ng skip` - skip the current player's turn
"""
@ng.command()
@game_is_running
async def gameinfo(self, ctx):
"""Display info about the currently running game."""
game = self.games[ctx.channel.id]
await self.display_info(ctx, game)
gameinfo.example_usage = """
`{prefix}ng gameinfo` - display info about the currently running game.
"""
@ng.command()
async def leaderboard(self, ctx, mode: str = None):
"""Display top numbers of wins for the specified game mode"""
if mode is None:
config = await NameGameConfig.get_by(guild_id=ctx.guild.id)
mode = SUPPORTED_MODES[0] if len(config) == 0 else config[0].mode
if mode not in SUPPORTED_MODES:
await ctx.send(
f"Game mode `{mode}` not supported! Please pick a mode that is one of: `{', '.join(SUPPORTED_MODES)}`")
return
leaderboard = sorted(await NameGameLeaderboard.get_by(game_mode=mode),
key=lambda i: i.wins, reverse=True)[:10]
embed = discord.Embed(color=discord.Color.gold(), title=f"{mode.upper()} Name Game Leaderboard")
for idx, entry in enumerate(leaderboard, 1):
embed.add_field(name=f"#{idx}: {ctx.bot.get_user(entry.user_id).display_name}", value=entry.wins)
await ctx.send(embed=embed)
leaderboard.example_usage = """
`{prefix}ng leaderboard ftc` - display the namegame winning leaderboards for FTC.
"""
async def strike(self, ctx, game, player):
"""Gives a player a strike."""
if game.strike(player):
await ctx.send(f"Player {player.mention} is ELIMINATED!")
if len(game.players) == 0 or game.turn_count <= 6:
await ctx.send("Game disbanded, no winner called!")
game.running = False
if game.check_win():
# winning condition
winner = list(game.players.keys())[0]
            query = await NameGameLeaderboard.get_by(user_id=winner.id, game_mode=game.mode)
if query:
record = query[0]
record.wins += 1
else:
record = NameGameLeaderboard(user_id=winner.id, wins=1, game_mode=game.mode)
await record.update_or_add()
win_embed = discord.Embed()
win_embed.color = discord.Color.gold()
win_embed.title = "We have a winner!"
win_embed.add_field(name="Winning Player", value=winner)
win_embed.add_field(name="Wins Total", value=record.wins)
win_embed.add_field(name="Teams Picked", value=game.get_picked())
await ctx.send(embed=win_embed)
game.running = False
if not game.running:
self.games.pop(ctx.channel.id)
async def display_info(self, ctx, game):
"""Displays info about the current game"""
info_embed = discord.Embed(title="Current Game Info", color=discord.Color.blue())
info_embed.add_field(name="Game Type", value=game.mode.upper())
info_embed.add_field(
name="Strikes",
value="\n".join([f"{player.display_name}: {strikes}" for player, strikes in game.players.items()])
)
info_embed.add_field(name="Current Player", value=game.current_player)
info_embed.add_field(name="Current Number", value=game.number or "Wildcard")
info_embed.add_field(name="Time Left", value=game.time)
info_embed.add_field(name="Teams Picked", value=game.get_picked())
await ctx.send(embed=info_embed)
async def skip_player(self, ctx, game, player, msg=None):
"""Skips a player"""
if msg is not None:
await ctx.send(msg)
game.vote_time = -1
game.next_turn()
await self.send_turn_embed(ctx, game,
title=f"Player {player.display_name} was skipped and now has {game.players[player]+1} strike(s)!",
color=discord.Color.red())
if player != game.current_player:
await self.notify(ctx, game, f"{game.current_player.mention}, you're up! Current number: {game.number}")
await self.strike(ctx, game, player)
# send an embed that starts a new turn
async def send_turn_embed(self, ctx, game, **kwargs):
"""Sends an embed that starts a new turn"""
game.turn_embed = game.create_embed(**kwargs)
game.turn_msg = await ctx.send(embed=game.turn_embed)
async def notify(self, ctx, game, msg):
"""Notifies people in the channel when it's their turn."""
if game.pings_enabled:
await ctx.send(msg)
@Cog.listener()
async def on_reaction_add(self, reaction, user):
"""When reactions are added, trigger the voting handler"""
if reaction.message.channel.id not in self.games:
return
game = self.games[reaction.message.channel.id]
async with game.state_lock:
if game.vote_msg is None or game.vote_time <= 0:
return
await self._on_reaction(game, reaction, user, 1)
# also handle voting logic
ctx = await self.bot.get_context(reaction.message)
if game.vote_correct:
if game.fail_tally > .5 * len(game.players):
await ctx.send(f"The decision was overruled! Player {game.vote_player.mention} is given a strike!")
await self.strike(ctx, game, game.vote_player)
game.vote_time = -1
else:
if game.pass_tally >= .5 * len(game.players):
game.picked.append(game.last_team)
game.number = game.last_team % 10
game.next_turn()
await self.send_turn_embed(ctx, game,
title="Team correct!",
description=f"Team {game.last_team} ({game.last_name}) was correct! "
f"Moving onto the next player as follows.",
color=discord.Color.green())
await self.notify(ctx, game,
f"{game.current_player.mention}, you're up! Current number: {game.number}")
game.vote_time = -1
elif game.fail_tally >= .5 * len(game.players):
await ctx.send(
f"Team {game.last_team} was guessed wrong! Strike given to the responsible player and player is skipped.")
await self.skip_player(ctx, game, game.current_player)
game.vote_time = -1
@Cog.listener()
async def on_reaction_remove(self, reaction, user):
"""When a reaction is removed, do vote handling"""
if reaction.message.channel.id not in self.games:
return
game = self.games[reaction.message.channel.id]
async with game.state_lock:
if game.vote_msg is None or game.vote_time <= 0:
return
await self._on_reaction(game, reaction, user, -1)
async def _on_reaction(self, game, reaction, user, inc):
"""Handles pass/fail reactions"""
if reaction.message.id == game.vote_msg.id and user in game.players:
if reaction.emoji == '❌':
game.fail_tally += inc
if reaction.emoji == '✅':
game.pass_tally += inc
return game
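    # _on_reaction is called with inc=1 from on_reaction_add and inc=-1 from
    # on_reaction_remove, so toggling a ✅ or ❌ reaction keeps the pass/fail
    # tallies in step with the reactions currently present on the vote message.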
@keep_alive
async def game_turn_countdown(self, ctx, game):
"""Counts down the time remaining left in the turn"""
await asyncio.sleep(1)
async with game.state_lock:
if not game.running:
return
if game.time > 0:
game.time -= 1
game.turn_embed.set_field_at(3, name="Time Left", value=game.time)
if game.vote_time > 0 and game.vote_correct:
game.vote_time -= 1
game.turn_embed.set_field_at(4, name="Voting Time", value=game.vote_time)
if game.time % 5 == 0:
await game.turn_msg.edit(embed=game.turn_embed)
if game.time == 0:
await self.skip_player(ctx, game, game.current_player)
game.turn_task = self.bot.loop.create_task(self.game_turn_countdown(ctx, game))
@keep_alive
async def game_vote_countdown(self, ctx, game):
"""Counts down the time remaining left to vote"""
await asyncio.sleep(1)
async with game.state_lock:
if not (game.running and not game.vote_correct and game.vote_embed and game.vote_time > 0):
return
game.vote_time -= 1
game.vote_embed.set_field_at(5, name="Voting Time", value=game.vote_time)
if game.vote_time % 5 == 0:
await game.vote_msg.edit(embed=game.vote_embed)
if game.vote_time == 0:
await ctx.send(
"The vote did not reach 50% in favor or in failure, so the responsible player is given a strike and skipped.")
await self.skip_player(ctx, game, game.current_player)
game.vote_task = self.bot.loop.create_task(self.game_vote_countdown(ctx, game))
class NameGameConfig(db.DatabaseTable):
"""Configuration storage object"""
__tablename__ = 'namegame_config'
__uniques__ = 'guild_id'
@classmethod
async def initial_create(cls):
"""Create the table in the database"""
async with db.Pool.acquire() as conn:
await conn.execute(f"""
CREATE TABLE {cls.__tablename__} (
guild_id bigint PRIMARY KEY NOT NULL,
channel_id bigint null,
mode varchar NOT NULL,
pings_enabled bigint NOT NULL
)""")
def __init__(self, guild_id, mode, pings_enabled, channel_id=None):
super().__init__()
self.channel_id = channel_id
self.mode = mode
self.guild_id = guild_id
self.pings_enabled = pings_enabled
@classmethod
async def get_by(cls, **kwargs):
results = await super().get_by(**kwargs)
result_list = []
for result in results:
obj = NameGameConfig(guild_id=result.get("guild_id"), mode=result.get("mode"),
pings_enabled=result.get("pings_enabled"), channel_id=result.get("channel_id"))
result_list.append(obj)
return result_list
class NameGameLeaderboard(db.DatabaseTable):
"""Leaderboard storage object"""
__tablename__ = 'namegame_leaderboard'
__uniques__ = 'user_id'
@classmethod
async def initial_create(cls):
"""Create the table in the database"""
async with db.Pool.acquire() as conn:
await conn.execute(f"""
CREATE TABLE {cls.__tablename__} (
user_id bigint NOT NULL,
wins bigint NOT NULL,
game_mode varchar NOT NULL,
PRIMARY KEY (user_id, game_mode)
)""")
def __init__(self, user_id, game_mode, wins):
super().__init__()
self.game_mode = game_mode
self.user_id = user_id
self.wins = wins
@classmethod
async def get_by(cls, **kwargs):
results = await super().get_by(**kwargs)
result_list = []
for result in results:
obj = NameGameLeaderboard(user_id=result.get("user_id"), game_mode=result.get("game_mode"),
wins=result.get("wins"))
result_list.append(obj)
return result_list
def setup(bot):
"""Adds the namegame cog to the bot"""
bot.add_cog(NameGame(bot))
| FRCDiscord/Dozer | dozer/cogs/namegame.py | Python | gpl-3.0 | 37,558 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB
choices: ["present", "absent"]
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
instance_ids:
description:
- List of instance ids to attach to this ELB
required: false
default: false
version_added: "2.1"
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids
required: false
default: false
version_added: "2.1"
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
    required: false
default: None
version_added: "1.6"
security_group_names:
description:
- A list of security group names to apply to the elb
    required: false
default: None
version_added: "2.0"
health_check:
description:
- An associative array of health check configuration settings (see example)
    required: false
default: None
access_logs:
description:
- An associative array of access logs configuration settings (see example)
    required: false
default: None
version_added: "2.0"
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
      - An associative array of stickiness policy settings. Policy will be applied to all listeners (see example)
required: false
version_added: "2.0"
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.1"
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
A maximum of 600 seconds (10 minutes) is allowed.
required: false
default: 60
version_added: "2.1"
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict.
required: false
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates a ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
      load_balancer_port: 80
      instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
      load_balancer_port: 80
      instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
      load_balancer_port: 80
      instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
      load_balancer_port: 80
      instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
      load_balancer_port: 80
      instance_port: 80
tags: {}
"""
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.tag import Tag
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import time
import random
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
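# _throttleable_operation retries throttled boto calls with jittered
# exponential backoff: retry n sleeps a random fraction of 2**n seconds,
# capped at 300 seconds, and re-raises anything that is not a Throttling /
# RequestLimitExceeded error. Usage sketch (hypothetical function; the real
# uses are the decorated ElbManager methods below):
#
#   @_throttleable_operation(_THROTTLING_RETRIES)
#   def describe_lb(conn, name):
#       return conn.get_all_load_balancers(load_balancer_names=[name])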
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
idle_timeout=None,
cross_az_load_balancing=None, access_logs=None,
stickiness=None, wait=None, wait_timeout=None, tags=None,
region=None,
instance_ids=None, purge_instance_ids=None, **aws_connect_params):
self.module = module
self.name = name
self.listeners = listeners
self.purge_listeners = purge_listeners
self.instance_ids = instance_ids
self.purge_instance_ids = purge_instance_ids
self.zones = zones
self.purge_zones = purge_zones
self.security_group_ids = security_group_ids
self.health_check = health_check
self.subnets = subnets
self.purge_subnets = purge_subnets
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.idle_timeout = idle_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.access_logs = access_logs
self.stickiness = stickiness
self.wait = wait
self.wait_timeout = wait_timeout
self.tags = tags
self.aws_connect_params = aws_connect_params
self.region = region
self.changed = False
self.status = 'gone'
self.elb_conn = self._get_elb_connection()
self.elb = self._get_elb()
self.ec2_conn = self._get_ec2_connection()
@_throttleable_operation(_THROTTLING_RETRIES)
def ensure_ok(self):
"""Create the ELB"""
if not self.elb:
# Zones and listeners will be added at creation
self._create_elb()
else:
self._set_zones()
self._set_security_groups()
self._set_elb_listeners()
self._set_subnets()
self._set_health_check()
# boto has introduced support for some ELB attributes in
# different versions, so we check first before trying to
# set them to avoid errors
if self._check_attribute_support('connection_draining'):
self._set_connection_draining_timeout()
if self._check_attribute_support('connecting_settings'):
self._set_idle_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
if self._check_attribute_support('access_log'):
self._set_access_log()
        # add sticky options
self.select_stickiness_policy()
# ensure backend server policies are correct
self._set_backend_policies()
# set/remove instance ids
self._set_instance_ids()
self._set_tags()
def ensure_gone(self):
"""Destroy the ELB"""
if self.elb:
self._delete_elb()
if self.wait:
elb_removed = self._wait_for_elb_removed()
# Unfortunately even though the ELB itself is removed quickly
# the interfaces take longer so reliant security groups cannot
# be deleted until the interface has registered as removed.
elb_interface_removed = self._wait_for_elb_interface_removed()
if not (elb_removed and elb_interface_removed):
self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
        except Exception:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status,
'region': self.region
}
else:
try:
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
            except Exception:
lb_cookie_policy = None
try:
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
            except Exception:
app_cookie_policy = None
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy,
'proxy_policy': self._get_proxy_protocol_policy(),
'backends': self._get_backend_policies(),
'instances': [instance.id for instance in check_elb.instances],
'out_of_service_count': 0,
'in_service_count': 0,
'unknown_instance_state_count': 0,
'region': self.region
}
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
# instance state counts: InService or OutOfService
if info['instance_health']:
for instance_state in info['instance_health']:
if instance_state['state'] == "InService":
info['in_service_count'] += 1
elif instance_state['state'] == "OutOfService":
info['out_of_service_count'] += 1
else:
info['unknown_instance_state_count'] += 1
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [self._api_listener_as_tuple(l)
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout
if self._check_attribute_support('connecting_settings'):
info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
# return stickiness info?
info['tags'] = self.tags
return info
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
try:
result = self.elb_conn.get_all_lb_attributes(self.name)
except (boto.exception.BotoServerError, StandardError) as e:
if "LoadBalancerNotFound" in e.code:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
return status_achieved
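    # For example, with the default wait_timeout of 60 seconds this polls the
    # ELB API every 15 seconds, up to 4 times, and reports whether the load
    # balancer disappeared ("LoadBalancerNotFound") within that window.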
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_interface_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
'description': 'ELB {0}'.format(self.name) })
for x in range(0, max_retries):
for interface in elb_interfaces:
try:
result = self.ec2_conn.get_all_network_interfaces(interface.id)
if result == []:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except (boto.exception.BotoServerError, StandardError) as e:
if 'InvalidNetworkInterfaceID' in e.code:
status_achieved = True
break
else:
self.module.fail_json(msg=str(e))
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
def _get_elb_connection(self):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
self.module.fail_json(msg=str(e))
def _get_ec2_connection(self):
try:
return connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError) as e:
self.module.fail_json(msg=str(e))
@_throttleable_operation(_THROTTLING_RETRIES)
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
def _create_elb(self):
listeners = [self._listener_as_tuple(l) for l in self.listeners]
self.elb = self.elb_conn.create_load_balancer(name=self.name,
zones=self.zones,
security_groups=self.security_group_ids,
complex_listeners=listeners,
subnets=self.subnets,
scheme=self.scheme)
if self.elb:
# HACK: Work around a boto bug in which the listeners attribute is
# always set to the listeners argument to create_load_balancer, and
# not the complex_listeners
# We're not doing a self.elb = self._get_elb here because there
# might be eventual consistency issues and it doesn't necessarily
# make sense to wait until the ELB gets returned from the EC2 API.
# This is necessary in the event we hit the throttling errors and
# need to retry ensure_ok
# See https://github.com/boto/boto/issues/3526
self.elb.listeners = self.listeners
self.changed = True
self.status = 'created'
def _create_elb_listeners(self, listeners):
"""Takes a list of listener tuples and creates them"""
# True if succeeds, exception raised if not
self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
def _set_elb_listeners(self):
"""
Creates listeners specified by self.listeners; overwrites existing
listeners on these ports; removes extraneous listeners
"""
listeners_to_add = []
listeners_to_remove = []
listeners_to_keep = []
# Check for any listeners we need to create or overwrite
for listener in self.listeners:
listener_as_tuple = self._listener_as_tuple(listener)
# First we loop through existing listeners to see if one is
# already specified for this port
existing_listener_found = None
for existing_listener in self.elb.listeners:
# Since ELB allows only one listener on each incoming port, a
# single match on the incoming port is all we're looking for
if existing_listener[0] == int(listener['load_balancer_port']):
existing_listener_found = self._api_listener_as_tuple(existing_listener)
break
if existing_listener_found:
# Does it match exactly?
if listener_as_tuple != existing_listener_found:
# The ports are the same but something else is different,
# so we'll remove the existing one and add the new one
listeners_to_remove.append(existing_listener_found)
listeners_to_add.append(listener_as_tuple)
else:
# We already have this listener, so we're going to keep it
listeners_to_keep.append(existing_listener_found)
else:
# We didn't find an existing listener, so just add the new one
listeners_to_add.append(listener_as_tuple)
# Check for any extraneous listeners we need to remove, if desired
if self.purge_listeners:
for existing_listener in self.elb.listeners:
existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
if existing_listener_tuple in listeners_to_remove:
# Already queued for removal
continue
if existing_listener_tuple in listeners_to_keep:
# Keep this one around
continue
# Since we're not already removing it and we don't need to keep
# it, let's get rid of it
listeners_to_remove.append(existing_listener_tuple)
if listeners_to_remove:
self._delete_elb_listeners(listeners_to_remove)
if listeners_to_add:
self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
int(listener['load_balancer_port']),
int(listener['instance_port']),
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
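    # Example: {'protocol': 'http', 'load_balancer_port': 80,
    # 'instance_port': 8080} becomes (80, 8080, 'HTTP', 'HTTP'); including
    # 'ssl_certificate_id' would append it as a fifth element.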
def _enable_zones(self, zones):
try:
self.elb.enable_zones(zones)
except boto.exception.BotoServerError as e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _disable_zones(self, zones):
try:
self.elb.disable_zones(zones)
except boto.exception.BotoServerError as e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _attach_subnets(self, subnets):
self.elb_conn.attach_lb_to_subnets(self.name, subnets)
self.changed = True
def _detach_subnets(self, subnets):
self.elb_conn.detach_lb_from_subnets(self.name, subnets)
self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
        if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
def _set_health_check(self):
"""Set health check values on ELB as needed"""
if self.health_check:
# This just makes it easier to compare each of the attributes
# and look for changes. Keys are attributes of the current
# health_check; values are desired values of new health_check
health_check_config = {
"target": self._get_health_check_target(),
"timeout": self.health_check['response_timeout'],
"interval": self.health_check['interval'],
"unhealthy_threshold": self.health_check['unhealthy_threshold'],
"healthy_threshold": self.health_check['healthy_threshold'],
}
update_health_check = False
# The health_check attribute is *not* set on newly created
# ELBs! So we have to create our own.
if not self.elb.health_check:
self.elb.health_check = HealthCheck()
for attr, desired_value in health_check_config.iteritems():
if getattr(self.elb.health_check, attr) != desired_value:
setattr(self.elb.health_check, attr, desired_value)
update_health_check = True
if update_health_check:
self.elb.configure_health_check(self.elb.health_check)
self.changed = True
def _check_attribute_support(self, attr):
return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
if not attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = True
else:
if attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
def _set_access_log(self):
attributes = self.elb.get_attributes()
if self.access_logs:
if 's3_location' not in self.access_logs:
self.module.fail_json(msg='s3_location information required')
access_logs_config = {
"enabled": True,
"s3_bucket_name": self.access_logs['s3_location'],
"s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
"emit_interval": self.access_logs.get('interval', 60),
}
update_access_logs_config = False
for attr, desired_value in access_logs_config.iteritems():
if getattr(attributes.access_log, attr) != desired_value:
setattr(attributes.access_log, attr, desired_value)
update_access_logs_config = True
if update_access_logs_config:
self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
self.changed = True
elif attributes.access_log.enabled:
attributes.access_log.enabled = False
self.changed = True
self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
def _set_connection_draining_timeout(self):
attributes = self.elb.get_attributes()
if self.connection_draining_timeout is not None:
if not attributes.connection_draining.enabled or \
attributes.connection_draining.timeout != self.connection_draining_timeout:
self.changed = True
attributes.connection_draining.enabled = True
attributes.connection_draining.timeout = self.connection_draining_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
else:
if attributes.connection_draining.enabled:
self.changed = True
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _set_idle_timeout(self):
attributes = self.elb.get_attributes()
if self.idle_timeout is not None:
if attributes.connecting_settings.idle_timeout != self.idle_timeout:
self.changed = True
attributes.connecting_settings.idle_timeout = self.idle_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
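    # Illustrative note: the policy name is derived from this module's file
    # name. Assuming the file is ec2_elb_lb.py,
    # _policy_name('LBCookieStickinessPolicyType') would return
    # 'ec2-elb-lb-LBCookieStickinessPolicyType'.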
def _create_policy(self, policy_param, policy_meth, policy):
        getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
self._delete_policy(self.elb.name, policy)
self._create_policy(policy_param, policy_meth, policy)
    def _set_listener_policy(self, listeners_dict, policy=None):
        # Guard against the mutable-default-argument pitfall
        if policy is None:
            policy = []
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
for p in getattr(elb_info.policies, policy_attrs['attr']):
if str(p.__dict__['policy_name']) == str(policy[0]):
if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
self._set_listener_policy(listeners_dict)
self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
self.changed = True
break
else:
self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
self.changed = True
self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
if self.stickiness:
if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
d = {}
for listener in elb_info.listeners:
d[listener[0]] = listener[2]
listeners_dict = d
if self.stickiness['type'] == 'loadbalancer':
policy = []
policy_type = 'LBCookieStickinessPolicyType'
                if self.module.boolean(self.stickiness['enabled']):
if 'expiration' not in self.stickiness:
self.module.fail_json(msg='expiration must be set when type is loadbalancer')
                    expiration = self.stickiness['expiration'] if self.stickiness['expiration'] != 0 else None
policy_attrs = {
'type': policy_type,
'attr': 'lb_cookie_stickiness_policies',
'method': 'create_lb_cookie_stickiness_policy',
'dict_key': 'cookie_expiration_period',
'param_value': expiration
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
                elif not self.module.boolean(self.stickiness['enabled']):
if len(elb_info.policies.lb_cookie_stickiness_policies):
if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
else:
self.changed = False
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
elif self.stickiness['type'] == 'application':
policy = []
policy_type = 'AppCookieStickinessPolicyType'
                if self.module.boolean(self.stickiness['enabled']):
if 'cookie' not in self.stickiness:
self.module.fail_json(msg='cookie must be set when type is application')
policy_attrs = {
'type': policy_type,
'attr': 'app_cookie_stickiness_policies',
'method': 'create_app_cookie_stickiness_policy',
'dict_key': 'cookie_name',
'param_value': self.stickiness['cookie']
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
                elif not self.module.boolean(self.stickiness['enabled']):
if len(elb_info.policies.app_cookie_stickiness_policies):
if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
else:
self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
"""Get a list of backend policies"""
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
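    # For example, a backend on instance port 80 with 'ProxyProtocol-policy'
    # attached is reported as '80:ProxyProtocol-policy'.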
def _set_backend_policies(self):
"""Sets policies for all backends"""
ensure_proxy_protocol = False
replace = []
backend_policies = self._get_backend_policies()
# Find out what needs to be changed
for listener in self.listeners:
want = False
if 'proxy_protocol' in listener and listener['proxy_protocol']:
ensure_proxy_protocol = True
want = True
if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
if not want:
replace.append({'port': listener['instance_port'], 'policies': []})
elif want:
replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
# enable or disable proxy protocol
if ensure_proxy_protocol:
self._set_proxy_protocol_policy()
# Make the backend policies so
for item in replace:
self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
self.changed = True
def _get_proxy_protocol_policy(self):
"""Find out if the elb has a proxy protocol enabled"""
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
def _set_proxy_protocol_policy(self):
"""Install a proxy protocol policy if needed"""
proxy_policy = self._get_proxy_protocol_policy()
if proxy_policy is None:
self.elb_conn.create_lb_policy(
self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
)
self.changed = True
# TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
def _diff_list(self, a, b):
"""Find the entries in list a that are not in list b"""
b = set(b)
return [aa for aa in a if aa not in b]
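    # For example: _diff_list(['i-1', 'i-2'], ['i-2']) -> ['i-1']; unlike plain
    # set subtraction, the order of list a is preserved.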
def _get_instance_ids(self):
"""Get the current list of instance ids installed in the elb"""
instances = []
if self.elb.instances is not None:
for instance in self.elb.instances:
instances.append(instance.id)
return instances
def _set_instance_ids(self):
"""Register or deregister instances from an lb instance"""
assert_instances = self.instance_ids or []
has_instances = self._get_instance_ids()
add_instances = self._diff_list(assert_instances, has_instances)
if add_instances:
self.elb_conn.register_instances(self.elb.name, add_instances)
self.changed = True
if self.purge_instance_ids:
remove_instances = self._diff_list(has_instances, assert_instances)
if remove_instances:
self.elb_conn.deregister_instances(self.elb.name, remove_instances)
self.changed = True
def _set_tags(self):
"""Add/Delete tags"""
if self.tags is None:
return
params = {'LoadBalancerNames.member.1': self.name}
tagdict = dict()
# get the current list of tags from the ELB, if ELB exists
if self.elb:
current_tags = self.elb_conn.get_list('DescribeTags', params,
[('member', Tag)])
tagdict = dict((tag.Key, tag.Value) for tag in current_tags
if hasattr(tag, 'Key'))
# Add missing tags
dictact = dict(set(self.tags.items()) - set(tagdict.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
            self.changed = True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
            self.changed = True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
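    # For example, {'ping_protocol': 'http', 'ping_port': 80,
    # 'ping_path': '/index.html'} composes 'HTTP:80/index.html', while a TCP
    # check {'ping_protocol': 'tcp', 'ping_port': 443} gives 'TCP:443'.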
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent']},
name={'required': True},
listeners={'default': None, 'required': False, 'type': 'list'},
purge_listeners={'default': True, 'required': False, 'type': 'bool'},
instance_ids={'default': None, 'required': False, 'type': 'list'},
purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
zones={'default': None, 'required': False, 'type': 'list'},
purge_zones={'default': False, 'required': False, 'type': 'bool'},
security_group_ids={'default': None, 'required': False, 'type': 'list'},
security_group_names={'default': None, 'required': False, 'type': 'list'},
health_check={'default': None, 'required': False, 'type': 'dict'},
subnets={'default': None, 'required': False, 'type': 'list'},
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
connection_draining_timeout={'default': None, 'required': False},
idle_timeout={'default': None, 'required': False},
cross_az_load_balancing={'default': None, 'required': False},
stickiness={'default': None, 'required': False, 'type': 'dict'},
access_logs={'default': None, 'required': False, 'type': 'dict'},
wait={'default': False, 'type': 'bool', 'required': False},
wait_timeout={'default': 60, 'type': 'int', 'required': False},
tags={'default': None, 'required': False, 'type': 'dict'}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
        mutually_exclusive=[['security_group_ids', 'security_group_names']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
name = module.params['name']
state = module.params['state']
listeners = module.params['listeners']
purge_listeners = module.params['purge_listeners']
instance_ids = module.params['instance_ids']
purge_instance_ids = module.params['purge_instance_ids']
zones = module.params['zones']
purge_zones = module.params['purge_zones']
security_group_ids = module.params['security_group_ids']
security_group_names = module.params['security_group_names']
health_check = module.params['health_check']
access_logs = module.params['access_logs']
subnets = module.params['subnets']
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
idle_timeout = module.params['idle_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
stickiness = module.params['stickiness']
wait = module.params['wait']
wait_timeout = module.params['wait_timeout']
tags = module.params['tags']
if state == 'present' and not listeners:
module.fail_json(msg="At least one listener is required for ELB creation")
if state == 'present' and not (zones or subnets):
module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
if wait_timeout > 600:
module.fail_json(msg='wait_timeout maximum is 600 seconds')
if security_group_names:
security_group_ids = []
try:
ec2 = ec2_connect(module)
grp_details = ec2.get_all_security_groups()
for group_name in security_group_names:
if isinstance(group_name, basestring):
group_name = [group_name]
                group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, idle_timeout,
cross_az_load_balancing,
access_logs, stickiness, wait, wait_timeout, tags,
region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
**aws_connect_params)
# check for unsupported attributes for this version of boto
if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':
elb_man.ensure_gone()
ansible_facts = {'ec2_elb': 'info'}
ec2_facts_result = dict(changed=elb_man.changed,
elb=elb_man.get_info(),
ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| dustymabe/ansible-modules-core | cloud/amazon/ec2_elb_lb.py | Python | gpl-3.0 | 52,548 |
# coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class StorageData(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, description=None, comment=None):
"""
StorageData - a model defined in Swagger
:param name: The name of this StorageData.
:type name: str
:param description: The description of this StorageData.
:type description: str
:param comment: The comment of this StorageData.
:type comment: str
"""
self.swagger_types = {
'name': str,
'description': str,
'comment': str
}
self.attribute_map = {
'name': 'name',
'description': 'description',
'comment': 'comment'
}
self._name = name
self._description = description
self._comment = comment
@classmethod
def from_dict(cls, dikt):
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The StorageData of this StorageData.
:rtype: StorageData
"""
return deserialize_model(dikt, cls)
@property
def name(self):
"""
Gets the name of this StorageData.
:return: The name of this StorageData.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this StorageData.
:param name: The name of this StorageData.
:type name: str
"""
self._name = name
@property
def description(self):
"""
Gets the description of this StorageData.
:return: The description of this StorageData.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this StorageData.
:param description: The description of this StorageData.
:type description: str
"""
self._description = description
@property
def comment(self):
"""
Gets the comment of this StorageData.
:return: The comment of this StorageData.
:rtype: str
"""
return self._comment
@comment.setter
def comment(self, comment):
"""
Sets the comment of this StorageData.
:param comment: The comment of this StorageData.
:type comment: str
"""
self._comment = comment
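# Minimal usage sketch (illustrative; assumes deserialize_model from ..util
# accepts a plain dict, as in swagger-codegen's generated servers):
#
#     data = StorageData.from_dict({'name': 'shelf-A1',
#                                   'description': 'Main storage shelf',
#                                   'comment': 'reserved'})
#     data.comment = 'in use'   # setter updates the underlying _comment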
| turdusmerula/kipartman | kipartbase/swagger_server/models/storage_data.py | Python | gpl-3.0 | 2,758 |
# -*- coding: utf-8 -*-
from __future__ import division
'''Test for checking variation of initial prestress force along a
post-tensioned member.
Data and rough calculation are taken from
Example 4.3 of the topic 4 of course "Prestressed Concrete Design
(SAB 4323) by Baderul Hisham Ahmad
ocw.utm.my
Problem statement:
Determine the initial prestress force distribution
along the beam if the anchorage draw-in is 5 mm.
Given the following:
• Span = 20 m, μ = 0.25 & K = 17 x 10^-4 per metre
• fpi = 1239 N/mm^2; Aps = 2850 mm^2
• e at both ends = 0
• e at mid-span = 558 mm
• Es = 195 kN/mm^2
'''
__author__= "Ana Ortega (AO_O)"
__copyright__= "Copyright 2017, AO_O"
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected]"
import numpy as np
import math
from materials.prestressing import prestressed_concrete as presconc
from model.geometry import geom_utils
#Geometry
lBeam=20 #beam span [m]
#Parabola
eEnds=0 #eccentricity of cables at both ends of the beam
eMidspan=-0.558 #eccentricity of cables at midspan [m]
angl_Parab_XZ=math.pi/4 #angle between the vertical plane that contains the
#parabola and the plane XZ
#Material
Ep=195e9 #elastic modulus of prestressing steel [Pa]
#Prestressing process
mu=0.25 #coefficient of friction between the cables and their sheating
k=0.0017 #wobble coefficient per meter length of cable [1/m]
sigmap0max=1239e6 #Initial stress of cable [Pa]
Aps=2850e-6 #Area of cable [m2]
# Interpolation
n_points_rough=5 #number of points provided to the interpolation algorithm
n_points_fine=101 #number of points interpolated
#Anchorage slip
deltaL=5e-3 #anchorage draw-in (provided by manufacturer) [m]
#Rough results from direct calculation (formula):
lp_anch_lhe=419.3 #loss of prestress force at left-hand end anchorage [kN]
fl_frc=15.82 #loss of prestress due to friction [kN/m]
P_le=3111.9 #prestress force at left end [kN]
P_ms=3270.1 #prestress force at midspan [kN]
P_re=3214.8 #prestress force at right end [kN]
# XC model
#Tendon [m] definition, layout and friction losses
a,b,c=geom_utils.fit_parabola(x=np.array([0,lBeam/2.0,lBeam]), y=np.array([eEnds,eMidspan,eEnds]))
x_parab_rough,y_parab_rough,z_parab_rough=geom_utils.eq_points_parabola(0,lBeam,n_points_rough,a,b,c,angl_Parab_XZ)
tendon=presconc.PrestressTendon([])
tendon.roughCoordMtr=np.array([x_parab_rough,y_parab_rough,z_parab_rough])
#Interpolated 3D spline
tendon.pntsInterpTendon(n_points_fine,smoothness=1,kgrade=3)
# Losses of prestressing due to friction
lssFrict=tendon.getLossFriction(coefFric=mu,k=k,sigmaP0_extr1=sigmap0max,sigmaP0_extr2=0.0)
# Losses of prestressing due to anchorage slip (loss due to friction must be
# previously calculated
lssAnch=tendon.getLossAnchor(Ep=Ep,anc_slip_extr1=deltaL,anc_slip_extr2=0.0)
Laffected=tendon.projXYcoordZeroAnchLoss[0] # effective length of tendon
#affected by the anchorage slip in extremity 1 [m]
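# Rough cross-check of the anchorage draw-in loss (illustrative, assuming a
# linear friction loss gradient p [kN/m] near the anchorage): the affected
# length is x_A = sqrt(deltaL*Ep*Aps/p) and the loss at the anchorage 2*p*x_A.
# With p = fl_frc = 15.82 kN/m:
#     math.sqrt(deltaL*Ep*Aps/(fl_frc*1e3))   # ~13.25 m
#     2*fl_frc*13.25                          # ~419 kN, matching lp_anch_lhe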
# Results
lssAnch_e1=lssAnch[0] #prestress loss due to anchorage draw-in extremity 1
lssAnch_md=lssAnch[int(len(lssAnch)/2)] #prestress loss due to anchorage draw-in midspan
lssAnch_e2=lssAnch[-1] #prestress loss due to anchorage draw-in extremity 2
lssFrict_e1=lssFrict[0] #prestress loss due to friction extremity 1
lssFrict_md=lssFrict[int(len(lssFrict)/2)] #prestress loss due to friction midspan
lssFrict_e2=lssFrict[-1] #prestress loss due to friction extremity 2
P_extr1=(sigmap0max-lssAnch_e1-lssFrict_e1)*Aps*1e-3
P_midspan=(sigmap0max-lssAnch_md-lssFrict_md)*Aps*1e-3
P_extr2=(sigmap0max-lssAnch_e2-lssFrict_e2)*Aps*1e-3
ratio1=abs(P_extr1-P_le)/P_le
ratio2=abs(P_midspan-P_ms)/P_ms
ratio3=abs(P_extr2-P_re)/P_re
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (ratio1<5.e-3 and ratio2<5.e-4 and ratio3<5.e-3):
    print "test ", fname, ": ok."
else:
lmsg.error(fname+' ERROR.')
| lcpt/xc | verif/tests/materials/prestressing/test_short_term_loss_prestress_01.py | Python | gpl-3.0 | 3,985 |
"""
This module contains various methods for checking the type of timelines and a
class that creates all kinds of timelines.
"""
import re
from functools import partial
from gettext import gettext as _
from turses.models import Timeline, is_DM
HOME_TIMELINE = 'home'
MENTIONS_TIMELINE = 'mentions'
FAVORITES_TIMELINE = 'favorites'
MESSAGES_TIMELINE = 'messages'
OWN_TWEETS_TIMELINE = 'own_tweets'
DEFAULT_TIMELINES = [
HOME_TIMELINE,
MENTIONS_TIMELINE,
FAVORITES_TIMELINE,
MESSAGES_TIMELINE,
OWN_TWEETS_TIMELINE,
]
def check_update_function_name(timeline, update_function_name=None):
if not isinstance(timeline, Timeline):
return False
update_function = timeline.update_function
if update_function is None:
return False
return update_function.__name__ == update_function_name
is_home_timeline = partial(check_update_function_name,
update_function_name='get_home_timeline')
is_mentions_timeline = partial(check_update_function_name,
update_function_name='get_mentions')
is_favorites_timeline = partial(check_update_function_name,
update_function_name='get_favorites')
is_own_timeline = partial(check_update_function_name,
update_function_name='get_own_timeline')
is_messages_timeline = partial(check_update_function_name,
update_function_name='get_direct_messages')
is_search_timeline = partial(check_update_function_name,
update_function_name='search')
is_user_timeline = partial(check_update_function_name,
update_function_name='get_user_timeline')
is_retweets_of_me_timeline = partial(check_update_function_name,
update_function_name='get_retweets_of_me')
is_thread_timeline = partial(check_update_function_name,
update_function_name='get_thread')
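# For example, is_home_timeline(t) is True only for a Timeline whose update
# function is named 'get_home_timeline'; a non-Timeline argument or a Timeline
# without an update function yields False.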
search_name_re = re.compile(r'^search:(?P<query>.+)$')
user_name_re = re.compile(r'^user:(?P<screen_name>[A-Za-z0-9_]+)$')
class TimelineFactory:
def __init__(self, api):
self.api = api
def __call__(self, timeline_string):
timeline = timeline_string.strip()
if timeline == HOME_TIMELINE:
return Timeline(name=_('tweets'),
update_function=self.api.get_home_timeline,)
elif timeline == MENTIONS_TIMELINE:
return Timeline(name=_('mentions'),
update_function=self.api.get_mentions,)
elif timeline == FAVORITES_TIMELINE:
return Timeline(name=_('favorites'),
update_function=self.api.get_favorites,)
elif timeline == MESSAGES_TIMELINE:
return Timeline(name=_('messages'),
update_function=self.api.get_direct_messages,)
elif timeline == OWN_TWEETS_TIMELINE:
return Timeline(name=_('me'),
update_function=self.api.get_own_timeline,)
elif timeline == 'retweets_of_me':
return Timeline(name=_('retweets of me'),
update_function=self.api.get_retweets_of_me,)
is_search = search_name_re.match(timeline)
if is_search:
query = is_search.groupdict()['query']
return Timeline(name=_('Search: %s' % query),
update_function=self.api.search,
update_function_args=query,)
is_user = user_name_re.match(timeline)
if is_user:
screen_name = is_user.groupdict()['screen_name']
timeline_name = _('@{screen_name}'.format(screen_name=screen_name))
return Timeline(name=timeline_name,
update_function=self.api.get_user_timeline,
update_function_args=screen_name,)
def valid_timeline_name(self, name):
if name in DEFAULT_TIMELINES:
return True
if name == 'retweets_of_me':
return True
# search
if search_name_re.match(name):
return True
# user
if user_name_re.match(name):
return True
return False
def thread(self, status):
"""
Create a timeline with the conversation to which `status` belongs.
`status` can be a regular status or a direct message.
"""
if is_DM(status):
participants = [status.sender_screen_name,
status.recipient_screen_name]
name = _('DM thread: %s' % ', '.join(participants))
update_function = self.api.get_message_thread
else:
participants = status.mentioned_usernames
author = status.authors_username
if author not in participants:
participants.insert(0, author)
name = _('thread: %s' % ', '.join(participants))
update_function = self.api.get_thread
return Timeline(name=name,
update_function=update_function,
update_function_args=status,)
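# Minimal usage sketch (illustrative; `api` is any object exposing the update
# functions referenced above, such as turses' API backend):
#
#     factory = TimelineFactory(api)
#     home = factory('home')              # updates via api.get_home_timeline
#     search = factory('search:python')   # updates via api.search('python')
#     factory.valid_timeline_name('user:gnu')  # True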
| joedicastro/turses | turses/api/helpers.py | Python | gpl-3.0 | 5,201 |
import re
from tower import ugettext_lazy as _lazy
from tower import ugettext as _
from django import forms
from django.conf import settings
from django.forms.widgets import CheckboxSelectMultiple
from kuma.contentflagging.forms import ContentFlagForm
import kuma.wiki.content
from kuma.core.form_fields import StrippedCharField
from .constants import (SLUG_CLEANSING_REGEX, REVIEW_FLAG_TAGS,
LOCALIZATION_FLAG_TAGS, RESERVED_SLUGS)
from .models import (Document, Revision,
valid_slug_parent)
TITLE_REQUIRED = _lazy(u'Please provide a title.')
TITLE_SHORT = _lazy(u'The title is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
TITLE_LONG = _lazy(u'Please keep the length of the title to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
TITLE_PLACEHOLDER = _lazy(u'Name Your Article')
SLUG_REQUIRED = _lazy(u'Please provide a slug.')
SLUG_INVALID = _lazy(u'The slug provided is not valid.')
SLUG_SHORT = _lazy(u'The slug is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SLUG_LONG = _lazy(u'Please keep the length of the slug to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
SUMMARY_REQUIRED = _lazy(u'Please provide a summary.')
SUMMARY_SHORT = _lazy(u'The summary is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SUMMARY_LONG = _lazy(u'Please keep the length of the summary to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
CONTENT_REQUIRED = _lazy(u'Please provide content.')
CONTENT_SHORT = _lazy(u'The content is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
CONTENT_LONG = _lazy(u'Please keep the length of the content to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
COMMENT_LONG = _lazy(u'Please keep the length of the comment to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
SLUG_COLLIDES = _lazy(u'Another document with this slug already exists.')
OTHER_COLLIDES = _lazy(u'Another document with this metadata already exists.')
MIDAIR_COLLISION = _lazy(u'This document was modified while you were '
'editing it.')
MOVE_REQUIRED = _lazy(u"Changing this document's slug requires "
u"moving it and its children.")
class DocumentForm(forms.ModelForm):
"""Form to create/edit a document."""
title = StrippedCharField(min_length=1, max_length=255,
widget=forms.TextInput(
attrs={'placeholder': TITLE_PLACEHOLDER}),
label=_lazy(u'Title:'),
help_text=_lazy(u'Title of article'),
error_messages={'required': TITLE_REQUIRED,
'min_length': TITLE_SHORT,
'max_length': TITLE_LONG})
slug = StrippedCharField(min_length=1, max_length=255,
widget=forms.TextInput(),
label=_lazy(u'Slug:'),
help_text=_lazy(u'Article URL'),
error_messages={'required': SLUG_REQUIRED,
'min_length': SLUG_SHORT,
'max_length': SLUG_LONG})
category = forms.ChoiceField(choices=Document.CATEGORIES,
initial=10,
# Required for non-translations, which is
# enforced in Document.clean().
required=False,
label=_lazy(u'Category:'),
help_text=_lazy(u'Type of article'),
widget=forms.HiddenInput())
parent_topic = forms.ModelChoiceField(queryset=Document.objects.all(),
required=False,
label=_lazy(u'Parent:'))
locale = forms.CharField(widget=forms.HiddenInput())
def clean_slug(self):
slug = self.cleaned_data['slug']
if slug == '':
# Default to the title, if missing.
slug = self.cleaned_data['title']
# "?", " ", quote disallowed in slugs altogether
if '?' in slug or ' ' in slug or '"' in slug or "'" in slug:
raise forms.ValidationError(SLUG_INVALID)
# Pattern copied from urls.py
if not re.compile(r'^[^\$]+$').match(slug):
raise forms.ValidationError(SLUG_INVALID)
# Guard against slugs that match urlpatterns
for pat in RESERVED_SLUGS:
if re.compile(pat).match(slug):
raise forms.ValidationError(SLUG_INVALID)
return slug
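    # For example, 'Project/Glossary' passes this check (slashes are rejected
    # separately by RevisionValidationForm), while 'what?', "it's" or any slug
    # matching a RESERVED_SLUGS pattern raises SLUG_INVALID.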
class Meta:
model = Document
fields = ('title', 'slug', 'category', 'locale')
def save(self, parent_doc, **kwargs):
"""Persist the Document form, and return the saved Document."""
doc = super(DocumentForm, self).save(commit=False, **kwargs)
doc.parent = parent_doc
if 'parent_topic' in self.cleaned_data:
doc.parent_topic = self.cleaned_data['parent_topic']
doc.save()
# not strictly necessary since we didn't change
# any m2m data since we instantiated the doc
self.save_m2m()
return doc
class RevisionForm(forms.ModelForm):
"""Form to create new revisions."""
title = StrippedCharField(min_length=1, max_length=255,
required=False,
widget=forms.TextInput(
attrs={'placeholder': TITLE_PLACEHOLDER}),
label=_lazy(u'Title:'),
help_text=_lazy(u'Title of article'),
error_messages={'required': TITLE_REQUIRED,
'min_length': TITLE_SHORT,
'max_length': TITLE_LONG})
slug = StrippedCharField(min_length=1, max_length=255,
required=False,
widget=forms.TextInput(),
label=_lazy(u'Slug:'),
help_text=_lazy(u'Article URL'),
error_messages={'required': SLUG_REQUIRED,
'min_length': SLUG_SHORT,
'max_length': SLUG_LONG})
tags = StrippedCharField(required=False,
label=_lazy(u'Tags:'))
keywords = StrippedCharField(required=False,
label=_lazy(u'Keywords:'),
help_text=_lazy(u'Affects search results'))
summary = StrippedCharField(required=False,
min_length=5, max_length=1000, widget=forms.Textarea(),
label=_lazy(u'Search result summary:'),
help_text=_lazy(u'Only displayed on search results page'),
error_messages={'required': SUMMARY_REQUIRED,
'min_length': SUMMARY_SHORT,
'max_length': SUMMARY_LONG})
content = StrippedCharField(
min_length=5, max_length=300000,
label=_lazy(u'Content:'),
widget=forms.Textarea(),
error_messages={'required': CONTENT_REQUIRED,
'min_length': CONTENT_SHORT,
'max_length': CONTENT_LONG})
comment = StrippedCharField(required=False, label=_lazy(u'Comment:'))
review_tags = forms.MultipleChoiceField(
label=_("Tag this revision for review?"),
widget=CheckboxSelectMultiple, required=False,
choices=REVIEW_FLAG_TAGS)
localization_tags = forms.MultipleChoiceField(
label=_("Tag this revision for localization?"),
widget=CheckboxSelectMultiple, required=False,
choices=LOCALIZATION_FLAG_TAGS)
current_rev = forms.CharField(required=False,
widget=forms.HiddenInput())
class Meta(object):
model = Revision
fields = ('title', 'slug', 'tags', 'keywords', 'summary', 'content',
'comment', 'based_on', 'toc_depth',
'render_max_age')
def __init__(self, *args, **kwargs):
# Snag some optional kwargs and delete them before calling
# super-constructor.
for n in ('section_id', 'is_iframe_target'):
if n not in kwargs:
setattr(self, n, None)
else:
setattr(self, n, kwargs[n])
del kwargs[n]
super(RevisionForm, self).__init__(*args, **kwargs)
self.fields['based_on'].widget = forms.HiddenInput()
if self.instance and self.instance.pk:
# Ensure both title and slug are populated from parent document, if
# last revision didn't have them
if not self.instance.title:
self.initial['title'] = self.instance.document.title
if not self.instance.slug:
self.initial['slug'] = self.instance.document.slug
content = self.instance.content
if not self.instance.document.is_template:
tool = kuma.wiki.content.parse(content)
tool.injectSectionIDs()
if self.section_id:
tool.extractSection(self.section_id)
tool.filterEditorSafety()
content = tool.serialize()
self.initial['content'] = content
self.initial['review_tags'] = [x.name
for x in self.instance.review_tags.all()]
self.initial['localization_tags'] = [x.name
for x in self.instance.localization_tags.all()]
if self.section_id:
self.fields['toc_depth'].required = False
def _clean_collidable(self, name):
value = self.cleaned_data[name]
if self.is_iframe_target:
# Since these collidables can change the URL of the page, changes
# to them are ignored for an iframe submission
return getattr(self.instance.document, name)
error_message = {'slug': SLUG_COLLIDES}.get(name, OTHER_COLLIDES)
try:
existing_doc = Document.objects.get(
locale=self.instance.document.locale,
**{name: value})
if self.instance and self.instance.document:
if (not existing_doc.redirect_url() and
existing_doc.pk != self.instance.document.pk):
# There's another document with this value,
# and we're not a revision of it.
raise forms.ValidationError(error_message)
else:
# This document-and-revision doesn't exist yet, so there
# shouldn't be any collisions at all.
raise forms.ValidationError(error_message)
except Document.DoesNotExist:
# No existing document for this value, so we're good here.
pass
return value
def clean_slug(self):
# TODO: move this check somewhere else?
# edits can come in without a slug, so default to the current doc slug
if not self.cleaned_data['slug']:
existing_slug = self.instance.document.slug
self.cleaned_data['slug'] = self.instance.slug = existing_slug
cleaned_slug = self._clean_collidable('slug')
return cleaned_slug
def clean_content(self):
"""Validate the content, performing any section editing if necessary"""
content = self.cleaned_data['content']
# If we're editing a section, we need to replace the section content
# from the current revision.
if self.section_id and self.instance and self.instance.document:
# Make sure we start with content form the latest revision.
full_content = self.instance.document.current_revision.content
# Replace the section content with the form content.
tool = kuma.wiki.content.parse(full_content)
tool.replaceSection(self.section_id, content)
content = tool.serialize()
return content
def clean_current_rev(self):
"""If a current revision is supplied in the form, compare it against
what the document claims is the current revision. If there's a
difference, then an edit has occurred since the form was constructed
and we treat it as a mid-air collision."""
current_rev = self.cleaned_data.get('current_rev', None)
if not current_rev:
# If there's no current_rev, just bail.
return current_rev
try:
doc_current_rev = self.instance.document.current_revision.id
if unicode(current_rev) != unicode(doc_current_rev):
if (self.section_id and self.instance and
self.instance.document):
# This is a section edit. So, even though the revision has
# changed, it still might not be a collision if the section
# in particular hasn't changed.
orig_ct = (Revision.objects.get(pk=current_rev)
.get_section_content(self.section_id))
curr_ct = (self.instance.document.current_revision
.get_section_content(self.section_id))
if orig_ct != curr_ct:
# Oops. Looks like the section did actually get
# changed, so yeah this is a collision.
raise forms.ValidationError(MIDAIR_COLLISION)
return current_rev
else:
# No section edit, so this is a flat-out collision.
raise forms.ValidationError(MIDAIR_COLLISION)
except Document.DoesNotExist:
# If there's no document yet, just bail.
return current_rev
def save_section(self, creator, document, **kwargs):
"""Save a section edit."""
# This is separate because the logic is slightly different and
# may need to evolve over time; a section edit doesn't submit
# all the fields, and we need to account for that when we
# construct the new Revision.
old_rev = Document.objects.get(pk=self.instance.document.id).current_revision
new_rev = super(RevisionForm, self).save(commit=False, **kwargs)
new_rev.document = document
new_rev.creator = creator
new_rev.toc_depth = old_rev.toc_depth
new_rev.save()
new_rev.review_tags.set(*[t.name for t in
old_rev.review_tags.all()])
return new_rev
def save(self, creator, document, **kwargs):
"""Persist me, and return the saved Revision.
Take several other necessary pieces of data that aren't from the
form.
"""
if self.section_id and self.instance and \
self.instance.document:
return self.save_section(creator, document, **kwargs)
# Throws a TypeError if somebody passes in a commit kwarg:
new_rev = super(RevisionForm, self).save(commit=False, **kwargs)
new_rev.document = document
new_rev.creator = creator
new_rev.toc_depth = self.cleaned_data['toc_depth']
new_rev.save()
new_rev.review_tags.set(*self.cleaned_data['review_tags'])
new_rev.localization_tags.set(*self.cleaned_data['localization_tags'])
return new_rev
class RevisionValidationForm(RevisionForm):
"""Created primarily to disallow slashes in slugs during validation"""
def clean_slug(self):
is_valid = True
original = self.cleaned_data['slug']
# "/", "?", and " " disallowed in form input
if (u'' == original or
'/' in original or
'?' in original or
' ' in original):
is_valid = False
raise forms.ValidationError(SLUG_INVALID)
# Append parent slug data, call super, ensure still valid
self.cleaned_data['slug'] = self.data['slug'] = (self.parent_slug +
'/' +
original)
is_valid = (is_valid and
super(RevisionValidationForm, self).clean_slug())
# Set the slug back to original
# if not is_valid:
self.cleaned_data['slug'] = self.data['slug'] = original
return self.cleaned_data['slug']
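    # For example, with parent_slug 'Web/API' and input 'fetch', collision
    # validation runs against 'Web/API/fetch' and cleaned_data['slug'] is then
    # restored to 'fetch'.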
class TreeMoveForm(forms.Form):
title = StrippedCharField(min_length=1, max_length=255,
required=False,
widget=forms.TextInput(
attrs={'placeholder': TITLE_PLACEHOLDER}),
label=_lazy(u'Title:'),
help_text=_lazy(u'Title of article'),
error_messages={'required': TITLE_REQUIRED,
'min_length': TITLE_SHORT,
'max_length': TITLE_LONG})
slug = StrippedCharField(min_length=1, max_length=255,
widget=forms.TextInput(),
label=_lazy(u'New slug:'),
help_text=_lazy(u'New article URL'),
error_messages={'required': SLUG_REQUIRED,
'min_length': SLUG_SHORT,
'max_length': SLUG_LONG})
locale = StrippedCharField(min_length=2, max_length=5,
widget=forms.HiddenInput())
def clean_slug(self):
# We only want the slug here; inputting a full URL would lead
# to disaster.
if '://' in self.cleaned_data['slug']:
raise forms.ValidationError('Please enter only the slug to move '
'to, not the full URL.')
# Removes leading slash and {locale/docs/} if necessary
# IMPORTANT: This exact same regex is used on the client side, so
# update both if doing so
self.cleaned_data['slug'] = re.sub(re.compile(SLUG_CLEANSING_REGEX),
'', self.cleaned_data['slug'])
return self.cleaned_data['slug']
def clean(self):
cleaned_data = super(TreeMoveForm, self).clean()
if set(['slug', 'locale']).issubset(cleaned_data):
slug, locale = cleaned_data['slug'], cleaned_data['locale']
try:
valid_slug_parent(slug, locale)
            except Exception as e:
raise forms.ValidationError(e.args[0])
return cleaned_data
class DocumentDeletionForm(forms.Form):
reason = forms.CharField(widget=forms.Textarea(attrs={'autofocus': 'true'}))
class DocumentContentFlagForm(ContentFlagForm):
flag_type = forms.ChoiceField(
choices=settings.WIKI_FLAG_REASONS,
widget=forms.RadioSelect)
| mastizada/kuma | kuma/wiki/forms.py | Python | mpl-2.0 | 19,906 |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Anthony Verez [email protected]
import logging
import requests
import sys
from datetime import datetime
from configlib import getConfig, OptionParser
from logging.handlers import SysLogHandler
from pymongo import MongoClient
import os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../lib'))
from utilities.toUTC import toUTC
from elasticsearch_client import ElasticsearchClient
from query_models import SearchQuery, TermMatch
logger = logging.getLogger(sys.argv[0])
def loggerTimeStamp(self, record, datefmt=None):
return toUTC(datetime.now()).isoformat()
def initLogger():
logger.level = logging.INFO
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter.formatTime = loggerTimeStamp
if options.output == 'syslog':
logger.addHandler(
SysLogHandler(
address=(options.sysloghostname,
options.syslogport)))
else:
sh = logging.StreamHandler(sys.stderr)
sh.setFormatter(formatter)
logger.addHandler(sh)
def getFrontendStats(es):
search_query = SearchQuery(minutes=15)
search_query.add_must([
TermMatch('_type', 'mozdefhealth'),
TermMatch('category', 'mozdef'),
TermMatch('tags', 'latest'),
])
results = search_query.execute(es, indices=['events'])
return results['hits']
def writeFrontendStats(data, mongo):
# Empty everything before
mongo.healthfrontend.remove({})
for host in data:
for key in host['_source']['details'].keys():
# remove unwanted data
if '.' in key:
del host['_source']['details'][key]
mongo.healthfrontend.insert(host['_source'])
def writeEsClusterStats(data, mongo):
# Empty everything before
mongo.healthescluster.remove({})
mongo.healthescluster.insert(data)
def getEsNodesStats():
r = requests.get(options.esservers[0] + '/_nodes/stats/os,jvm,fs')
jsonobj = r.json()
results = []
for nodeid in jsonobj['nodes']:
# Skip non masters and data nodes since it won't have full stats
if ('attributes' in jsonobj['nodes'][nodeid] and
jsonobj['nodes'][nodeid]['attributes']['master'] == 'false' and
jsonobj['nodes'][nodeid]['attributes']['data'] == 'false'):
continue
results.append({
'hostname': jsonobj['nodes'][nodeid]['host'],
'disk_free': jsonobj['nodes'][nodeid]['fs']['total']['free_in_bytes'] / (1024 * 1024 * 1024),
'disk_total': jsonobj['nodes'][nodeid]['fs']['total']['total_in_bytes'] / (1024 * 1024 * 1024),
'mem_heap_per': jsonobj['nodes'][nodeid]['jvm']['mem']['heap_used_percent'],
'cpu_usage': jsonobj['nodes'][nodeid]['os']['cpu_percent'],
'load': jsonobj['nodes'][nodeid]['os']['load_average']
})
return results
def writeEsNodesStats(data, mongo):
# Empty everything before
mongo.healthesnodes.remove({})
for nodedata in data:
mongo.healthesnodes.insert(nodedata)
def getEsHotThreads():
r = requests.get(options.esservers[0] + '/_nodes/hot_threads')
results = []
for line in r.text.split('\n'):
if 'cpu usage' in line:
results.append(line)
return results
def writeEsHotThreads(data, mongo):
# Empty everything before
mongo.healtheshotthreads.remove({})
for line in data:
mongo.healtheshotthreads.insert({'line': line})
def main():
logger.debug('starting')
logger.debug(options)
try:
es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
client = MongoClient(options.mongohost, options.mongoport)
# use meteor db
mongo = client.meteor
writeFrontendStats(getFrontendStats(es), mongo)
writeEsClusterStats(es.get_cluster_health(), mongo)
writeEsNodesStats(getEsNodesStats(), mongo)
writeEsHotThreads(getEsHotThreads(), mongo)
except Exception as e:
logger.error("Exception %r sending health to mongo" % e)
def initConfig():
# output our log to stdout or syslog
options.output = getConfig('output', 'stdout', options.configfile)
# syslog hostname
options.sysloghostname = getConfig('sysloghostname', 'localhost',
options.configfile)
# syslog port
options.syslogport = getConfig('syslogport', 514, options.configfile)
# elastic search server settings
options.esservers = list(getConfig('esservers', 'http://localhost:9200',
options.configfile).split(','))
options.mongohost = getConfig('mongohost', 'localhost', options.configfile)
options.mongoport = getConfig('mongoport', 3001, options.configfile)
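# Illustrative healthToMongo.conf (assumed layout; MozDef's getConfig reads
# ConfigParser-style files, conventionally with an [options] section):
#
#     [options]
#     output = syslog
#     sysloghostname = syslog.example.com
#     syslogport = 514
#     esservers = http://es1:9200,http://es2:9200
#     mongohost = localhost
#     mongoport = 3001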
if __name__ == '__main__':
parser = OptionParser()
parser.add_option(
"-c",
dest='configfile',
default=sys.argv[0].replace('.py', '.conf'),
help="configuration file to use")
(options, args) = parser.parse_args()
initConfig()
initLogger()
main()
| ameihm0912/MozDef | cron/healthToMongo.py | Python | mpl-2.0 | 5,358 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from collections import Mapping
import mo_dots as dot
from mo_math import SUM
from pyLibrary.queries.containers import Container
from pyLibrary.queries.domains import Domain, ALGEBRAIC, KNOWN
from mo_dots import Null, coalesce, join_field, split_field, Data
from mo_dots.lists import FlatList
from mo_times.timer import Timer
from mo_logs import Log
from mo_dots import wrap, listwrap
DEFAULT_QUERY_LIMIT = 20
class Dimension(Container):
__slots__ = ["name", "full_name", "where", "type", "limit", "index", "parent", "edges", "partitions", "fields"]
def __init__(self, dim, parent, jx):
dim = wrap(dim)
self.name = dim.name
self.parent = coalesce(parent)
self.full_name = join_field(split_field(self.parent.full_name)+[self.name])
self.edges = None # FOR NOW
dot.set_default(self, dim)
self.where = dim.where
self.type = coalesce(dim.type, "set")
self.limit = coalesce(dim.limit, DEFAULT_QUERY_LIMIT)
self.index = coalesce(dim.index, coalesce(parent, Null).index, jx.settings.index)
if not self.index:
Log.error("Expecting an index name")
# ALLOW ACCESS TO SUB-PART BY NAME (IF ONLY THERE IS NO NAME COLLISION)
self.edges = Data()
for e in listwrap(dim.edges):
new_e = Dimension(e, self, jx)
self.edges[new_e.full_name] = new_e
self.partitions = wrap(coalesce(dim.partitions, []))
parse_partition(self)
fields = coalesce(dim.field, dim.fields)
if not fields:
return # NO FIELDS TO SEARCH
elif isinstance(fields, Mapping):
self.fields = wrap(fields)
edges = wrap([{"name": k, "value": v, "allowNulls": False} for k, v in self.fields.items()])
else:
self.fields = listwrap(fields)
edges = wrap([{"name": f, "value": f, "index": i, "allowNulls": False} for i, f in enumerate(self.fields)])
if dim.partitions:
return # ALREADY HAVE PARTS
if self.type not in KNOWN - ALGEBRAIC:
return # PARTS OR TOO FUZZY (OR TOO NUMEROUS) TO FETCH
jx.get_columns()
with Timer("Get parts of {{name}}", {"name": self.name}):
parts = jx.query({
"from": self.index,
"select": {"name": "count", "aggregate": "count"},
"edges": edges,
"where": self.where,
"limit": self.limit
})
Log.note("{{name}} has {{num}} parts", name= self.name, num= len(parts))
d = parts.edges[0].domain
if dim.path:
if len(edges) > 1:
Log.error("Not supported yet")
# EACH TERM RETURNED IS A PATH INTO A PARTITION TREE
temp = Data(partitions=[])
for i, count in enumerate(parts):
a = dim.path(d.getEnd(d.partitions[i]))
if not isinstance(a, list):
Log.error("The path function on " + dim.name + " must return an ARRAY of parts")
addParts(
temp,
dim.path(d.getEnd(d.partitions[i])),
count,
0
)
self.value = coalesce(dim.value, "name")
self.partitions = temp.partitions
elif isinstance(fields, Mapping):
self.value = "name" # USE THE "name" ATTRIBUTE OF PARTS
partitions = FlatList()
for g, p in parts.groupby(edges):
if p:
partitions.append({
"value": g,
"where": {"and": [
{"term": {e.value: g[e.name]}}
for e in edges
]},
"count": int(p)
})
self.partitions = partitions
elif len(edges) == 1:
self.value = "name" # USE THE "name" ATTRIBUTE OF PARTS
# SIMPLE LIST OF PARTS RETURNED, BE SURE TO INTERRELATE THEM
self.partitions = wrap([
{
"name": str(d.partitions[i].name), # CONVERT TO STRING
"value": d.getEnd(d.partitions[i]),
"where": {"term": {edges[0].value: d.partitions[i].value}},
"count": count
}
for i, count in enumerate(parts)
])
self.order = {p.value: i for i, p in enumerate(self.partitions)}
elif len(edges) == 2:
self.value = "name" # USE THE "name" ATTRIBUTE OF PARTS
d2 = parts.edges[1].domain
# SIMPLE LIST OF PARTS RETURNED, BE SURE TO INTERRELATE THEM
array = parts.data.values()[0].cube # DIG DEEP INTO RESULT (ASSUME SINGLE VALUE CUBE, WITH NULL AT END)
def edges2value(*values):
if isinstance(fields, Mapping):
output = Data()
for e, v in zip(edges, values):
output[e.name] = v
return output
else:
return tuple(values)
self.partitions = wrap([
{
"name": str(d.partitions[i].name), # CONVERT TO STRING
"value": d.getEnd(d.partitions[i]),
"where": {"term": {edges[0].value: d.partitions[i].value}},
"count": SUM(subcube),
"partitions": [
{
"name": str(d2.partitions[j].name), # CONVERT TO STRING
"value": edges2value(d.getEnd(d.partitions[i]), d2.getEnd(d2.partitions[j])),
"where": {"and": [
{"term": {edges[0].value: d.partitions[i].value}},
{"term": {edges[1].value: d2.partitions[j].value}}
]},
"count": count2
}
for j, count2 in enumerate(subcube)
if count2 > 0 # ONLY INCLUDE PROPERTIES THAT EXIST
]
}
for i, subcube in enumerate(array)
])
else:
Log.error("Not supported")
parse_partition(self) # RELATE THE PARTS TO THE PARENTS
def __getitem__(self, item):
return self.__getattr__(item)
def __getattr__(self, key):
"""
RETURN CHILD EDGE OR PARTITION BY NAME
"""
#TODO: IGNORE THE STANDARD DIMENSION PROPERTIES TO AVOID ACCIDENTAL SELECTION OF EDGE OR PART
if key in Dimension.__slots__:
return None
e = self.edges[key]
if e:
return e
for p in self.partitions:
if p.name == key:
return p
return Null
def getDomain(self, **kwargs):
# kwargs.depth IS MEANT TO REACH INTO SUB-PARTITIONS
kwargs = wrap(kwargs)
kwargs.depth = coalesce(kwargs.depth, len(self.fields)-1 if isinstance(self.fields, list) else None)
if not self.partitions and self.edges:
# USE EACH EDGE AS A PARTITION, BUT isFacet==True SO IT ALLOWS THE OVERLAP
partitions = [
{
"name": v.name,
"value": v.name,
"where": v.where,
"style": v.style,
"weight": v.weight # YO! WHAT DO WE *NOT* COPY?
}
for i, v in enumerate(self.edges)
if i < coalesce(self.limit, DEFAULT_QUERY_LIMIT) and v.where
]
self.isFacet = True
        elif kwargs.depth is None:  # ASSUME self.fields IS A dict
partitions = FlatList()
for i, part in enumerate(self.partitions):
if i >= coalesce(self.limit, DEFAULT_QUERY_LIMIT):
break
partitions.append({
"name":part.name,
"value":part.value,
"where":part.where,
"style":coalesce(part.style, part.parent.style),
"weight":part.weight # YO! WHAT DO WE *NOT* COPY?
})
elif kwargs.depth == 0:
partitions = [
{
"name":v.name,
"value":v.value,
"where":v.where,
"style":v.style,
"weight":v.weight # YO! WHAT DO WE *NOT* COPY?
}
for i, v in enumerate(self.partitions)
if i < coalesce(self.limit, DEFAULT_QUERY_LIMIT)]
elif kwargs.depth == 1:
partitions = FlatList()
rownum = 0
for i, part in enumerate(self.partitions):
if i >= coalesce(self.limit, DEFAULT_QUERY_LIMIT):
continue
rownum += 1
try:
for j, subpart in enumerate(part.partitions):
partitions.append({
"name":join_field(split_field(subpart.parent.name) + [subpart.name]),
"value":subpart.value,
"where":subpart.where,
"style":coalesce(subpart.style, subpart.parent.style),
"weight":subpart.weight # YO! WHAT DO WE *NOT* COPY?
})
except Exception as e:
Log.error("", e)
else:
Log.error("deeper than 2 is not supported yet")
return Domain(
type=self.type,
name=self.name,
partitions=wrap(partitions),
min=self.min,
max=self.max,
interval=self.interval,
# THE COMPLICATION IS THAT SOMETIMES WE WANT SIMPLE PARTITIONS, LIKE
# STRINGS, DATES, OR NUMBERS. OTHER TIMES WE WANT PARTITION OBJECTS
# WITH NAME, VALUE, AND OTHER MARKUP.
            # USUALLY A "set" IS MEANT TO BE SIMPLE, BUT THE end() FUNCTION
            # OVERRIDES EVERYTHING AND IS EXPLICIT. - NOT A GOOD SOLUTION BECAUSE
# end() IS USED BOTH TO INDICATE THE QUERY PARTITIONS *AND* DISPLAY
# COORDINATES ON CHARTS
# PLEASE SPLIT end() INTO value() (replacing the string value) AND
# label() (for presentation)
value="name" if not self.value and self.partitions else self.value,
key="value",
label=coalesce(self.label, (self.type == "set" and self.name)),
end=coalesce(self.end, (self.type == "set" and self.name)),
isFacet=self.isFacet,
dimension=self
)
def getSelect(self, **kwargs):
if self.fields:
if len(self.fields) == 1:
return Data(
name=self.full_name,
value=self.fields[0],
aggregate="none"
)
else:
return Data(
name=self.full_name,
value=self.fields,
aggregate="none"
)
domain = self.getDomain(**kwargs)
if not domain.getKey:
Log.error("Should not happen")
if not domain.NULL:
Log.error("Should not happen")
return Data(
name=self.full_name,
domain=domain,
aggregate="none"
)
def addParts(parentPart, childPath, count, index):
"""
    BUILD A hierarchy BY REPEATEDLY CALLING this METHOD WITH VARIOUS childPaths
    count IS THE NUMBER FOUND FOR this PATH
"""
    if index is None:
index = 0
if index == len(childPath):
return
c = childPath[index]
parentPart.count = coalesce(parentPart.count, 0) + count
    if parentPart.partitions is None:
parentPart.partitions = FlatList()
for i, part in enumerate(parentPart.partitions):
if part.name == c.name:
addParts(part, childPath, count, index + 1)
return
parentPart.partitions.append(c)
addParts(c, childPath, count, index + 1)
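# Illustrative walk-through: calling addParts(root, [a, b], 3, 0) twice with
# paths sharing the same first part `a` leaves root.count == 6 and a single
# `a` partition accumulating count 6, with one child entry per distinct `b`.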
def parse_partition(part):
for p in part.partitions:
if part.index:
p.index = part.index # COPY INDEX DOWN
parse_partition(p)
p.value = coalesce(p.value, p.name)
p.parent = part
if not part.where:
if len(part.partitions) > 100:
Log.error("Must define an where on {{name}} there are too many partitions ({{num_parts}})",
name= part.name,
num_parts= len(part.partitions))
# DEFAULT where IS THE UNION OF ALL CHILD FILTERS
if part.partitions:
part.where = {"or": part.partitions.where}
| klahnakoski/esReplicate | pyLibrary/queries/dimensions.py | Python | mpl-2.0 | 13,303 |
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# Reads board information from boards/BOARDNAME.py - used by build_board_docs,
# build_pininfo, and build_platform_config
# ----------------------------------------------------------------------------------------
import subprocess;
import re;
import json;
import sys;
import os;
import importlib;
silent = os.getenv("SILENT");
if silent:
class Discarder(object):
def write(self, text):
pass # do nothing
# now discard everything coming out of stdout
sys.stdout = Discarder()
# http://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5
if "check_output" not in dir( subprocess ):
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
# Scans files for comments of the form /*JSON......*/
#
# Comments look like:
#
#/*JSON{ "type":"staticmethod|staticproperty|constructor|method|property|function|variable|class|library|idle|init|kill",
# // class = built-in class that does not require instantiation
# // library = built-in class that needs require('classname')
# // idle = function to run on idle regardless
# // init = function to run on initialisation
# // kill = function to run on deinitialisation
# "class" : "Double", "name" : "doubleToIntBits",
# "needs_parentName":true, // optional - if for a method, this makes the first 2 args parent+parentName (not just parent)
# "generate_full|generate|wrap" : "*(JsVarInt*)&x",
# "description" : " Convert the floating point value given into an integer representing the bits contained in it",
# "params" : [ [ "x" , "float|int|int32|bool|pin|JsVar|JsVarName|JsVarArray", "A floating point number"] ],
# // float - parses into a JsVarFloat which is passed to the function
# // int - parses into a JsVarInt which is passed to the function
# // int32 - parses into a 32 bit int
# // bool - parses into a boolean
# // pin - parses into a pin
# // JsVar - passes a JsVar* to the function (after skipping names)
# // JsVarArray - parses this AND ANY SUBSEQUENT ARGUMENTS into a JsVar of type JSV_ARRAY. THIS IS ALWAYS DEFINED, EVEN IF ZERO LENGTH. Currently it must be the only parameter
# "return" : ["int|float|JsVar", "The integer representation of x"],
# "return_object" : "ObjectName", // optional - used for tern's code analysis - so for example we can do hints for openFile(...).yyy
# "no_create_links":1 // optional - if this is set then hyperlinks are not created when this name is mentioned (good example = bit() )
# "not_real_object" : "anything", // optional - for classes, this means we shouldn't treat this as a built-in object, as internally it isn't stored in a JSV_OBJECT
# "prototype" : "Object", // optional - for classes, this is what their prototype is. It's particlarly helpful if not_real_object, because there is no prototype var in that case
# "check" : "jsvIsFoo(var)", // for classes - this is code that returns true if 'var' is of the given type
# "ifndef" : "SAVE_ON_FLASH", // if the given preprocessor macro is defined, don't implement this
# "ifdef" : "USE_LCD_FOO", // if the given preprocessor macro isn't defined, don't implement this
# "#if" : "A>2", // add a #if statement in the generated C file (ONLY if type==object)
#}*/
#
# description can be an array of strings as well as a simple string (in which case each element is separated by a newline),
# and adding ```sometext``` in the description surrounds it with HTML code tags
#
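# Illustrative helper (not part of the original script): the same extraction
# steps get_jsondata() performs below, applied to an inline sample instead of
# a jswrap*.c file.
def _example_extract_json_comment():
  sample = '/*JSON{ "type":"function", "name":"demo"\n}\nDemo description */'
  comment = re.findall(r"/\*JSON.*?\*/", sample, re.DOTALL)[0]
  comment = comment[6:-2] # strip the leading /*JSON and the trailing */
  endOfJson = comment.find("\n}")+2
  jsondata = json.loads(comment[0:endOfJson])
  jsondata["description"] = comment[endOfJson:].strip()
  return jsondata # {'type': 'function', 'name': 'demo', 'description': 'Demo description'}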
def get_jsondata(is_for_document, parseArgs = True, board = False):
scriptdir = os.path.dirname (os.path.realpath(__file__))
print("Script location "+scriptdir)
os.chdir(scriptdir+"/..")
jswraps = []
defines = []
if board and ("build" in board.info) and ("defines" in board.info["build"]):
for i in board.info["build"]["defines"]:
print("Got define from board: " + i);
defines.append(i)
if parseArgs and len(sys.argv)>1:
print("Using files from command line")
for i in range(1,len(sys.argv)):
arg = sys.argv[i]
if arg[0]=="-":
if arg[1]=="D":
defines.append(arg[2:])
elif arg[1]=="B":
board = importlib.import_module(arg[2:])
if "usart" in board.chip: defines.append("USART_COUNT="+str(board.chip["usart"]));
if "spi" in board.chip: defines.append("SPI_COUNT="+str(board.chip["spi"]));
if "i2c" in board.chip: defines.append("I2C_COUNT="+str(board.chip["i2c"]));
if "USB" in board.devices: defines.append("defined(USB)=True");
else: defines.append("defined(USB)=False");
elif arg[1]=="F":
"" # -Fxxx.yy in args is filename xxx.yy, which is mandatory for build_jswrapper.py
else:
print("Unknown command-line option")
exit(1)
else:
jswraps.append(arg)
else:
print("Scanning for jswrap.c files")
jswraps = subprocess.check_output(["find", ".", "-name", "jswrap*.c"]).strip().split("\n")
if len(defines)>1:
print("Got #DEFINES:")
for d in defines: print(" "+d)
jsondatas = []
for jswrap in jswraps:
# ignore anything from archives
if jswrap.startswith("./archives/"): continue
# now scan
print("Scanning "+jswrap)
code = open(jswrap, "r").read()
if is_for_document and "DO_NOT_INCLUDE_IN_DOCS" in code:
print("FOUND 'DO_NOT_INCLUDE_IN_DOCS' IN FILE "+jswrap)
continue
for comment in re.findall(r"/\*JSON.*?\*/", code, re.VERBOSE | re.MULTILINE | re.DOTALL):
charnumber = code.find(comment)
linenumber = 1+code.count("\n", 0, charnumber)
# Strip off /*JSON .. */ bit
comment = comment[6:-2]
endOfJson = comment.find("\n}")+2;
jsonstring = comment[0:endOfJson];
description = comment[endOfJson:].strip();
# print("Parsing "+jsonstring)
try:
jsondata = json.loads(jsonstring)
if len(description): jsondata["description"] = description;
jsondata["filename"] = jswrap
jsondata["include"] = jswrap[:-2]+".h"
jsondata["githublink"] = "https://github.com/espruino/Espruino/blob/master/"+jswrap+"#L"+str(linenumber)
dropped_prefix = "Dropped "
if "name" in jsondata: dropped_prefix += jsondata["name"]+" "
elif "class" in jsondata: dropped_prefix += jsondata["class"]+" "
drop = False
if not is_for_document:
if ("ifndef" in jsondata) and (jsondata["ifndef"] in defines):
print(dropped_prefix+" because of #ifndef "+jsondata["ifndef"])
drop = True
if ("ifdef" in jsondata) and not (jsondata["ifdef"] in defines):
print(dropped_prefix+" because of #ifdef "+jsondata["ifdef"])
drop = True
if ("#if" in jsondata):
expr = jsondata["#if"]
for defn in defines:
if defn.find('=')!=-1:
dname = defn[:defn.find('=')]
dkey = defn[defn.find('=')+1:]
expr = expr.replace(dname, dkey);
try:
r = eval(expr)
except:
print("WARNING: error evaluating '"+expr+"' - from '"+jsondata["#if"]+"'")
r = True
if not r:
print(dropped_prefix+" because of #if "+jsondata["#if"]+ " -> "+expr)
drop = True
if not drop:
jsondatas.append(jsondata)
except ValueError as e:
sys.stderr.write( "JSON PARSE FAILED for " + jsonstring + " - "+ str(e) + "\n")
exit(1)
except:
sys.stderr.write( "JSON PARSE FAILED for " + jsonstring + " - "+str(sys.exc_info()[0]) + "\n" )
exit(1)
print("Scanning finished.")
return jsondatas
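# Illustrative helper (not part of the original script): mirrors the "#if"
# handling in get_jsondata() above, where -D style defines are textually
# substituted into the expression before it is passed to eval().
def _example_if_expression(expr="USART_COUNT>2", defines=["USART_COUNT=3"]):
  for defn in defines:
    if defn.find('=')!=-1:
      dname = defn[:defn.find('=')]
      dkey = defn[defn.find('=')+1:]
      expr = expr.replace(dname, dkey)
  return eval(expr) # "3>2" -> True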
# Takes the data from get_jsondata and restructures it in preparation for output as JS
#
# Results look like:
#{
# "Pin": {
# "desc": [
# "This is the built-in class for Pins, such as D0,D1,LED1, or BTN",
# "You can call the methods on Pin, or you can use Wiring-style functions such as digitalWrite"
# ],
# "methods": {
# "read": {
# "desc": "Returns the input state of the pin as a boolean",
# "params": [],
# "return": [
# "bool",
# "Whether pin is a logical 1 or 0"
# ]
# },
# "reset": {
# "desc": "Sets the output state of the pin to a 0",
# "params": [],
# "return": []
# },
# ...
# },
# "props": {},
# "staticmethods": {},
# "staticprops": {}
# },
# "print": {
# "desc": "Print the supplied string",
# "return": []
# },
# ...
#}
#
def get_struct_from_jsondata(jsondata):
context = {"modules": {}}
def checkClass(details):
cl = details["class"]
if not cl in context:
context[cl] = {"type": "class", "methods": {}, "props": {}, "staticmethods": {}, "staticprops": {}, "desc": details.get("description", "")}
return cl
def addConstructor(details):
cl = checkClass(details)
context[cl]["constructor"] = {"params": details.get("params", []), "return": details.get("return", []), "desc": details.get("description", "")}
def addMethod(details, type = ""):
cl = checkClass(details)
context[cl][type + "methods"][details["name"]] = {"params": details.get("params", []), "return": details.get("return", []), "desc": details.get("description", "")}
def addProp(details, type = ""):
cl = checkClass(details)
context[cl][type + "props"][details["name"]] = {"return": details.get("return", []), "desc": details.get("description", "")}
def addFunc(details):
context[details["name"]] = {"type": "function", "return": details.get("return", []), "desc": details.get("description", "")}
def addObj(details):
context[details["name"]] = {"type": "object", "instanceof": details.get("instanceof", ""), "desc": details.get("description", "")}
def addLib(details):
context["modules"][details["class"]] = {"desc": details.get("description", "")}
def addVar(details):
return
for data in jsondata:
type = data["type"]
if type=="class":
checkClass(data)
elif type=="constructor":
addConstructor(data)
elif type=="method":
addMethod(data)
elif type=="property":
addProp(data)
elif type=="staticmethod":
addMethod(data, "static")
elif type=="staticproperty":
addProp(data, "static")
elif type=="function":
addFunc(data)
elif type=="object":
addObj(data)
elif type=="library":
addLib(data)
elif type=="variable":
addVar(data)
else:
print(json.dumps(data, sort_keys=True, indent=2))
return context
def get_includes_from_jsondata(jsondatas):
includes = []
for jsondata in jsondatas:
include = jsondata["include"]
if not include in includes:
includes.append(include)
return includes
def is_property(jsondata):
return jsondata["type"]=="property" or jsondata["type"]=="staticproperty" or jsondata["type"]=="variable"
def is_function(jsondata):
return jsondata["type"]=="function" or jsondata["type"]=="method"
def get_prefix_name(jsondata):
if jsondata["type"]=="event": return "event"
if jsondata["type"]=="constructor": return "constructor"
if jsondata["type"]=="function": return "function"
if jsondata["type"]=="method": return "function"
if jsondata["type"]=="variable": return "variable"
if jsondata["type"]=="property": return "property"
return ""
def get_ifdef_description(d):
if d=="SAVE_ON_FLASH": return "devices with low flash memory"
if d=="STM32F1": return "STM32F1 devices (including Espruino Board)"
if d=="USE_LCD_SDL": return "Linux with SDL support compiled in"
if d=="USE_TLS": return "devices with TLS and SSL support (Espruino Pico only)"
if d=="RELEASE": return "release builds"
if d=="LINUX": return "Linux-based builds"
if d=="USE_USB_HID": return "devices that support USB HID (Espruino Pico)"
if d=="USE_AES": return "devices that support AES (Espruino Pico, Espruino Wifi or Linux)"
if d=="USE_CRYPTO": return "devices that support Crypto Functionality (Espruino Pico, Espruino Wifi, Linux or ESP8266)"
print("WARNING: Unknown ifdef '"+d+"' in common.get_ifdef_description")
return d
def get_script_dir():
return os.path.dirname(os.path.realpath(__file__))
def get_version():
# Warning: the same release label derivation is also in the Makefile
scriptdir = get_script_dir()
jsutils = scriptdir+"/../src/jsutils.h"
version = re.compile("^.*JS_VERSION.*\"(.*)\"");
alt_release = os.getenv("ALT_RELEASE")
  if alt_release is None:
    # Default release labeling based on commits since last release tag
    latest_release = subprocess.check_output('git tag 2>/dev/null | grep RELEASE_ | sort | tail -1', shell=True).strip()
    commits_since_release = subprocess.check_output('git log --oneline 2>/dev/null '+latest_release.decode("utf-8")+'..HEAD | wc -l', shell=True).decode("utf-8").strip()
else:
# Alternate release labeling with fork name (in ALT_RELEASE env var) plus branch
# name plus commit SHA
    sha = subprocess.check_output('git rev-parse --short HEAD 2>/dev/null', shell=True).strip()
    branch = subprocess.check_output('git name-rev --name-only HEAD 2>/dev/null', shell=True).strip()
commits_since_release = alt_release + '_' + branch + '_' + sha
for line in open(jsutils):
match = version.search(line);
if (match != None):
v = match.group(1);
if commits_since_release=="0": return v
else: return v+"."+commits_since_release
return "UNKNOWN"
def get_name_or_space(jsondata):
if "name" in jsondata: return jsondata["name"]
return ""
def get_bootloader_size(board):
if board.chip["family"]=="STM32F4": return 16*1024; # 16kb Pages, so we have no choice
return 10*1024;
# On normal chips this is 0x00000000
# On boards with bootloaders it's generally + 10240
# On F401, because of the setup of pages we put the bootloader in the first 16k, then in the 16+16+16 we put the saved code, and then finally we put the binary somewhere else
def get_espruino_binary_address(board):
if "place_text_section" in board.chip:
return board.chip["place_text_section"]
if "bootloader" in board.info and board.info["bootloader"]==1:
return get_bootloader_size(board);
return 0;
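# Worked example (illustrative, hypothetical board): with no
# "place_text_section" and board.info["bootloader"]==1 on a non-STM32F4 chip,
# get_espruino_binary_address() returns get_bootloader_size() == 10*1024
# (0x2800); an STM32F4 board would get 16*1024 (0x4000) instead.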
def get_board_binary_name(board):
return board.info["binary_name"].replace("%v", get_version());
| redbear/Espruino | scripts/common.py | Python | mpl-2.0 | 16,565 |
# Copyright (c) 2015-2020 Contributors as noted in the AUTHORS file
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# System imports
import json
import logging
import re
import uuid
from threading import Event
# Third-party imports
from pyre import Pyre
# Local imports
from ..tools import zmq, green # , spy_call, w_spy_call, spy_object
logger = logging.getLogger(__name__)
class PyreNode(Pyre):
def __init__(self, *args, **kwargs):
# spy_object(self, class_=Pyre, except_=['name', 'uuid'], with_caller=False)
# spy_call(self.__init__, args, kwargs, with_caller=False); print
self._name = None
self._uuid = None
        super(PyreNode, self).__init__(*args, **kwargs)
self.request_results = {} # TODO: Fuse the two dicts
self.request_events = {}
self.poller = zmq.Poller()
self.poller.register(self.inbox, zmq.POLLIN)
self.join('SURVEY')
def run(self):
self.task = green.spawn(self._run, 100)
def _run(self, timeout=None):
self._running = True
self.start()
while self._running:
try:
# logger.debug('Polling')
items = dict(self.poller.poll(timeout))
# logger.debug('polled out: %s, %s', len(items), items)
while len(items) > 0:
for fd, ev in items.items():
if (self.inbox == fd) and (ev == zmq.POLLIN):
self._process_message()
# logger.debug('quick polling')
items = dict(self.poller.poll(0))
# logger.debug('qpoll: %s, %s', len(items), items)
except (KeyboardInterrupt, SystemExit):
logger.debug('(%s) KeyboardInterrupt or SystemExit', self.name())
break
logger.debug('(%s) Exiting loop and stopping', self.name())
self.stop()
def _process_message(self):
logger.debug('(%s) processing message', self.name())
msg = self.recv()
logger.debug('(%s) received stuff: %s', self.name(), msg)
msg_type = msg.pop(0)
logger.debug('(%s) msg_type: %s', self.name(), msg_type)
peer_id = uuid.UUID(bytes=msg.pop(0))
logger.debug('(%s) peer_id: %s', self.name(), peer_id)
peer_name = msg.pop(0)
logger.debug('(%s) peer_name: %s', self.name(), peer_name)
if msg_type == b'ENTER':
self.on_peer_enter(peer_id, peer_name, msg)
elif msg_type == b'EXIT':
self.on_peer_exit(peer_id, peer_name, msg)
elif msg_type == b'SHOUT':
self.on_peer_shout(peer_id, peer_name, msg)
elif msg_type == b'WHISPER':
self.on_peer_whisper(peer_id, peer_name, msg)
def on_peer_enter(self, peer_id, peer_name, msg):
logger.debug('(%s) ZRE ENTER: %s, %s', self.name(), peer_name, peer_id)
pub_endpoint = self.get_peer_endpoint(peer_id, 'pub')
rpc_endpoint = self.get_peer_endpoint(peer_id, 'rpc')
self.on_new_peer(peer_id, peer_name, pub_endpoint, rpc_endpoint)
def on_new_peer(self, peer_id, peer_name, pub_endpoint, rpc_endpoint):
pass
def on_peer_exit(self, peer_id, peer_name, msg):
logger.debug('(%s) ZRE EXIT: %s, %s', self.name(), peer_name, peer_id)
self.on_peer_gone(peer_id, peer_name)
def on_peer_gone(self, peer_id, peer_name):
pass
def on_peer_shout(self, peer_id, peer_name, msg):
group = msg.pop(0)
data = msg.pop(0)
logger.debug('(%s) ZRE SHOUT: %s, %s > (%s) %s',
self.name(), peer_name, peer_id, group, data)
if group == b'SURVEY':
self.on_survey(peer_id, peer_name, json.loads(data))
elif group == b'EVENT':
self.on_event(peer_id, peer_name, json.loads(data))
def on_survey(self, peer_id, peer_name, request):
pass
def on_event(self, peer_id, peer_name, request):
pass
def on_peer_whisper(self, peer_id, peer_name, msg):
logger.debug('(%s) ZRE WHISPER: %s, %s > %s', self.name(), peer_name, peer_id, msg)
reply = json.loads(msg[0])
if reply['req_id'] in self.request_results:
logger.debug('(%s) Received reply from %s: %s', self.name(), peer_name, reply['data'])
self.request_results[reply['req_id']].append((peer_name, reply['data']))
ev, limit_peers = self.request_events[reply['req_id']]
if limit_peers and (len(self.request_results[reply['req_id']]) >= limit_peers):
ev.set()
green.sleep(0) # Yield
else:
logger.warning(
'(%s) Discarding reply from %s because the request ID is unknown',
self.name(), peer_name
)
def get_peer_endpoint(self, peer, prefix):
pyre_endpoint = self.peer_address(peer)
ip = re.search('.*://(.*):.*', pyre_endpoint).group(1)
return '%s://%s:%s' % (
self.peer_header_value(peer, prefix + '_proto'),
ip,
self.peer_header_value(peer, prefix + '_port')
)
def join_event(self):
self.join('EVENT')
def leave_event(self):
self.leave('EVENT')
def send_survey(self, request, timeout, limit_peers):
# request['req_id'] = ('%x' % randint(0, 0xFFFFFFFF)).encode()
self.request_results[request['req_id']] = []
ev = Event()
self.request_events[request['req_id']] = (ev, limit_peers)
self.shout('SURVEY', json.dumps(request).encode())
ev.wait(timeout)
result = self.request_results[request['req_id']]
del self.request_results[request['req_id']]
del self.request_events[request['req_id']]
return result
def send_event(self, request):
self.shout('EVENT', json.dumps(request).encode())
def reply_survey(self, peer_id, reply):
self.whisper(peer_id, json.dumps(reply).encode())
def shutdown(self):
self._running = False
def name(self):
if self._name is None:
            # f = w_spy_call(super(PyreNode, self).name, with_caller=False)
            f = super(PyreNode, self).name
self._name = f()
return self._name
def uuid(self):
if self._uuid is None:
            # f = w_spy_call(super(PyreNode, self).uuid, with_caller=False)
            f = super(PyreNode, self).uuid
self._uuid = f()
return self._uuid
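# Illustrative sketch (not part of this module; names are hypothetical):
# a minimal subclass wiring up the SURVEY round trip. A peer answers in
# on_survey() with the same req_id, which lets send_survey() match the
# reply and release its Event once limit_peers answers have arrived:
#
#   class EchoNode(PyreNode):
#       def on_survey(self, peer_id, peer_name, request):
#           self.reply_survey(peer_id, {'req_id': request['req_id'],
#                                       'data': 'pong'})
#
#   node = EchoNode('echo')
#   node.run()
#   replies = node.send_survey({'req_id': 'r1', 'data': 'ping'},
#                              timeout=5, limit_peers=1)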
| Alidron/alidron-isac | isac/transport/pyre_node.py | Python | mpl-2.0 | 6,714 |
"""URL routes for the sample app."""
from django.conf.urls import include, url
from django.views.generic import TemplateView
from rest_framework.routers import DefaultRouter
from .viewsets import ChoiceViewSet, QuestionViewSet, UserViewSet
router = DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'questions', QuestionViewSet)
router.register(r'choices', ChoiceViewSet)
urlpatterns = [
url(r'^$', TemplateView.as_view(
template_name='sample_poll_app/home.html'), name='home'),
url(r'^api-auth/', include(
'rest_framework.urls', namespace='rest_framework')),
url(r'^api/', include(router.urls))
]
| jwhitlock/drf-cached-instances | sample_poll_app/urls.py | Python | mpl-2.0 | 652 |
import os
from outlawg import Outlawg
from fftool import (
DIR_CONFIGS,
local
)
from ini_handler import IniHandler
Log = Outlawg()
env = IniHandler()
env.load_os_config(DIR_CONFIGS)
def launch_firefox(profile_path, channel, logging, nspr_log_modules=''):
"""relies on the other functions (download, install, profile)
having completed.
"""
FIREFOX_APP_BIN = env.get(channel, 'PATH_FIREFOX_BIN_ENV')
Log.header('LAUNCH FIREFOX')
print("Launching Firefox {0} with profile: {1}".format(
channel,
profile_path)
)
cmd = '"{0}" -profile "{1}"'.format(FIREFOX_APP_BIN, profile_path)
print('CMD: ' + cmd)
# NSPR_LOG_MODULES
if nspr_log_modules:
Log.header('FIREFOX NSPR_LOG_MODULES LOGGING')
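        # Example value (illustrative): 'timestamp,nsHttp:5,nsSocketTransport:5'
        # -- comma-separated module:level pairs understood by Gecko's logging.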
os.environ['NSPR_LOG_MODULES'] = nspr_log_modules
local(cmd, logging)
| rpappalax/ff-tool | fftool/firefox_run.py | Python | mpl-2.0 | 849 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 8 14:45:26 2017
@author: leonidas
"""
import numpy as np
import operator
def classify(inputPoint,dataSet,labels,k):
dataSetSize = dataSet.shape[0]
diffMat = np.tile(inputPoint,(dataSetSize,1))-dataSet
sqDiffMat = pow(diffMat,2)
sqDistances = sqDiffMat.sum(axis=1)
distances = pow(sqDistances,0.5)
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[ sortedDistIndicies[i] ]
classCount[voteIlabel] = classCount.get(voteIlabel,0)+1
    # sort by appearance count
sortedClassCount = sorted(classCount.items(), key = operator.itemgetter(1), reverse = True)
return sortedClassCount[0][0]
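# Illustrative usage (not part of the original module), on a toy 2-D dataset:
#   data = np.array([[0, 0], [0, 1], [10, 10], [10, 11]])
#   labels = ['a', 'a', 'b', 'b']
#   classify(np.array([9, 9]), data, labels, 3)   # -> 'b'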
def mat_to_vect(filename):
vect = []
data = open(filename)
for i in range(32):
temp = data.readline()
for j in range(32):
try:
vect.append(int(temp[j]))
            except ValueError:
                print(temp[j], 'error', ValueError)
return vect
def load_train_data():
train_lables = []
size = 100
train_data = np.zeros((size*10,1024))
for i in range(10):
for j in range(size):
train_lables.append(i)
            train_data[i*size+j,:] = mat_to_vect('train/%s/%s.txt' % (i, j))
return train_lables,train_data
def classnumCut(fileName):
    return int(fileName[0])
| leonidas141/HIT-ML-2016-report | load.py | Python | mpl-2.0 | 1459 |
import mock
from crashstats.base.tests.testbase import TestCase
from crashstats.api.cleaner import Cleaner, SmartWhitelistMatcher
from crashstats import scrubber
class TestCleaner(TestCase):
def test_simplest_case(self):
whitelist = {'hits': ('foo', 'bar')}
data = {
'hits': [
{'foo': 1,
'bar': 2,
'baz': 3},
{'foo': 4,
'bar': 5,
'baz': 6},
]
}
cleaner = Cleaner(whitelist)
cleaner.start(data)
expect = {
'hits': [
{'foo': 1,
'bar': 2},
{'foo': 4,
'bar': 5},
]
}
assert data == expect
@mock.patch('warnings.warn')
def test_simplest_case_with_warning(self, p_warn):
whitelist = {'hits': ('foo', 'bar')}
data = {
'hits': [
{'foo': 1,
'bar': 2,
'baz': 3},
{'foo': 4,
'bar': 5,
'baz': 6},
]
}
cleaner = Cleaner(whitelist, debug=True)
cleaner.start(data)
p_warn.assert_called_with("Skipping 'baz'")
def test_all_dict_data(self):
whitelist = {Cleaner.ANY: ('foo', 'bar')}
data = {
'WaterWolf': {
'foo': 1,
'bar': 2,
'baz': 3,
},
'NightTrain': {
'foo': 7,
'bar': 8,
'baz': 9,
},
}
cleaner = Cleaner(whitelist)
cleaner.start(data)
expect = {
'WaterWolf': {
'foo': 1,
'bar': 2,
},
'NightTrain': {
'foo': 7,
'bar': 8,
},
}
assert data == expect
def test_simple_list(self):
whitelist = ('foo', 'bar')
data = [
{
'foo': 1,
'bar': 2,
'baz': 3,
},
{
'foo': 7,
'bar': 8,
'baz': 9,
},
]
cleaner = Cleaner(whitelist)
cleaner.start(data)
expect = [
{
'foo': 1,
'bar': 2,
},
{
'foo': 7,
'bar': 8,
},
]
assert data == expect
def test_plain_dict(self):
whitelist = ('foo', 'bar')
data = {
'foo': 1,
'bar': 2,
'baz': 3,
}
cleaner = Cleaner(whitelist)
cleaner.start(data)
expect = {
'foo': 1,
'bar': 2,
}
assert data == expect
def test_dict_data_with_lists(self):
whitelist = {
'hits': {
Cleaner.ANY: ('foo', 'bar')
}
}
data = {
'hits': {
'WaterWolf': [
{'foo': 1, 'bar': 2, 'baz': 3},
{'foo': 4, 'bar': 5, 'baz': 6}
],
'NightTrain': [
{'foo': 7, 'bar': 8, 'baz': 9},
{'foo': 10, 'bar': 11, 'baz': 12}
]
}
}
cleaner = Cleaner(whitelist)
cleaner.start(data)
expect = {
'hits': {
'WaterWolf': [
{'foo': 1, 'bar': 2},
{'foo': 4, 'bar': 5}
],
'NightTrain': [
{'foo': 7, 'bar': 8},
{'foo': 10, 'bar': 11}
]
}
}
assert data == expect
def test_all_dict_data_deeper(self):
whitelist = {Cleaner.ANY: {Cleaner.ANY: ('foo', 'bar')}}
data = {
'WaterWolf': {
'2012': {
'foo': 1,
'bar': 2,
'baz': 3,
},
'2013': {
'foo': 4,
'bar': 5,
'baz': 6,
}
},
'NightTrain': {
'2012': {
'foo': 7,
'bar': 8,
'baz': 9,
},
'2013': {
'foo': 10,
'bar': 11,
'baz': 12,
}
},
}
cleaner = Cleaner(whitelist)
cleaner.start(data)
expect = {
'WaterWolf': {
'2012': {
'foo': 1,
'bar': 2,
},
'2013': {
'foo': 4,
'bar': 5,
}
},
'NightTrain': {
'2012': {
'foo': 7,
'bar': 8,
},
'2013': {
'foo': 10,
'bar': 11,
}
},
}
assert data == expect
def test_with_scrubber_cleaning(self):
whitelist = {'hits': ('foo', 'bar', 'baz')}
data = {
'hits': [
{'foo': "Bla bla",
'bar': "contact me on [email protected]",
'baz': "when I visited http://www.p0rn.com"},
{'foo': "Ble ble [email protected]",
'bar': "other things on https://google.com here",
'baz': "talk to [email protected]"},
]
}
cleaner = Cleaner(
whitelist,
clean_scrub=(
('bar', scrubber.EMAIL),
('bar', scrubber.URL),
('baz', scrubber.URL),
)
)
cleaner.start(data)
expect = {
'hits': [
{'foo': "Bla bla",
'bar': "contact me on ",
'baz': "when I visited "},
{'foo': "Ble ble [email protected]",
'bar': "other things on here",
# because 'baz' doesn't have an EMAIL scrubber
'baz': "talk to [email protected]"},
]
}
assert data == expect
class TestSmartWhitelistMatcher(TestCase):
def test_basic_in(self):
whitelist = ['some', 'thing*']
matcher = SmartWhitelistMatcher(whitelist)
assert 'some' in matcher
assert 'something' not in matcher
assert 'awesome' not in matcher
assert 'thing' in matcher
assert 'things' in matcher
assert 'nothing' not in matcher
| Tayamarn/socorro | webapp-django/crashstats/api/tests/test_cleaner.py | Python | mpl-2.0 | 6,751 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from jx_base.expressions import Variable, DateOp, TupleOp, LeavesOp, BinaryOp, OrOp, InequalityOp, extend, Literal, NullOp, TrueOp, FalseOp, DivOp, FloorOp, \
NeOp, NotOp, LengthOp, NumberOp, StringOp, CountOp, MultiOp, RegExpOp, CoalesceOp, MissingOp, ExistsOp, \
PrefixOp, UnixOp, FromUnixOp, NotLeftOp, RightOp, NotRightOp, FindOp, InOp, RangeOp, CaseOp, AndOp, \
ConcatOp, LeftOp, EqOp, WhenOp, BasicIndexOfOp, IntegerOp, MaxOp, BasicSubstringOp, FALSE, MinOp, BooleanOp, SuffixOp, BetweenOp, simplified, ZERO, SqlInstrOp, SqlSubstrOp, NULL, ONE, builtin_ops, TRUE, SqlEqOp, BasicMultiOp
from jx_base.queries import get_property_name
from jx_sqlite import quoted_GUID, GUID
from mo_dots import coalesce, wrap, Null, split_field, listwrap, startswith_field
from mo_dots import join_field, ROOT_PATH, relative_field
from mo_future import text_type
from mo_json import json2value
from mo_json.typed_encoder import OBJECT, BOOLEAN, EXISTS, NESTED
from mo_logs import Log
from mo_math import Math
from pyLibrary import convert
from pyLibrary.sql import SQL, SQL_AND, SQL_EMPTY_STRING, SQL_OR, SQL_TRUE, SQL_ZERO, SQL_FALSE, SQL_NULL, SQL_ONE, SQL_IS_NOT_NULL, sql_list, sql_iso, SQL_IS_NULL, SQL_END, SQL_ELSE, SQL_THEN, SQL_WHEN, SQL_CASE, sql_concat, sql_coalesce
from pyLibrary.sql.sqlite import quote_column, quote_value
@extend(Variable)
def to_sql(self, schema, not_null=False, boolean=False):
if self.var == GUID:
return wrap([{"name": ".", "sql": {"s": quoted_GUID}, "nested_path": ROOT_PATH}])
vars = schema[self.var]
if not vars:
# DOES NOT EXIST
return wrap([{"name": ".", "sql": {"0": SQL_NULL}, "nested_path": ROOT_PATH}])
var_name = list(set(listwrap(vars).name))
if len(var_name) > 1:
Log.error("do not know how to handle")
var_name = var_name[0]
cols = schema.leaves(self.var)
acc = {}
if boolean:
for col in cols:
cname = relative_field(col.name, var_name)
nested_path = col.nested_path[0]
if col.type == OBJECT:
value = SQL_TRUE
elif col.type == BOOLEAN:
value = quote_column(col.es_column)
else:
value = quote_column(col.es_column) + SQL_IS_NOT_NULL
tempa = acc.setdefault(nested_path, {})
tempb = tempa.setdefault(get_property_name(cname), {})
tempb['b'] = value
else:
for col in cols:
cname = relative_field(col.name, var_name)
if col.type == OBJECT:
prefix = self.var + "."
for cn, cs in schema.items():
if cn.startswith(prefix):
for child_col in cs:
tempa = acc.setdefault(child_col.nested_path[0], {})
tempb = tempa.setdefault(get_property_name(cname), {})
tempb[json_type_to_sql_type[col.type]] = quote_column(child_col.es_column)
else:
nested_path = col.nested_path[0]
tempa = acc.setdefault(nested_path, {})
tempb = tempa.setdefault(get_property_name(cname), {})
tempb[json_type_to_sql_type[col.type]] = quote_column(col.es_column)
return wrap([
{"name": cname, "sql": types, "nested_path": nested_path}
for nested_path, pairs in acc.items() for cname, types in pairs.items()
])
@extend(Literal)
def to_sql(self, schema, not_null=False, boolean=False):
value = self.value
v = quote_value(value)
if v == None:
return wrap([{"name": "."}])
elif isinstance(value, text_type):
return wrap([{"name": ".", "sql": {"s": quote_value(value)}}])
elif Math.is_number(v):
return wrap([{"name": ".", "sql": {"n": quote_value(value)}}])
elif v in [True, False]:
return wrap([{"name": ".", "sql": {"b": quote_value(value)}}])
else:
return wrap([{"name": ".", "sql": {"j": quote_value(self.json)}}])
@extend(NullOp)
def to_sql(self, schema, not_null=False, boolean=False):
return Null
@extend(TrueOp)
def to_sql(self, schema, not_null=False, boolean=False):
return wrap([{"name": ".", "sql": {"b": SQL_TRUE}}])
@extend(FalseOp)
def to_sql(self, schema, not_null=False, boolean=False):
return wrap([{"name": ".", "sql": {"b": SQL_FALSE}}])
@extend(DateOp)
def to_sql(self, schema, not_null=False, boolean=False):
return wrap([{"name": ".", "sql": {"n": quote_value(self.value)}}])
@extend(TupleOp)
def to_sql(self, schema, not_null=False, boolean=False):
return wrap([{"name": ".", "sql": t.to_sql(schema)[0].sql} for t in self.terms])
@extend(LeavesOp)
def to_sql(self, schema, not_null=False, boolean=False):
if not isinstance(self.term, Variable):
Log.error("Can only handle Variable")
term = self.term.var
prefix_length = len(split_field(term))
output = wrap([
{
"name": join_field(split_field(schema.get_column_name(c))[prefix_length:]),
"sql": Variable(schema.get_column_name(c)).to_sql(schema)[0].sql
}
for c in schema.columns
if startswith_field(c.name, term) and (
(c.jx_type not in (EXISTS, OBJECT, NESTED) and startswith_field(schema.nested_path[0], c.nested_path[0])) or
(c.jx_type not in (EXISTS, OBJECT) and schema.nested_path[0] == c.nested_path[0])
)
])
return output
@extend(EqOp)
def to_sql(self, schema, not_null=False, boolean=False):
lhs = self.lhs.to_sql(schema)
rhs = self.rhs.to_sql(schema)
acc = []
if len(lhs) != len(rhs):
Log.error("lhs and rhs have different dimensionality!?")
for l, r in zip(lhs, rhs):
for t in "bsnj":
if l.sql[t] == None:
if r.sql[t] == None:
pass
else:
acc.append(sql_iso(r.sql[t]) + SQL_IS_NULL)
else:
if r.sql[t] == None:
acc.append(sql_iso(l.sql[t]) + SQL_IS_NULL)
else:
acc.append(sql_iso(l.sql[t]) + " = " + sql_iso(r.sql[t]))
if not acc:
return FALSE.to_sql(schema)
else:
return wrap([{"name": ".", "sql": {"b": SQL_OR.join(acc)}}])
@extend(EqOp)
@simplified
def partial_eval(self):
lhs = self.lhs.partial_eval()
rhs = self.rhs.partial_eval()
if isinstance(lhs, Literal) and isinstance(rhs, Literal):
return TRUE if builtin_ops["eq"](lhs.value, rhs.value) else FALSE
else:
rhs_missing = rhs.missing().partial_eval()
return CaseOp(
"case",
[
WhenOp("when", lhs.missing(), **{"then": rhs_missing}),
WhenOp("when", rhs_missing, **{"then": FALSE}),
SqlEqOp("eq", [lhs, rhs])
]
).partial_eval()
@extend(NeOp)
def to_sql(self, schema, not_null=False, boolean=False):
return NotOp('not', EqOp('eq', [self.lhs, self.rhs]).partial_eval()).partial_eval().to_sql(schema)
@extend(BasicIndexOfOp)
def to_sql(self, schema, not_null=False, boolean=False):
value = self.value.to_sql(schema)[0].sql.s
find = self.find.to_sql(schema)[0].sql.s
start = self.start
if isinstance(start, Literal) and start.value == 0:
return wrap([{"name": ".", "sql": {"n": "INSTR" + sql_iso(value + "," + find) + "-1"}}])
else:
start_index = start.to_sql(schema)[0].sql.n
found = "INSTR(SUBSTR" + sql_iso(value + "," + start_index + "+1)," + find)
return wrap([{"name": ".", "sql": {"n": (
SQL_CASE +
SQL_WHEN + found +
SQL_THEN + found + "+" + start_index + "-1" +
SQL_ELSE + "-1" +
SQL_END
)}}])
@extend(BasicSubstringOp)
def to_sql(self, schema, not_null=False, boolean=False):
value = self.value.to_sql(schema)[0].sql.s
start = MultiOp("add", [self.start, Literal(None, 1)]).partial_eval().to_sql(schema)[0].sql.n
length = BinaryOp("subtract", [self.end, self.start]).partial_eval().to_sql(schema)[0].sql.n
return wrap([{"name": ".", "sql": {"s": "SUBSTR" + sql_iso(value + "," + start + ", " + length)}}])
@extend(BinaryOp)
def to_sql(self, schema, not_null=False, boolean=False):
lhs = self.lhs.to_sql(schema)[0].sql.n
rhs = self.rhs.to_sql(schema)[0].sql.n
return wrap([{"name": ".", "sql": {"n": sql_iso(lhs) + " " + BinaryOp.operators[self.op] + " " + sql_iso(rhs)}}])
@extend(MinOp)
def to_sql(self, schema, not_null=False, boolean=False):
terms = [t.partial_eval().to_sql(schema)[0].sql.n for t in self.terms]
return wrap([{"name": ".", "sql": {"n": "min" + sql_iso((sql_list(terms)))}}])
@extend(MaxOp)
def to_sql(self, schema, not_null=False, boolean=False):
terms = [t.partial_eval().to_sql(schema)[0].sql.n for t in self.terms]
return wrap([{"name": ".", "sql": {"n": "max" + sql_iso((sql_list(terms)))}}])
@extend(InequalityOp)
def to_sql(self, schema, not_null=False, boolean=False):
lhs = self.lhs.to_sql(schema, not_null=True)[0].sql
rhs = self.rhs.to_sql(schema, not_null=True)[0].sql
lhs_exists = self.lhs.exists().to_sql(schema)[0].sql
rhs_exists = self.rhs.exists().to_sql(schema)[0].sql
if len(lhs) == 1 and len(rhs) == 1:
return wrap([{"name": ".", "sql": {
"b": sql_iso(lhs.values()[0]) + " " + InequalityOp.operators[self.op] + " " + sql_iso(rhs.values()[0])
}}])
ors = []
for l in "bns":
ll = lhs[l]
if not ll:
continue
for r in "bns":
rr = rhs[r]
if not rr:
continue
elif r == l:
ors.append(
sql_iso(lhs_exists[l]) + SQL_AND + sql_iso(rhs_exists[r]) + SQL_AND + sql_iso(lhs[l]) + " " +
InequalityOp.operators[self.op] + " " + sql_iso(rhs[r])
)
elif (l > r and self.op in ["gte", "gt"]) or (l < r and self.op in ["lte", "lt"]):
ors.append(
sql_iso(lhs_exists[l]) + SQL_AND + sql_iso(rhs_exists[r])
)
sql = sql_iso(SQL_OR.join(sql_iso(o) for o in ors))
return wrap([{"name": ".", "sql": {"b": sql}}])
@extend(DivOp)
def to_sql(self, schema, not_null=False, boolean=False):
lhs = self.lhs.to_sql(schema)[0].sql.n
rhs = self.rhs.to_sql(schema)[0].sql.n
d = self.default.to_sql(schema)[0].sql.n
if lhs and rhs:
if d == None:
return wrap([{
"name": ".",
"sql": {"n": sql_iso(lhs) + " / " + sql_iso(rhs)}
}])
else:
return wrap([{
"name": ".",
"sql": {"n": sql_coalesce([sql_iso(lhs) + " / " + sql_iso(rhs), d])}
}])
else:
return Null
@extend(FloorOp)
def to_sql(self, schema, not_null=False, boolean=False):
lhs = self.lhs.to_sql(schema)
rhs = self.rhs.to_sql(schema)
acc = []
if len(lhs) != len(rhs):
Log.error("lhs and rhs have different dimensionality!?")
for l, r in zip(lhs, rhs):
for t in "bsnj":
if l.sql[t] == None:
if r.sql[t] == None:
pass
else:
acc.append(sql_iso(r.sql[t]) + " IS " + SQL_NULL)
else:
if r.sql[t] == None:
acc.append(sql_iso(l.sql[t]) + " IS " + SQL_NULL)
else:
acc.append("(" + sql_iso(l.sql[t]) + " = " + sql_iso(r.sql[t]) + " OR (" + sql_iso(l.sql[t]) + " IS" + SQL_NULL + SQL_AND + "(" + r.sql[
t] + ") IS NULL))")
if not acc:
return FALSE.to_sql(schema)
else:
return wrap([{"name": ".", "sql": {"b": SQL_OR.join(acc)}}])
# @extend(NeOp)
# def to_sql(self, schema, not_null=False, boolean=False):
# return NotOp("not", EqOp("eq", [self.lhs, self.rhs])).to_sql(schema, not_null, boolean)
@extend(NotOp)
def to_sql(self, schema, not_null=False, boolean=False):
not_expr = NotOp("not", BooleanOp("boolean", self.term)).partial_eval()
if isinstance(not_expr, NotOp):
return wrap([{"name": ".", "sql": {"b": "NOT " + sql_iso(not_expr.term.to_sql(schema)[0].sql.b)}}])
else:
return not_expr.to_sql(schema)
@extend(BooleanOp)
def to_sql(self, schema, not_null=False, boolean=False):
term = self.term.partial_eval()
if term.type == "boolean":
sql = term.to_sql(schema)
return sql
else:
sql = term.exists().partial_eval().to_sql(schema)
return sql
@extend(AndOp)
def to_sql(self, schema, not_null=False, boolean=False):
if not self.terms:
return wrap([{"name": ".", "sql": {"b": SQL_TRUE}}])
elif all(self.terms):
return wrap([{"name": ".", "sql": {
"b": SQL_AND.join([sql_iso(t.to_sql(schema, boolean=True)[0].sql.b) for t in self.terms])
}}])
else:
return wrap([{"name": ".", "sql": {"b": SQL_FALSE}}])
@extend(OrOp)
def to_sql(self, schema, not_null=False, boolean=False):
return wrap([{
"name": ".",
"sql": {"b": SQL_OR.join(
sql_iso(t.to_sql(schema, boolean=True)[0].sql.b)
for t in self.terms
)}
}])
@extend(LengthOp)
def to_sql(self, schema, not_null=False, boolean=False):
term = self.term.partial_eval()
if isinstance(term, Literal):
val = term.value
if isinstance(val, text_type):
return wrap([{"name": ".", "sql": {"n": convert.value2json(len(val))}}])
elif isinstance(val, (float, int)):
return wrap([{"name": ".", "sql": {"n": convert.value2json(len(convert.value2json(val)))}}])
else:
return Null
value = term.to_sql(schema)[0].sql.s
return wrap([{"name": ".", "sql": {"n": "LENGTH" + sql_iso(value)}}])
@extend(IntegerOp)
def to_sql(self, schema, not_null=False, boolean=False):
value = self.term.to_sql(schema, not_null=True)
acc = []
for c in value:
for t, v in c.sql.items():
if t == "s":
acc.append("CAST(" + v + " as INTEGER)")
else:
acc.append(v)
if not acc:
return wrap([])
elif len(acc) == 1:
return wrap([{"name": ".", "sql": {"n": acc[0]}}])
else:
return wrap([{"name": ".", "sql": {"n": sql_coalesce(acc)}}])
@extend(NumberOp)
def to_sql(self, schema, not_null=False, boolean=False):
value = self.term.to_sql(schema, not_null=True)
acc = []
for c in value:
for t, v in c.sql.items():
if t == "s":
acc.append("CAST(" + v + " as FLOAT)")
else:
acc.append(v)
if not acc:
return wrap([])
elif len(acc) == 1:
return wrap([{"name": ".", "sql": {"n": acc}}])
else:
return wrap([{"name": ".", "sql": {"n": sql_coalesce(acc)}}])
@extend(StringOp)
def to_sql(self, schema, not_null=False, boolean=False):
test = self.term.missing().to_sql(schema, boolean=True)[0].sql.b
value = self.term.to_sql(schema, not_null=True)[0].sql
acc = []
for t, v in value.items():
if t == "b":
acc.append(SQL_CASE+SQL_WHEN + sql_iso(test) + SQL_THEN + SQL_NULL + SQL_WHEN + sql_iso(v) + SQL_THEN+"'true'"+SQL_ELSE+"'false'"+SQL_END)
elif t == "s":
acc.append(v)
else:
acc.append("RTRIM(RTRIM(CAST" + sql_iso(v + " as TEXT), " + quote_value('0')) + ", " + quote_value(".") + ")")
if not acc:
return wrap([{}])
elif len(acc) == 1:
return wrap([{"name": ".", "sql": {"s": acc[0]}}])
else:
return wrap([{"name": ".", "sql": {"s": sql_coalesce(acc)}}])
@extend(CountOp)
def to_sql(self, schema, not_null=False, boolean=False):
acc = []
for term in self.terms:
sqls = term.to_sql(schema)
if len(sqls) > 1:
acc.append(SQL_TRUE)
else:
for t, v in sqls[0].sql.items():
if t in ["b", "s", "n"]:
acc.append(SQL_CASE+SQL_WHEN + sql_iso(v) + SQL_IS_NULL + SQL_THEN+"0"+SQL_ELSE+"1"+SQL_END)
else:
acc.append(SQL_TRUE)
if not acc:
return wrap([{}])
else:
return wrap([{"nanme": ".", "sql": {"n": SQL("+").join(acc)}}])
_sql_operators = {
"add": (SQL(" + "), SQL_ZERO), # (operator, zero-array default value) PAIR
"basic.add": (SQL(" + "), SQL_ZERO), # (operator, zero-array default value) PAIR
"sum": (SQL(" + "), SQL_ZERO),
"mul": (SQL(" * "), SQL_ONE),
"mult": (SQL(" * "), SQL_ONE),
"multiply": (SQL(" * "), SQL_ONE),
"basic.mult": (SQL(" * "), SQL_ONE)
}
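# For example (illustrative): BasicMultiOp("basic.add", [a, b]) looks up the
# (" + ", SQL_ZERO) pair above and renders as the SQL fragment (a_sql) + (b_sql).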
@extend(BasicMultiOp)
def to_sql(self, schema, not_null=False, boolean=False):
op, identity = _sql_operators[self.op]
sql = op.join(sql_iso(t.to_sql(schema)[0].sql.n) for t in self.terms)
return wrap([{"name": ".", "sql": {"n": sql}}])
@extend(RegExpOp)
def to_sql(self, schema, not_null=False, boolean=False):
pattern = quote_value(json2value(self.pattern.json))
value = self.var.to_sql(schema)[0].sql.s
return wrap([
{"name": ".", "sql": {"b": value + " REGEXP " + pattern}}
])
@extend(CoalesceOp)
def to_sql(self, schema, not_null=False, boolean=False):
acc = {
"b": [],
"s": [],
"n": []
}
for term in self.terms:
for t, v in term.to_sql(schema)[0].sql.items():
acc[t].append(v)
output = {}
for t, terms in acc.items():
if not terms:
continue
elif len(terms) == 1:
output[t] = terms[0]
else:
output[t] = sql_coalesce(terms)
return wrap([{"name": ".", "sql": output}])
@extend(MissingOp)
def to_sql(self, schema, not_null=False, boolean=False):
value = self.expr.partial_eval()
missing_value = value.missing().partial_eval()
if not isinstance(missing_value, MissingOp):
return missing_value.to_sql(schema)
value_sql = value.to_sql(schema)
if len(value_sql) > 1:
return wrap([{"name": ".", "sql": {"b": SQL_FALSE}}])
acc = []
for c in value_sql:
for t, v in c.sql.items():
if t == "b":
acc.append(sql_iso(v) + SQL_IS_NULL)
if t == "s":
acc.append(sql_iso(sql_iso(v) + SQL_IS_NULL) + SQL_OR + sql_iso(sql_iso(v) + "=" + SQL_EMPTY_STRING))
if t == "n":
acc.append(sql_iso(v) + SQL_IS_NULL)
if not acc:
return wrap([{"name": ".", "sql": {"b": SQL_TRUE}}])
else:
return wrap([{"name": ".", "sql": {"b": SQL_AND.join(acc)}}])
@extend(WhenOp)
def to_sql(self, schema, not_null=False, boolean=False):
when = self.when.partial_eval().to_sql(schema, boolean=True)[0].sql
then = self.then.partial_eval().to_sql(schema, not_null=not_null)[0].sql
els_ = self.els_.partial_eval().to_sql(schema, not_null=not_null)[0].sql
output = {}
for t in "bsn":
if then[t] == None:
if els_[t] == None:
pass
else:
output[t] = SQL_CASE+SQL_WHEN + when.b + SQL_THEN + SQL_NULL + SQL_ELSE + els_[t] + SQL_END
else:
if els_[t] == None:
output[t] = SQL_CASE+SQL_WHEN + when.b + SQL_THEN + then[t] + SQL_END
else:
output[t] = SQL_CASE+SQL_WHEN + when.b + SQL_THEN + then[t] + SQL_ELSE + els_[t] + SQL_END
if not output:
return wrap([{"name": ".", "sql": {"0": SQL_NULL}}])
else:
return wrap([{"name": ".", "sql": output}])
@extend(ExistsOp)
def to_sql(self, schema, not_null=False, boolean=False):
field = self.field.to_sql(schema)[0].sql
acc = []
for t, v in field.items():
if t in "bns":
acc.append(sql_iso(v + SQL_IS_NOT_NULL))
if not acc:
return wrap([{"name": ".", "sql": {"b": SQL_FALSE}}])
else:
return wrap([{"name": ".", "sql": {"b": SQL_OR.join(acc)}}])
@extend(PrefixOp)
def to_sql(self, schema, not_null=False, boolean=False):
if not self.expr:
return wrap([{"name": ".", "sql": {"b": SQL_TRUE}}])
else:
return wrap([{"name": ".", "sql": {
"b": "INSTR" + sql_iso(self.expr.to_sql(schema)[0].sql.s + ", " + self.prefix.to_sql(schema)[0].sql.s) + "==1"
}}])
@extend(SuffixOp)
def to_sql(self, schema, not_null=False, boolean=False):
if not self.expr:
return wrap([{"name": ".", "sql": {"b": SQL_FALSE}}])
elif isinstance(self.suffix, Literal) and not self.suffix.value:
return wrap([{"name": ".", "sql": {"b": SQL_TRUE}}])
else:
return EqOp(
"eq",
[
RightOp("right", [self.expr, LengthOp("length", self.suffix)]),
self.suffix
]
).partial_eval().to_sql(schema)
@extend(ConcatOp)
def to_sql(self, schema, not_null=False, boolean=False):
defult = self.default.to_sql(schema)
if len(self.terms) == 0:
return defult
defult = coalesce(defult[0].sql, SQL_NULL)
sep = self.separator.to_sql(schema)[0].sql.s
acc = []
for t in self.terms:
missing = t.missing().partial_eval()
term = t.to_sql(schema, not_null=True)[0].sql
if term.s:
term_sql = term.s
elif term.n:
term_sql = "cast(" + term.n + " as text)"
else:
term_sql = SQL_CASE + SQL_WHEN + term.b + SQL_THEN + quote_value("true") + SQL_ELSE + quote_value("false") + SQL_END
if isinstance(missing, TrueOp):
acc.append(SQL_EMPTY_STRING)
elif missing:
acc.append(
SQL_CASE +
SQL_WHEN + sql_iso(missing.to_sql(schema, boolean=True)[0].sql.b) +
SQL_THEN + SQL_EMPTY_STRING +
SQL_ELSE + sql_iso(sql_concat([sep, term_sql])) +
SQL_END
)
else:
acc.append(sql_concat([sep, term_sql]))
expr_ = "substr(" + sql_concat(acc) + ", " + LengthOp(None, self.separator).to_sql(schema)[0].sql.n + "+1)"
missing = self.missing()
if not missing:
return wrap([{"name": ".", "sql": {"s": expr_}}])
else:
return wrap([{
"name": ".",
"sql": {
"s": SQL_CASE+SQL_WHEN+"(" + missing.to_sql(schema, boolean=True)[0].sql.b +
")"+SQL_THEN+"(" + defult +
")"+SQL_ELSE+"(" + expr_ +
")"+SQL_END
}
}])
@extend(UnixOp)
def to_sql(self, schema, not_null=False, boolean=False):
v = self.value.to_sql(schema)[0].sql
return wrap([{
"name": ".",
"sql": {"n": "UNIX_TIMESTAMP" + sql_iso(v.n)}
}])
@extend(FromUnixOp)
def to_sql(self, schema, not_null=False, boolean=False):
v = self.value.to_sql(schema)[0].sql
return wrap([{
"name": ".",
"sql": {"n": "FROM_UNIXTIME" + sql_iso(v.n)}
}])
@extend(LeftOp)
def to_sql(self, schema, not_null=False, boolean=False):
return SqlSubstrOp(
"substr",
[
self.value,
ONE,
self.length
]
).partial_eval().to_sql(schema)
@extend(NotLeftOp)
def to_sql(self, schema, not_null=False, boolean=False):
# test_v = self.value.missing().to_sql(boolean=True)[0].sql.b
# test_l = self.length.missing().to_sql(boolean=True)[0].sql.b
v = self.value.to_sql(schema, not_null=True)[0].sql.s
l = "max(0, " + self.length.to_sql(schema, not_null=True)[0].sql.n + ")"
expr = "substr(" + v + ", " + l + "+1)"
return wrap([{"name": ".", "sql": {"s": expr}}])
@extend(RightOp)
def to_sql(self, schema, not_null=False, boolean=False):
v = self.value.to_sql(schema, not_null=True)[0].sql.s
r = self.length.to_sql(schema, not_null=True)[0].sql.n
l = "max(0, length" + sql_iso(v) + "-max(0, " + r + "))"
expr = "substr(" + v + ", " + l + "+1)"
return wrap([{"name": ".", "sql": {"s": expr}}])
@extend(RightOp)
@simplified
def partial_eval(self):
value = self.value.partial_eval()
length = self.length.partial_eval()
max_length = LengthOp("length", value)
return BasicSubstringOp("substring", [
value,
MaxOp("max", [ZERO, MinOp("min", [max_length, BinaryOp("sub", [max_length, length])])]),
max_length
])
@extend(NotRightOp)
def to_sql(self, schema, not_null=False, boolean=False):
v = self.value.to_sql(schema, not_null=True)[0].sql.s
r = self.length.to_sql(schema, not_null=True)[0].sql.n
l = "max(0, length" + sql_iso(v) + "-max(0, " + r + "))"
expr = "substr" + sql_iso(v + ", 1, " + l)
return wrap([{"name": ".", "sql": {"s": expr}}])
@extend(FindOp)
def to_sql(self, schema, not_null=False, boolean=False):
test = SqlInstrOp("substr", [
SqlSubstrOp("substr", [
self.value,
MultiOp("add", [self.start, ONE]),
NULL
]),
self.find
]).partial_eval()
if boolean:
return test.to_sql(schema)
else:
offset = BinaryOp("sub", [self.start, ONE]).partial_eval()
index = MultiOp("add", [test, offset]).partial_eval()
temp = index.to_sql(schema)
return WhenOp(
"when",
EqOp("eq", [test, ZERO]),
**{
"then": self.default,
"else": index
}
).partial_eval().to_sql(schema)
@extend(FindOp)
@simplified
def partial_eval(self):
return FindOp(
"find",
[
self.value.partial_eval(),
self.find.partial_eval()
],
**{
"start": self.start.partial_eval(),
"default": self.default.partial_eval()
}
)
@extend(BetweenOp)
def to_sql(self, schema, not_null=False, boolean=False):
return self.partial_eval().to_sql(schema)
@extend(InOp)
def to_sql(self, schema, not_null=False, boolean=False):
if not isinstance(self.superset, Literal):
Log.error("Not supported")
j_value = json2value(self.superset.json)
if j_value:
var = self.value.to_sql(schema)
return SQL_OR.join(sql_iso(var + "==" + quote_value(v)) for v in j_value)
else:
return wrap([{"name": ".", "sql": {"b": SQL_FALSE}}])
@extend(RangeOp)
def to_sql(self, schema, not_null=False, boolean=False):
when = self.when.to_sql(schema, boolean=True)[0].sql
then = self.then.to_sql(schema, not_null=not_null)[0].sql
els_ = self.els_.to_sql(schema, not_null=not_null)[0].sql
output = {}
for t in "bsn":
if then[t] == None:
if els_[t] == None:
pass
else:
output[t] = SQL_CASE+SQL_WHEN + when.b + SQL_THEN + SQL_NULL + SQL_ELSE + els_[t] + SQL_END
else:
if els_[t] == None:
output[t] = SQL_CASE+SQL_WHEN + when.b + SQL_THEN + then[t] + SQL_END
else:
output[t] = SQL_CASE+SQL_WHEN + when.b + SQL_THEN + then[t] + SQL_ELSE + els_[t] + SQL_END
if not output:
return wrap([{"name": ".", "sql": {"0": SQL_NULL}}])
else:
return wrap([{"name": ".", "sql": output}])
@extend(CaseOp)
def to_sql(self, schema, not_null=False, boolean=False):
if len(self.whens) == 1:
return self.whens[-1].to_sql(schema)
output = {}
for t in "bsn": # EXPENSIVE LOOP to_sql() RUN 3 TIMES
els_ = coalesce(self.whens[-1].to_sql(schema)[0].sql[t], SQL_NULL)
acc = SQL_ELSE + els_ + SQL_END
for w in reversed(self.whens[0:-1]):
acc = SQL_WHEN + w.when.to_sql(schema, boolean=True)[0].sql.b + SQL_THEN + coalesce(w.then.to_sql(schema)[0].sql[t], SQL_NULL) + acc
output[t] = SQL_CASE + acc
return wrap([{"name": ".", "sql": output}])
@extend(SqlEqOp)
def to_sql(self, schema, not_null=False, boolean=False):
lhs = self.lhs.partial_eval().to_sql(schema)[0].sql.values()[0]
rhs = self.rhs.partial_eval().to_sql(schema)[0].sql.values()[0]
return wrap([{"name": ".", "sql": {
"b": sql_iso(lhs) + "=" + sql_iso(rhs)
}}])
@extend(SqlInstrOp)
def to_sql(self, schema, not_null=False, boolean=False):
value = self.value.to_sql(schema)[0].sql.s
find = self.find.to_sql(schema)[0].sql.s
return wrap([{"name": ".", "sql": {
"n": "INSTR" + sql_iso(sql_list([value, find]))
}}])
@extend(SqlInstrOp)
@simplified
def partial_eval(self):
value = self.value.partial_eval()
find = self.find.partial_eval()
return SqlInstrOp("instr", [value, find])
@extend(SqlSubstrOp)
def to_sql(self, schema, not_null=False, boolean=False):
value = self.value.to_sql(schema)[0].sql.s
start = self.start.to_sql(schema)[0].sql.n
if self.length is NULL:
return wrap([{"name": ".", "sql": {
"s": "SUBSTR" + sql_iso(sql_list([value, start]))
}}])
else:
length = self.length.to_sql(schema)[0].sql.n
return wrap([{"name": ".", "sql": {
"s": "SUBSTR" + sql_iso(sql_list([value, start, length]))
}}])
@extend(SqlSubstrOp)
@simplified
def partial_eval(self):
value = self.value.partial_eval()
start = self.start.partial_eval()
length = self.length.partial_eval()
if isinstance(start, Literal) and start.value == 1:
if length is NULL:
return value
return SqlSubstrOp("substr", [value, start, length])
json_type_to_sql_type = {
"null": "0",
"boolean": "b",
"number": "n",
"string": "s",
"object": "j",
"nested": "j"
}
sql_type_to_json_type = {
"0": "null",
"b": "boolean",
"n": "number",
"s": "string",
"j": "object"
}
| klahnakoski/JsonSchemaToMarkdown | vendor/jx_sqlite/expressions.py | Python | mpl-2.0 | 30,120 |
"""TxnReconcile allow txn_id to be null
Revision ID: 08b6358a04bf
Revises: 04e61490804b
Create Date: 2018-03-07 19:48:06.050926
"""
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '08b6358a04bf'
down_revision = '04e61490804b'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column(
'txn_reconciles', 'txn_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=True
)
def downgrade():
conn = op.get_bind()
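    # txn_id participates in a foreign key, and MySQL refuses to ALTER such a
    # column while the constraint is enforced, so FK checks are toggled off
    # around the alter_column call (an assumption about the original intent).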
conn.execute("SET FOREIGN_KEY_CHECKS=0")
op.alter_column(
'txn_reconciles', 'txn_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=False
)
conn.execute("SET FOREIGN_KEY_CHECKS=1")
| jantman/biweeklybudget | biweeklybudget/alembic/versions/08b6358a04bf_txnreconcile_allow_txn_id_to_be_null.py | Python | agpl-3.0 | 765 |
from cl.api import views
from cl.audio import api_views as audio_views
from cl.people_db import api_views as judge_views
from cl.search import api_views as search_views
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
# Search & Audio
router.register(r'dockets', search_views.DocketViewSet)
router.register(r'courts', search_views.CourtViewSet)
router.register(r'audio', audio_views.AudioViewSet)
router.register(r'clusters', search_views.OpinionClusterViewSet)
router.register(r'opinions', search_views.OpinionViewSet)
router.register(r'opinions-cited', search_views.OpinionsCitedViewSet)
router.register(r'search', search_views.SearchViewSet, base_name='search')
# Judges
router.register(r'people', judge_views.PersonViewSet)
router.register(r'positions', judge_views.PositionViewSet)
router.register(r'retention-events', judge_views.RetentionEventViewSet)
router.register(r'educations', judge_views.EducationViewSet)
router.register(r'schools', judge_views.SchoolViewSet)
router.register(r'political-affiliations',
judge_views.PoliticalAffiliationViewSet)
router.register(r'sources', judge_views.SourceViewSet)
router.register(r'aba-ratings', judge_views.ABARatingViewSet)
urlpatterns = [
url(r'^api-auth/',
include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/rest/(?P<version>[v3]+)/', include(router.urls)),
# Documentation
url(r'^api/$',
views.api_index,
name='api_index'),
url(r'^api/jurisdictions/$',
views.court_index,
name='court_index'),
url(r'^api/rest-info/(?P<version>v[123])?/?$',
views.rest_docs,
name='rest_docs'),
url(r'^api/bulk-info/$',
views.bulk_data_index,
name='bulk_data_index'),
url(r'^api/rest/v(?P<version>[123])/coverage/(?P<court>.+)/$',
views.coverage_data,
name='coverage_data'),
# Pagerank file
url(r'^api/bulk/external_pagerank/$',
views.serve_pagerank_file,
name='pagerank_file'),
# Deprecation Dates:
# v1: 2016-04-01
# v2: 2016-04-01
url(r'^api/rest/v(?P<v>[12])/.*',
views.deprecated_api,
name='deprecated_api'),
]
| voutilad/courtlistener | cl/api/urls.py | Python | agpl-3.0 | 2,240 |
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from akvo.rsr.forms import (check_password_minimum_length, check_password_has_number,
check_password_has_upper, check_password_has_lower,
check_password_has_symbol)
from akvo.rsr.models import ProjectHierarchy
from .employment import EmploymentSerializer
from .organisation import (
OrganisationExtraSerializer, OrganisationBasicSerializer, UserManagementOrgSerializer)
from .program import ProgramSerializer
from .rsr_serializer import BaseRSRSerializer
class UserRawSerializer(BaseRSRSerializer):
"""
Raw user serializer.
"""
class Meta:
model = get_user_model()
fields = (
'id',
'first_name',
'last_name',
'email',
'is_active',
'is_staff',
'is_admin',
'is_support',
'is_superuser',
)
class UserSerializer(BaseRSRSerializer):
# Needed to show only the first organisation of the user
organisation = OrganisationExtraSerializer(source='first_organisation', required=False,)
organisations = OrganisationExtraSerializer(many=True, required=False,)
user_management_organisations = UserManagementOrgSerializer(many=True, required=False)
approved_employments = EmploymentSerializer(many=True, required=False,)
api_key = serializers.ReadOnlyField(source='get_api_key')
# Legacy fields to support Tastypie API emulation
legacy_org = serializers.SerializerMethodField()
username = serializers.SerializerMethodField()
can_manage_users = serializers.SerializerMethodField()
programs = serializers.SerializerMethodField()
class Meta:
model = get_user_model()
fields = (
'id',
'first_name',
'last_name',
'email',
'username',
'is_active',
'is_staff',
'is_admin',
'is_support',
'is_superuser',
'can_manage_users',
'organisation',
'organisations',
'approved_employments',
'api_key',
'legacy_org',
'programs',
'user_management_organisations',
'seen_announcements',
)
def __init__(self, *args, **kwargs):
""" Delete the 'absolute_url' field added in BaseRSRSerializer.__init__().
It's neither correct nor do we want this data to be visible.
Remove the fields "legacy_org" and "username" that are only present to support older
versions of Up calling the Tastypie API endpoints that we now emulate using DRF
"""
super(UserSerializer, self).__init__(*args, **kwargs)
del self.fields['absolute_url']
# Remove the fields unless we're called via Tastypie URLs
request = kwargs.get("context", {}).get("request", None)
if request and "/api/v1/" not in request.path:
del self.fields['legacy_org']
del self.fields['username']
def get_legacy_org(self, obj):
""" Up needs the last tag to be the user's org, it only needs the org ID
"""
if obj.first_organisation():
return {"object": {"id": obj.first_organisation().id}}
return None
def get_username(self, obj):
return obj.email
def get_can_manage_users(self, obj):
return obj.has_perm('rsr.user_management')
def get_programs(self, user):
hierarchies = ProjectHierarchy.objects.select_related('root_project')\
.prefetch_related('root_project__partners').all()
if not (user.is_superuser or user.is_admin):
hierarchies = hierarchies.filter(root_project__in=user.my_projects()).distinct()
return ProgramSerializer(hierarchies, many=True, context=self.context).data
class UserPasswordSerializer(serializers.Serializer):
"""Change password serializer"""
old_password = serializers.CharField(
help_text='Current Password',
)
new_password1 = serializers.CharField(
help_text='New Password',
)
new_password2 = serializers.CharField(
help_text='New Password (confirmation)',
)
class Meta:
fields = '__all__'
def validate_old_password(self, value):
"""Check for current password"""
if not self.instance.check_password(value):
raise serializers.ValidationError(_('Old password is not correct.'))
return value
def validate(self, data):
"""Check if password1 and password2 match"""
if data['new_password1'] != data['new_password2']:
raise serializers.ValidationError(_('Passwords do not match.'))
password = data['new_password1']
check_password_minimum_length(password)
check_password_has_number(password)
check_password_has_upper(password)
check_password_has_lower(password)
check_password_has_symbol(password)
return data
def update(self, instance, validated_data):
instance.set_password(validated_data.get('new_password2', instance.password))
return instance
class UserDetailsSerializer(BaseRSRSerializer):
approved_organisations = OrganisationBasicSerializer(many=True, required=False)
email = serializers.ReadOnlyField()
class Meta:
model = get_user_model()
fields = (
'id',
'email',
'first_name',
'last_name',
'approved_organisations',
)
def __init__(self, *args, **kwargs):
""" Delete the 'absolute_url' field added in BaseRSRSerializer.__init__().
It's neither correct nor do we want this data to be visible.
"""
super(UserDetailsSerializer, self).__init__(*args, **kwargs)
del self.fields['absolute_url']
| akvo/akvo-rsr | akvo/rest/serializers/user.py | Python | agpl-3.0 | 6,303 |
# -*- coding: utf-8 -*-
# © 2015 Elico corp (www.elico-corp.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import base64
import random
import string
from binascii import hexlify, unhexlify
from openerp import api, fields, models
try:
from captcha.image import ImageCaptcha
except ImportError:
pass
try:
from simplecrypt import decrypt, encrypt
except ImportError:
pass
class Website(models.Model):
_inherit = 'website'
captcha = fields.Text('Captcha', compute="_captcha", store=False)
captcha_crypt_challenge = fields.Char(
'Crypt', compute="_captcha", store=False)
captcha_crypt_password = fields.Char(
default=lambda self: self._default_salt(),
required=True, help='''
The secret value used as the basis for a key.
        This should be as long and as varied as possible.
Try to avoid common words.''')
captcha_length = fields.Selection(
'_captcha_length', default='4', required=True)
captcha_chars = fields.Selection(
'_captcha_chars', default='digits', required=True)
    def is_captcha_valid(self, crypt_challenge, response):
        challenge = decrypt(
            self.captcha_crypt_password, unhexlify(crypt_challenge))
        return response.upper() == challenge
@api.depends('captcha_length', 'captcha_chars')
@api.one
def _captcha(self):
captcha = ImageCaptcha()
captcha_challenge = self._generate_random_str(
self._get_captcha_chars(), int(self.captcha_length))
self.captcha_crypt_challenge = hexlify(
encrypt(self.captcha_crypt_password, captcha_challenge))
out = captcha.generate(captcha_challenge).getvalue()
self.captcha = base64.b64encode(out)
def _generate_random_str(self, chars, size):
return ''.join(random.choice(chars) for _ in range(size))
    def _default_salt(self):
        # Generate a random salt to use as the default crypt password.
        return self._generate_random_str(
            string.digits + string.letters + string.punctuation, 100)
def _captcha_length(self):
return [(str(i), str(i)) for i in range(1, 11)]
def _captcha_chars(self):
return [
('digits', 'Digits only'),
('hexadecimal', 'Hexadecimal'),
('all', 'Letters and Digits')]
def _get_captcha_chars(self):
chars = string.digits
if self.captcha_chars == 'hexadecimal':
# do not use the default string.hexdigits because it contains
# lowercase
chars += 'ABCDEF'
elif self.captcha_chars == 'all':
chars += string.uppercase
return chars
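# Illustrative round trip (an assumption, not part of the original module); `website`
# stands for any website record. Reading the computed fields yields a fresh challenge,
# and the hex-encoded ciphertext is what a form would post back for validation:
#
#   image_b64 = website.captcha               # base64 image for the <img> tag
#   crypt = website.captcha_crypt_challenge   # computed together with the image
#   website.is_captcha_valid(crypt, 'a1b2')   # True iff the challenge was 'A1B2'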
| Elico-Corp/odoo-addons | website_captcha_nogoogle/website.py | Python | agpl-3.0 | 2,704 |
# Generated by Django 2.2.15 on 2020-12-01 13:12
from django.db import migrations
import djmoney.models.fields
class Migration(migrations.Migration):
dependencies = [
("projects", "0008_auto_20190220_1133"),
]
operations = [
migrations.AddField(
model_name="project",
name="amount_invoiced",
field=djmoney.models.fields.MoneyField(
blank=True,
decimal_places=2,
default_currency="CHF",
max_digits=10,
null=True,
),
),
migrations.AddField(
model_name="project",
name="amount_invoiced_currency",
field=djmoney.models.fields.CurrencyField(
choices=[
("XUA", "ADB Unit of Account"),
("AFN", "Afghani"),
("DZD", "Algerian Dinar"),
("ARS", "Argentine Peso"),
("AMD", "Armenian Dram"),
("AWG", "Aruban Guilder"),
("AUD", "Australian Dollar"),
("AZN", "Azerbaijanian Manat"),
("BSD", "Bahamian Dollar"),
("BHD", "Bahraini Dinar"),
("THB", "Baht"),
("PAB", "Balboa"),
("BBD", "Barbados Dollar"),
("BYN", "Belarussian Ruble"),
("BYR", "Belarussian Ruble"),
("BZD", "Belize Dollar"),
("BMD", "Bermudian Dollar (customarily known as Bermuda Dollar)"),
("BTN", "Bhutanese ngultrum"),
("VEF", "Bolivar Fuerte"),
("BOB", "Boliviano"),
("XBA", "Bond Markets Units European Composite Unit (EURCO)"),
("BRL", "Brazilian Real"),
("BND", "Brunei Dollar"),
("BGN", "Bulgarian Lev"),
("BIF", "Burundi Franc"),
("XOF", "CFA Franc BCEAO"),
("XAF", "CFA franc BEAC"),
("XPF", "CFP Franc"),
("CAD", "Canadian Dollar"),
("CVE", "Cape Verde Escudo"),
("KYD", "Cayman Islands Dollar"),
("CLP", "Chilean peso"),
("XTS", "Codes specifically reserved for testing purposes"),
("COP", "Colombian peso"),
("KMF", "Comoro Franc"),
("CDF", "Congolese franc"),
("BAM", "Convertible Marks"),
("NIO", "Cordoba Oro"),
("CRC", "Costa Rican Colon"),
("HRK", "Croatian Kuna"),
("CUP", "Cuban Peso"),
("CUC", "Cuban convertible peso"),
("CZK", "Czech Koruna"),
("GMD", "Dalasi"),
("DKK", "Danish Krone"),
("MKD", "Denar"),
("DJF", "Djibouti Franc"),
("STD", "Dobra"),
("DOP", "Dominican Peso"),
("VND", "Dong"),
("XCD", "East Caribbean Dollar"),
("EGP", "Egyptian Pound"),
("SVC", "El Salvador Colon"),
("ETB", "Ethiopian Birr"),
("EUR", "Euro"),
("XBB", "European Monetary Unit (E.M.U.-6)"),
("XBD", "European Unit of Account 17(E.U.A.-17)"),
("XBC", "European Unit of Account 9(E.U.A.-9)"),
("FKP", "Falkland Islands Pound"),
("FJD", "Fiji Dollar"),
("HUF", "Forint"),
("GHS", "Ghana Cedi"),
("GIP", "Gibraltar Pound"),
("XAU", "Gold"),
("XFO", "Gold-Franc"),
("PYG", "Guarani"),
("GNF", "Guinea Franc"),
("GYD", "Guyana Dollar"),
("HTG", "Haitian gourde"),
("HKD", "Hong Kong Dollar"),
("UAH", "Hryvnia"),
("ISK", "Iceland Krona"),
("INR", "Indian Rupee"),
("IRR", "Iranian Rial"),
("IQD", "Iraqi Dinar"),
("IMP", "Isle of Man Pound"),
("JMD", "Jamaican Dollar"),
("JOD", "Jordanian Dinar"),
("KES", "Kenyan Shilling"),
("PGK", "Kina"),
("LAK", "Kip"),
("KWD", "Kuwaiti Dinar"),
("AOA", "Kwanza"),
("MMK", "Kyat"),
("GEL", "Lari"),
("LVL", "Latvian Lats"),
("LBP", "Lebanese Pound"),
("ALL", "Lek"),
("HNL", "Lempira"),
("SLL", "Leone"),
("LSL", "Lesotho loti"),
("LRD", "Liberian Dollar"),
("LYD", "Libyan Dinar"),
("SZL", "Lilangeni"),
("LTL", "Lithuanian Litas"),
("MGA", "Malagasy Ariary"),
("MWK", "Malawian Kwacha"),
("MYR", "Malaysian Ringgit"),
("TMM", "Manat"),
("MUR", "Mauritius Rupee"),
("MZN", "Metical"),
("MXV", "Mexican Unidad de Inversion (UDI)"),
("MXN", "Mexican peso"),
("MDL", "Moldovan Leu"),
("MAD", "Moroccan Dirham"),
("BOV", "Mvdol"),
("NGN", "Naira"),
("ERN", "Nakfa"),
("NAD", "Namibian Dollar"),
("NPR", "Nepalese Rupee"),
("ANG", "Netherlands Antillian Guilder"),
("ILS", "New Israeli Sheqel"),
("RON", "New Leu"),
("TWD", "New Taiwan Dollar"),
("NZD", "New Zealand Dollar"),
("KPW", "North Korean Won"),
("NOK", "Norwegian Krone"),
("PEN", "Nuevo Sol"),
("MRO", "Ouguiya"),
("TOP", "Paanga"),
("PKR", "Pakistan Rupee"),
("XPD", "Palladium"),
("MOP", "Pataca"),
("PHP", "Philippine Peso"),
("XPT", "Platinum"),
("GBP", "Pound Sterling"),
("BWP", "Pula"),
("QAR", "Qatari Rial"),
("GTQ", "Quetzal"),
("ZAR", "Rand"),
("OMR", "Rial Omani"),
("KHR", "Riel"),
("MVR", "Rufiyaa"),
("IDR", "Rupiah"),
("RUB", "Russian Ruble"),
("RWF", "Rwanda Franc"),
("XDR", "SDR"),
("SHP", "Saint Helena Pound"),
("SAR", "Saudi Riyal"),
("RSD", "Serbian Dinar"),
("SCR", "Seychelles Rupee"),
("XAG", "Silver"),
("SGD", "Singapore Dollar"),
("SBD", "Solomon Islands Dollar"),
("KGS", "Som"),
("SOS", "Somali Shilling"),
("TJS", "Somoni"),
("SSP", "South Sudanese Pound"),
("LKR", "Sri Lanka Rupee"),
("XSU", "Sucre"),
("SDG", "Sudanese Pound"),
("SRD", "Surinam Dollar"),
("SEK", "Swedish Krona"),
("CHF", "Swiss Franc"),
("SYP", "Syrian Pound"),
("BDT", "Taka"),
("WST", "Tala"),
("TZS", "Tanzanian Shilling"),
("KZT", "Tenge"),
(
"XXX",
"The codes assigned for transactions where no currency is involved",
),
("TTD", "Trinidad and Tobago Dollar"),
("MNT", "Tugrik"),
("TND", "Tunisian Dinar"),
("TRY", "Turkish Lira"),
("TMT", "Turkmenistan New Manat"),
("TVD", "Tuvalu dollar"),
("AED", "UAE Dirham"),
("XFU", "UIC-Franc"),
("USD", "US Dollar"),
("USN", "US Dollar (Next day)"),
("UGX", "Uganda Shilling"),
("CLF", "Unidad de Fomento"),
("COU", "Unidad de Valor Real"),
("UYI", "Uruguay Peso en Unidades Indexadas (URUIURUI)"),
("UYU", "Uruguayan peso"),
("UZS", "Uzbekistan Sum"),
("VUV", "Vatu"),
("CHE", "WIR Euro"),
("CHW", "WIR Franc"),
("KRW", "Won"),
("YER", "Yemeni Rial"),
("JPY", "Yen"),
("CNY", "Yuan Renminbi"),
("ZMK", "Zambian Kwacha"),
("ZMW", "Zambian Kwacha"),
("ZWD", "Zimbabwe Dollar A/06"),
("ZWN", "Zimbabwe dollar A/08"),
("ZWL", "Zimbabwe dollar A/09"),
("PLN", "Zloty"),
],
default="CHF",
editable=False,
max_length=3,
),
),
migrations.AddField(
model_name="project",
name="amount_offered",
field=djmoney.models.fields.MoneyField(
blank=True,
decimal_places=2,
default_currency="CHF",
max_digits=10,
null=True,
),
),
migrations.AddField(
model_name="project",
name="amount_offered_currency",
field=djmoney.models.fields.CurrencyField(
choices=[
("XUA", "ADB Unit of Account"),
("AFN", "Afghani"),
("DZD", "Algerian Dinar"),
("ARS", "Argentine Peso"),
("AMD", "Armenian Dram"),
("AWG", "Aruban Guilder"),
("AUD", "Australian Dollar"),
("AZN", "Azerbaijanian Manat"),
("BSD", "Bahamian Dollar"),
("BHD", "Bahraini Dinar"),
("THB", "Baht"),
("PAB", "Balboa"),
("BBD", "Barbados Dollar"),
("BYN", "Belarussian Ruble"),
("BYR", "Belarussian Ruble"),
("BZD", "Belize Dollar"),
("BMD", "Bermudian Dollar (customarily known as Bermuda Dollar)"),
("BTN", "Bhutanese ngultrum"),
("VEF", "Bolivar Fuerte"),
("BOB", "Boliviano"),
("XBA", "Bond Markets Units European Composite Unit (EURCO)"),
("BRL", "Brazilian Real"),
("BND", "Brunei Dollar"),
("BGN", "Bulgarian Lev"),
("BIF", "Burundi Franc"),
("XOF", "CFA Franc BCEAO"),
("XAF", "CFA franc BEAC"),
("XPF", "CFP Franc"),
("CAD", "Canadian Dollar"),
("CVE", "Cape Verde Escudo"),
("KYD", "Cayman Islands Dollar"),
("CLP", "Chilean peso"),
("XTS", "Codes specifically reserved for testing purposes"),
("COP", "Colombian peso"),
("KMF", "Comoro Franc"),
("CDF", "Congolese franc"),
("BAM", "Convertible Marks"),
("NIO", "Cordoba Oro"),
("CRC", "Costa Rican Colon"),
("HRK", "Croatian Kuna"),
("CUP", "Cuban Peso"),
("CUC", "Cuban convertible peso"),
("CZK", "Czech Koruna"),
("GMD", "Dalasi"),
("DKK", "Danish Krone"),
("MKD", "Denar"),
("DJF", "Djibouti Franc"),
("STD", "Dobra"),
("DOP", "Dominican Peso"),
("VND", "Dong"),
("XCD", "East Caribbean Dollar"),
("EGP", "Egyptian Pound"),
("SVC", "El Salvador Colon"),
("ETB", "Ethiopian Birr"),
("EUR", "Euro"),
("XBB", "European Monetary Unit (E.M.U.-6)"),
("XBD", "European Unit of Account 17(E.U.A.-17)"),
("XBC", "European Unit of Account 9(E.U.A.-9)"),
("FKP", "Falkland Islands Pound"),
("FJD", "Fiji Dollar"),
("HUF", "Forint"),
("GHS", "Ghana Cedi"),
("GIP", "Gibraltar Pound"),
("XAU", "Gold"),
("XFO", "Gold-Franc"),
("PYG", "Guarani"),
("GNF", "Guinea Franc"),
("GYD", "Guyana Dollar"),
("HTG", "Haitian gourde"),
("HKD", "Hong Kong Dollar"),
("UAH", "Hryvnia"),
("ISK", "Iceland Krona"),
("INR", "Indian Rupee"),
("IRR", "Iranian Rial"),
("IQD", "Iraqi Dinar"),
("IMP", "Isle of Man Pound"),
("JMD", "Jamaican Dollar"),
("JOD", "Jordanian Dinar"),
("KES", "Kenyan Shilling"),
("PGK", "Kina"),
("LAK", "Kip"),
("KWD", "Kuwaiti Dinar"),
("AOA", "Kwanza"),
("MMK", "Kyat"),
("GEL", "Lari"),
("LVL", "Latvian Lats"),
("LBP", "Lebanese Pound"),
("ALL", "Lek"),
("HNL", "Lempira"),
("SLL", "Leone"),
("LSL", "Lesotho loti"),
("LRD", "Liberian Dollar"),
("LYD", "Libyan Dinar"),
("SZL", "Lilangeni"),
("LTL", "Lithuanian Litas"),
("MGA", "Malagasy Ariary"),
("MWK", "Malawian Kwacha"),
("MYR", "Malaysian Ringgit"),
("TMM", "Manat"),
("MUR", "Mauritius Rupee"),
("MZN", "Metical"),
("MXV", "Mexican Unidad de Inversion (UDI)"),
("MXN", "Mexican peso"),
("MDL", "Moldovan Leu"),
("MAD", "Moroccan Dirham"),
("BOV", "Mvdol"),
("NGN", "Naira"),
("ERN", "Nakfa"),
("NAD", "Namibian Dollar"),
("NPR", "Nepalese Rupee"),
("ANG", "Netherlands Antillian Guilder"),
("ILS", "New Israeli Sheqel"),
("RON", "New Leu"),
("TWD", "New Taiwan Dollar"),
("NZD", "New Zealand Dollar"),
("KPW", "North Korean Won"),
("NOK", "Norwegian Krone"),
("PEN", "Nuevo Sol"),
("MRO", "Ouguiya"),
("TOP", "Paanga"),
("PKR", "Pakistan Rupee"),
("XPD", "Palladium"),
("MOP", "Pataca"),
("PHP", "Philippine Peso"),
("XPT", "Platinum"),
("GBP", "Pound Sterling"),
("BWP", "Pula"),
("QAR", "Qatari Rial"),
("GTQ", "Quetzal"),
("ZAR", "Rand"),
("OMR", "Rial Omani"),
("KHR", "Riel"),
("MVR", "Rufiyaa"),
("IDR", "Rupiah"),
("RUB", "Russian Ruble"),
("RWF", "Rwanda Franc"),
("XDR", "SDR"),
("SHP", "Saint Helena Pound"),
("SAR", "Saudi Riyal"),
("RSD", "Serbian Dinar"),
("SCR", "Seychelles Rupee"),
("XAG", "Silver"),
("SGD", "Singapore Dollar"),
("SBD", "Solomon Islands Dollar"),
("KGS", "Som"),
("SOS", "Somali Shilling"),
("TJS", "Somoni"),
("SSP", "South Sudanese Pound"),
("LKR", "Sri Lanka Rupee"),
("XSU", "Sucre"),
("SDG", "Sudanese Pound"),
("SRD", "Surinam Dollar"),
("SEK", "Swedish Krona"),
("CHF", "Swiss Franc"),
("SYP", "Syrian Pound"),
("BDT", "Taka"),
("WST", "Tala"),
("TZS", "Tanzanian Shilling"),
("KZT", "Tenge"),
(
"XXX",
"The codes assigned for transactions where no currency is involved",
),
("TTD", "Trinidad and Tobago Dollar"),
("MNT", "Tugrik"),
("TND", "Tunisian Dinar"),
("TRY", "Turkish Lira"),
("TMT", "Turkmenistan New Manat"),
("TVD", "Tuvalu dollar"),
("AED", "UAE Dirham"),
("XFU", "UIC-Franc"),
("USD", "US Dollar"),
("USN", "US Dollar (Next day)"),
("UGX", "Uganda Shilling"),
("CLF", "Unidad de Fomento"),
("COU", "Unidad de Valor Real"),
("UYI", "Uruguay Peso en Unidades Indexadas (URUIURUI)"),
("UYU", "Uruguayan peso"),
("UZS", "Uzbekistan Sum"),
("VUV", "Vatu"),
("CHE", "WIR Euro"),
("CHW", "WIR Franc"),
("KRW", "Won"),
("YER", "Yemeni Rial"),
("JPY", "Yen"),
("CNY", "Yuan Renminbi"),
("ZMK", "Zambian Kwacha"),
("ZMW", "Zambian Kwacha"),
("ZWD", "Zimbabwe Dollar A/06"),
("ZWN", "Zimbabwe dollar A/08"),
("ZWL", "Zimbabwe dollar A/09"),
("PLN", "Zloty"),
],
default="CHF",
editable=False,
max_length=3,
),
),
migrations.AddField(
model_name="task",
name="amount_invoiced",
field=djmoney.models.fields.MoneyField(
blank=True,
decimal_places=2,
default_currency="CHF",
max_digits=10,
null=True,
),
),
migrations.AddField(
model_name="task",
name="amount_invoiced_currency",
field=djmoney.models.fields.CurrencyField(
choices=[
("XUA", "ADB Unit of Account"),
("AFN", "Afghani"),
("DZD", "Algerian Dinar"),
("ARS", "Argentine Peso"),
("AMD", "Armenian Dram"),
("AWG", "Aruban Guilder"),
("AUD", "Australian Dollar"),
("AZN", "Azerbaijanian Manat"),
("BSD", "Bahamian Dollar"),
("BHD", "Bahraini Dinar"),
("THB", "Baht"),
("PAB", "Balboa"),
("BBD", "Barbados Dollar"),
("BYN", "Belarussian Ruble"),
("BYR", "Belarussian Ruble"),
("BZD", "Belize Dollar"),
("BMD", "Bermudian Dollar (customarily known as Bermuda Dollar)"),
("BTN", "Bhutanese ngultrum"),
("VEF", "Bolivar Fuerte"),
("BOB", "Boliviano"),
("XBA", "Bond Markets Units European Composite Unit (EURCO)"),
("BRL", "Brazilian Real"),
("BND", "Brunei Dollar"),
("BGN", "Bulgarian Lev"),
("BIF", "Burundi Franc"),
("XOF", "CFA Franc BCEAO"),
("XAF", "CFA franc BEAC"),
("XPF", "CFP Franc"),
("CAD", "Canadian Dollar"),
("CVE", "Cape Verde Escudo"),
("KYD", "Cayman Islands Dollar"),
("CLP", "Chilean peso"),
("XTS", "Codes specifically reserved for testing purposes"),
("COP", "Colombian peso"),
("KMF", "Comoro Franc"),
("CDF", "Congolese franc"),
("BAM", "Convertible Marks"),
("NIO", "Cordoba Oro"),
("CRC", "Costa Rican Colon"),
("HRK", "Croatian Kuna"),
("CUP", "Cuban Peso"),
("CUC", "Cuban convertible peso"),
("CZK", "Czech Koruna"),
("GMD", "Dalasi"),
("DKK", "Danish Krone"),
("MKD", "Denar"),
("DJF", "Djibouti Franc"),
("STD", "Dobra"),
("DOP", "Dominican Peso"),
("VND", "Dong"),
("XCD", "East Caribbean Dollar"),
("EGP", "Egyptian Pound"),
("SVC", "El Salvador Colon"),
("ETB", "Ethiopian Birr"),
("EUR", "Euro"),
("XBB", "European Monetary Unit (E.M.U.-6)"),
("XBD", "European Unit of Account 17(E.U.A.-17)"),
("XBC", "European Unit of Account 9(E.U.A.-9)"),
("FKP", "Falkland Islands Pound"),
("FJD", "Fiji Dollar"),
("HUF", "Forint"),
("GHS", "Ghana Cedi"),
("GIP", "Gibraltar Pound"),
("XAU", "Gold"),
("XFO", "Gold-Franc"),
("PYG", "Guarani"),
("GNF", "Guinea Franc"),
("GYD", "Guyana Dollar"),
("HTG", "Haitian gourde"),
("HKD", "Hong Kong Dollar"),
("UAH", "Hryvnia"),
("ISK", "Iceland Krona"),
("INR", "Indian Rupee"),
("IRR", "Iranian Rial"),
("IQD", "Iraqi Dinar"),
("IMP", "Isle of Man Pound"),
("JMD", "Jamaican Dollar"),
("JOD", "Jordanian Dinar"),
("KES", "Kenyan Shilling"),
("PGK", "Kina"),
("LAK", "Kip"),
("KWD", "Kuwaiti Dinar"),
("AOA", "Kwanza"),
("MMK", "Kyat"),
("GEL", "Lari"),
("LVL", "Latvian Lats"),
("LBP", "Lebanese Pound"),
("ALL", "Lek"),
("HNL", "Lempira"),
("SLL", "Leone"),
("LSL", "Lesotho loti"),
("LRD", "Liberian Dollar"),
("LYD", "Libyan Dinar"),
("SZL", "Lilangeni"),
("LTL", "Lithuanian Litas"),
("MGA", "Malagasy Ariary"),
("MWK", "Malawian Kwacha"),
("MYR", "Malaysian Ringgit"),
("TMM", "Manat"),
("MUR", "Mauritius Rupee"),
("MZN", "Metical"),
("MXV", "Mexican Unidad de Inversion (UDI)"),
("MXN", "Mexican peso"),
("MDL", "Moldovan Leu"),
("MAD", "Moroccan Dirham"),
("BOV", "Mvdol"),
("NGN", "Naira"),
("ERN", "Nakfa"),
("NAD", "Namibian Dollar"),
("NPR", "Nepalese Rupee"),
("ANG", "Netherlands Antillian Guilder"),
("ILS", "New Israeli Sheqel"),
("RON", "New Leu"),
("TWD", "New Taiwan Dollar"),
("NZD", "New Zealand Dollar"),
("KPW", "North Korean Won"),
("NOK", "Norwegian Krone"),
("PEN", "Nuevo Sol"),
("MRO", "Ouguiya"),
("TOP", "Paanga"),
("PKR", "Pakistan Rupee"),
("XPD", "Palladium"),
("MOP", "Pataca"),
("PHP", "Philippine Peso"),
("XPT", "Platinum"),
("GBP", "Pound Sterling"),
("BWP", "Pula"),
("QAR", "Qatari Rial"),
("GTQ", "Quetzal"),
("ZAR", "Rand"),
("OMR", "Rial Omani"),
("KHR", "Riel"),
("MVR", "Rufiyaa"),
("IDR", "Rupiah"),
("RUB", "Russian Ruble"),
("RWF", "Rwanda Franc"),
("XDR", "SDR"),
("SHP", "Saint Helena Pound"),
("SAR", "Saudi Riyal"),
("RSD", "Serbian Dinar"),
("SCR", "Seychelles Rupee"),
("XAG", "Silver"),
("SGD", "Singapore Dollar"),
("SBD", "Solomon Islands Dollar"),
("KGS", "Som"),
("SOS", "Somali Shilling"),
("TJS", "Somoni"),
("SSP", "South Sudanese Pound"),
("LKR", "Sri Lanka Rupee"),
("XSU", "Sucre"),
("SDG", "Sudanese Pound"),
("SRD", "Surinam Dollar"),
("SEK", "Swedish Krona"),
("CHF", "Swiss Franc"),
("SYP", "Syrian Pound"),
("BDT", "Taka"),
("WST", "Tala"),
("TZS", "Tanzanian Shilling"),
("KZT", "Tenge"),
(
"XXX",
"The codes assigned for transactions where no currency is involved",
),
("TTD", "Trinidad and Tobago Dollar"),
("MNT", "Tugrik"),
("TND", "Tunisian Dinar"),
("TRY", "Turkish Lira"),
("TMT", "Turkmenistan New Manat"),
("TVD", "Tuvalu dollar"),
("AED", "UAE Dirham"),
("XFU", "UIC-Franc"),
("USD", "US Dollar"),
("USN", "US Dollar (Next day)"),
("UGX", "Uganda Shilling"),
("CLF", "Unidad de Fomento"),
("COU", "Unidad de Valor Real"),
("UYI", "Uruguay Peso en Unidades Indexadas (URUIURUI)"),
("UYU", "Uruguayan peso"),
("UZS", "Uzbekistan Sum"),
("VUV", "Vatu"),
("CHE", "WIR Euro"),
("CHW", "WIR Franc"),
("KRW", "Won"),
("YER", "Yemeni Rial"),
("JPY", "Yen"),
("CNY", "Yuan Renminbi"),
("ZMK", "Zambian Kwacha"),
("ZMW", "Zambian Kwacha"),
("ZWD", "Zimbabwe Dollar A/06"),
("ZWN", "Zimbabwe dollar A/08"),
("ZWL", "Zimbabwe dollar A/09"),
("PLN", "Zloty"),
],
default="CHF",
editable=False,
max_length=3,
),
),
migrations.AddField(
model_name="task",
name="amount_offered",
field=djmoney.models.fields.MoneyField(
blank=True,
decimal_places=2,
default_currency="CHF",
max_digits=10,
null=True,
),
),
migrations.AddField(
model_name="task",
name="amount_offered_currency",
field=djmoney.models.fields.CurrencyField(
choices=[
("XUA", "ADB Unit of Account"),
("AFN", "Afghani"),
("DZD", "Algerian Dinar"),
("ARS", "Argentine Peso"),
("AMD", "Armenian Dram"),
("AWG", "Aruban Guilder"),
("AUD", "Australian Dollar"),
("AZN", "Azerbaijanian Manat"),
("BSD", "Bahamian Dollar"),
("BHD", "Bahraini Dinar"),
("THB", "Baht"),
("PAB", "Balboa"),
("BBD", "Barbados Dollar"),
("BYN", "Belarussian Ruble"),
("BYR", "Belarussian Ruble"),
("BZD", "Belize Dollar"),
("BMD", "Bermudian Dollar (customarily known as Bermuda Dollar)"),
("BTN", "Bhutanese ngultrum"),
("VEF", "Bolivar Fuerte"),
("BOB", "Boliviano"),
("XBA", "Bond Markets Units European Composite Unit (EURCO)"),
("BRL", "Brazilian Real"),
("BND", "Brunei Dollar"),
("BGN", "Bulgarian Lev"),
("BIF", "Burundi Franc"),
("XOF", "CFA Franc BCEAO"),
("XAF", "CFA franc BEAC"),
("XPF", "CFP Franc"),
("CAD", "Canadian Dollar"),
("CVE", "Cape Verde Escudo"),
("KYD", "Cayman Islands Dollar"),
("CLP", "Chilean peso"),
("XTS", "Codes specifically reserved for testing purposes"),
("COP", "Colombian peso"),
("KMF", "Comoro Franc"),
("CDF", "Congolese franc"),
("BAM", "Convertible Marks"),
("NIO", "Cordoba Oro"),
("CRC", "Costa Rican Colon"),
("HRK", "Croatian Kuna"),
("CUP", "Cuban Peso"),
("CUC", "Cuban convertible peso"),
("CZK", "Czech Koruna"),
("GMD", "Dalasi"),
("DKK", "Danish Krone"),
("MKD", "Denar"),
("DJF", "Djibouti Franc"),
("STD", "Dobra"),
("DOP", "Dominican Peso"),
("VND", "Dong"),
("XCD", "East Caribbean Dollar"),
("EGP", "Egyptian Pound"),
("SVC", "El Salvador Colon"),
("ETB", "Ethiopian Birr"),
("EUR", "Euro"),
("XBB", "European Monetary Unit (E.M.U.-6)"),
("XBD", "European Unit of Account 17(E.U.A.-17)"),
("XBC", "European Unit of Account 9(E.U.A.-9)"),
("FKP", "Falkland Islands Pound"),
("FJD", "Fiji Dollar"),
("HUF", "Forint"),
("GHS", "Ghana Cedi"),
("GIP", "Gibraltar Pound"),
("XAU", "Gold"),
("XFO", "Gold-Franc"),
("PYG", "Guarani"),
("GNF", "Guinea Franc"),
("GYD", "Guyana Dollar"),
("HTG", "Haitian gourde"),
("HKD", "Hong Kong Dollar"),
("UAH", "Hryvnia"),
("ISK", "Iceland Krona"),
("INR", "Indian Rupee"),
("IRR", "Iranian Rial"),
("IQD", "Iraqi Dinar"),
("IMP", "Isle of Man Pound"),
("JMD", "Jamaican Dollar"),
("JOD", "Jordanian Dinar"),
("KES", "Kenyan Shilling"),
("PGK", "Kina"),
("LAK", "Kip"),
("KWD", "Kuwaiti Dinar"),
("AOA", "Kwanza"),
("MMK", "Kyat"),
("GEL", "Lari"),
("LVL", "Latvian Lats"),
("LBP", "Lebanese Pound"),
("ALL", "Lek"),
("HNL", "Lempira"),
("SLL", "Leone"),
("LSL", "Lesotho loti"),
("LRD", "Liberian Dollar"),
("LYD", "Libyan Dinar"),
("SZL", "Lilangeni"),
("LTL", "Lithuanian Litas"),
("MGA", "Malagasy Ariary"),
("MWK", "Malawian Kwacha"),
("MYR", "Malaysian Ringgit"),
("TMM", "Manat"),
("MUR", "Mauritius Rupee"),
("MZN", "Metical"),
("MXV", "Mexican Unidad de Inversion (UDI)"),
("MXN", "Mexican peso"),
("MDL", "Moldovan Leu"),
("MAD", "Moroccan Dirham"),
("BOV", "Mvdol"),
("NGN", "Naira"),
("ERN", "Nakfa"),
("NAD", "Namibian Dollar"),
("NPR", "Nepalese Rupee"),
("ANG", "Netherlands Antillian Guilder"),
("ILS", "New Israeli Sheqel"),
("RON", "New Leu"),
("TWD", "New Taiwan Dollar"),
("NZD", "New Zealand Dollar"),
("KPW", "North Korean Won"),
("NOK", "Norwegian Krone"),
("PEN", "Nuevo Sol"),
("MRO", "Ouguiya"),
("TOP", "Paanga"),
("PKR", "Pakistan Rupee"),
("XPD", "Palladium"),
("MOP", "Pataca"),
("PHP", "Philippine Peso"),
("XPT", "Platinum"),
("GBP", "Pound Sterling"),
("BWP", "Pula"),
("QAR", "Qatari Rial"),
("GTQ", "Quetzal"),
("ZAR", "Rand"),
("OMR", "Rial Omani"),
("KHR", "Riel"),
("MVR", "Rufiyaa"),
("IDR", "Rupiah"),
("RUB", "Russian Ruble"),
("RWF", "Rwanda Franc"),
("XDR", "SDR"),
("SHP", "Saint Helena Pound"),
("SAR", "Saudi Riyal"),
("RSD", "Serbian Dinar"),
("SCR", "Seychelles Rupee"),
("XAG", "Silver"),
("SGD", "Singapore Dollar"),
("SBD", "Solomon Islands Dollar"),
("KGS", "Som"),
("SOS", "Somali Shilling"),
("TJS", "Somoni"),
("SSP", "South Sudanese Pound"),
("LKR", "Sri Lanka Rupee"),
("XSU", "Sucre"),
("SDG", "Sudanese Pound"),
("SRD", "Surinam Dollar"),
("SEK", "Swedish Krona"),
("CHF", "Swiss Franc"),
("SYP", "Syrian Pound"),
("BDT", "Taka"),
("WST", "Tala"),
("TZS", "Tanzanian Shilling"),
("KZT", "Tenge"),
(
"XXX",
"The codes assigned for transactions where no currency is involved",
),
("TTD", "Trinidad and Tobago Dollar"),
("MNT", "Tugrik"),
("TND", "Tunisian Dinar"),
("TRY", "Turkish Lira"),
("TMT", "Turkmenistan New Manat"),
("TVD", "Tuvalu dollar"),
("AED", "UAE Dirham"),
("XFU", "UIC-Franc"),
("USD", "US Dollar"),
("USN", "US Dollar (Next day)"),
("UGX", "Uganda Shilling"),
("CLF", "Unidad de Fomento"),
("COU", "Unidad de Valor Real"),
("UYI", "Uruguay Peso en Unidades Indexadas (URUIURUI)"),
("UYU", "Uruguayan peso"),
("UZS", "Uzbekistan Sum"),
("VUV", "Vatu"),
("CHE", "WIR Euro"),
("CHW", "WIR Franc"),
("KRW", "Won"),
("YER", "Yemeni Rial"),
("JPY", "Yen"),
("CNY", "Yuan Renminbi"),
("ZMK", "Zambian Kwacha"),
("ZMW", "Zambian Kwacha"),
("ZWD", "Zimbabwe Dollar A/06"),
("ZWN", "Zimbabwe dollar A/08"),
("ZWL", "Zimbabwe dollar A/09"),
("PLN", "Zloty"),
],
default="CHF",
editable=False,
max_length=3,
),
),
]
| adfinis-sygroup/timed-backend | timed/projects/migrations/0009_auto_20201201_1412.py | Python | agpl-3.0 | 38,360 |
from django.core.exceptions import PermissionDenied
from django.views.decorators.cache import never_cache
from django.views.generic.base import RedirectView
from C4CApplication.views.utils import create_user
class MemberDetailsRedirectView(RedirectView):
url = ""
connected_member = None
def dispatch(self, request, *args, **kwargs):
# Create the object representing the user
if 'email' not in self.request.session:
raise PermissionDenied # HTTP 403
self.connected_member = create_user(self.request.session['email'])
return super(MemberDetailsRedirectView, self).dispatch(request, *args, **kwargs)
@never_cache
def get(self, request, *args, **kwargs):
        member_to_add_as_a_friend_mail = kwargs['pk']
        self.url = "/memberdetails/" + str(member_to_add_as_a_friend_mail)
        self.connected_member.add_favorite(member_to_add_as_a_friend_mail)
        return super(MemberDetailsRedirectView, self).get(request, *args, **kwargs)
 | dsarkozi/care4care-sdp-grp4 | Care4Care/C4CApplication/views/MemberDetailsRedirectView.py | Python | agpl-3.0 | 1,019 |
"""
Specific overrides to the base prod settings to make development easier.
"""
# Silence noisy logs
import logging
from os.path import abspath, dirname, join
from corsheaders.defaults import default_headers as corsheaders_default_headers
# pylint: enable=unicode-format-string # lint-amnesty, pylint: disable=bad-option-value
#####################################################################
from edx_django_utils.plugins import add_plugins
from openedx.core.djangoapps.plugins.constants import ProjectType, SettingsType
from .production import * # pylint: disable=wildcard-import, unused-wildcard-import
# Don't use S3 in devstack, fall back to filesystem
del DEFAULT_FILE_STORAGE
MEDIA_ROOT = "/edx/var/edxapp/uploads"
ORA2_FILEUPLOAD_BACKEND = 'django'
DEBUG = True
USE_I18N = True
DEFAULT_TEMPLATE_ENGINE['OPTIONS']['debug'] = True
LMS_BASE = 'localhost:18000'
CMS_BASE = 'localhost:18010'
SITE_NAME = LMS_BASE
SESSION_COOKIE_NAME = 'lms_sessionid'
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
HTTPS = 'off'
LMS_ROOT_URL = f'http://{LMS_BASE}'
LMS_INTERNAL_ROOT_URL = LMS_ROOT_URL
ENTERPRISE_API_URL = f'{LMS_INTERNAL_ROOT_URL}/enterprise/api/v1/'
IDA_LOGOUT_URI_LIST = [
'http://localhost:18130/logout/', # ecommerce
'http://localhost:18150/logout/', # credentials
'http://localhost:18381/logout/', # discovery
'http://localhost:18010/logout/', # studio
]
################################ LOGGERS ######################################
LOG_OVERRIDES = [
('common.djangoapps.track.contexts', logging.CRITICAL),
('common.djangoapps.track.middleware', logging.CRITICAL),
('lms.djangoapps.discussion.django_comment_client.utils', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Docker does not support the syslog socket at /dev/log. Rely on the console.
LOGGING['handlers']['local'] = LOGGING['handlers']['tracking'] = {
'class': 'logging.NullHandler',
}
LOGGING['loggers']['tracking']['handlers'] = ['console']
################################ EMAIL ########################################
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/edx/src/ace_messages/'
############################ PYFS XBLOCKS SERVICE #############################
# Set configuration for Django pyfilesystem
DJFS = {
'type': 'osfs',
'directory_root': 'lms/static/djpyfs',
'url_root': '/static/djpyfs',
}
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ['debug_toolbar']
MIDDLEWARE += [
'lms.djangoapps.discussion.django_comment_client.utils.QueryCountDebugMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.history.HistoryPanel',
# ProfilingPanel has been intentionally removed for default devstack.py
# runtimes for performance reasons. If you wish to re-enable it in your
# local development environment, please create a new settings file
# that imports and extends devstack.py.
)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar',
}
def should_show_debug_toolbar(request): # lint-amnesty, pylint: disable=missing-function-docstring
# We always want the toolbar on devstack unless running tests from another Docker container
hostname = request.get_host()
if hostname.startswith('edx.devstack.lms:') or hostname.startswith('lms.devstack.edx:'):
return False
return True
########################### PIPELINE #################################
PIPELINE['PIPELINE_ENABLED'] = False
STATICFILES_STORAGE = 'openedx.core.storage.DevelopmentStorage'
# Revert to the default set of finders as we don't want the production pipeline
STATICFILES_FINDERS = [
'openedx.core.djangoapps.theming.finders.ThemeFilesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# Disable JavaScript compression in development
PIPELINE['JS_COMPRESSOR'] = None
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = DEBUG
PIPELINE['SASS_ARGUMENTS'] = '--debug-info'
# Load the development webpack configuration
WEBPACK_CONFIG_PATH = 'webpack.dev.config.js'
########################### VERIFIED CERTIFICATES #################################
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
########################### External REST APIs #################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
########################## SECURITY #######################
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
########################### Milestones #################################
FEATURES['MILESTONES_APP'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
################################ COURSE LICENSES ################################
FEATURES['LICENSING'] = True
########################## Courseware Search #######################
FEATURES['ENABLE_COURSEWARE_SEARCH'] = False
FEATURES['ENABLE_COURSEWARE_SEARCH_FOR_COURSE_STAFF'] = True
SEARCH_ENGINE = 'search.elastic.ElasticSearchEngine'
########################## Dashboard Search #######################
FEATURES['ENABLE_DASHBOARD_SEARCH'] = False
########################## Certificates Web/HTML View #######################
FEATURES['CERTIFICATES_HTML_VIEW'] = True
########################## Course Discovery #######################
LANGUAGE_MAP = {
'terms': dict(ALL_LANGUAGES),
'name': 'Language',
}
COURSE_DISCOVERY_MEANINGS = {
'org': {
'name': 'Organization',
},
'modes': {
'name': 'Course Type',
'terms': {
'honor': 'Honor',
'verified': 'Verified',
},
},
'language': LANGUAGE_MAP,
}
FEATURES['ENABLE_COURSE_DISCOVERY'] = False
# Setting for overriding default filtering facets for Course discovery
# COURSE_DISCOVERY_FILTERS = ["org", "language", "modes"]
FEATURES['COURSES_ARE_BROWSEABLE'] = True
HOMEPAGE_COURSE_MAX = 9
# Software secure fake page feature flag
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
# Setting for the testing of Software Secure Result Callback
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH = "verify_student_disable_account_activation_requirement"
# Skip enrollment start date filtering
SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING = True
########################## Shopping cart ##########################
FEATURES['ENABLE_COSMETIC_DISPLAY_PRICE'] = True
######################### Program Enrollments #####################
FEATURES['ENABLE_ENROLLMENT_RESET'] = True
########################## Third Party Auth #######################
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and (
'common.djangoapps.third_party_auth.dummy.DummyBackend' not in AUTHENTICATION_BACKENDS
):
AUTHENTICATION_BACKENDS = ['common.djangoapps.third_party_auth.dummy.DummyBackend'] + list(AUTHENTICATION_BACKENDS)
############## ECOMMERCE API CONFIGURATION SETTINGS ###############
ECOMMERCE_PUBLIC_URL_ROOT = 'http://localhost:18130'
ECOMMERCE_API_URL = 'http://edx.devstack.ecommerce:18130/api/v2'
############## Comments CONFIGURATION SETTINGS ###############
COMMENTS_SERVICE_URL = 'http://edx.devstack.forum:4567'
############## Credentials CONFIGURATION SETTINGS ###############
CREDENTIALS_INTERNAL_SERVICE_URL = 'http://edx.devstack.credentials:18150'
CREDENTIALS_PUBLIC_SERVICE_URL = 'http://localhost:18150'
############################### BLOCKSTORE #####################################
BLOCKSTORE_API_URL = "http://edx.devstack.blockstore:18250/api/v1/"
########################## PROGRAMS LEARNER PORTAL ##############################
LEARNER_PORTAL_URL_ROOT = 'http://localhost:8734'
########################## ENTERPRISE LEARNER PORTAL ##############################
ENTERPRISE_LEARNER_PORTAL_NETLOC = 'localhost:8734'
ENTERPRISE_LEARNER_PORTAL_BASE_URL = 'http://' + ENTERPRISE_LEARNER_PORTAL_NETLOC
########################## ENTERPRISE ADMIN PORTAL ##############################
ENTERPRISE_ADMIN_PORTAL_NETLOC = 'localhost:1991'
ENTERPRISE_ADMIN_PORTAL_BASE_URL = 'http://' + ENTERPRISE_ADMIN_PORTAL_NETLOC
###################### Cross-domain requests ######################
FEATURES['ENABLE_CORS_HEADERS'] = True
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ()
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_HEADERS = corsheaders_default_headers + (
'use-jwt-cookie',
)
LOGIN_REDIRECT_WHITELIST.extend([
CMS_BASE,
# Allow redirection to all micro-frontends.
# Please add your MFE if is not already listed here.
# Note: For this to work, the MFE must set BASE_URL in its .env.development to:
# BASE_URL=http://localhost:$PORT
# as opposed to:
# BASE_URL=localhost:$PORT
'localhost:1997', # frontend-app-account
'localhost:1976', # frontend-app-program-console
'localhost:1994', # frontend-app-gradebook
'localhost:2000', # frontend-app-learning
'localhost:2001', # frontend-app-course-authoring
'localhost:3001', # frontend-app-library-authoring
'localhost:18400', # frontend-app-publisher
'localhost:1993', # frontend-app-ora-grading
ENTERPRISE_LEARNER_PORTAL_NETLOC, # frontend-app-learner-portal-enterprise
ENTERPRISE_ADMIN_PORTAL_NETLOC, # frontend-app-admin-portal
])
###################### JWTs ######################
JWT_AUTH.update({
'JWT_AUDIENCE': 'lms-key',
'JWT_ISSUER': f'{LMS_ROOT_URL}/oauth2',
'JWT_ISSUERS': [{
'AUDIENCE': 'lms-key',
'ISSUER': f'{LMS_ROOT_URL}/oauth2',
'SECRET_KEY': 'lms-secret',
}],
'JWT_SECRET_KEY': 'lms-secret',
'JWT_SIGNING_ALGORITHM': 'RS512',
'JWT_PRIVATE_SIGNING_JWK': (
'{"e": "AQAB", "d": "RQ6k4NpRU3RB2lhwCbQ452W86bMMQiPsa7EJiFJUg-qBJthN0FMNQVbArtrCQ0xA1BdnQHThFiUnHcXfsTZUwmwvTu'
'iqEGR_MI6aI7h5D8vRj_5x-pxOz-0MCB8TY8dcuK9FkljmgtYvV9flVzCk_uUb3ZJIBVyIW8En7n7nV7JXpS9zey1yVLld2AbRG6W5--Pgqr9J'
'CI5-bLdc2otCLuen2sKyuUDHO5NIj30qGTaKUL-OW_PgVmxrwKwccF3w5uGNEvMQ-IcicosCOvzBwdIm1uhdm9rnHU1-fXz8VLRHNhGVv7z6mo'
'ghjNI0_u4smhUkEsYeshPv7RQEWTdkOQ", "n": "smKFSYowG6nNUAdeqH1jQQnH1PmIHphzBmwJ5vRf1vu48BUI5VcVtUWIPqzRK_LDSlZYh'
'9D0YFL0ZTxIrlb6Tn3Xz7pYvpIAeYuQv3_H5p8tbz7Fb8r63c1828wXPITVTv8f7oxx5W3lFFgpFAyYMmROC4Ee9qG5T38LFe8_oAuFCEntimW'
'xN9F3P-FJQy43TL7wG54WodgiM0EgzkeLr5K6cDnyckWjTuZbWI-4ffcTgTZsL_Kq1owa_J2ngEfxMCObnzGy5ZLcTUomo4rZLjghVpq6KZxfS'
'6I1Vz79ZsMVUWEdXOYePCKKsrQG20ogQEkmTf9FT_SouC6jPcHLXw", "q": "7KWj7l-ZkfCElyfvwsl7kiosvi-ppOO7Imsv90cribf88Dex'
'cO67xdMPesjM9Nh5X209IT-TzbsOtVTXSQyEsy42NY72WETnd1_nAGLAmfxGdo8VV4ZDnRsA8N8POnWjRDwYlVBUEEeuT_MtMWzwIKU94bzkWV'
'nHCY5vbhBYLeM", "p": "wPkfnjavNV1Hqb5Qqj2crBS9HQS6GDQIZ7WF9hlBb2ofDNe2K2dunddFqCOdvLXr7ydRcK51ZwSeHjcjgD1aJkHA'
'9i1zqyboxgd0uAbxVDo6ohnlVqYLtap2tXXcavKm4C9MTpob_rk6FBfEuq4uSsuxFvCER4yG3CYBBa4gZVU", "kid": "devstack_key", "'
'kty": "RSA"}'
),
'JWT_PUBLIC_SIGNING_JWK_SET': (
'{"keys": [{"kid": "devstack_key", "e": "AQAB", "kty": "RSA", "n": "smKFSYowG6nNUAdeqH1jQQnH1PmIHphzBmwJ5vRf1vu'
'48BUI5VcVtUWIPqzRK_LDSlZYh9D0YFL0ZTxIrlb6Tn3Xz7pYvpIAeYuQv3_H5p8tbz7Fb8r63c1828wXPITVTv8f7oxx5W3lFFgpFAyYMmROC'
'4Ee9qG5T38LFe8_oAuFCEntimWxN9F3P-FJQy43TL7wG54WodgiM0EgzkeLr5K6cDnyckWjTuZbWI-4ffcTgTZsL_Kq1owa_J2ngEfxMCObnzG'
'y5ZLcTUomo4rZLjghVpq6KZxfS6I1Vz79ZsMVUWEdXOYePCKKsrQG20ogQEkmTf9FT_SouC6jPcHLXw"}]}'
),
})
add_plugins(__name__, ProjectType.LMS, SettingsType.DEVSTACK)
######################### Django Rest Framework ########################
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] += (
'rest_framework.renderers.BrowsableAPIRenderer',
)
OPENAPI_CACHE_TIMEOUT = 0
#####################################################################
# Lastly, run any migrations, if needed.
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
EDXNOTES_INTERNAL_API = 'http://edx.devstack.edxnotesapi:18120/api/v1'
EDXNOTES_CLIENT_NAME = 'edx_notes_api-backend-service'
############## Settings for Microfrontends #########################
LEARNING_MICROFRONTEND_URL = 'http://localhost:2000'
ACCOUNT_MICROFRONTEND_URL = 'http://localhost:1997'
AUTHN_MICROFRONTEND_URL = 'http://localhost:1999'
AUTHN_MICROFRONTEND_DOMAIN = 'localhost:1999'
################### FRONTEND APPLICATION DISCUSSIONS ###################
DISCUSSIONS_MICROFRONTEND_URL = 'http://localhost:2002'
################### FRONTEND APPLICATION DISCUSSIONS FEEDBACK URL###################
DISCUSSIONS_MFE_FEEDBACK_URL = None
############## Docker based devstack settings #######################
FEATURES.update({
'AUTOMATIC_AUTH_FOR_TESTING': True,
'ENABLE_DISCUSSION_SERVICE': True,
'SHOW_HEADER_LANGUAGE_SELECTOR': True,
# Enable enterprise integration by default.
# See https://github.com/edx/edx-enterprise/blob/master/docs/development.rst for
# more background on edx-enterprise.
# Toggle this off if you don't want anything to do with enterprise in devstack.
'ENABLE_ENTERPRISE_INTEGRATION': True,
})
ENABLE_MKTG_SITE = os.environ.get('ENABLE_MARKETING_SITE', False)
MARKETING_SITE_ROOT = os.environ.get('MARKETING_SITE_ROOT', 'http://localhost:8080')
MKTG_URLS = {
'ABOUT': '/about',
'ACCESSIBILITY': '/accessibility',
'AFFILIATES': '/affiliate-program',
'BLOG': '/blog',
'CAREERS': '/careers',
'CONTACT': '/support/contact_us',
'COURSES': '/course',
'DONATE': '/donate',
'ENTERPRISE': '/enterprise',
'FAQ': '/student-faq',
'HONOR': '/edx-terms-service',
'HOW_IT_WORKS': '/how-it-works',
'MEDIA_KIT': '/media-kit',
'NEWS': '/news-announcements',
'PRESS': '/press',
'PRIVACY': '/edx-privacy-policy',
'ROOT': MARKETING_SITE_ROOT,
'SCHOOLS': '/schools-partners',
'SITE_MAP': '/sitemap',
'TRADEMARKS': '/trademarks',
'TOS': '/edx-terms-service',
'TOS_AND_HONOR': '/edx-terms-service',
'WHAT_IS_VERIFIED_CERT': '/verified-certificate',
}
ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS = {}
CREDENTIALS_SERVICE_USERNAME = 'credentials_worker'
COURSE_CATALOG_URL_ROOT = 'http://edx.devstack.discovery:18381'
COURSE_CATALOG_API_URL = f'{COURSE_CATALOG_URL_ROOT}/api/v1'
SYSTEM_WIDE_ROLE_CLASSES = os.environ.get("SYSTEM_WIDE_ROLE_CLASSES", SYSTEM_WIDE_ROLE_CLASSES)
SYSTEM_WIDE_ROLE_CLASSES.append(
'system_wide_roles.SystemWideRoleAssignment',
)
if FEATURES.get('ENABLE_ENTERPRISE_INTEGRATION'):
SYSTEM_WIDE_ROLE_CLASSES.append(
'enterprise.SystemWideEnterpriseUserRoleAssignment',
)
#####################################################################
# django-session-cookie middleware
DCS_SESSION_COOKIE_SAMESITE = 'Lax'
DCS_SESSION_COOKIE_SAMESITE_FORCE_ALL = True
########################## THEMING #######################
# If you want to enable theming in devstack, uncomment this section and add any relevant
# theme directories to COMPREHENSIVE_THEME_DIRS
# We have to import the private method here because production.py calls
# derive_settings('lms.envs.production'), which runs _make_mako_template_dirs with
# the settings from production, and those don't include these theming settings. Thus,
# the templating engine is unable to find the themed templates because they don't exist
# in its path. Re-calling derive_settings doesn't work because the setting was already
# changed from a function to a list, and it can't be derived again.
# from .common import _make_mako_template_dirs
# ENABLE_COMPREHENSIVE_THEMING = True
# COMPREHENSIVE_THEME_DIRS = [
# "/edx/app/edxapp/edx-platform/themes/"
# ]
# TEMPLATES[1]["DIRS"] = _make_mako_template_dirs
# derive_settings(__name__)
# Uncomment the lines below if you'd like to see SQL statements in your devstack LMS log.
# LOGGING['handlers']['console']['level'] = 'DEBUG'
# LOGGING['loggers']['django.db.backends'] = {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False}
################### Special Exams (Proctoring) and Prereqs ###################
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
FEATURES['ENABLE_PREREQUISITE_COURSES'] = True
# Used in edx-proctoring for ID generation in lieu of SECRET_KEY - dummy value
# (ref MST-637)
PROCTORING_USER_OBFUSCATION_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
#################### Webpack Configuration Settings ##############################
WEBPACK_LOADER['DEFAULT']['TIMEOUT'] = 5
################# New settings must go ABOVE this line #################
########################################################################
# See if the developer has any local overrides.
if os.path.isfile(join(dirname(abspath(__file__)), 'private.py')):
from .private import * # pylint: disable=import-error,wildcard-import
| eduNEXT/edx-platform | lms/envs/devstack.py | Python | agpl-3.0 | 17,710 |
from unittest.mock import ANY, patch
from django.test import override_settings
from geoip2.errors import AddressNotFoundError
from rest_framework import status
from rest_framework.test import APITestCase
from karrot.groups.factories import GroupFactory
from karrot.users.factories import UserFactory
from karrot.utils.geoip import ip_to_city
from karrot.utils.tests.fake import faker
OVERRIDE_SETTINGS = {
'SENTRY_CLIENT_DSN': faker.name(),
'SENTRY_ENVIRONMENT': faker.name(),
'FCM_CLIENT_API_KEY': faker.name(),
'FCM_CLIENT_MESSAGING_SENDER_ID': faker.name(),
'FCM_CLIENT_PROJECT_ID': faker.name(),
'FCM_CLIENT_APP_ID': faker.name(),
}
class TestConfigAPI(APITestCase):
def test_default_config(self):
response = self.client.get('/api/config/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data, {
'fcm': {
'api_key': None,
'messaging_sender_id': None,
'project_id': None,
'app_id': None,
},
'sentry': {
'dsn': None,
'environment': 'production',
},
}, response.data
)
@override_settings(**OVERRIDE_SETTINGS)
def test_config_with_overrides(self):
response = self.client.get('/api/config/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data, {
'fcm': {
'api_key': OVERRIDE_SETTINGS['FCM_CLIENT_API_KEY'],
'messaging_sender_id': OVERRIDE_SETTINGS['FCM_CLIENT_MESSAGING_SENDER_ID'],
'project_id': OVERRIDE_SETTINGS['FCM_CLIENT_PROJECT_ID'],
'app_id': OVERRIDE_SETTINGS['FCM_CLIENT_APP_ID'],
},
'sentry': {
'dsn': OVERRIDE_SETTINGS['SENTRY_CLIENT_DSN'],
'environment': OVERRIDE_SETTINGS['SENTRY_ENVIRONMENT'],
},
}, response.data
)
class TestBootstrapAPI(APITestCase):
def setUp(self):
self.user = UserFactory()
self.member = UserFactory()
self.group = GroupFactory(members=[self.member], application_questions='')
self.url = '/api/bootstrap/'
self.client_ip = '2003:d9:ef08:4a00:4b7a:7964:8a3c:a33e'
ip_to_city.cache_clear() # prevent getting cached mock values
def tearDown(self):
ip_to_city.cache_clear()
def test_as_anon(self):
with self.assertNumQueries(1):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['server'], ANY)
self.assertEqual(response.data['config'], ANY)
self.assertEqual(response.data['user'], None)
self.assertEqual(response.data['geoip'], None)
self.assertEqual(response.data['groups'], ANY)
@patch('karrot.utils.geoip.geoip')
def test_with_geoip(self, geoip):
lat_lng = [float(val) for val in faker.latlng()]
city = {'latitude': lat_lng[0], 'longitude': lat_lng[1], 'country_code': 'AA', 'time_zone': 'Europe/Berlin'}
geoip.city.return_value = city
response = self.client.get(self.url, HTTP_X_FORWARDED_FOR=self.client_ip)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
dict(response.data['geoip']), {
'lat': city['latitude'],
'lng': city['longitude'],
'country_code': city['country_code'],
'timezone': city['time_zone'],
}
)
@patch('karrot.utils.geoip.geoip')
def test_without_geoip(self, geoip):
geoip.city.side_effect = AddressNotFoundError
response = self.client.get(self.url, HTTP_X_FORWARDED_FOR=self.client_ip)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIsNone(response.data['geoip'])
def test_when_logged_in(self):
self.client.force_login(user=self.user)
with self.assertNumQueries(2):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['user']['id'], self.user.id)
| yunity/foodsaving-backend | karrot/bootstrap/tests/test_api.py | Python | agpl-3.0 | 4,392 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import superdesk
from flask import current_app as app
from settings import DAYS_TO_KEEP
from datetime import timedelta
from werkzeug.exceptions import HTTPException
from superdesk.notification import push_notification
from superdesk.io import providers
from superdesk.celery_app import celery
from superdesk.utc import utcnow
from superdesk.workflow import set_default_state
from superdesk.errors import ProviderError
from superdesk.stats import stats
from superdesk.upload import url_for_media
from superdesk.media.media_operations import download_file_from_url, process_file
from superdesk.media.renditions import generate_renditions
UPDATE_SCHEDULE_DEFAULT = {'minutes': 5}
LAST_UPDATED = 'last_updated'
STATE_INGESTED = 'ingested'
logger = logging.getLogger(__name__)
superdesk.workflow_state(STATE_INGESTED)
superdesk.workflow_action(
name='ingest'
)
def is_valid_type(provider, provider_type_filter=None):
"""Test if given provider has valid type and should be updated.
:param provider: provider to be updated
:param provider_type_filter: active provider type filter
"""
provider_type = provider.get('type')
if provider_type not in providers:
return False
if provider_type_filter and provider_type != provider_type_filter:
return False
return True
def is_scheduled(provider):
"""Test if given provider should be scheduled for update.
:param provider: ingest provider
"""
now = utcnow()
last_updated = provider.get(LAST_UPDATED, now - timedelta(days=100)) # if never updated run now
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return last_updated + timedelta(**update_schedule) < now
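# Worked example (an assumption, not part of the original module):
#
#   provider = {LAST_UPDATED: utcnow() - timedelta(minutes=6)}
#   is_scheduled(provider)  # True: the default {'minutes': 5} schedule has elapsed
#
#   provider = {LAST_UPDATED: utcnow(), 'update_schedule': {'hours': 1}}
#   is_scheduled(provider)  # False: not due again for another hour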
def is_closed(provider):
"""Test if provider is closed.
:param provider: ingest provider
"""
return provider.get('is_closed', False)
def filter_expired_items(provider, items):
try:
days_to_keep_content = provider.get('days_to_keep', DAYS_TO_KEEP)
expiration_date = utcnow() - timedelta(days=days_to_keep_content)
return [item for item in items if item.get('versioncreated', utcnow()) > expiration_date]
except Exception as ex:
raise ProviderError.providerFilterExpiredContentError(ex, provider)
def get_provider_rule_set(provider):
if provider.get('rule_set'):
return superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
def get_task_ttl(provider):
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return update_schedule.get('minutes', 0) * 60 + update_schedule.get('hours', 0) * 3600
def get_task_id(provider):
return 'update-ingest-{0}-{1}'.format(provider.get('name'), provider.get('_id'))
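# Worked example (an assumption, not part of the original module; the provider
# values are placeholders):
#
#   provider = {'name': 'reuters', '_id': '5a1', 'update_schedule': {'minutes': 5}}
#   get_task_ttl(provider)  # -> 300 (seconds)
#   get_task_id(provider)   # -> 'update-ingest-reuters-5a1'
#
# Tying the celery task id to the provider prevents queueing the same provider
# twice, and the ttl expires a stale task once the next schedule slot arrives.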
class UpdateIngest(superdesk.Command):
"""Update ingest providers."""
option_list = (
superdesk.Option('--provider', '-p', dest='provider_type'),
)
def run(self, provider_type=None):
for provider in superdesk.get_resource_service('ingest_providers').get(req=None, lookup={}):
if is_valid_type(provider, provider_type) and is_scheduled(provider) and not is_closed(provider):
kwargs = {
'provider': provider,
'rule_set': get_provider_rule_set(provider)
}
update_provider.apply_async(
task_id=get_task_id(provider),
expires=get_task_ttl(provider),
kwargs=kwargs)
@celery.task
def update_provider(provider, rule_set=None):
"""
Fetches items from ingest provider as per the configuration, ingests them into Superdesk and
updates the provider.
"""
superdesk.get_resource_service('ingest_providers').update(provider['_id'], {
LAST_UPDATED: utcnow(),
# Providing the _etag as system updates to the documents shouldn't override _etag.
app.config['ETAG']: provider.get(app.config['ETAG'])
})
for items in providers[provider.get('type')].update(provider):
ingest_items(items, provider, rule_set)
stats.incr('ingest.ingested_items', len(items))
logger.info('Provider {0} updated'.format(provider['_id']))
push_notification('ingest:update')
def process_anpa_category(item, provider):
try:
anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
if anpa_categories:
for anpa_category in anpa_categories['items']:
if anpa_category['is_active'] is True \
and item['anpa-category']['qcode'].lower() == anpa_category['value'].lower():
item['anpa-category'] = {'qcode': item['anpa-category']['qcode'], 'name': anpa_category['name']}
break
except Exception as ex:
raise ProviderError.anpaError(ex, provider)
def apply_rule_set(item, provider, rule_set=None):
"""
    Applies the rule set to the item being ingested. If there's no rule set, the item
    is returned unchanged.
    :param item: Item to be ingested
    :param provider: provider object from which the item was received
    :param rule_set: rule set to apply; defaults to the provider's configured rule set
    :return: item
"""
try:
if rule_set is None and provider.get('rule_set') is not None:
rule_set = superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
if rule_set and 'body_html' in item:
body = item['body_html']
for rule in rule_set['rules']:
body = body.replace(rule['old'], rule['new'])
item['body_html'] = body
return item
except Exception as ex:
raise ProviderError.ruleError(ex, provider)
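# Illustrative sketch (an assumption, not part of the original module):
#
#   rules = {'rules': [{'old': 'colour', 'new': 'color'}]}
#   item = {'body_html': 'The colour of the sky'}
#   apply_rule_set(item, provider={}, rule_set=rules)
#   # item['body_html'] == 'The color of the sky'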
def ingest_items(items, provider, rule_set=None):
all_items = filter_expired_items(provider, items)
items_dict = {doc['guid']: doc for doc in all_items}
for item in [doc for doc in all_items if doc.get('type') != 'composite']:
ingest_item(item, provider, rule_set)
for item in [doc for doc in all_items if doc.get('type') == 'composite']:
for ref in [ref for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]:
ref.setdefault('location', 'ingest')
itemRendition = items_dict.get(ref['residRef'], {}).get('renditions')
if itemRendition:
ref.setdefault('renditions', itemRendition)
ingest_item(item, provider, rule_set)
def ingest_item(item, provider, rule_set=None):
try:
item.setdefault('_id', item['guid'])
providers[provider.get('type')].provider = provider
item['ingest_provider'] = str(provider['_id'])
item.setdefault('source', provider.get('source', ''))
set_default_state(item, STATE_INGESTED)
if 'anpa-category' in item:
process_anpa_category(item, provider)
apply_rule_set(item, provider, rule_set)
ingest_service = superdesk.get_resource_service('ingest')
if item.get('ingest_provider_sequence') is None:
ingest_service.set_ingest_provider_sequence(item, provider)
rend = item.get('renditions', {})
if rend:
baseImageRend = rend.get('baseImage') or next(iter(rend.values()))
if baseImageRend:
href = providers[provider.get('type')].prepare_href(baseImageRend['href'])
update_renditions(item, href)
old_item = ingest_service.find_one(_id=item['guid'], req=None)
if old_item:
ingest_service.put(item['guid'], item)
else:
try:
ingest_service.post([item])
except HTTPException as e:
logger.error("Exception while persisting item in ingest collection", e)
ingest_service.put(item['guid'], item)
except ProviderError:
raise
except Exception as ex:
raise ProviderError.ingestError(ex, provider)
def update_renditions(item, href):
inserted = []
try:
content, filename, content_type = download_file_from_url(href)
file_type, ext = content_type.split('/')
metadata = process_file(content, file_type)
file_guid = app.media.put(content, filename, content_type, metadata)
inserted.append(file_guid)
rendition_spec = app.config.get('RENDITIONS', {}).get('picture', {})
renditions = generate_renditions(content, file_guid, inserted, file_type,
content_type, rendition_spec, url_for_media)
item['renditions'] = renditions
item['mimetype'] = content_type
item['filemeta'] = metadata
except Exception as io:
logger.exception(io)
for file_id in inserted:
app.media.delete(file_id)
raise
superdesk.command('ingest:update', UpdateIngest())
| petrjasek/superdesk-server | superdesk/io/commands/update_ingest.py | Python | agpl-3.0 | 9,268 |
"""
A Python "serializer". Doesn't do much serializing per se -- just converts to
and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for
other serializers.
"""
from __future__ import unicode_literals
from django.conf import settings
from keops.core.serializers import base
from django.db import models, DEFAULT_DB_ALIAS
from django.utils.encoding import smart_text, is_protected_type
from django.utils import six
class Serializer(base.Serializer):
"""
Serializes a QuerySet to basic Python objects.
"""
internal_use_only = True
def start_serialization(self):
self._current = None
self.objects = []
def end_serialization(self):
pass
def start_object(self, obj):
self._current = {}
def end_object(self, obj):
self.objects.append(self.get_dump_object(obj))
self._current = None
def get_dump_object(self, obj):
return {
"pk": smart_text(obj._get_pk_val(), strings_only=True),
"model": smart_text(obj._meta),
"fields": self._current
}
def handle_field(self, obj, field):
value = field._get_val_from_obj(obj)
# Protected types (i.e., primitives like None, numbers, dates,
# and Decimals) are passed through as is. All other values are
# converted to string first.
if is_protected_type(value):
self._current[field.name] = value
else:
self._current[field.name] = field.value_to_string(obj)
def handle_fk_field(self, obj, field):
if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):
related = getattr(obj, field.name)
if related:
value = related.natural_key()
else:
value = None
else:
value = getattr(obj, field.get_attname())
self._current[field.name] = value
def handle_m2m_field(self, obj, field):
if field.rel.through._meta.auto_created:
if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):
m2m_value = lambda value: value.natural_key()
else:
m2m_value = lambda value: smart_text(value._get_pk_val(), strings_only=True)
self._current[field.name] = [m2m_value(related)
for related in getattr(obj, field.name).iterator()]
def getvalue(self):
return self.objects
def Deserializer(object_list, **options):
"""
Deserialize simple Python objects back into Django ORM instances.
It's expected that you pass the Python objects themselves (instead of a
    stream or a string) to the constructor.
"""
db = options.pop('using', DEFAULT_DB_ALIAS)
ignore = options.pop('ignorenonexistent', True)
models.get_apps()
for d in object_list:
# Look up the model and starting build a dict of data for it.
Model = _get_model(d["model"])
data = {Model._meta.pk.attname: Model._meta.pk.to_python(d.get("pk"))}
rec = _id = None
if 'data-id' in d:
_id = d.pop('data-id')
from keops.modules.base.models import ModelData
try:
obj = ModelData.objects.using(db).get(name=_id)
rec = obj.content_object
except:
obj = ModelData(name=_id)
_id = obj
m2m_data = {}
model_fields = Model._meta.get_all_field_names()
# Handle each field
for (field_name, field_value) in six.iteritems(d["fields"]):
if isinstance(field_value, str):
field_value = smart_text(field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True)
if field_name not in model_fields:
# skip fields no longer on model
data[field_name] = field_value
continue
field = Model._meta.get_field(field_name)
# Handle M2M relations
if field.rel and isinstance(field.rel, models.ManyToManyRel):
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
def m2m_convert(value):
if hasattr(value, '__iter__') and not isinstance(value, six.text_type):
return field.rel.to._default_manager.db_manager(db).get_by_natural_key(*value).pk
else:
return smart_text(field.rel.to._meta.pk.to_python(value))
else:
m2m_convert = lambda v: smart_text(field.rel.to._meta.pk.to_python(v))
m2m_data[field.name] = [m2m_convert(pk) for pk in field_value]
# Handle FK fields
elif field.rel and isinstance(field.rel, models.ManyToOneRel):
if field_value is not None:
if isinstance(field_value, dict):
obj = field.rel.to.objects.db_manager(db).only('id').filter(**field_value)[0]
data[field.attname] = obj.pk
elif hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
if hasattr(field_value, '__iter__') and not isinstance(field_value, six.text_type):
obj = field.rel.to._default_manager.db_manager(db).get_by_natural_key(*field_value)
value = getattr(obj, field.rel.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.rel.to._meta.pk.rel:
value = value.pk
else:
value = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
data[field.attname] = value
else:
data[field.attname] = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
else:
data[field.attname] = None
# Handle all other fields
else:
data[field.name] = field.to_python(field_value)
if not rec:
rec = Model()
rec._state.db = db
for k, v in data.items():
setattr(rec, k, v)
rec.save(using=db, update_fields=None)
if _id:
_id.content_object = rec
_id.save(using=db, update_fields=None)
yield base.DeserializedObject(rec, m2m_data)
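# Round-trip sketch (illustrative): unlike Django's stock Python deserializer,
# this generator saves each instance itself (rec.save above) before yielding
# the DeserializedObject wrapper:
#
#     objects = Serializer().serialize(SomeModel.objects.all())
#     deserialized = list(Deserializer(objects, using='default'))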
def _get_model(model_identifier):
"""
Helper to look up a model from an "app_label.model_name" string.
"""
try:
Model = models.get_model(*model_identifier.split("."))
except TypeError:
Model = None
if Model is None:
raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
return Model
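# e.g. _get_model("auth.user") resolves to django.contrib.auth.models.User,
# while a malformed identifier raises base.DeserializationError.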
| mrmuxl/keops | keops/core/serializers/python.py | Python | agpl-3.0 | 6,979 |
# -*- coding: utf-8 -*-
"""
Tests the "preview" selector in the LMS that allows changing between Staff, Learner, and Content Groups.
"""
from textwrap import dedent
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.tests.helpers import UniqueCourseTest, create_user_partition_json
from openedx.core.lib.tests import attr
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, MINIMUM_STATIC_PARTITION_ID, Group
@attr(shard=20)
class StaffViewTest(UniqueCourseTest):
"""
Tests that verify the staff view.
"""
USERNAME = "STAFF_TESTER"
EMAIL = "[email protected]"
def setUp(self):
super(StaffViewTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with sections/problems, tabs, updates, and handouts
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.populate_course_fixture(self.course_fixture)
self.course_fixture.install()
# Auto-auth register for the course.
# Do this as global staff so that you will see the Staff View
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=True).visit()
def _goto_staff_page(self):
"""
Open staff page with assertion
"""
self.courseware_page.visit()
staff_page = StaffCoursewarePage(self.browser, self.course_id)
self.assertEqual(staff_page.staff_view_mode, 'Staff')
return staff_page
@attr(shard=20)
class CourseWithContentGroupsTest(StaffViewTest):
"""
Verifies that changing the "View this course as" selector works properly for content groups.
"""
def setUp(self):
super(CourseWithContentGroupsTest, self).setUp()
# pylint: disable=protected-access
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
MINIMUM_STATIC_PARTITION_ID,
'Configuration alpha,beta',
'Content Group Partition',
[
Group(MINIMUM_STATIC_PARTITION_ID + 1, 'alpha'),
Group(MINIMUM_STATIC_PARTITION_ID + 2, 'beta')
],
scheme="cohort"
)
],
},
})
def populate_course_fixture(self, course_fixture):
"""
        Populates the test course with a chapter, a sequential, and four problems.
        One problem is visible to everyone, one only to Group "alpha", one only to
        Group "beta", and one only to the "Audit" enrollment track.
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<choiceresponse>
<label>Choose Yes.</label>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
self.alpha_text = "VISIBLE TO ALPHA"
self.beta_text = "VISIBLE TO BETA"
self.audit_text = "VISIBLE TO AUDIT"
self.everyone_text = "VISIBLE TO EVERYONE"
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'problem',
self.alpha_text,
data=problem_data,
metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 1]}}
),
XBlockFixtureDesc(
'problem',
self.beta_text,
data=problem_data,
metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 2]}}
),
XBlockFixtureDesc(
'problem',
self.audit_text,
data=problem_data,
                            # Below, 1 is the hardcoded group ID for the "Audit" enrollment track
metadata={"group_access": {ENROLLMENT_TRACK_PARTITION_ID: [1]}}
),
XBlockFixtureDesc(
'problem',
self.everyone_text,
data=problem_data
)
)
)
)
)
@attr('a11y')
def test_course_page(self):
"""
Run accessibility audit for course staff pages.
"""
course_page = self._goto_staff_page()
course_page.a11y_audit.config.set_rules({
'ignore': [
'aria-allowed-attr', # TODO: AC-559
'aria-roles', # TODO: AC-559,
'aria-valid-attr', # TODO: AC-559
'color-contrast', # TODO: AC-559
'link-href', # TODO: AC-559
'section', # TODO: AC-559
'region', # TODO: AC-932
]
})
course_page.a11y_audit.check_for_accessibility_errors()
| msegado/edx-platform | common/test/acceptance/tests/lms/test_lms_user_preview.py | Python | agpl-3.0 | 5,975 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from crm.models import Person
from geocodable.models import LocationAlias
import uuid
class Event(models.Model):
name = models.CharField(max_length=200)
timestamp = models.DateTimeField()
end_timestamp = models.DateTimeField()
attendees = models.ManyToManyField(Person, related_name='events', blank=True)
uid = models.CharField(max_length=200, blank=True)
location = models.ForeignKey(LocationAlias, default=None, blank=True,
null=True)
instance_id = models.CharField(max_length=200, blank=True)
@property
def geo(self):
return {'lat': self.lat, 'lng': self.lng}
@property
def lat(self):
if self.location is not None:
return self.location.lat
else:
return None
@property
def lng(self):
if self.location is not None:
return self.location.lng
else:
return None
def __unicode__(self):
return "%s (%s)"%(self.name, self.timestamp)
| tdfischer/organizer | events/models.py | Python | agpl-3.0 | 1,124 |
# pylint: disable=arguments-differ
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import json
import analytics
from io import BytesIO
from django.db.models import Q, F
import pytz
import logging
import smtplib
import StringIO
import csv
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from django.core.mail.message import EmailMessage
from xmodule.modulestore.django import modulestore
from eventtracking import tracker
from courseware.courses import get_course_by_id
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE, EnrollStatusChange
from util.query import use_read_replica_if_available
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
from .exceptions import (
InvalidCartItem,
PurchasedCallbackException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
MultipleCouponsNotAllowedException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
ItemNotFoundInCartException
)
from shoppingcart.pdf import PDFInvoice
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
# The user is selecting what he/she wants to purchase.
('cart', 'cart'),
# The user has been sent to the external payment processor.
# At this point, the order should NOT be modified.
# If the user returns to the payment flow, he/she will start a new order.
('paying', 'paying'),
# The user has successfully purchased the items in the order.
('purchased', 'purchased'),
# The user's order has been refunded.
('refunded', 'refunded'),
# The user's order went through, but the order was erroneously left
# in 'cart'.
('defunct-cart', 'defunct-cart'),
# The user's order went through, but the order was erroneously left
# in 'paying'.
('defunct-paying', 'defunct-paying'),
)
# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
'cart': 'defunct-cart',
'paying': 'defunct-paying',
}
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk'])
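# e.g. OrderItemSubclassPK(PaidCourseRegistration, 42) (illustrative values)
# names row 42 of that concrete subclass, which a bare integer pk cannot do
# unambiguously across OrderItem subclasses.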
class OrderTypes(object):
"""
This class specify purchase OrderTypes.
"""
PERSONAL = 'personal'
BUSINESS = 'business'
ORDER_TYPES = (
(PERSONAL, 'personal'),
(BUSINESS, 'business'),
)
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
class Meta(object):
app_label = "shoppingcart"
user = models.ForeignKey(User, db_index=True)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
# bulk purchase registration code workflow billing details
company_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_email = models.CharField(max_length=255, null=True, blank=True)
recipient_name = models.CharField(max_length=255, null=True, blank=True)
recipient_email = models.CharField(max_length=255, null=True, blank=True)
customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
@classmethod
def does_user_have_cart(cls, user):
"""
Returns a boolean whether a shopping cart (Order) exists for the specified user
"""
return cls.objects.filter(user=user, status='cart').exists()
@classmethod
def user_cart_has_items(cls, user, item_types=None):
"""
        Returns True if the user (anonymous user ok) has
        a cart with items in it, which means it should be displayed.
        If item_types is passed in, then we check to see if the cart has at least one of
        those types of OrderItems.
"""
if not user.is_authenticated():
return False
cart = cls.get_cart_for_user(user)
if not item_types:
# check to see if the cart has at least some item in it
return cart.has_items()
else:
# if the caller is explicitly asking to check for particular types
for item_type in item_types:
if cart.has_items(item_type):
return True
return False
@classmethod
def remove_cart_item_from_order(cls, item, user):
"""
Removes the item from the cart if the item.order.status == 'cart'.
Also removes any code redemption associated with the order_item
"""
if item.order.status == 'cart':
log.info("order item %s removed for user %s", str(item.id), user)
item.delete()
# remove any redemption entry associated with the item
CouponRedemption.remove_code_redemption_from_item(item, user)
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status))
def has_items(self, item_type=None):
"""
Does the cart have any items in it?
If an item_type is passed in then we check to see if there are any items of that class type
"""
if not item_type:
return self.orderitem_set.exists()
else:
items = self.orderitem_set.all().select_subclasses()
for item in items:
if isinstance(item, item_type):
return True
return False
def reset_cart_items_prices(self):
"""
Reset the items price state in the user cart
"""
for item in self.orderitem_set.all():
if item.is_discounted:
item.unit_cost = item.list_price
item.save()
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete()
@transaction.atomic
def start_purchase(self):
"""
Start the purchase process. This will set the order status to "paying",
at which point it should no longer be modified.
Future calls to `Order.get_cart_for_user()` will filter out orders with
status "paying", effectively creating a new (empty) cart.
"""
if self.status == 'cart':
self.status = 'paying'
self.save()
for item in OrderItem.objects.filter(order=self).select_subclasses():
item.start_purchase()
def update_order_type(self):
"""
        Update the order type. This method will inspect the quantity associated with each OrderItem.
        In the application, it is implied that when qty > 1, the user intends to purchase
        'RegistrationCodes', which are randomly generated strings that users can distribute to
        others in order for them to enroll in paywalled courses.
        The UI/UX may change in the future to make the switch between PaidCourseRegistration
        and CourseRegCodeItems a more explicit UI gesture from the purchaser.
"""
cart_items = self.orderitem_set.all()
is_order_type_business = False
for cart_item in cart_items:
if cart_item.qty > 1:
is_order_type_business = True
items_to_delete = []
old_to_new_id_map = []
if is_order_type_business:
for cart_item in cart_items:
if hasattr(cart_item, 'paidcourseregistration'):
course_reg_code_item = CourseRegCodeItem.add_to_order(
self, cart_item.paidcourseregistration.course_id, cart_item.qty,
)
# update the discounted prices if coupon redemption applied
course_reg_code_item.list_price = cart_item.list_price
course_reg_code_item.unit_cost = cart_item.unit_cost
course_reg_code_item.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
else:
for cart_item in cart_items:
if hasattr(cart_item, 'courseregcodeitem'):
paid_course_registration = PaidCourseRegistration.add_to_order(
self, cart_item.courseregcodeitem.course_id,
)
# update the discounted prices if coupon redemption applied
paid_course_registration.list_price = cart_item.list_price
paid_course_registration.unit_cost = cart_item.unit_cost
paid_course_registration.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
for item in items_to_delete:
item.delete()
self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
self.save()
return old_to_new_id_map
def generate_pdf_receipt(self, order_items):
"""
Generates the pdf receipt for the given order_items
and returns the pdf_buffer.
"""
items_data = []
for item in order_items:
item_total = item.qty * item.unit_cost
items_data.append({
'item_description': item.pdf_receipt_display_name,
'quantity': item.qty,
'list_price': item.get_list_price(),
'discount': item.get_list_price() - item.unit_cost,
'item_total': item_total
})
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id),
date=self.purchase_time,
is_invoice=False,
total_cost=self.total_cost,
payment_received=self.total_cost,
balance=0
).generate_pdf(pdf_buffer)
return pdf_buffer
def generate_registration_codes_csv(self, orderitems, site_name):
"""
this function generates the csv file
"""
course_names = []
csv_file = StringIO.StringIO()
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
for item in orderitems:
course_id = item.course_id
course = get_course_by_id(item.course_id, depth=0)
registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
course_names.append(course.display_name)
for registration_code in registration_codes:
redemption_url = reverse('register_code_redemption', args=[registration_code.code])
url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
return csv_file, course_names
def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, course_names):
"""
send confirmation e-mail
"""
recipient_list = [(self.user.username, self.user.email, 'user')] # pylint: disable=no-member
if self.company_contact_email:
recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
joined_course_names = ""
if self.recipient_email:
recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
joined_course_names = " " + ", ".join(course_names)
if not is_order_type_business:
subject = _("Order Payment Confirmation")
else:
subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
course_name_list=joined_course_names
)
dashboard_url = '{base_url}{dashboard}'.format(
base_url=site_name,
dashboard=reverse('dashboard')
)
try:
from_address = configuration_helpers.get_value(
'email_from_address',
settings.PAYMENT_CONFIRM_EMAIL
)
# Send a unique email for each recipient. Don't put all email addresses in a single email.
for recipient in recipient_list:
# Some of the names in the db end in white space.
recipient_name = self.user.profile.name.strip()
message = render_to_string(
'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
{
'order': self,
'recipient_name': recipient_name,
'recipient_type': recipient[2],
'site_name': site_name,
'order_items': orderitems,
'course_names': ", ".join(course_names),
'dashboard_url': dashboard_url,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'order_placed_by': '{username} ({email})'.format(
username=self.user.username, email=self.user.email
),
'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
'payment_support_email': configuration_helpers.get_value(
'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL,
),
'payment_email_signature': configuration_helpers.get_value('payment_email_signature'),
'payment_support_phone': configuration_helpers.get_value('payment_support_phone', settings.PAYMENT_SUPPORT_PHONE),
'payment_platform_name': configuration_helpers.get_value('payment_platform_name', settings.PAYMENT_PLATFORM_NAME),
}
)
email = EmailMessage(
subject=subject,
body=message,
from_email=from_address,
to=[recipient[1]]
)
# Only the business order is HTML formatted. A single seat order confirmation is plain text.
if is_order_type_business:
email.content_subtype = "html"
if csv_file:
email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
if pdf_file is not None:
email.attach(u'ReceiptOrder{}.pdf'.format(str(self.id)), pdf_file.getvalue(), 'application/pdf')
else:
file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
email.send()
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error('Failed sending confirmation e-mail for order %d', self.id)
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
log.error(
u"`purchase` method called on order {}, but order is already purchased.".format(self.id) # pylint: disable=no-member
)
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
if self.order_type == OrderTypes.BUSINESS:
self.update_order_type()
for item in orderitems:
item.purchase_item()
csv_file = None
course_names = []
if self.order_type == OrderTypes.BUSINESS:
#
# Generate the CSV file that contains all of the RegistrationCodes that have already been
# generated when the purchase has transacted
#
csv_file, course_names = self.generate_registration_codes_csv(orderitems, site_name)
try:
pdf_file = self.generate_pdf_receipt(orderitems)
except Exception: # pylint: disable=broad-except
log.exception('Exception at creating pdf file.')
pdf_file = None
try:
self.send_confirmation_emails(
orderitems, self.order_type == OrderTypes.BUSINESS,
csv_file, pdf_file, site_name, course_names
)
except Exception: # pylint: disable=broad-except
# Catch all exceptions here, since the Django view implicitly
# wraps this in a transaction. If the order completes successfully,
# we don't want to roll back just because we couldn't send
# the confirmation email.
log.exception('Error occurred while sending payment confirmation email')
self._emit_order_event('Completed Order', orderitems)
def refund(self):
"""
Refund the given order. As of right now, this just marks the order as refunded.
"""
self.status = 'refunded'
self.save()
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
self._emit_order_event('Refunded Order', orderitems)
def _emit_order_event(self, event_name, orderitems):
"""
Emit an analytics event with the given name for this Order. Will iterate over all associated
OrderItems and add them as products in the event as well.
"""
try:
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(self.user.id, event_name, {
'orderId': self.id,
'total': str(self.total_cost),
'currency': self.currency,
'products': [item.analytics_data() for item in orderitems]
}, context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
})
except Exception: # pylint: disable=broad-except
# Capturing all exceptions thrown while tracking analytics events. We do not want
# an operation to fail because of an analytics event, so we will capture these
# errors in the logs.
log.exception(
u'Unable to emit {event} event for user {user} and order {order}'.format(
event=event_name, user=self.user.id, order=self.id)
)
def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
recipient_email='', customer_reference_number=''):
"""
This function is called after the user selects a purchase type of "Business" and
is asked to enter the optional billing details. The billing details are updated
for that order.
company_name - Name of purchasing organization
company_contact_name - Name of the key contact at the company the sale was made to
company_contact_email - Email of the key contact at the company the sale was made to
recipient_name - Name of the company should the invoice be sent to
recipient_email - Email of the company should the invoice be sent to
customer_reference_number - purchase order number of the organization associated with this Order
"""
self.company_name = company_name
self.company_contact_name = company_contact_name
self.company_contact_email = company_contact_email
self.recipient_name = recipient_name
self.recipient_email = recipient_email
self.customer_reference_number = customer_reference_number
self.save()
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
html instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
def retire(self):
"""
Method to "retire" orders that have gone through to the payment service
but have (erroneously) not had their statuses updated.
This method only works on orders that satisfy the following conditions:
1) the order status is either "cart" or "paying" (otherwise we raise
an InvalidStatusToRetire error)
2) the order's order item's statuses match the order's status (otherwise
we throw an UnexpectedOrderItemStatus error)
"""
# if an order is already retired, no-op:
if self.status in ORDER_STATUS_MAP.values():
return
if self.status not in ORDER_STATUS_MAP.keys():
raise InvalidStatusToRetire(
"order status {order_status} is not 'paying' or 'cart'".format(
order_status=self.status
)
)
for item in self.orderitem_set.all():
if item.status != self.status:
raise UnexpectedOrderItemStatus(
"order_item status is different from order status"
)
self.status = ORDER_STATUS_MAP[self.status]
self.save()
for item in self.orderitem_set.all():
item.retire()
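    # e.g. an order stuck in 'paying' becomes 'defunct-paying', and each of its
    # items follows suit via ORDER_STATUS_MAP (see OrderItem.retire below).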
def find_item_by_course_id(self, course_id):
"""
course_id: Course id of the item to find
Returns OrderItem from the Order given a course_id
Raises exception ItemNotFoundException when the item
having the given course_id is not present in the cart
"""
cart_items = OrderItem.objects.filter(order=self).select_subclasses()
found_items = []
for item in cart_items:
if getattr(item, 'course_id', None):
if item.course_id == course_id:
found_items.append(item)
if not found_items:
raise ItemNotFoundInCartException
return found_items
class OrderItem(TimeStampedModel):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
"""
class Meta(object):
app_label = "shoppingcart"
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
line_desc = models.CharField(default="Misc. Item", max_length=1024)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True, db_index=True)
refund_requested_time = models.DateTimeField(null=True, db_index=True)
service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
# general purpose field, not user-visible. Used for reporting
report_comments = models.TextField(default="")
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.atomic
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def start_purchase(self):
"""
Start the purchase process. This will set the order item status to "paying",
at which point it should no longer be modified.
"""
self.status = 'paying'
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
This is called on each item in a purchased order to generate receipt instructions.
This should return a list of `ReceiptInstruction`s in HTML string
Default implementation is to return an empty set
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def is_discounted(self):
"""
        Returns True if a discount coupon has been applied to the OrderItem, and False otherwise.
        Earlier, OrderItems were stored with an empty list_price if a discount had not been applied.
        Now we consider the item non-discounted if list_price is None or list_price == unit_cost; in
        other words, an item is discounted when list_price is set and differs from unit_cost.
        This should work with both new and old records.
"""
return self.list_price and self.list_price != self.unit_cost
def get_list_price(self):
"""
Returns the unit_cost if no discount has been applied, or the list_price if it is defined.
"""
return self.list_price if self.list_price else self.unit_cost
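    # Illustrative bookkeeping for the two price fields (hypothetical values):
    #     item.list_price = Decimal('100.00'); item.unit_cost = Decimal('80.00')
    #     item.is_discounted     # True  -- list_price is set and differs
    #     item.get_list_price()  # Decimal('100.00')
    #     item.line_cost         # item.qty * Decimal('80.00')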
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""
Individual instructions for this order item.
Currently, only used for emails.
"""
return ''
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
This can be overridden by the subclasses of OrderItem
"""
course_key = getattr(self, 'course_id', None)
if course_key:
course = get_course_by_id(course_key, depth=0)
return course.display_name
else:
raise Exception(
"Not Implemented. OrderItems that are not Course specific should have"
" a overridden pdf_receipt_display_name property"
)
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
The default implementation returns defaults for most attributes. When no name or
category is specified by the implementation, the string 'N/A' is placed for the
name and category. This should be handled appropriately by all implementations.
Returns
A dictionary containing analytics data for this OrderItem.
"""
return {
'id': self.id,
'sku': type(self).__name__,
'name': 'N/A',
'price': str(self.unit_cost),
'quantity': self.qty,
'category': 'N/A',
}
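    # Example payload from the base implementation (hypothetical values; `sku`
    # reflects the concrete subclass name):
    #     {'id': 7, 'sku': 'PaidCourseRegistration', 'name': 'N/A',
    #      'price': '40.00', 'quantity': 1, 'category': 'N/A'}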
def retire(self):
"""
Called by the `retire` method defined in the `Order` class. Retires
an order item if its (and its order's) status was erroneously not
updated to "purchased" after the order was processed.
"""
self.status = ORDER_STATUS_MAP[self.status]
self.save()
class Invoice(TimeStampedModel):
"""
This table capture all the information needed to support "invoicing"
which is when a user wants to purchase Registration Codes,
but will not do so via a Credit Card transaction.
"""
class Meta(object):
app_label = "shoppingcart"
company_name = models.CharField(max_length=255, db_index=True)
company_contact_name = models.CharField(max_length=255)
company_contact_email = models.CharField(max_length=255)
recipient_name = models.CharField(max_length=255)
recipient_email = models.CharField(max_length=255)
address_line_1 = models.CharField(max_length=255)
address_line_2 = models.CharField(max_length=255, null=True, blank=True)
address_line_3 = models.CharField(max_length=255, null=True, blank=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=255, null=True)
zip = models.CharField(max_length=15, null=True)
country = models.CharField(max_length=64, null=True)
# This field has been deprecated.
# The total amount can now be calculated as the sum
# of each invoice item associated with the invoice.
# For backwards compatibility, this field is maintained
# and written to during invoice creation.
total_amount = models.FloatField()
# This field has been deprecated in order to support
# invoices for items that are not course-related.
# Although this field is still maintained for backwards
# compatibility, you should use CourseRegistrationCodeInvoiceItem
# to look up the course ID for purchased redeem codes.
course_id = CourseKeyField(max_length=255, db_index=True)
internal_reference = models.CharField(
max_length=255,
null=True,
blank=True,
help_text=ugettext_lazy("Internal reference code for this invoice.")
)
customer_reference_number = models.CharField(
max_length=63,
null=True,
blank=True,
help_text=ugettext_lazy("Customer's reference code for this invoice.")
)
is_valid = models.BooleanField(default=True)
@classmethod
def get_invoice_total_amount_for_course(cls, course_key):
"""
        Returns the total invoice amount generated for the course.
"""
result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
total = result.get('total', 0)
return total if total else 0
def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
"""
Generates the pdf invoice for the given course
and returns the pdf_buffer.
"""
discount_per_item = float(course_price) - sale_price / quantity
list_price = course_price - discount_per_item
items_data = [{
'item_description': course.display_name,
'quantity': quantity,
'list_price': list_price,
'discount': discount_per_item,
'item_total': quantity * list_price
}]
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id),
date=datetime.now(pytz.utc),
is_invoice=True,
total_cost=float(self.total_amount),
payment_received=0,
balance=float(self.total_amount)
).generate_pdf(pdf_buffer)
return pdf_buffer
def snapshot(self):
"""Create a snapshot of the invoice.
A snapshot is a JSON-serializable representation
of the invoice's state, including its line items
and associated transactions (payments/refunds).
This is useful for saving the history of changes
to the invoice.
Returns:
dict
"""
return {
'internal_reference': self.internal_reference,
'customer_reference': self.customer_reference_number,
'is_valid': self.is_valid,
'contact_info': {
'company_name': self.company_name,
'company_contact_name': self.company_contact_name,
'company_contact_email': self.company_contact_email,
'recipient_name': self.recipient_name,
'recipient_email': self.recipient_email,
'address_line_1': self.address_line_1,
'address_line_2': self.address_line_2,
'address_line_3': self.address_line_3,
'city': self.city,
'state': self.state,
'zip': self.zip,
'country': self.country,
},
'items': [
item.snapshot()
for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
],
'transactions': [
trans.snapshot()
for trans in InvoiceTransaction.objects.filter(invoice=self)
],
}
def __unicode__(self):
label = (
unicode(self.internal_reference)
if self.internal_reference
else u"No label"
)
created = (
self.created.strftime("%Y-%m-%d")
if self.created
else u"No date"
)
return u"{label} ({date_created})".format(
label=label, date_created=created
)
INVOICE_TRANSACTION_STATUSES = (
# A payment/refund is in process, but money has not yet been transferred
('started', 'started'),
# A payment/refund has completed successfully
# This should be set ONLY once money has been successfully exchanged.
('completed', 'completed'),
# A payment/refund was promised, but was cancelled before
# money had been transferred. An example would be
# cancelling a refund check before the recipient has
# a chance to deposit it.
('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
"""Record payment and refund information for invoices.
There are two expected use cases:
1) We send an invoice to someone, and they send us a check.
We then manually create an invoice transaction to represent
the payment.
2) We send an invoice to someone, and they pay us. Later, we
need to issue a refund for the payment. We manually
create a transaction with a negative amount to represent
the refund.
"""
class Meta(object):
app_label = "shoppingcart"
invoice = models.ForeignKey(Invoice)
amount = models.DecimalField(
default=0.0, decimal_places=2, max_digits=30,
help_text=ugettext_lazy(
"The amount of the transaction. Use positive amounts for payments"
" and negative amounts for refunds."
)
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
comments = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy("Optional: provide additional information for this transaction")
)
status = models.CharField(
max_length=32,
default='started',
choices=INVOICE_TRANSACTION_STATUSES,
help_text=ugettext_lazy(
"The status of the payment or refund. "
"'started' means that payment is expected, but money has not yet been transferred. "
"'completed' means that the payment or refund was received. "
"'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
)
)
created_by = models.ForeignKey(User)
last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user')
@classmethod
def get_invoice_transaction(cls, invoice_id):
"""
        Returns the InvoiceTransaction object for the given invoice_id if found,
        else returns None.
"""
try:
return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
except InvoiceTransaction.DoesNotExist:
return None
@classmethod
def get_total_amount_of_paid_course_invoices(cls, course_key):
"""
returns the total amount of the paid invoices.
"""
result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
total=Sum(
'amount',
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
total = result.get('total', 0)
return total if total else 0
def snapshot(self):
"""Create a snapshot of the invoice transaction.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'amount': unicode(self.amount),
'currency': self.currency,
'comments': self.comments,
'status': self.status,
'created_by': self.created_by.username,
'last_modified_by': self.last_modified_by.username
}
class InvoiceItem(TimeStampedModel):
"""
This is the basic interface for invoice items.
Each invoice item represents a "line" in the invoice.
For example, in an invoice for course registration codes,
there might be an invoice item representing 10 registration
codes for the DemoX course.
"""
class Meta(object):
app_label = "shoppingcart"
objects = InheritanceManager()
invoice = models.ForeignKey(Invoice, db_index=True)
qty = models.IntegerField(
default=1,
help_text=ugettext_lazy("The number of items sold.")
)
unit_price = models.DecimalField(
default=0.0,
decimal_places=2,
max_digits=30,
help_text=ugettext_lazy("The price per item sold, including discounts.")
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
def snapshot(self):
"""Create a snapshot of the invoice item.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'qty': self.qty,
'unit_price': unicode(self.unit_price),
'currency': self.currency
}
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
"""
This is an invoice item that represents a payment for
a course registration.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
def snapshot(self):
"""Create a snapshot of the invoice item.
This is the same as a snapshot for other invoice items,
with the addition of a `course_id` field.
Returns:
dict
"""
snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
snapshot['course_id'] = unicode(self.course_id)
return snapshot
class InvoiceHistory(models.Model):
"""History of changes to invoices.
This table stores snapshots of invoice state,
including the associated line items and transactions
(payments/refunds).
Entries in the table are created, but never deleted
or modified.
We use Django signals to save history entries on change
events. These signals are fired within a database
transaction, so the history record is created only
if the invoice change is successfully persisted.
"""
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
invoice = models.ForeignKey(Invoice)
# JSON-serialized representation of the current state
# of the invoice, including its line items and
# transactions (payments/refunds).
snapshot = models.TextField(blank=True)
@classmethod
def save_invoice_snapshot(cls, invoice):
"""Save a snapshot of the invoice's current state.
Arguments:
invoice (Invoice): The invoice to save.
"""
cls.objects.create(
invoice=invoice,
snapshot=json.dumps(invoice.snapshot())
)
@staticmethod
def snapshot_receiver(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Signal receiver that saves a snapshot of an invoice.
Arguments:
sender: Not used, but required by Django signals.
instance (Invoice, InvoiceItem, or InvoiceTransaction)
"""
if isinstance(instance, Invoice):
InvoiceHistory.save_invoice_snapshot(instance)
elif hasattr(instance, 'invoice'):
InvoiceHistory.save_invoice_snapshot(instance.invoice)
class Meta(object):
get_latest_by = "timestamp"
app_label = "shoppingcart"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
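# Net effect (sketch): every save of an Invoice, InvoiceItem (or subclass), or
# InvoiceTransaction -- and every delete of a transaction -- appends one
# InvoiceHistory row containing json.dumps(invoice.snapshot()) at that moment.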
class CourseRegistrationCode(models.Model):
"""
This table contains registration codes
With registration code, a user can register for a course for free
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True, unique=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created_by = models.ForeignKey(User, related_name='created_by_user')
created_at = models.DateTimeField(auto_now_add=True)
order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
mode_slug = models.CharField(max_length=100, null=True)
is_valid = models.BooleanField(default=True)
# For backwards compatibility, we maintain the FK to "invoice"
# In the future, we will remove this in favor of the FK
# to "invoice_item" (which can be used to look up the invoice).
invoice = models.ForeignKey(Invoice, null=True)
invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True)
@classmethod
def order_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via bulk purchase scenario.
"""
return cls.objects.filter(order__isnull=False, course_id=course_id)
@classmethod
def invoice_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via invoice.
"""
return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
"""
This model contains the registration-code redemption info
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True, null=True)
registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
redeemed_by = models.ForeignKey(User, db_index=True)
redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def registration_code_used_for_enrollment(cls, course_enrollment):
"""
        Returns the RegistrationCodeRedemption object if a registration code
        has been used during the course enrollment, else returns None.
"""
# theoretically there could be more than one (e.g. someone self-unenrolls
# then re-enrolls with a different regcode)
reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
if reg_codes:
# return the first one. In all normal use cases of registration codes
# the user will only have one
return reg_codes[0]
return None
@classmethod
def is_registration_code_redeemed(cls, course_reg_code):
"""
Checks the existence of the registration code
in the RegistrationCodeRedemption
"""
return cls.objects.filter(registration_code__code=course_reg_code).exists()
@classmethod
def get_registration_code_redemption(cls, code, course_id):
"""
Returns the registration code redemption object if found else returns None.
"""
try:
code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
except cls.DoesNotExist:
code_redemption = None
return code_redemption
@classmethod
def create_invoice_generated_registration_redemption(cls, course_reg_code, user): # pylint: disable=invalid-name
"""
This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
and thus the order_id is missing.
"""
code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
code_redemption.save()
return code_redemption
class SoftDeleteCouponManager(models.Manager):
""" Use this manager to get objects that have a is_active=True """
def get_active_coupons_queryset(self):
"""
        Filter to only the Coupons with is_active=True.
"""
return super(SoftDeleteCouponManager, self).get_queryset().filter(is_active=True)
def get_queryset(self):
"""
get all the coupon objects
"""
return super(SoftDeleteCouponManager, self).get_queryset()
class Coupon(models.Model):
"""
    This table contains coupon codes.
    A user can get a discount on a course by providing a coupon code.
"""
class Meta(object):
app_label = "shoppingcart"
code = models.CharField(max_length=32, db_index=True)
description = models.CharField(max_length=255, null=True, blank=True)
course_id = CourseKeyField(max_length=255)
percentage_discount = models.IntegerField(default=0)
created_by = models.ForeignKey(User)
created_at = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
expiration_date = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
objects = SoftDeleteCouponManager()
@property
def display_expiry_date(self):
"""
return the coupon expiration date in the readable format
"""
return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
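    # Illustrative: with expiration_date == datetime(2017, 1, 2, tzinfo=pytz.utc),
    # the coupon is displayed as expiring "January 01, 2017" -- the last full day
    # on which it can still be redeemed.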
class CouponRedemption(models.Model):
"""
    This table contains coupon redemption info.
"""
class Meta(object):
app_label = "shoppingcart"
order = models.ForeignKey(Order, db_index=True)
user = models.ForeignKey(User, db_index=True)
coupon = models.ForeignKey(Coupon, db_index=True)
@classmethod
def remove_code_redemption_from_item(cls, item, user):
"""
        If an item is removed from the shopping cart, then we also remove
        the corresponding coupon code redemption info.
"""
order_item_course_id = item.course_id
try:
            # Try to remove the coupon code redemption information, if it exists.
coupon_redemption = cls.objects.get(
user=user,
coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
order=item.order_id
)
coupon_redemption.delete()
log.info(
u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
coupon_redemption.coupon.code,
user,
str(item.id),
)
except CouponRedemption.DoesNotExist:
log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))
@classmethod
def remove_coupon_redemption_from_cart(cls, user, cart):
"""
        This method deletes the coupon redemption entries for the given cart.
"""
coupon_redemption = cls.objects.filter(user=user, order=cart)
if coupon_redemption:
coupon_redemption.delete()
log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)
@classmethod
def get_discount_price(cls, percentage_discount, value):
"""
        Return the price after applying the percentage discount.
"""
discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
return value - discount
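    # Worked example (hypothetical values): a 25% coupon on a $40.00 item:
    #     CouponRedemption.get_discount_price(25, Decimal('40.00'))
    #     # discount == Decimal('10.00'), so the call returns Decimal('30.00')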
@classmethod
def add_coupon_redemption(cls, coupon, order, cart_items):
"""
add coupon info into coupon_redemption model
"""
is_redemption_applied = False
coupon_redemptions = cls.objects.filter(order=order, user=order.user)
for coupon_redemption in coupon_redemptions:
if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
log.exception(
u"Coupon redemption already exist for user '%s' against order id '%s'",
order.user.username,
order.id,
)
raise MultipleCouponsNotAllowedException
for item in cart_items:
if item.course_id:
if item.course_id == coupon.course_id:
coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
coupon_redemption.save()
discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
item.list_price = item.unit_cost
item.unit_cost = discount_price
item.save()
log.info(
u"Discount generated for user %s against order id '%s'",
order.user.username,
order.id,
)
is_redemption_applied = True
return is_redemption_applied
return is_redemption_applied
@classmethod
def get_top_discount_codes_used(cls, course_id):
"""
Returns the top discount codes used.
QuerySet = [
{
'coupon__percentage_discount': 22,
'coupon__code': '12',
'coupon__used_count': '2',
},
{
...
}
]
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
'coupon__code', 'coupon__percentage_discount'
).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')
@classmethod
def get_total_coupon_code_purchases(cls, course_id):
"""
        Returns the total seat purchases made using coupon codes.
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def get_self_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Returns the count of paid course items filtered by course_id and status.
"""
return cls.objects.filter(course_id=course_key, status=status).count()
@classmethod
def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
"""
        Returns the PaidCourseRegistration object if the user has paid for
        the course enrollment, else returns None.
"""
try:
return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
status='purchased').latest('id')
except PaidCourseRegistration.DoesNotExist:
return None
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None): # pylint: disable=arguments-differ
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning(
u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
order.user.email,
course_id,
order.id,
)
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
            # user could have specified a mode that's not set; in that case fall back to DEFAULT_SHOPPINGCART_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, __ = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.list_price = cost
item.line_desc = _(u'Registration for Course: {course_name}').format(
course_name=course.display_name_with_default_escaped)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
CourseEnrollment.send_signal_full(EnrollStatusChange.paid_start,
user=order.user, mode=item.mode, course_id=course_id,
cost=cost, currency=currency)
return item
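    # Hedged usage sketch, mirroring the CertificateItem example further
    # below (the course key value is hypothetical):
    #     cart = Order.get_cart_for_user(user)
    #     PaidCourseRegistration.add_to_order(cart, course_key)
    # The sanity checks above may raise CourseDoesNotExistException,
    # ItemAlreadyInCartException or AlreadyEnrolledInCourseException.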
def purchased_callback(self):
"""
When purchased, this should enroll the user in the course. We are assuming that
course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
would in fact be quite silly since there's a clear back door.
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
# enroll in course and link to the enrollment_id
self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
self.save()
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
self.course_enrollment.send_signal(EnrollStatusChange.paid_complete,
cost=self.line_cost, currency=self.currency)
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = _(
u"Please visit your {link_start}dashboard{link_end} "
u"to see your new course."
).format(
link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
link_end=u'</a>',
)
return self.pk_with_subclass, set([notification])
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
except PaidCourseRegistrationAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the Order Item is associated with a course, additional fields will be populated with
course information. If there is a mode associated, the mode data is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(PaidCourseRegistration, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
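    # Illustrative output sketch (hypothetical course/mode values): for a
    # course 'edX/Demo/2014' bought in 'honor' mode, analytics_data() extends
    # the base dict roughly as:
    #     {'name': u'edX/Demo/2014', 'category': u'edX', 'sku': u'<base-sku>.honor'}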
class CourseRegCodeItem(OrderItem):
"""
    This is an inventory item for purchasing
    course registration codes in bulk
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
@classmethod
def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Returns the sum of bulk-purchased seats.
"""
total = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
if result['total'] is not None:
total = result['total']
return total
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
        Returns the total amount of money (qty * unit_cost) generated by purchases of this course.
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum(
F('qty') * F('unit_cost'),
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
cost=None, currency=None): # pylint: disable=arguments-differ
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_SHOPPINGCART_MODE
course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id) # pylint: disable=unused-variable
item.status = order.status
item.mode = course_mode.slug
item.unit_cost = cost
item.list_price = cost
item.qty = qty
item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
course_name=course.display_name_with_default_escaped)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
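    # Hedged usage sketch (the quantity is hypothetical): buying 25
    # enrollment codes for a course:
    #     cart = Order.get_cart_for_user(user)
    #     CourseRegCodeItem.add_to_order(cart, course_key, 25)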
def purchased_callback(self):
"""
        When the purchase is completed, this OrderItem type generates
        registration codes that can be redeemed by users
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
total_registration_codes = int(self.qty)
# we need to import here because of a circular dependency
# we should ultimately refactor code to have save_registration_code in this models.py
# file, but there's also a shared dependency on a random string generator which
# is in another PR (for another feature)
from lms.djangoapps.instructor.views.api import save_registration_code
for i in range(total_registration_codes): # pylint: disable=unused-variable
save_registration_code(self.user, self.course_id, self.mode, order=self.order)
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost))
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
except CourseRegCodeItemAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the OrderItem is associated with a course, additional fields will be populated with
course information. If a mode is available, it will be included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CourseRegCodeItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItemAnnotation(models.Model):
"""
    A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates reports for the paid courses, each report item must contain the payment account associated with a course.
    Unfortunately there was no concept of a "SKU" or stock item where this association could be kept,
    so this model retrofits it.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
"""
    A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates reports for the paid courses, each report item must contain the payment account associated with a course.
    Unfortunately there was no concept of a "SKU" or stock item where this association could be kept,
    so this model retrofits it.
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
"""
class Meta(object):
app_label = "shoppingcart"
course_id = CourseKeyField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment)
mode = models.SlugField()
@receiver(UNENROLL_DONE)
def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs): # pylint: disable=no-self-argument,unused-argument
"""
        When a CourseEnrollment object calls its unenroll method, this function checks whether the
        unenrollment was from a verified certificate purchase that is still within the refund deadline.
        If so, it performs the refund.
        Returns the refunded certificate on a successful refund; otherwise returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if (not course_enrollment.refundable()) or skip_refund:
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.warning(
u"Matching CertificateItem not found while trying to refund. User %s, Course %s",
course_enrollment.user,
course_enrollment.course_id,
)
return
target_cert.status = 'refunded'
target_cert.refund_requested_time = datetime.now(pytz.utc)
target_cert.save()
target_cert.order.refund()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = configuration_helpers.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except Exception as exception: # pylint: disable=broad-except
err_str = ('Failed sending email to billing to request a refund for verified certificate'
' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number,
exception=exception,
))
return target_cert
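    # Flow sketch (the wiring is assumed from the @receiver(UNENROLL_DONE)
    # decorator above): a hypothetical trigger would be
    #     CourseEnrollment.unenroll(user, course_key)
    # which emits UNENROLL_DONE; a refundable verified CertificateItem is then
    # marked 'refunded', its order is refunded, and billing is emailed.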
@classmethod
@transaction.atomic
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
log.error(msg)
raise InvalidCartItem(
_(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
)
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode,
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
item.list_price = cost
course_name = modulestore().get_course(course_id).display_name
# Translators: In this particular case, mode_name refers to a
# particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
# by which a user could enroll in the given course.
item.line_desc = _("{mode_name} for course {course}").format(
mode_name=mode_info.name,
course=course_name
)
item.currency = currency
order.currency = currency
order.save()
item.save()
# signal course added to cart
course_enrollment.send_signal(EnrollStatusChange.paid_start, cost=cost, currency=currency)
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
self.course_enrollment.send_signal(EnrollStatusChange.upgrade_complete,
cost=self.unit_cost, currency=self.currency)
    def additional_instruction_text(self):
        """Return additional reminder text (identity verification and refund policy) for this purchase."""
        verification_reminder = ""
refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 14 days after the course "
"start date. ")
is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment()
is_professional_mode_verified = self.course_enrollment.is_professional_enrollment()
if is_enrollment_mode_verified:
domain = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
verification_url = "http://{domain}{path}".format(domain=domain, path=path)
verification_reminder = _(
"If you haven't verified your identity yet, please start the verification process ({verification_url})."
).format(verification_url=verification_url)
if is_professional_mode_verified:
refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 2 days after the "
"course start date. ")
refund_reminder = _(
"{refund_reminder_msg}"
"To receive your refund, contact {billing_email}. "
"Please include your order number in your email. "
"Please do NOT include your credit card information."
).format(
refund_reminder_msg=refund_reminder_msg,
billing_email=settings.PAYMENT_SUPPORT_EMAIL
)
# Need this to be unicode in case the reminder strings
# have been translated and contain non-ASCII unicode
return u"{verification_reminder} {refund_reminder}".format(
verification_reminder=verification_reminder,
refund_reminder=refund_reminder
)
@classmethod
def verified_certificates_count(cls, course_id, status):
"""Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
return use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
# TODO combine these three methods into one
@classmethod
def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
"""
Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
Sample usages:
- status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
- status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
etc
"""
query = use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
if query is None:
return Decimal(0.00)
else:
return query
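    # Hedged usage sketch, following the docstring's first sample usage:
    #     CertificateItem.verified_certificates_monetary_field_sum(
    #         course_id, 'refunded', 'unit_cost')
    # returns the total amount of money refunded for course_id.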
@classmethod
    def verified_certificates_contributing_more_than_minimum(cls, course_id):
        """Return the count of purchased verified certificates whose unit cost exceeds the course minimum price."""
return use_read_replica_if_available(
CertificateItem.objects.filter(
course_id=course_id,
mode='verified',
status='purchased',
unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the CertificateItem is associated with a course, additional fields will be populated with
course information. If there is a mode associated with the certificate, it is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CertificateItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class DonationConfiguration(ConfigurationModel):
"""Configure whether donations are enabled on the site."""
class Meta(ConfigurationModel.Meta):
app_label = "shoppingcart"
class Donation(OrderItem):
"""A donation made by a user.
Donations can be made for a specific course or to the organization as a whole.
Users can choose the donation amount.
"""
class Meta(object):
app_label = "shoppingcart"
# Types of donations
DONATION_TYPES = (
("general", "A general donation"),
("course", "A donation to a particular course")
)
# The type of donation
donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
# If a donation is made for a specific course, then store the course ID here.
# If the donation is made to the organization as a whole,
# set this field to CourseKeyField.Empty
course_id = CourseKeyField(max_length=255, db_index=True)
@classmethod
@transaction.atomic
def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
"""Add a donation to an order.
Args:
order (Order): The order to add this donation to.
donation_amount (Decimal): The amount the user is donating.
Keyword Args:
course_id (CourseKey): If provided, associate this donation with a particular course.
            currency (str): The currency used for the donation.
Raises:
InvalidCartItem: The provided course ID is not valid.
Returns:
Donation
"""
# This will validate the currency but won't actually add the item to the order.
super(Donation, cls).add_to_order(order, currency=currency)
# Create a line item description, including the name of the course
# if this is a per-course donation.
# This will raise an exception if the course can't be found.
description = cls._line_item_description(course_id=course_id)
params = {
"order": order,
"user": order.user,
"status": order.status,
"qty": 1,
"unit_cost": donation_amount,
"currency": currency,
"line_desc": description
}
if course_id is not None:
params["course_id"] = course_id
params["donation_type"] = "course"
else:
params["donation_type"] = "general"
return cls.objects.create(**params)
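    # Hedged usage sketch (the amount is hypothetical): a per-course donation
    # added to the user's cart:
    #     cart = Order.get_cart_for_user(user)
    #     Donation.add_to_order(cart, Decimal('10.00'), course_id=course_key)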
def purchased_callback(self):
"""Donations do not need to be fulfilled, so this method does nothing."""
pass
def generate_receipt_instructions(self):
"""Provide information about tax-deductible donations in the receipt.
Returns:
tuple of (Donation, unicode)
"""
return self.pk_with_subclass, set([self._tax_deduction_msg()])
def additional_instruction_text(self, **kwargs):
"""Provide information about tax-deductible donations in the confirmation email.
Returns:
unicode
"""
return self._tax_deduction_msg()
def _tax_deduction_msg(self):
"""Return the translated version of the tax deduction message.
Returns:
unicode
"""
return _(
u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
u"This receipt was prepared to support charitable contributions for tax purposes. "
u"We confirm that neither goods nor services were provided in exchange for this gift."
).format(platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME))
@classmethod
def _line_item_description(cls, course_id=None):
"""Create a line-item description for the donation.
Includes the course display name if provided.
Keyword Arguments:
course_id (CourseKey)
Raises:
CourseDoesNotExistException: The course ID is not valid.
Returns:
unicode
"""
# If a course ID is provided, include the display name of the course
# in the line item description.
if course_id is not None:
course = modulestore().get_course(course_id)
if course is None:
msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
log.error(msg)
raise CourseDoesNotExistException(
_(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
)
return _(u"Donation for {course}").format(course=course.display_name)
# The donation is for the organization as a whole, not a specific course
else:
return _(u"Donation for {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
)
@property
def single_item_receipt_context(self):
return {
'receipt_has_donation_item': True,
}
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the donation is associated with a course, additional fields will be populated with
course information. When no name or category is specified by the implementation, the
platform name is used as a default value for required event fields, to declare that
the Order is specific to the platform, rather than a specific product name or category.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(Donation, self).analytics_data()
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
else:
data['name'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
data['category'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
return data
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
"""
return self._line_item_description(course_id=self.course_id)
| caesar2164/edx-platform | lms/djangoapps/shoppingcart/models.py | Python | agpl-3.0 | 91,861 |
# Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import time
from datetime import date, datetime, timedelta
from optparse import make_option
import openid.store.nonce
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection, transaction
from django.utils.translation import ugettext as _
from identityprovider.models import Account, EmailAddress
SESSION_SQL = """DELETE FROM django_session
WHERE session_key = ANY(SELECT session_key FROM django_session
WHERE expire_date < CURRENT_TIMESTAMP LIMIT %s)"""
NONCES_SQL = """DELETE FROM openidnonce
WHERE timestamp = ANY(SELECT timestamp FROM openidnonce
WHERE timestamp < %s LIMIT %s)"""
NO_ITEMS = """No items selected to clean up. Please select at least one of:
--sessions
--nonces
--testdata
"""
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-s', '--sessions', dest='sessions', default=False,
action='store_true', help='Cleanup sessions.'),
make_option('-n', '--nonces', dest='nonces', default=False,
action='store_true', help='Cleanup nonces.'),
make_option('-t', '--testdata', dest='testdata', default=False,
action='store_true', help='Cleanup test data.'),
make_option('-l', '--limit', dest='limit', default=10000,
action='store',
help='Number of rows to process per batch.'),
make_option('-d', '--date-created', dest='date_created',
default=None, action='store',
help='Cleanup records created before this date.'),
)
help = _("""Clean unnecessary/stalled data from database.""")
def handle(self, *args, **options):
limit = int(options['limit'])
nonce_expire_stamp = int(time.time()) - openid.store.nonce.SKEW
test_email_pattern = settings.EMAIL_ADDRESS_PATTERN.replace(
'+', '\+').replace('.', '\.') % "[^@]+"
if options['date_created'] is None:
date_created = date.today() + timedelta(days=1)
else:
parsed = datetime.strptime(options['date_created'], '%Y-%m-%d')
date_created = parsed.date()
queries = {
'sessions': SESSION_SQL % limit,
'nonces': NONCES_SQL % (nonce_expire_stamp, limit),
}
verbosity = int(options['verbosity'])
testdata = options.get('testdata')
if testdata:
self.clean_testdata(test_email_pattern, date_created, limit,
verbosity)
selected_queries = [query for query in queries
if options.get(query)]
if not selected_queries and not testdata:
self.stdout.write(NO_ITEMS)
for item in selected_queries:
if verbosity >= 1:
self.stdout.write("\nCleaning %s..." % item)
cursor = connection.cursor()
cursor.execute(queries[item])
while cursor.rowcount > 0:
if verbosity >= 2:
self.stdout.write(".")
cursor.execute(queries[item])
transaction.commit_unless_managed()
def clean_testdata(self, email_pattern, date_created, limit, verbosity=0):
kwargs = {'email__iregex': email_pattern,
'date_created__lt': date_created}
if verbosity >= 1:
self.stdout.write("\nCleaning accounts...\n")
while True:
email_ids = EmailAddress.objects.filter(**kwargs).values_list(
'pk')[:limit]
accounts = Account.objects.filter(emailaddress__in=email_ids)
if not accounts:
break
if verbosity >= 2:
self.stdout.write("\tDeleting %d accounts..." % (
accounts.count(),))
accounts.delete()
if verbosity >= 2:
self.stdout.write('\t [OK]\n')
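# Hedged CLI sketch -- the command name is inferred from this file's path
# (identityprovider/management/commands/cleanup.py):
#     python manage.py cleanup --sessions --nonces --limit 5000
#     python manage.py cleanup --testdata --date-created 2010-01-01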
| miing/mci_migo | identityprovider/management/commands/cleanup.py | Python | agpl-3.0 | 4,126 |
# Copyright 2021 Alfredo de la Fuente - Avanzosc S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests import common
from odoo.tests import tagged
@tagged("post_install", "-at_install")
class TestNameCodeYearId(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestNameCodeYearId, cls).setUpClass()
cls.event_obj = cls.env['event.event']
cls.skill_type_lang = cls.env.ref('hr_skills.hr_skill_type_lang')
cls.skill_spanish = cls.env.ref('hr_skills.hr_skill_spanish')
cls.skill_filipino = cls.env.ref('hr_skills.hr_skill_filipino')
cls.skill_type_lang.skill_language = True
cls.skill_spanish.code = 'SP'
cls.skill_filipino.code = 'FI'
def test_event_name_code_year_id(self):
vals = {'name': 'User for event lang level',
'date_begin': '2025-01-06 08:00:00',
'date_end': '2025-01-15 10:00:00',
'lang_id': self.skill_spanish.id}
event = self.event_obj.create(vals)
name = 'SP-{}-2025'.format(event.id)
self.assertEqual(event.name, name)
vals = {'date_begin': '2024-01-06 08:00:00',
'lang_id': self.skill_filipino.id}
event.write(vals)
name = 'FI-{}-2024'.format(event.id)
self.assertEqual(event.name, name)
| avanzosc/odoo-addons | event_name_code_year_id/tests/test_event_name_code_year_id.py | Python | agpl-3.0 | 1,353 |
try:
    import serial  # pyserial (Python 2 setup in this project)
except ImportError:
    # Assumed fallback: bind the Python 3 variant to the same name so the
    # serial.Serial(...) call below still resolves.
    import serial3 as serial
from nupic.frameworks.opf.modelfactory import ModelFactory
import os
import sys
ser = serial.Serial('/dev/ttyACM0', 9600)
def get_online(number_of_records=20):  # 0 means run forever
    model = ModelFactory.loadFromCheckpoint(os.getcwd() + "/model_save")
    count = 0
ser.flushInput()
while (count < number_of_records) or (number_of_records == 0):
count = count + 1
text = ser.readline()
if (len(text.split(",")) == 4):
result = model.run({
"s1": float(text.split(",")[0]),
"s2": float(text.split(",")[1]),
"s3": float(text.split(",")[2]),
"s4": float(text.split(",")[3])
})
prediction = int(result.inferences['multiStepBestPredictions'][4])
sys.stdout.write("\r"+ str(prediction))
sys.stdout.write("\t"+ text)
            ser.write(str(prediction) + '\n')
| lmaag182/nupic_physical | online.py | Python | agpl-3.0 | 994 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partners Persons Management',
'version': '1.0',
'category': 'Tools',
'sequence': 14,
'summary': '',
'description': """
Partners Persons Management
===========================
Openerp consider a person those partners that have not "is_company" as true, now, those partners can have:
----------------------------------------------------------------------------------------------------------
* First Name and Last Name
* Birthdate
* Sex
* Mother and Father
* Childs
* Age (functional field)
* Nationality
* Husband/Wife
* National Identity
* Passport
* Marital Status
It also adds a configuration menu for choosing which fields you want to see.
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'base',
],
'data': [
'res_partner_view.xml',
'res_config_view.xml',
'security/partner_person_security.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| maljac/odoo-addons | partner_person/__openerp__.py | Python | agpl-3.0 | 2,095 |
"""
Test suite for the views of the private messages app.
"""
from django.test import TestCase, Client
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.utils import timezone
from ..models import (PrivateMessage,
BlockedUser)
class NotificationsViewsTestCase(TestCase):
"""
    Test suite for the views.
"""
def setUp(self):
"""
Create a new user named "johndoe" with password "illpassword".
"""
self.user1 = get_user_model().objects.create_user(username='johndoe1',
password='johndoe1',
email='[email protected]')
self.user2 = get_user_model().objects.create_user(username='johndoe2',
password='johndoe2',
email='[email protected]')
self.user3 = get_user_model().objects.create_user(username='johndoe3',
password='johndoe3',
email='[email protected]')
self.msg1 = PrivateMessage.objects.create(sender=self.user1,
recipient=self.user2,
subject='Test message 1',
body='Test message')
self.msg2 = PrivateMessage.objects.create(sender=self.user1,
recipient=self.user2,
read_at=timezone.now(),
subject='Test message 2',
body='Test message')
self.msg3 = PrivateMessage.objects.create(sender=self.user1,
recipient=self.user2,
recipient_deleted_at=timezone.now(),
subject='Test message 3',
body='Test message')
self.msg4 = PrivateMessage.objects.create(sender=self.user1,
recipient=self.user2,
recipient_permanently_deleted=True,
subject='Test message 4',
body='Test message')
self.msg5 = PrivateMessage.objects.create(sender=self.user2,
recipient=self.user3,
subject='Test message 5',
body='Test message')
self.block1 = BlockedUser.objects.create(user=self.user1, blocked_user=self.user2)
def test_private_msg_list_view_available(self):
"""
Test the availability of the "private messages list" view.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:inbox'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/inbox.html')
self.assertIn('private_messages', response.context)
self.assertQuerysetEqual(response.context['private_messages'], ['<PrivateMessage: Test message 2>',
'<PrivateMessage: Test message 1>'])
def test_read_private_msg_list_view_available(self):
"""
Test the availability of the "read private messages list" view.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:inbox_read'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/inbox.html')
self.assertIn('private_messages', response.context)
self.assertQuerysetEqual(response.context['private_messages'], ['<PrivateMessage: Test message 2>'])
def test_unread_private_msg_list_view_available(self):
"""
Test the availability of the "unread private messages list" view.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:inbox_unread'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/inbox.html')
self.assertIn('private_messages', response.context)
self.assertQuerysetEqual(response.context['private_messages'], ['<PrivateMessage: Test message 1>'])
def test_private_msg_list_view_redirect_not_login(self):
"""
Test the redirection of the "private messages list" view when not logged-in.
"""
client = Client()
privatemsg_list_url = reverse('privatemsg:inbox')
response = client.get(privatemsg_list_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, privatemsg_list_url))
def test_mark_all_private_msg_as_read_view_available(self):
"""
Test the availability of the "mark all private messages as read" view.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:inbox_mark_all_as_read'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/mark_all_as_read.html')
def test_mark_all_private_msg_as_read_view_redirect_not_login(self):
"""
Test the redirection of the "mark all private messages as read" view when not logged-in.
"""
client = Client()
mark_all_as_read_url = reverse('privatemsg:inbox_mark_all_as_read')
response = client.get(mark_all_as_read_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, mark_all_as_read_url))
def test_outbox_list_view_available(self):
"""
Test the availability of the "sent private messages list" view.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:outbox'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/outbox.html')
self.assertIn('private_messages', response.context)
self.assertQuerysetEqual(response.context['private_messages'], ['<PrivateMessage: Test message 4>',
'<PrivateMessage: Test message 3>',
'<PrivateMessage: Test message 2>',
'<PrivateMessage: Test message 1>'])
def test_outbox_list_view_redirect_not_login(self):
"""
Test the redirection of the "sent private messages list" view when not logged-in.
"""
client = Client()
outbox_url = reverse('privatemsg:outbox')
response = client.get(outbox_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, outbox_url))
def test_trashbox_list_view_available(self):
"""
Test the availability of the "deleted private messages list" view.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:trash'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/trashbox.html')
self.assertIn('private_messages', response.context)
self.assertQuerysetEqual(response.context['private_messages'], ['<PrivateMessage: Test message 3>'])
def test_trashbox_list_view_redirect_not_login(self):
"""
Test the redirection of the "deleted private messages list" view when not logged-in.
"""
client = Client()
trashbox_url = reverse('privatemsg:trash')
response = client.get(trashbox_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, trashbox_url))
def test_delete_all_deleted_msg_permanently_view_available(self):
"""
Test the availability of the "empty trash" view.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:delete_all_deleted_msg_permanently'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/trashbox_cleanup.html')
def test_delete_all_deleted_msg_permanently_view_redirect_not_login(self):
"""
Test the redirection of the "empty trash" view when not logged-in.
"""
client = Client()
delete_all_deleted_msg_url = reverse('privatemsg:delete_all_deleted_msg_permanently')
response = client.get(delete_all_deleted_msg_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, delete_all_deleted_msg_url))
def test_msg_compose_view_available(self):
"""
Test the availability of the "compose message" view.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:compose'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_compose.html')
def test_msg_compose_to_view_available(self):
"""
Test the availability of the "compose to message" view.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:compose_to', kwargs={'recipient': 'johndoe2'}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_compose.html')
def test_msg_compose_view_redirect_not_login(self):
"""
Test the redirection of the "compose message" view when not logged-in.
"""
client = Client()
compose_msg_url = reverse('privatemsg:compose')
response = client.get(compose_msg_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, compose_msg_url))
def test_msg_detail_view_available_as_sender(self):
"""
Test the availability of the "message detail" view as sender.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_detail', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_detail.html')
self.assertIn('message', response.context)
self.assertEqual(response.context['message'], self.msg1)
self.assertIn('is_sender', response.context)
self.assertTrue(response.context['is_sender'])
self.assertIn('is_recipient', response.context)
self.assertFalse(response.context['is_recipient'])
def test_msg_detail_view_available_as_recipient(self):
"""
Test the availability of the "message detail" view as recipient.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:msg_detail', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_detail.html')
self.assertIn('message', response.context)
self.assertEqual(response.context['message'], self.msg1)
self.assertIn('is_recipient', response.context)
self.assertTrue(response.context['is_recipient'])
self.assertIn('is_sender', response.context)
self.assertFalse(response.context['is_sender'])
def test_msg_detail_view_not_available_as_thirdparty(self):
"""
Test the UN-availability of the "message detail" view as a third party.
"""
client = Client()
client.login(username='johndoe3', password='johndoe3')
response = client.get(reverse('privatemsg:msg_detail', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_detail_view_with_unknown_msg(self):
"""
Test the UN-availability of the "message detail" view with an unknown message PK.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_detail', kwargs={'pk': '1337'}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_detail_view_redirect_not_login(self):
"""
Test the redirection of the "message detail" view when not logged-in.
"""
client = Client()
msg_details_url = reverse('privatemsg:msg_detail', kwargs={'pk': self.msg1.pk})
response = client.get(msg_details_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, msg_details_url))
def test_msg_reply_view_available_as_sender(self):
"""
Test the availability of the "message reply" view as sender.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_reply', kwargs={'parent_pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_reply.html')
self.assertIn('parent_msg', response.context)
self.assertEqual(response.context['parent_msg'], self.msg1)
def test_msg_reply_view_available_as_recipient(self):
"""
Test the availability of the "message reply" view as sender.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:msg_reply', kwargs={'parent_pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_reply.html')
self.assertIn('parent_msg', response.context)
self.assertEqual(response.context['parent_msg'], self.msg1)
def test_msg_reply_view_not_available_as_thirdparty(self):
"""
Test the UN-availability of the "message reply" view as a third party.
"""
client = Client()
client.login(username='johndoe3', password='johndoe3')
response = client.get(reverse('privatemsg:msg_reply', kwargs={'parent_pk': self.msg1.pk}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_reply_view_with_unknown_msg(self):
"""
Test the UN-availability of the "message reply" view with an unknown message PK.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_reply', kwargs={'parent_pk': '1337'}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_reply_view_redirect_not_login(self):
"""
Test the redirection of the "message reply" view when not logged-in.
"""
client = Client()
msg_reply_url = reverse('privatemsg:msg_reply', kwargs={'parent_pk': self.msg1.pk})
response = client.get(msg_reply_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, msg_reply_url))
def test_my_account_view_redirect_not_login(self):
"""
Test the redirection of the "my account" view when not logged-in.
"""
client = Client()
myaccount_url = reverse('privatemsg:myaccount')
response = client.get(myaccount_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, myaccount_url))
def test_my_account_view_available(self):
"""
Test the availability of the "my account" view when logged-in.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:myaccount'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/my_account.html')
def test_msg_delete_view_available_as_sender(self):
"""
Test the availability of the "delete message" view as sender.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_delete', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_delete_confirm.html')
self.assertIn('message', response.context)
self.assertEqual(response.context['message'], self.msg1)
def test_msg_delete_view_available_as_recipient(self):
"""
Test the availability of the "delete message" view as sender.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:msg_delete', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_delete_confirm.html')
self.assertIn('message', response.context)
self.assertEqual(response.context['message'], self.msg1)
def test_msg_delete_view_not_available_as_thirdparty(self):
"""
Test the UN-availability of the "delete message" view as a third party.
"""
client = Client()
client.login(username='johndoe3', password='johndoe3')
response = client.get(reverse('privatemsg:msg_delete', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_delete_view_with_unknown_msg(self):
"""
Test the UN-availability of the "delete message" view with an unknown message PK.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_delete', kwargs={'pk': '1337'}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_delete_view_redirect_not_login(self):
"""
Test the redirection of the "delete message" view when not logged-in.
"""
client = Client()
msg_delete_url = reverse('privatemsg:msg_delete', kwargs={'pk': self.msg1.pk})
response = client.get(msg_delete_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, msg_delete_url))
def test_msg_delete_permanent_view_available_as_sender(self):
"""
Test the availability of the "permanently delete message" view as sender.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_delete_permanent', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_delete_permanent_confirm.html')
self.assertIn('message', response.context)
self.assertEqual(response.context['message'], self.msg1)
def test_msg_delete_permanent_view_available_as_recipient(self):
"""
Test the availability of the "permanently delete message" view as sender.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:msg_delete_permanent', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_delete_permanent_confirm.html')
self.assertIn('message', response.context)
self.assertEqual(response.context['message'], self.msg1)
def test_msg_delete_permanent_view_not_available_as_thirdparty(self):
"""
Test the UN-availability of the "permanently delete message" view as a third party.
"""
client = Client()
client.login(username='johndoe3', password='johndoe3')
response = client.get(reverse('privatemsg:msg_delete_permanent', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_delete_permanent_view_with_unknown_msg(self):
"""
Test the UN-availability of the "permanently delete message" view with an unknown message PK.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_delete_permanent', kwargs={'pk': '1337'}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_delete_permanent_view_redirect_not_login(self):
"""
Test the redirection of the "permanently delete message" view when not logged-in.
"""
client = Client()
msg_reply_url = reverse('privatemsg:msg_delete_permanent', kwargs={'pk': self.msg1.pk})
response = client.get(msg_reply_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, msg_reply_url))
def test_msg_undelete_view_available_as_sender(self):
"""
Test the availability of the "undelete message" view as sender.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_undelete', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_undelete_confirm.html')
self.assertIn('message', response.context)
self.assertEqual(response.context['message'], self.msg1)
def test_msg_undelete_view_available_as_recipient(self):
"""
Test the availability of the "undelete message" view as sender.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:msg_undelete', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/msg_undelete_confirm.html')
self.assertIn('message', response.context)
self.assertEqual(response.context['message'], self.msg1)
def test_msg_undelete_view_not_available_as_thirdparty(self):
"""
Test the UN-availability of the "undelete message" view as a third party.
"""
client = Client()
client.login(username='johndoe3', password='johndoe3')
response = client.get(reverse('privatemsg:msg_undelete', kwargs={'pk': self.msg1.pk}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_undelete_view_with_unknown_msg(self):
"""
Test the UN-availability of the "undelete message" view with an unknown message PK.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:msg_undelete', kwargs={'pk': '1337'}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_undelete_view_with_permanently_deleted_msg(self):
"""
Test the UN-availability of the "undelete message" view with an already permanently deleted message PK.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:msg_undelete', kwargs={'pk': self.msg4.pk}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_msg_undelete_view_redirect_not_login(self):
"""
Test the redirection of the "undelete message" view when not logged-in.
"""
client = Client()
msg_undelete_url = reverse('privatemsg:msg_undelete', kwargs={'pk': self.msg1.pk})
response = client.get(msg_undelete_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, msg_undelete_url))
def test_blocked_user_list_view_available(self):
"""
Test the availability of the "blocked user list" view.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:blocked_users'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/blocked_user_list.html')
self.assertIn('blocked_users', response.context)
self.assertQuerysetEqual(response.context['blocked_users'],
['<BlockedUser: User "johndoe1" blocking "johndoe2">'])
def test_blocked_user_list_view_redirect_not_login(self):
"""
Test the redirection of the "undelete message" view when not logged-in.
"""
client = Client()
msg_undelete_url = reverse('privatemsg:blocked_users')
response = client.get(msg_undelete_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, msg_undelete_url))
def test_block_user_view_available(self):
"""
Test the availability of the "block user" view.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:block_user', kwargs={'username': self.user2}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/block_user.html')
self.assertIn('blocked_user', response.context)
self.assertEqual(response.context['blocked_user'], self.user2)
self.assertIn('trying_self_block', response.context)
self.assertFalse(response.context['trying_self_block'])
def test_block_user_view_available_self_block(self):
"""
Test the availability of the "block user" view when trying to block himself.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:block_user', kwargs={'username': self.user1}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/block_user.html')
self.assertIn('blocked_user', response.context)
self.assertEqual(response.context['blocked_user'], self.user1)
self.assertIn('trying_self_block', response.context)
self.assertTrue(response.context['trying_self_block'])
def test_block_user_view_available_staff_block(self):
"""
Test the availability of the "block user" view when trying to block an admin.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
self.user3.is_staff = True
self.user3.save()
response = client.get(reverse('privatemsg:block_user', kwargs={'username': self.user3}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/block_user.html')
self.assertIn('blocked_user', response.context)
self.assertEqual(response.context['blocked_user'], self.user3)
self.assertIn('trying_block_staff', response.context)
self.assertTrue(response.context['trying_block_staff'])
def test_block_user_view_with_unknown_nickname(self):
"""
Test the UN-availability of the "block user" view with an unknown user name.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:block_user', kwargs={'username': 'unknown'}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
def test_block_user_view_redirect_not_login(self):
"""
Test the redirection of the "undelete message" view when not logged-in.
"""
client = Client()
msg_undelete_url = reverse('privatemsg:block_user', kwargs={'username': self.user1})
response = client.get(msg_undelete_url)
self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, msg_undelete_url))
def test_unblock_user_view_available(self):
"""
Test the availability of the "unblock user" view.
"""
client = Client()
client.login(username='johndoe1', password='johndoe1')
response = client.get(reverse('privatemsg:unblock_user', kwargs={'username': self.user2}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'privatemsg/unblock_user.html')
self.assertIn('blocked_user', response.context)
self.assertEqual(response.context['blocked_user'], self.user2)
def test_unblock_user_view_with_unknown_nickname(self):
"""
        Test that the "unblock user" view returns a 404 for an unknown username.
"""
client = Client()
client.login(username='johndoe2', password='johndoe2')
response = client.get(reverse('privatemsg:unblock_user', kwargs={'username': 'unknown'}))
self.assertEqual(response.status_code, 404)
self.assertTemplateUsed(response, '404.html')
    def test_unblock_user_view_redirect_not_login(self):
        """
        Test the redirection of the "unblock user" view when not logged-in.
        """
        client = Client()
        unblock_user_url = reverse('privatemsg:unblock_user', kwargs={'username': self.user1})
        response = client.get(unblock_user_url)
        self.assertRedirects(response, '%s?next=%s' % (settings.LOGIN_URL, unblock_user_url))
| TamiaLab/carnetdumaker | apps/privatemsg/tests/test_views.py | Python | agpl-3.0 | 30,624 |
# -*- coding: utf-8 -*-
# Copyright 2019 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
class PurchaseOrderLine(models.Model):
_inherit = "purchase.order.line"
@api.multi
@api.depends(
"product_id",
)
def _compute_allowed_purchase_uom_ids(self):
obj_product_uom =\
self.env["product.uom"]
for document in self:
uom_po = document.product_id.uom_po_id
if document.product_id:
if document.product_id.limit_product_uom_selection:
allowed_purchase_uom_ids =\
document.product_id.allowed_purchase_uom_ids.ids
if uom_po.id not in allowed_purchase_uom_ids:
allowed_purchase_uom_ids.append(uom_po.id)
document.allowed_purchase_uom_ids =\
allowed_purchase_uom_ids
else:
category_id =\
uom_po.category_id.id
criteria = [
("category_id", "=", category_id)
]
document.allowed_purchase_uom_ids =\
obj_product_uom.search(criteria)
allowed_purchase_uom_ids = fields.Many2many(
        string="Allowed Purchase UoM",
comodel_name="product.uom",
compute="_compute_allowed_purchase_uom_ids",
store=False,
)
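    # A minimal sketch of what the compute above yields (record names are
    # hypothetical):
    #
    #     line.product_id.limit_product_uom_selection = True
    #     line.product_id.allowed_purchase_uom_ids = uom_dozen
    #     # -> line.allowed_purchase_uom_ids contains uom_dozen plus the
    #     #    product's own purchase UoM, even when not explicitly allowed;
    #     #    without the flag, every UoM sharing the purchase UoM's
    #     #    category is allowed instead.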
@api.onchange(
"product_id",
"product_uom",
"product_qty",
)
@api.depends(
"order_id.pricelist_id",
"product_id",
"product_qty",
"product_uom",
"order_id.partner_id",
"order_id.date_order",
"order_id.fiscal_position",
"date_planned",
"name",
"order_id.state",
)
def onchange_product_id_new_api(self):
_super = super(PurchaseOrderLine, self)
result = _super.onchange_product_id(
self.order_id.pricelist_id.id,
self.product_id.id,
self.product_qty,
self.product_uom.id,
self.order_id.partner_id.id,
self.order_id.date_order,
self.order_id.fiscal_position.id,
self.date_planned,
self.name,
False,
self.order_id.state,
)
        if isinstance(result, dict) and "value" in result:
for field, value in result.get('value').items():
if hasattr(self, field):
setattr(self, field, value)
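# A sketch of the legacy onchange payload mapped back above (keys and values
# are illustrative only):
#
#     result = {'value': {'name': '[FOO] Product', 'price_unit': 10.0},
#               'domain': {'product_uom': [('category_id', '=', 1)]}}
#
# Only the keys under 'value' that exist as fields on the line are written
# back; 'domain' and 'warning' entries are ignored by this override.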
| open-synergy/opnsynid-purchase-workflow | purchase_order_line_allowed_uom/models/purchase_order_line.py | Python | agpl-3.0 | 2,605 |
from openerp.osv import fields,osv
import time
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp import pooler
from openerp import netsvc
import base64
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.addons.Edumedia_India import config
class sale_order(osv.osv):
def history(self, cr, uid, cases, keyword, history=False, subject=None, email=False, details=None, email_from=False, message_id=False, attach=[], context=None):
mailgate_pool = self.pool.get('mailgate.thread')
return mailgate_pool.history(cr, uid, cases, keyword, history=history,\
subject=subject, email=email, \
details=details, email_from=email_from,\
message_id=message_id, attach=attach, \
context=context)
def _get_partner_default_addr(self, cr, uid, ids, name, arg, context=None):
res = {}
for case in self.browse(cr, uid, ids, context=context):
addr = self.pool.get('res.partner').address_get(cr, uid, [case.partner_id.id], ['default'])
res[case.id] = addr['default']
return res
# def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
# return super(sale_order,self)._amount_all(cr, uid, ids,field_name,arg,context=context)
def _get_class_details(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for case in self.browse(cr, uid, ids, context=context):
res[case.id] = {
'tot_class': 0, 'low_std': 0, 'high_std': 0, 'tot_student' : 0, 'tot_sectn':0 }
cnt_class = l_std = h_std = studnts = sectns = 0
class_std = []
if case.class_ids:
for line in case.class_ids:
cnt_class += 1
class_std.append(line.ed_class)
studnts += line.ed_students
sectns += line.ed_sec
if class_std:
l_std = min(class_std)
h_std = max(class_std)
res[case.id]['tot_class'] = cnt_class
res[case.id]['low_std'] = l_std
res[case.id]['high_std'] = h_std
res[case.id]['tot_student'] = studnts
res[case.id]['tot_sectn'] = sectns
return res
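    # Worked example for the compute above (values illustrative): two class
    # lines (ed_class=3, ed_students=40, ed_sec=1) and (ed_class=5,
    # ed_students=60, ed_sec=2) yield tot_class=2, low_std=3, high_std=5,
    # tot_student=100 and tot_sectn=3.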
# def _get_order(self, cr, uid, ids, context=None):
# result = {}
# for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
# result[line.order_id.id] = True
# return result.keys()
def _get_delivry_ids(self, cr, uid, ids, field_name, arg, context=None):
delivry_obj = self.pool.get("stock.picking")
res = {}
for case in self.browse(cr,uid,ids,context):
res[case.id] = delivry_obj.search(cr, uid, [('sale_id', '=', case.id),('state','=','done')])
return res
_inherit='sale.order'
_columns={
# Overridden
'product_id': fields.many2one('product.product', 'Product', change_default=True,states={'draft': [('readonly', False)]}),
# 'amount_untaxed': fields.function(_amount_all, method=True, digits_compute= dp.get_precision('Sale Price'), string='Untaxed Amount',
# store = {
# 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
# 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
# },
# multi='sums', help="The amount without tax."),
# 'amount_tax': fields.function(_amount_all, method=True, digits_compute= dp.get_precision('Sale Price'), string='Taxes',
# store = {
# 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
# 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
# },
# multi='sums', help="The tax amount."),
# 'amount_total': fields.function(_amount_all, method=True, digits_compute= dp.get_precision('Sale Price'), string='Total',
# store = {
# 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
# 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
# },
# multi='sums', help="The total amount."),
'state': fields.selection([
('draft', 'Quotation'),
# ('waiting_date', 'Waiting Schedule'),
# ('proposal_sent', 'Proposal Sent'),
# ('proposal_accept','Proposal Accepted'),
('manual', 'Manual In Progress'),
('progress', 'In Progress'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled'),
],'State',readonly=True),
# Extra Fields
'films_only':fields.boolean('Film License Only',readonly=True ,states={'draft': [('readonly', False)]}),
'address_ids':fields.many2many('res.partner','address_sale_rel','sale_id','address_id','Coordinator Details'),
'class_ids':fields.one2many('ed.class.details','sale_id','Class Details'),
'cap1_terms' : fields.char('Caption 1',size=100),
'cap1_text':fields.text('Caption Text',size=500),
'cap2_terms' : fields.char('Caption 2',size=100),
'cap2_text':fields.text('Caption Text',size=500),
'cap3_terms' : fields.char('Caption 3',size=100),
'cap3_text':fields.text('Caption Text',size=500),
'cap4_terms' : fields.char('Caption 4',size=100),
'cap4_text':fields.text('Caption Text',size=500),
'ed_type':fields.selection([('so','Sale Order'),('crm','CRM')],'Type'),
'ed_license':fields.selection(config.CLASS_STD,'License',readonly=True ,states={'draft': [('readonly', False)]}),
'rsn_reject' : fields.text('Relationship Manager Remarks',readonly=True ,states={'draft': [('readonly', False)]}),
'ed_proj':fields.char('Project',size=100),
'ed_cdd':fields.integer('No.Of.CDD',readonly=True ,states={'draft': [('readonly', False)]}),
'ed_rate':fields.integer('Rate',readonly=True ,states={'draft': [('readonly', False)]}),
'license_rate':fields.integer('Rate',readonly=True ,states={'draft': [('readonly', False)]}),
'nxt_payment_date' : fields.date('Next Payment Date'),
'licen_stdate' : fields.date('Start Date',readonly=True ,states={'draft': [('readonly', False)]}),
'licen_eddate' : fields.date('End Date',readonly=True ,states={'draft': [('readonly', False)]}),
'invoice_id' : fields.many2one('account.invoice','Invoice No',readonly=True),
'training_ids':fields.one2many('ed.training.grid','sale_id','Training'),
'message_ids': fields.one2many('mail.message', 'res_id', 'Messages', domain=[('model','=',_inherit)]),
'vw_address_ids':fields.one2many('vw.res.partner','sale_id','View Coordinator Details'),
'vw_class_ids':fields.one2many('vw.ed.class.details','sale_id','view class details'),
'payment_ids' : fields.one2many('ed.payment','sale_id','ed.payment'),
'feedback_ids':fields.one2many('ed.feedback','sale_id','Feedback'),
'ed_pod_ids':fields.one2many('ed.product','sale_id','Product',states={'draft': [('readonly', False)]}),
'ed_serv_ids':fields.one2many('ed.service','sale_id','service',states={'draft': [('readonly', False)]}),
'hub_id' : fields.many2one('ed.hub','HUB',readonly=True,states={'draft': [('readonly', False)]}),
        'partner_default_id': fields.function(_get_partner_default_addr, method=True, relation='res.partner', type="many2one", string='Default Contact', store=True),
'tot_class' : fields.function(_get_class_details, string="Total Classes", method=True, store=True, type="integer", multi="class_details"),
'low_std' : fields.function(_get_class_details, string="Lowest Standard", method=True, store=True, type="integer", multi="class_details"),
'high_std' : fields.function(_get_class_details, string="Highest Standard", method=True, store=True, type="integer", multi="class_details"),
'tot_student' : fields.function(_get_class_details, string="Total Students", method=True, store=True, type="integer", multi="class_details"),
'tot_sectn' : fields.function(_get_class_details, string="Total Sections", method=True, store=True, type="integer", multi="class_details"),
'delivery_ids': fields.function(_get_delivry_ids, method=True, type='one2many', obj='stock.picking', string='Delivery Orders' ,readonly=True),
}
def _create_session(self, cr, uid, ids, context=None):
ses_obj = self.pool.get('ed.sessions')
for case in self.browse(cr, uid, ids):
ses_vals={
'sale_id':case.id,
'ed_so': case.name,
'ed_school':case.partner_id.name,
}
ses_obj.create(cr, uid,ses_vals)
return True
def _open_crm_form(self, cr, uid, ids, context=None):
models_data = self.pool.get('ir.model.data')
sale_order_form = models_data._get_id(
cr, uid, 'Edumedia_India', 'view_ed_sale_crm_form')
sale_order_tree = models_data._get_id(
cr, uid, 'Edumedia_India', 'view_ed_sale_tree')
if sale_order_form:
sale_order_form = models_data.browse(
cr, uid, sale_order_form, context=context).res_id
if sale_order_tree:
sale_order_tree = models_data.browse(
cr, uid, sale_order_tree, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'sale.order',
'view_id' : False,
'views' : [(sale_order_form, 'form'),
(sale_order_tree, 'tree'), ],
'type': 'ir.actions.act_window',
'res_id': ids[0]
}
# *************** Overwritten standard function *****************
def action_wait(self, cr, uid, ids, *args):
self.button_dummy(cr, uid, ids)
for o in self.browse(cr, uid, ids):
if (o.order_policy == 'manual'):
self.write(cr, uid, [o.id], {'state': 'manual', 'date_confirm': time.strftime('%Y-%m-%d')})
else:
self.write(cr, uid, [o.id], {'state': 'progress', 'date_confirm': time.strftime('%Y-%m-%d')})
self.pool.get('sale.order.line').button_confirm(cr, uid, [x.id for x in o.order_line])
message = _("The quotation '%s' has been converted to a sales order.") % (o.name,)
self.log(cr, uid, o.id, message)
self._create_session(cr, uid, ids)
partner_obj = self.pool.get('res.partner')
partner_obj.write(cr,uid,[o.partner_id.id],{'ed_sh_cinema':True})
# self._open_crm_form(cr, uid, ids)
return True
def button_2populateLines(self, cr, uid, ids, context=None):
ordln_obj = self.pool.get('sale.order.line')
edProd_obj = self.pool.get('ed.product')
edServ_obj = self.pool.get('ed.service')
class_obj = self.pool.get('ed.class.details')
prod_obj = self.pool.get('product.product')
prod_id = []
for case in self.browse(cr, uid, ids):
cr.execute("delete from sale_order_line where order_id = %d"%(case.id))
cr.execute("delete from ed_product where sale_id = %d"%(case.id))
cr.execute("delete from ed_service where sale_id = %d"%(case.id))
cr.execute("delete from address_sale_rel where sale_id=%d"%(case.id))
prod_films = prod_obj.search(cr,uid,[('name_template','=','Films')],limit=1)
prod_license = prod_obj.search(cr,uid,[('name_template','=','License')],limit=1)
prod_id.append(case.product_id.id)
if prod_films:
prod_id.append(prod_films[0])
if prod_license:
prod_id.append(prod_license[0])
# to create sale order lines on select of product
for prod in self.pool.get('product.product').browse(cr,uid,prod_id):
result = ordln_obj.product_id_change(cr, uid, ids, case.pricelist_id.id, prod.id, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=case.partner_id.id,
lang='lang' in context and context.get('lang')or False, update_tax=True, date_order=case.date_order, packaging=False, fiscal_position=False, flag=False)
prod_name = ''
if prod.name == 'Films':
prod_name = str(prod.name) + ' - License Fee'
lnvals = {
'product_id':prod.id,
'product_uom':prod.uom_id.id,
'name':prod_name or prod.name,
'price_unit':prod.list_price,
'order_id':case.id,
'tax_id' :[(6, 0, result['value']['tax_id'])],
}
ordln_id = ordln_obj.create(cr, uid, lnvals)
#to create lines of subproducts and service of main product
if prod.prod_ids:
for subprod in prod.prod_ids:
edProd_obj.create(cr, uid, {
'product_id':subprod.product_id.id,
'ed_qty': subprod.ed_qty,
'ed_per_id':subprod.ed_per_id.id,
'ed_class':subprod.ed_class,
'display_unit':subprod.product_id.display_unit,
'sale_id':case.id
})
for serv in prod.sevice_ids:
edServ_obj.create(cr, uid, {
'name':serv.name,
'sale_id':case.id
})
ordln_obj.write(cr,uid,[ordln_id],{})
#to create lines of address for selected customer
for add in case.partner_id.address:
if add.type == 'contact':
cr.execute("insert into address_sale_rel(sale_id, address_id) values(%d,%d)"%(case.id, add.id))
#to create class lines
if not case.class_ids:
for i in range(1,9):
class_obj.create(cr,uid,{'sale_id' : case.id,
'ed_class' : i,
'ed_boys':0,
'ed_girls':0,
'ed_sec':0,
'ed_students':0
},context)
return True
# ************************************ button to generate sale Dispatch order report***************************
def print_order_report(self, cr, uid, ids, context=None):
for case in self.browse(cr, uid, ids):
cr.execute(""" CREATE OR REPLACE VIEW vw_res_partner_address AS
select pa.id
, pa.name
, pa.mobile
, pa.email
, pa.ed_desig_id
, """ + str(case.id) + """ as sale_id
from res_partner_address pa
where pa.id in (select address_id from address_sale_rel
where sale_id = """ + str(case.id) + """);
CREATE OR REPLACE VIEW vw_ed_class_details AS
select cl.id
, cl.ed_class
, cl.ed_sec
, cl.ed_students
, cl.ed_boys
, case when cl.ed_class = 6 then sum(cl.ed_girls)
else 0 end as girls
, case when cl.ed_class = 7 then sum(cl.ed_boys)
else 0 end as boys
, cl.ed_girls
, """ + str(case.id) + """ as sale_id
from ed_class_details cl
where cl.id in (select cls_id from class_sale_rel
where sale_id = """ + str(case.id) + """)
group by cl.id,cl.ed_class ,cl.ed_sec, cl.ed_students, cl.ed_boys, cl.ed_girls """);
data = {}
data['ids'] = ids
data['model'] = 'sale.order'
return {
'report_name': 'sale.order.dispatch.order',
'type': 'ir.actions.report.xml',
'target': 'new',
'datas': data,
}
def print_proposal_report(self, cr, uid, ids, context=None):
""" button to generate proposal report """
# self._get_proposal_report(cr, uid, ids, context=None)
data = {}
data['ids'] = ids
data['model'] = 'sale.order'
return {
'report_name': 'sale.order.proposal',
'type': 'ir.actions.report.xml',
'target': 'new',
'datas': data,
}
# Modifying Standard Shipping create from Sale Order
# to create deliveries for school cinema process
    def action_ship_create(self, cr, uid, ids, *args):
        # Only tag the order here; the actual deliveries are created later
        # by _create_delivery_order().
        for order in self.browse(cr, uid, ids, context={}):
            self.write(cr, uid, [order.id], {'ed_type': 'crm'})
        return True
def _create_delivery_order(self, cr, uid, ids, context=None):
picking_id = False
move_obj = self.pool.get('stock.move')
proc_obj = self.pool.get('procurement.order')
prod_obj = self.pool.get('product.product')
company = self.pool.get('res.users').browse(cr, uid, uid).company_id
for order in self.browse(cr, uid, ids, context={}):
proc_ids = []
output_id = order.shop_id.warehouse_id.lot_output_id.id
picking_id = move_id = False
            count = 0
for line in order.order_line:
count += 1
proc_id = False
date_planned = datetime.now() + relativedelta(days=line.delay or 0.0)
date_planned = (date_planned - timedelta(days=company.security_lead)).strftime('%Y-%m-%d %H:%M:%S')
if line.product_id and line.product_id.product_tmpl_id.type in ('product', 'consu'):
location_id = order.shop_id.warehouse_id.lot_stock_id.id
if not picking_id and line.product_id == order.product_id and line.price_unit > 0:
pick_name = self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.out')
picking_id = self.pool.get('stock.picking').create(cr, uid, {
'name': pick_name,
'origin': order.name,
'type': 'out',
'state': 'draft',
'move_type': order.picking_policy,
'sale_id': order.id,
'address_id': order.partner_shipping_id.id,
'note': order.note,
'invoice_state': (order.order_policy=='picking' and '2binvoiced') or 'none',
'company_id': order.company_id.id,
'service_type':'shl_cinema'
})
#Selecting the no of classes, sections and students
nof_class = nof_sec = nof_stud = 0
cr.execute('SELECT count(c.ed_class) as cls_count, sum(ed_sec) as sec, sum(ed_students) as stud FROM ed_class_details c WHERE c.sale_id = %d'%(order.id))
cls = cr.fetchone()
if cls:
if cls[0] == 0:
raise osv.except_osv(_('Warning'),_("Add Data in other details"))
nof_class = cls[0]
nof_sec = cls[1]
nof_stud = cls[2]
#Looping through sub products against the option selected
cls_txt = ''
if not move_id:
for cl in order.class_ids:
if cl.ed_students > 0 and cl.wrk_bk_rate > 0 :
cls_txt += str(cl.ed_class) + ','
for subprod in order.ed_pod_ids:
qty = 0
if subprod.ed_class == cl.ed_class:
if subprod.ed_per_id.name == 'Student':
qty = cl.ed_students * subprod.ed_qty
if subprod.ed_per_id.name == 'Class':
qty = nof_class * subprod.ed_qty
if subprod.ed_per_id.name == 'Section':
qty = cl.ed_sec * subprod.ed_qty
if subprod.ed_per_id.name == 'Boys' and cl.ed_boys > 0 :
qty = cl.ed_boys * subprod.ed_qty
if subprod.ed_per_id.name == 'Girls' and cl.ed_girls > 0 :
qty = cl.ed_girls * subprod.ed_qty
#if subprod.ed_per_id.name:
if qty > 0:
move_id = self.pool.get('stock.move').create(cr, uid, {
'name': line.name[:64],
'picking_id': picking_id,
'product_id': subprod.product_id.id,
'date': date_planned,
'date_expected': date_planned,
'product_qty': qty,
'product_uom': subprod.product_id.uom_id.id,
'product_uos_qty': qty,
'product_uos': subprod.product_id.uom_id.id,
#'product_packaging': line.product_packaging.id,
#'address_id': line.address_allotment_id.id or order.partner_shipping_id.id,
'location_id': location_id,
'location_dest_id': output_id,
'sale_line_id': line.id,
'tracking_id': False,
'state': 'draft',
#'state': 'waiting',
'note': line.notes,
'company_id': order.company_id.id,
})
#updating license details to stock picking
cls_txt = cls_txt[0:(len(cls_txt) - 1)]
if picking_id:
self.pool.get('stock.picking').write(cr, uid, [picking_id],{'license_detls':"License Start Date :" + str(order.licen_stdate) +
", License End Date :" + str(order.licen_eddate) +
", Class :" + cls_txt})
if count == 3:
cls_txt = ''
for cl in order.class_ids:
if cl.films_rate > 0 :
cls_txt += str(cl.ed_class) + ','
cls_txt = cls_txt[0:(len(cls_txt) - 1)]
# creating additional deliver order for HDD media
pick_name = self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.out')
picking_id = self.pool.get('stock.picking').create(cr, uid, {
'name': pick_name,
'origin': order.name,
'type': 'out',
'state': 'draft',
'move_type': order.picking_policy,
'sale_id': order.id,
'address_id': order.partner_shipping_id.id,
'note': order.note,
'invoice_state': (order.order_policy=='picking' and '2binvoiced') or 'none',
'company_id': order.company_id.id,
'license_detls' :"License Start Date :" + str(order.licen_stdate) +
", License End Date :" + str(order.licen_eddate) +
", Class :" + cls_txt,
'service_type':'shl_cinema'
})
products = prod_obj.search(cr,uid,[('categ_id', '=', 'HDD')],limit=1)
for prod in prod_obj.browse(cr,uid,products):
move_id = self.pool.get('stock.move').create(cr, uid, {
'name': line.name[:64],
'picking_id': picking_id,
'product_id': prod.id,
'date': date_planned,
'date_expected': date_planned,
'product_qty': order.ed_cdd,
'product_uom': prod.uom_id.id,
'product_uos_qty': order.ed_cdd,
'product_uos': prod.uom_id.id,
#'product_packaging': line.product_packaging.id,
#'address_id': line.address_allotment_id.id or order.partner_shipping_id.id,
'location_id': location_id,
'location_dest_id': output_id,
'sale_line_id': line.id,
'tracking_id': False,
'state': 'draft',
#'state': 'waiting',
'note': line.notes,
'company_id': order.company_id.id,
})
if line.product_id:
proc_id = self.pool.get('procurement.order').create(cr, uid, {
'name': line.name,
'origin': order.name,
'date_planned': date_planned,
'product_id': line.product_id.id,
'product_qty': line.product_uom_qty,
'product_uom': line.product_uom.id,
'product_uos_qty': (line.product_uos and line.product_uos_qty)\
or line.product_uom_qty,
'product_uos': (line.product_uos and line.product_uos.id)\
or line.product_uom.id,
'location_id': order.shop_id.warehouse_id.lot_stock_id.id,
'procure_method': line.type,
'move_id': move_id,
'property_ids': [(6, 0, [x.id for x in line.property_ids])],
'company_id': order.company_id.id,
})
proc_ids.append(proc_id)
self.pool.get('sale.order.line').write(cr, uid, [line.id], {'procurement_id': proc_id})
if order.state == 'shipping_except':
for pick in order.picking_ids:
for move in pick.move_lines:
if move.state == 'cancel':
mov_ids = move_obj.search(cr, uid, [('state', '=', 'cancel'),('sale_line_id', '=', line.id),('picking_id', '=', pick.id)])
if mov_ids:
for mov in move_obj.browse(cr, uid, mov_ids):
move_obj.write(cr, uid, [move_id], {'product_qty': mov.product_qty, 'product_uos_qty': mov.product_uos_qty})
proc_obj.write(cr, uid, [proc_id], {'product_qty': mov.product_qty, 'product_uos_qty': mov.product_uos_qty})
            # Restore the order state after a shipping exception and persist it.
            val = {}
            if order.state == 'shipping_except':
                val['state'] = 'progress'
                val['shipped'] = False
                if order.order_policy == 'manual':
                    for line in order.order_line:
                        if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
                            val['state'] = 'manual'
                            break
            if val:
                self.write(cr, uid, [order.id], val)
return True
#inherited
def _make_invoice(self, cr, uid, order, lines, context=None):
accinv_obj = self.pool.get('account.invoice')
invln_obj = self.pool.get('account.invoice.line')
res = super(sale_order, self)._make_invoice(cr, uid, order,lines, context=context)
accinv_obj.write(cr,uid,[res],{'sale_id':order.id})
invln_ids = invln_obj.search(cr,uid,[('invoice_id','=',res)])
invln_obj.write(cr,uid,invln_ids,{})
return res
# Overridden
def manual_invoice(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
wf_service = netsvc.LocalService("workflow")
inv_ids = set()
inv_ids1 = set()
for id in ids:
for record in self.pool.get('sale.order').browse(cr, uid, id).invoice_ids:
inv_ids.add(record.id)
# inv_ids would have old invoices if any
for id in ids:
wf_service.trg_validate(uid, 'sale.order', id, 'manual_invoice', cr)
for record in self.pool.get('sale.order').browse(cr, uid, id).invoice_ids:
inv_ids1.add(record.id)
inv_ids = list(inv_ids1.difference(inv_ids))
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
        res_id = res and res[1] or False
self.write(cr, uid, [id], {'invoice_id':inv_ids[0]})
self._create_delivery_order(cr, uid, ids, context)
return True
#overriden
def action_invoice_create(self, cr, uid, ids, grouped=False, states=['confirmed', 'done', 'exception'], date_inv = False, context=None):
res = False
invoices = {}
invoice_ids = []
picking_obj = self.pool.get('stock.picking')
invoice = self.pool.get('account.invoice')
obj_sale_order_line = self.pool.get('sale.order.line')
if context is None:
context = {}
# If date was specified, use it as date invoiced, usefull when invoices are generated this month and put the
# last day of the last month as invoice date
if date_inv:
context['date_inv'] = date_inv
for o in self.browse(cr, uid, ids, context=context):
lines = []
for line in o.order_line:
if line.price_unit > 0:
if line.invoiced:
continue
elif (line.state in states):
lines.append(line.id)
created_lines = obj_sale_order_line.invoice_line_create(cr, uid, lines)
if created_lines:
invoices.setdefault(o.partner_id.id, []).append((o, created_lines))
if not invoices:
for o in self.browse(cr, uid, ids, context=context):
for i in o.invoice_ids:
if i.state == 'draft':
return i.id
for val in invoices.values():
if grouped:
res = self._make_invoice(cr, uid, val[0][0], reduce(lambda x, y: x + y, [l for o, l in val], []), context=context)
invoice_ref = ''
for o, l in val:
invoice_ref += o.name + '|'
self.write(cr, uid, [o.id], {'state': 'progress'})
if o.order_policy == 'picking':
picking_obj.write(cr, uid, map(lambda x: x.id, o.picking_ids), {'invoice_state': 'invoiced'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (o.id, res))
invoice.write(cr, uid, [res], {'origin': invoice_ref, 'name': invoice_ref})
else:
for order, il in val:
for lin in il:#to split sale order lines and create seprate invoices
res = self._make_invoice(cr, uid, order, [lin], context=context)
invoice_ids.append(res)
self.write(cr, uid, [order.id], {'state': 'progress'})
if order.order_policy == 'picking':
picking_obj.write(cr, uid, map(lambda x: x.id, order.picking_ids), {'invoice_state': 'invoiced'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (order.id, res))
return res
# Overridden
def button_dummy(self, cr, uid, ids, context=None):
ordln_obj=self.pool.get('sale.order.line')
edprod_obj=self.pool.get('ed.product')
for case in self.browse(cr,uid,ids):
if case.order_line:
for line in case.order_line:
ordln_obj.write(cr,uid,[line.id],{})
if case.ed_pod_ids:
for ep in case.ed_pod_ids:
edprod_obj.write(cr, uid, [ep.id], {})
self.write(cr, uid, [case.id], {})
return True
def button_proposal_sent(self,cr,uid,ids,context=None):
for case in self.browse(cr,uid,ids):
# self._get_proposal_report(cr, uid, ids, context=None)
data = {}
data['ids'] = ids
data['model'] = 'sale.order'
obj = netsvc.LocalService('report.' + 'sale.order.dispatch.order')
(result, format) = obj.create(cr, uid, ids, data, context)
doc_parent_id = self.pool.get('document.directory')._get_root_directory(cr,uid)
attachment_obj = self.pool.get('ir.attachment')
attval = {}
cr.execute("select id from ir_attachment where res_id = " + str(case.id) + " and res_model = 'sale.order' and name = '"+ str(case.name) +".pdf'")
file_att = cr.fetchall()
if not file_att:
attval = {
'res_model' : 'sale.order',
'res_name' : str(case.name),
'res_id' : str(case.id),
'db_datas' : str(result),
'type' : 'binary',
'file_type' : 'application/pdf',
'datas_fname': str(case.name) + ".pdf",
'name' : str(case.name) + ".pdf",
'file_size' : len(result),
'parent_id' : doc_parent_id,
'partner_id' : case.partner_id.id,
}
attachment_obj.create(cr,uid,attval)
else:
for f in file_att:
attval = {
'db_datas' : str(result),
'file_size' : len(result),
}
attachment_obj.write(cr,uid, [f[0]],attval)
return True
def button_proposal_accepted(self,cr,uid,ids,context=None):
self.write(cr, uid, ids, {'state':'proposal_accept'})
return True
def _open_sale_form(self, cr, uid, ids, context=None):
models_data = self.pool.get('ir.model.data')
sale_order_form = models_data._get_id(
cr, uid, 'Edumedia_India', 'view_ed_sale_form')
sale_order_tree = models_data._get_id(
cr, uid, 'Edumedia_India', 'view_ed_sale_tree')
if sale_order_form:
sale_order_form = models_data.browse(
cr, uid, sale_order_form, context=context).res_id
if sale_order_tree:
sale_order_tree = models_data.browse(
cr, uid, sale_order_tree, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'sale.order',
'view_id' : False,
'views' : [(sale_order_form, 'form'),
(sale_order_tree, 'tree'), ],
'type': 'ir.actions.act_window',
'res_id': ids[0]
}
# Overridden:
def action_cancel_draft(self, cr, uid, ids, *args):
if not len(ids):
return False
for sale in self.browse(cr, uid, ids):
if sale.state == 'cancel':
l = len(sale.name)
if l > 5:
nxt_no = int(sale.name[8:(l-1)]) + 1
sale_name = sale.name[0:8] + str(nxt_no) + sale.name[(l-1):l]
else:
sale_name = str(sale.name) + ' (R1)'
self.write(cr, uid, ids, {'state': 'draft','ed_type':'so','name':sale_name, 'invoice_ids': [], 'shipped': 0})
else:
self.write(cr, uid, ids, {'state': 'draft','ed_type':'so', 'invoice_ids': [], 'shipped': 0})
cr.execute('select id from sale_order_line where order_id IN %s and state=%s', (tuple(ids), 'cancel'))
line_ids = map(lambda x: x[0], cr.fetchall())
self.pool.get('sale.order.line').write(cr, uid, line_ids, {'invoiced': False, 'state': 'draft', 'invoice_lines': [(6, 0, [])]})
wf_service = netsvc.LocalService("workflow")
for inv_id in ids:
# Deleting the existing instance of workflow for SO
wf_service.trg_delete(uid, 'sale.order', inv_id, cr)
wf_service.trg_create(uid, 'sale.order', inv_id, cr)
for (id,name) in self.name_get(cr, uid, ids):
message = _("The sales order '%s' has been set in draft state.") %(name,)
self.log(cr, uid, id, message)
# self._open_sale_form(cr, uid, ids)
return True
# Overridden:
def action_cancel(self, cr, uid, ids, context=None):
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
sale_order_line_obj = self.pool.get('sale.order.line')
proc_obj = self.pool.get('procurement.order')
for sale in self.browse(cr, uid, ids, context=context):
for pick in sale.picking_ids:
if pick.state not in ('draft', 'cancel'):
raise osv.except_osv(
_('Could not cancel sales order !'),
_('You must first cancel all picking attached to this sales order.'))
if pick.state == 'cancel':
for mov in pick.move_lines:
proc_ids = proc_obj.search(cr, uid, [('move_id', '=', mov.id)])
if proc_ids:
for proc in proc_ids:
wf_service.trg_validate(uid, 'procurement.order', proc, 'button_check', cr)
for r in self.read(cr, uid, ids, ['picking_ids']):
for pick in r['picking_ids']:
wf_service.trg_validate(uid, 'stock.picking', pick, 'button_cancel', cr)
for inv in sale.invoice_ids:
if inv.state not in ('draft', 'cancel'):
raise osv.except_osv(
_('Could not cancel this sales order !'),
_('You must first cancel all invoices attached to this sales order.'))
for r in self.read(cr, uid, ids, ['invoice_ids']):
for inv in r['invoice_ids']:
wf_service.trg_validate(uid, 'account.invoice', inv, 'invoice_cancel', cr)
sale_order_line_obj.write(cr, uid, [l.id for l in sale.order_line],
{'state': 'cancel'})
message = _("The sales order '%s' has been cancelled.") % (sale.name,)
self.log(cr, uid, sale.id, message)
if sale.ed_type == 'crm':
cr.execute("delete from ed_sessions where sale_id = %d"%(sale.id))
self.write(cr, uid, ids, {'state': 'cancel','ed_type':'so'})
return True
def write(self, cr, uid, ids,vals,context=None):
addr_obj = self.pool.get('res.partner')
partner_obj = self.pool.get('res.partner')
class_obj = self.pool.get('ed.class.details')
line_obj = self.pool.get('sale.order.line')
result = super(sale_order, self).write(cr, uid, ids, vals, context=context)
for case in self.browse(cr, uid, ids):
if case.address_ids:
for a in case.address_ids:
if not a.partner_id:
addr_obj.write(cr, uid, [a.id], {'partner_id': case.partner_id.id})
# sale_cls_ids = set()
# if case.class_ids:
# for c in case.class_ids:
# sale_cls_ids.add(c.id)
#
# part_cls_ids = new_cls_ids = set()
#
# partner = partner_obj.browse(cr,uid, case.partner_id.id)
# for pc in partner.ed_cls_ids:
# part_cls_ids.add(pc.id)
# new_cls_ids = sale_cls_ids - part_cls_ids
# class_ids = class_obj.search(cr,uid,[('sale_id','=',case.id)],order='ed_class')
tot_wb_price = tot_fl_price = avg_wb_price = avg_fl_price = 0.00
tot_stu = 0
for cl in case.class_ids:
if not case.films_only:
tot_wb_price += cl.wrk_bk_rate * cl.ed_students
tot_fl_price += cl.films_rate * cl.ed_students
tot_stu += cl.ed_students
if case.films_only:
tot_fl_price += cl.films_rate
avg_wb_price = tot_wb_price / (tot_stu or 1)
avg_fl_price = tot_fl_price / (tot_stu or 1)
lvals = {}
line_ids = line_obj.search(cr,uid,[('order_id','=',case.id)])
for ln in line_obj.browse(cr,uid,line_ids):
if ln.product_id.name_template == case.product_id.name:
lvals = {'price_unit':avg_wb_price,'ed_units':case.ed_cdd,'ed_per_depo':0}
if ln.product_id.name_template == 'Films':
lvals = {'price_unit':avg_fl_price}
if ln.product_id.name_template == 'License':
lvals = {'price_unit':case.license_rate}
line_obj.write(cr,uid,[ln.id],lvals)
#partner_obj.write(cr, uid, [partner.id], {'ed_cls_ids': [(6, 0, new_cls_ids)]})
# for n in new_cls_ids:
# cr.execute("insert into ed_partner_cls_rel(partner_id, class_id) values(%d,%d)"%(case.partner_id.id, n))
return result
sale_order()
class sale_order_line(osv.osv):
_inherit='sale.order.line'
# def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
# tax_obj = self.pool.get('account.tax')
# cur_obj = self.pool.get('res.currency')
# res = {}
# if context is None:
# context = {}
# for line in self.browse(cr, uid, ids, context=context):
# price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
# taxes = tax_obj.compute_all(cr, uid, line.tax_id, price,line.product_uom_qty ,line.order_id.partner_invoice_id.id, line.product_id, line.order_id.partner_id)
#
# cur = line.order_id.pricelist_id.currency_id
# res[line.id] = cur_obj.round(cr, uid, cur, taxes['total']+line.ed_total)
# return res
def get_deposit_Total(self,cr,uid,ids,field_name,arg,context=None):
res = {}
sale_obj = self.pool.get('sale.order')
for case in self.browse(cr,uid,ids):
deposit = 0.00
if case.order_id:
sale = sale_obj.browse(cr,uid,case.order_id.id)
deposit = sale.ed_cdd * sale.ed_rate
res[case.id] = deposit
return res
def _get_students(self,cr,uid,ids,field_name,arg,context=None):
res={}
for case in self.browse(cr, uid, ids):
res[case.id]= 1
if case.product_id.name != 'License' and not case.order_id.films_only:
cr.execute('SELECT sum(ed_students) as stud FROM ed_class_details c \
WHERE c.sale_id =%d'%(case.order_id.id))
cls = cr.fetchone()
res[case.id]= cls and cls[0] or 1
return res
def _default_qty(self, cr, uid, context=None):
sale_id = context.get('sale_id', False)
if sale_id:
cr.execute('SELECT sum(ed_students) as stud FROM ed_class_details c \
WHERE c.sale_id =%d'%(sale_id))
cls = cr.fetchone()
return cls and cls[0] or 1
else:
return 1
_columns={
# Inherited
# 'price_subtotal': fields.function(_amount_line, method=True, string='Subtotal', digits_compute= dp.get_precision('Sale Price')),
# 'ed_total':fields.function(get_deposit_Total, method=True, string='Total Deposit', type='float', store=True, readonly=True),
'product_uom_qty':fields.function(_get_students, method=True, string='NO.Of Students', type='float', store=True),
'ed_units':fields.integer('No.Of.Units'),
'ed_per_depo':fields.integer('Deposit Per Unit'),
'notes': fields.text('Notes'),
}
_defaults={
'product_uom_qty':_default_qty
}
_order = 'id'
def invoice_line_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
def _get_line_qty(line):
if (line.order_id.invoice_quantity=='order') or not line.procurement_id:
if line.product_uos:
return line.product_uos_qty or 0.0
return line.product_uom_qty
else:
return self.pool.get('procurement.order').quantity_get(cr, uid,
line.procurement_id.id, context=context)
def _get_line_uom(line):
if (line.order_id.invoice_quantity=='order') or not line.procurement_id:
if line.product_uos:
return line.product_uos.id
return line.product_uom.id
else:
return self.pool.get('procurement.order').uom_get(cr, uid,
line.procurement_id.id, context=context)
create_ids = []
sales = {}
for line in self.browse(cr, uid, ids, context=context):
if not line.invoiced:
if line.product_id:
a = line.product_id.product_tmpl_id.property_account_income.id
if not a:
a = line.product_id.categ_id.property_account_income_categ.id
if not a:
raise osv.except_osv(_('Error !'),
_('There is no income account defined ' \
'for this product: "%s" (id:%d)') % \
(line.product_id.name, line.product_id.id,))
else:
prop = self.pool.get('ir.property').get(cr, uid,
'property_account_income_categ', 'product.category',
context=context)
a = prop and prop.id or False
uosqty = _get_line_qty(line)
uos_id = _get_line_uom(line)
pu = 0.0
if uosqty:
pu = round(line.price_unit * line.product_uom_qty / uosqty,
self.pool.get('decimal.precision').precision_get(cr, uid, 'Sale Price'))
fpos = line.order_id.fiscal_position or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, a)
if not a:
raise osv.except_osv(_('Error !'),
_('There is no income category account defined in default Properties for Product Category or Fiscal Position is not defined !'))
inv_id = self.pool.get('account.invoice.line').create(cr, uid, {
'name': line.name,
'origin': line.order_id.name,
'account_id': a,
'price_unit': pu,
'quantity': uosqty,
'discount': line.discount,
'uos_id': uos_id,
'product_id': line.product_id.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in line.tax_id])],
'note': line.notes,
'account_analytic_id': line.order_id.project_id and line.order_id.project_id.id or False,
})
cr.execute('insert into sale_order_line_invoice_rel (order_line_id,invoice_id) values (%s,%s)', (line.id, inv_id))
self.write(cr, uid, [line.id], {'invoiced': True})
sales[line.order_id.id] = True
create_ids.append(inv_id)
# Trigger workflow events
wf_service = netsvc.LocalService("workflow")
for sid in sales.keys():
wf_service.trg_write(uid, 'sale.order', sid, cr)
return create_ids
def onchange_Total(self, cr, uid, ids, ed_per_depo,ed_units):
res = {}
total_dep = 0
if ed_per_depo and ed_units:
total_dep = ed_per_depo * ed_units
res['ed_total'] = total_dep
return {'value':res}
# def onchange_subtotal(self, cr, uid, ids, price_unit,ed_total):
# res = {}
# subtotal_dep = 0
#
# if price_unit and ed_total:
# subtotal_dep = price_unit
#
# res['price_subtotal'] = subtotal_dep
#
# return {'value':res}
def create(self, cr, uid, vals, context=None):
return super(sale_order_line, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids,vals,context=None):
return super(sale_order_line, self).write(cr, uid, ids, vals, context=context)
sale_order_line()
| trabacus-softapps/docker-edumedia | additional_addons/Edumedia_India/ed_sale.py | Python | agpl-3.0 | 56,005 |
from requests import post
import base64
class ZivService(object):
def __init__(self, cnc_url, user=None, password=None, sync=True):
self.cnc_url = cnc_url
self.sync = sync
self.auth = None
if user and password:
self.auth = (user,password)
    def send_cycle(self, filename, cycle_filedata):
        """Send a cycle file to the concentrator service.

        Keyword arguments:
        filename -- name to report for the uploaded file (informational only)
        cycle_filedata -- the file contents, encoded as a base64 string
        """
filecontent = base64.b64decode(cycle_filedata)
url = self.cnc_url + ('/' if (self.cnc_url[-1] != '/') else '') +'cct/cycles/'
result = None
if self.auth:
result = post(url, files={'file': (filename, filecontent)}, auth=self.auth)
else:
result = post(url, files={'file': (filename, filecontent)})
return result
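# A minimal usage sketch (endpoint and credentials are hypothetical):
#
#     import base64
#     service = ZivService('http://cnc.example.com/', user='admin', password='secret')
#     with open('cycle.bin', 'rb') as handle:
#         payload = base64.b64encode(handle.read())
#     response = service.send_cycle('cycle.bin', payload)
#     response.raise_for_status()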
| gisce/primestg | primestg/ziv_service.py | Python | agpl-3.0 | 959 |
"""
Tests for send_email_base_command
"""
import datetime
from unittest import skipUnless
import ddt
import pytz
from django.conf import settings
from mock import DEFAULT, Mock, patch
from openedx.core.djangoapps.schedules.management.commands import SendEmailBaseCommand
from openedx.core.djangoapps.site_configuration.tests.factories import SiteConfigurationFactory, SiteFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
@ddt.ddt
@skip_unless_lms
@skipUnless('openedx.core.djangoapps.schedules.apps.SchedulesConfig' in settings.INSTALLED_APPS,
"Can't test schedules if the app isn't installed")
class TestSendEmailBaseCommand(CacheIsolationTestCase): # lint-amnesty, pylint: disable=missing-class-docstring
def setUp(self): # lint-amnesty, pylint: disable=super-method-not-called
self.command = SendEmailBaseCommand()
self.site = SiteFactory()
self.site_config = SiteConfigurationFactory.create(site=self.site)
def test_handle(self):
with patch.object(self.command, 'send_emails') as send_emails:
self.command.handle(site_domain_name=self.site.domain, date='2017-09-29')
send_emails.assert_called_once_with(
self.site,
datetime.datetime(2017, 9, 29, tzinfo=pytz.UTC),
None
)
def test_weeks_option(self):
with patch.object(self.command, 'enqueue') as enqueue:
self.command.handle(site_domain_name=self.site.domain, date='2017-09-29', weeks=12)
assert enqueue.call_count == 12
def test_send_emails(self):
with patch.multiple(
self.command,
offsets=(1, 3, 5),
enqueue=DEFAULT,
):
arg = Mock(name='arg')
kwarg = Mock(name='kwarg')
self.command.send_emails(arg, kwarg=kwarg)
assert not arg.called
assert not kwarg.called
for offset in self.command.offsets:
self.command.enqueue.assert_any_call(offset, arg, kwarg=kwarg) # lint-amnesty, pylint: disable=no-member
| stvstnfrd/edx-platform | openedx/core/djangoapps/schedules/management/commands/tests/test_send_email_base_command.py | Python | agpl-3.0 | 2,135 |
# coding: utf-8
def configure(app):
if app.config.get('DEBUG_TOOLBAR_ENABLED'):
try:
from flask_debugtoolbar import DebugToolbarExtension
DebugToolbarExtension(app)
except ImportError:
app.logger.info('flask_debugtoolbar is not installed')
if app.config.get('OPBEAT'):
try:
from opbeat.contrib.flask import Opbeat
Opbeat(
app,
logging=app.config.get('OPBEAT', {}).get('LOGGING', False)
)
            app.logger.info('opbeat configured')
except ImportError:
app.logger.info('opbeat is not installed')
if app.config.get('SENTRY_ENABLED', False):
try:
from raven.contrib.flask import Sentry
app.sentry = Sentry(app)
except ImportError:
app.logger.info('sentry, raven is not installed')
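# A hedged wiring sketch inside an app factory (flag names match the checks
# above; the import path follows this module's location):
#
#     from flask import Flask
#     from unifispot.ext import development
#
#     app = Flask(__name__)
#     app.config['DEBUG_TOOLBAR_ENABLED'] = True
#     development.configure(app)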
| Spotipo/spotipo | unifispot/ext/development.py | Python | agpl-3.0 | 902 |
import json
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.db import models
from django.utils.html import strip_tags
from opaque_keys.edx.django.models import CourseKeyField
from six import text_type
class Note(models.Model):
"""
Stores user Notes for the LMS local Notes service.
.. pii: Legacy model for an app that edx.org hasn't used since 2013
.. pii_types: other
.. pii_retirement: retained
"""
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
course_id = CourseKeyField(max_length=255, db_index=True)
uri = models.CharField(max_length=255, db_index=True)
text = models.TextField(default="")
quote = models.TextField(default="")
range_start = models.CharField(max_length=2048) # xpath string
range_start_offset = models.IntegerField()
range_end = models.CharField(max_length=2048) # xpath string
range_end_offset = models.IntegerField()
tags = models.TextField(default="") # comma-separated string
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
class Meta:
app_label = 'notes'
def clean(self, json_body):
"""
Cleans the note object or raises a ValidationError.
"""
if json_body is None:
raise ValidationError('Note must have a body.')
body = json.loads(json_body)
if not isinstance(body, dict):
raise ValidationError('Note body must be a dictionary.')
# NOTE: all three of these fields should be considered user input
# and may be output back to the user, so we need to sanitize them.
# These fields should only contain _plain text_.
self.uri = strip_tags(body.get('uri', ''))
self.text = strip_tags(body.get('text', ''))
self.quote = strip_tags(body.get('quote', ''))
ranges = body.get('ranges')
if ranges is None or len(ranges) != 1:
raise ValidationError('Note must contain exactly one range.')
self.range_start = ranges[0]['start']
self.range_start_offset = ranges[0]['startOffset']
self.range_end = ranges[0]['end']
self.range_end_offset = ranges[0]['endOffset']
self.tags = ""
tags = [strip_tags(tag) for tag in body.get('tags', [])]
if len(tags) > 0:
self.tags = ",".join(tags)
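    # The JSON body shape ``clean`` expects (values illustrative):
    #
    #     {
    #         "uri": "/courses/x/chapter1",
    #         "text": "my annotation",
    #         "quote": "the highlighted passage",
    #         "ranges": [{"start": "/div[1]", "startOffset": 0,
    #                     "end": "/div[1]", "endOffset": 42}],
    #         "tags": ["important"]
    #     }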
def get_absolute_url(self):
"""
Returns the absolute url for the note object.
"""
kwargs = {'course_id': text_type(self.course_id), 'note_id': str(self.pk)}
return reverse('notes_api_note', kwargs=kwargs)
def as_dict(self):
"""
Returns the note object as a dictionary.
"""
return {
'id': self.pk,
'user_id': self.user.pk,
'uri': self.uri,
'text': self.text,
'quote': self.quote,
'ranges': [{
'start': self.range_start,
'startOffset': self.range_start_offset,
'end': self.range_end,
'endOffset': self.range_end_offset
}],
'tags': self.tags.split(","),
'created': str(self.created),
'updated': str(self.updated)
}
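# A hedged usage sketch (request handling is illustrative):
#
#     note = Note(user=request.user, course_id=course_key)
#     note.clean(request.body)   # validates and sanitises the JSON payload
#     note.save()
#     payload = note.as_dict()   # serialisable form for the Notes API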
| jolyonb/edx-platform | lms/djangoapps/notes/models.py | Python | agpl-3.0 | 3,401 |
# -*- coding: utf-8 -*-
"""
Basic unit tests for LibraryContentBlock
Higher-level tests are in `cms/djangoapps/contentstore/tests/test_libraries.py`.
"""
import six
from bson.objectid import ObjectId
from fs.memoryfs import MemoryFS
from lxml import etree
from mock import Mock, patch
from search.search_engine_base import SearchEngine
from six.moves import range
from web_fragments.fragment import Fragment
from xblock.runtime import Runtime as VanillaRuntime
from xmodule.library_content_module import ANY_CAPA_TYPE_VALUE, LibraryContentBlock
from xmodule.library_tools import LibraryToolsService
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, LibraryFactory
from xmodule.modulestore.tests.utils import MixedSplitTestCase
from xmodule.tests import get_test_system
from xmodule.validation import StudioValidationMessage
from xmodule.x_module import AUTHOR_VIEW
from .test_course_module import DummySystem as TestImportSystem
dummy_render = lambda block, _: Fragment(block.data) # pylint: disable=invalid-name
class LibraryContentTest(MixedSplitTestCase):
"""
Base class for tests of LibraryContentBlock (library_content_block.py)
"""
def setUp(self):
super(LibraryContentTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.tools = LibraryToolsService(self.store, self.user_id)
self.library = LibraryFactory.create(modulestore=self.store)
self.lib_blocks = [
self.make_block("html", self.library, data="Hello world from block {}".format(i))
for i in range(1, 5)
]
self.course = CourseFactory.create(modulestore=self.store)
self.chapter = self.make_block("chapter", self.course)
self.sequential = self.make_block("sequential", self.chapter)
self.vertical = self.make_block("vertical", self.sequential)
self.lc_block = self.make_block(
"library_content",
self.vertical,
max_count=1,
source_library_id=six.text_type(self.library.location.library_key)
)
def _bind_course_module(self, module):
"""
Bind a module (part of self.course) so we can access student-specific data.
"""
module_system = get_test_system(course_id=module.location.course_key)
module_system.descriptor_runtime = module.runtime._descriptor_system # pylint: disable=protected-access
module_system._services['library_tools'] = self.tools # pylint: disable=protected-access
def get_module(descriptor):
"""Mocks module_system get_module function"""
sub_module_system = get_test_system(course_id=module.location.course_key)
sub_module_system.get_module = get_module
sub_module_system.descriptor_runtime = descriptor._runtime # pylint: disable=protected-access
descriptor.bind_for_student(sub_module_system, self.user_id)
return descriptor
module_system.get_module = get_module
module.xmodule_runtime = module_system
class TestLibraryContentExportImport(LibraryContentTest):
"""
Export and import tests for LibraryContentBlock
"""
maxDiff = None
def test_xml_export_import_cycle(self):
"""
Test the export-import cycle.
"""
# Children will only set after calling this.
self.lc_block.refresh_children()
lc_block = self.store.get_item(self.lc_block.location)
expected_olx = (
'<library_content display_name="{block.display_name}" max_count="{block.max_count}"'
' source_library_id="{block.source_library_id}" source_library_version="{block.source_library_version}">\n'
' <html url_name="{block.children[0].block_id}"/>\n'
' <html url_name="{block.children[1].block_id}"/>\n'
' <html url_name="{block.children[2].block_id}"/>\n'
' <html url_name="{block.children[3].block_id}"/>\n'
'</library_content>\n'
).format(
block=lc_block,
)
export_fs = MemoryFS()
# Set the virtual FS to export the olx to.
lc_block.runtime._descriptor_system.export_fs = export_fs # pylint: disable=protected-access
# Export the olx.
node = etree.Element("unknown_root")
lc_block.add_xml_to_node(node)
# Read it back
with export_fs.open('{dir}/{file_name}.xml'.format(
dir=lc_block.scope_ids.usage_id.block_type,
file_name=lc_block.scope_ids.usage_id.block_id
)) as f:
exported_olx = f.read()
# And compare.
assert exported_olx == expected_olx
runtime = TestImportSystem(load_error_modules=True, course_id=lc_block.location.course_key)
runtime.resources_fs = export_fs
# Now import it.
olx_element = etree.fromstring(exported_olx)
id_generator = Mock()
imported_lc_block = LibraryContentBlock.parse_xml(olx_element, runtime, None, id_generator)
# Check the new XBlock has the same properties as the old one.
assert imported_lc_block.display_name == lc_block.display_name
assert imported_lc_block.source_library_id == lc_block.source_library_id
assert imported_lc_block.source_library_version == lc_block.source_library_version
assert imported_lc_block.mode == lc_block.mode
assert imported_lc_block.max_count == lc_block.max_count
assert imported_lc_block.capa_type == lc_block.capa_type
assert len(imported_lc_block.children) == 4
assert imported_lc_block.children == lc_block.children
class LibraryContentBlockTestMixin(object):
"""
Basic unit tests for LibraryContentBlock
"""
problem_types = [
["multiplechoiceresponse"], ["optionresponse"], ["optionresponse", "coderesponse"],
["coderesponse", "optionresponse"]
]
problem_type_lookup = {}
def _get_capa_problem_type_xml(self, *args):
""" Helper function to create empty CAPA problem definition """
problem = "<problem>"
for problem_type in args:
problem += "<{problem_type}></{problem_type}>".format(problem_type=problem_type)
problem += "</problem>"
return problem
def _create_capa_problems(self):
"""
Helper function to create a set of capa problems to test against.
Creates four blocks total.
"""
self.problem_type_lookup = {}
for problem_type in self.problem_types:
block = self.make_block("problem", self.library, data=self._get_capa_problem_type_xml(*problem_type))
self.problem_type_lookup[block.location] = problem_type
def test_lib_content_block(self):
"""
Test that blocks from a library are copied and added as children
"""
# Check that the LibraryContent block has no children initially
# Normally the children get added when the "source_libraries" setting
# is updated, but the way we do it through a factory doesn't do that.
assert len(self.lc_block.children) == 0
# Update the LibraryContent module:
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
# Check that all blocks from the library are now children of the block:
assert len(self.lc_block.children) == len(self.lib_blocks)
def test_children_seen_by_a_user(self):
"""
Test that each student sees only one block as a child of the LibraryContent block.
"""
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
# Make sure the runtime knows that the block's children vary per-user:
assert self.lc_block.has_dynamic_children()
assert len(self.lc_block.children) == len(self.lib_blocks)
# Check how many children each user will see:
assert len(self.lc_block.get_child_descriptors()) == 1
# Check that get_content_titles() doesn't return titles for hidden/unused children
assert len(self.lc_block.get_content_titles()) == 1
def test_validation_of_course_libraries(self):
"""
Test that the validation method of LibraryContent blocks can validate
the source_library setting.
"""
# When source_library_id is blank, the validation summary should say this block needs to be configured:
self.lc_block.source_library_id = ""
result = self.lc_block.validate()
assert not result
# Validation fails due to at least one warning/message
assert result.summary
assert StudioValidationMessage.NOT_CONFIGURED == result.summary.type
# When source_library_id references a non-existent library, we should get an error:
self.lc_block.source_library_id = "library-v1:BAD+WOLF"
result = self.lc_block.validate()
assert not result
# Validation fails due to at least one warning/message
assert result.summary
assert StudioValidationMessage.ERROR == result.summary.type
assert 'invalid' in result.summary.text
# When source_library_id is set but the block needs to be updated, the summary should say so:
self.lc_block.source_library_id = six.text_type(self.library.location.library_key)
result = self.lc_block.validate()
assert not result
# Validation fails due to at least one warning/message
assert result.summary
assert StudioValidationMessage.WARNING == result.summary.type
assert 'out of date' in result.summary.text
# Now if we update the block, all validation should pass:
self.lc_block.refresh_children()
assert self.lc_block.validate()
def test_validation_of_matching_blocks(self):
"""
Test that the validation method of LibraryContent blocks can warn
the user about problems with other settings (max_count and capa_type).
"""
# Set max_count to higher value than exists in library
self.lc_block.max_count = 50
# In the normal studio editing process, editor_saved() calls refresh_children at this point
self.lc_block.refresh_children()
result = self.lc_block.validate()
assert not result
# Validation fails due to at least one warning/message
assert result.summary
assert StudioValidationMessage.WARNING == result.summary.type
assert 'only 4 matching problems' in result.summary.text
# Add some capa problems so we can check problem type validation messages
self.lc_block.max_count = 1
self._create_capa_problems()
self.lc_block.refresh_children()
assert self.lc_block.validate()
# Existing problem type should pass validation
self.lc_block.max_count = 1
self.lc_block.capa_type = 'multiplechoiceresponse'
self.lc_block.refresh_children()
assert self.lc_block.validate()
# ... unless requested more blocks than exists in library
self.lc_block.max_count = 10
self.lc_block.capa_type = 'multiplechoiceresponse'
self.lc_block.refresh_children()
result = self.lc_block.validate()
assert not result
# Validation fails due to at least one warning/message
assert result.summary
assert StudioValidationMessage.WARNING == result.summary.type
assert 'only 1 matching problem' in result.summary.text
# Missing problem type should always fail validation
self.lc_block.max_count = 1
self.lc_block.capa_type = 'customresponse'
self.lc_block.refresh_children()
result = self.lc_block.validate()
assert not result
# Validation fails due to at least one warning/message
assert result.summary
assert StudioValidationMessage.WARNING == result.summary.type
assert 'no matching problem types' in result.summary.text
def test_capa_type_filtering(self):
"""
Test that the capa type filter is actually filtering children
"""
self._create_capa_problems()
        assert len(self.lc_block.children) == 0  # precondition check
self.lc_block.capa_type = "multiplechoiceresponse"
self.lc_block.refresh_children()
assert len(self.lc_block.children) == 1
self.lc_block.capa_type = "optionresponse"
self.lc_block.refresh_children()
assert len(self.lc_block.children) == 3
self.lc_block.capa_type = "coderesponse"
self.lc_block.refresh_children()
assert len(self.lc_block.children) == 2
self.lc_block.capa_type = "customresponse"
self.lc_block.refresh_children()
assert len(self.lc_block.children) == 0
self.lc_block.capa_type = ANY_CAPA_TYPE_VALUE
self.lc_block.refresh_children()
assert len(self.lc_block.children) == (len(self.lib_blocks) + 4)
def test_non_editable_settings(self):
"""
Test the settings that are marked as "non-editable".
"""
non_editable_metadata_fields = self.lc_block.non_editable_metadata_fields
assert LibraryContentBlock.mode in non_editable_metadata_fields
assert LibraryContentBlock.display_name not in non_editable_metadata_fields
def test_overlimit_blocks_chosen_randomly(self):
"""
Tests that blocks to remove from selected children are chosen
randomly when len(selected) > max_count.
"""
blocks_seen = set()
total_tries, max_tries = 0, 100
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
# Eventually, we should see every child block selected
while len(blocks_seen) != len(self.lib_blocks):
self._change_count_and_refresh_children(len(self.lib_blocks))
# Now set the number of selections to 1
selected = self._change_count_and_refresh_children(1)
blocks_seen.update(selected)
total_tries += 1
if total_tries >= max_tries:
assert False, "Max tries exceeded before seeing all blocks."
break
def _change_count_and_refresh_children(self, count):
"""
Helper method that changes the max_count of self.lc_block, re-selects
children via get_child_descriptors(), and asserts that the number of
selected children equals the count provided.
"""
self.lc_block.max_count = count
selected = self.lc_block.get_child_descriptors()
assert len(selected) == count
return selected
@patch('xmodule.library_tools.SearchEngine.get_search_engine', Mock(return_value=None, autospec=True))
class TestLibraryContentBlockNoSearchIndex(LibraryContentBlockTestMixin, LibraryContentTest):
"""
Tests for library container when no search index is available.
Tests fallback low-level CAPA problem introspection
"""
pass # pylint:disable=unnecessary-pass
search_index_mock = Mock(spec=SearchEngine) # pylint: disable=invalid-name
@patch('xmodule.library_tools.SearchEngine.get_search_engine', Mock(return_value=search_index_mock, autospec=True))
class TestLibraryContentBlockWithSearchIndex(LibraryContentBlockTestMixin, LibraryContentTest):
"""
Tests for library container with mocked search engine response.
"""
def _get_search_response(self, field_dictionary=None):
""" Mocks search response as returned by search engine """
target_type = field_dictionary.get('problem_types')
matched_block_locations = [
key for key, problem_types in
self.problem_type_lookup.items() if target_type in problem_types
]
return {
'results': [
{'data': {'id': str(location)}} for location in matched_block_locations
]
}
def setUp(self):
""" Sets up search engine mock """
super(TestLibraryContentBlockWithSearchIndex, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
search_index_mock.search = Mock(side_effect=self._get_search_response)
@patch(
'xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.render', VanillaRuntime.render
)
@patch('xmodule.html_module.HtmlBlock.author_view', dummy_render, create=True)
@patch('xmodule.x_module.DescriptorSystem.applicable_aside_types', lambda self, block: [])
class TestLibraryContentRender(LibraryContentTest):
"""
Rendering unit tests for LibraryContentBlock
"""
def test_preview_view(self):
""" Test preview view rendering """
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
assert len(self.lc_block.children) == len(self.lib_blocks)
self._bind_course_module(self.lc_block)
rendered = self.lc_block.render(AUTHOR_VIEW, {'root_xblock': self.lc_block})
assert 'Hello world from block 1' in rendered.content
def test_author_view(self):
""" Test author view rendering """
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
assert len(self.lc_block.children) == len(self.lib_blocks)
self._bind_course_module(self.lc_block)
rendered = self.lc_block.render(AUTHOR_VIEW, {})
assert '' == rendered.content  # content should be empty
assert 'LibraryContentAuthorView' == rendered.js_init_fn  # but some js initialization should happen
class TestLibraryContentAnalytics(LibraryContentTest):
"""
Test analytics features of LibraryContentBlock
"""
def setUp(self):
super(TestLibraryContentAnalytics, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.publisher = Mock()
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
self.lc_block.xmodule_runtime.publish = self.publisher
def _assert_event_was_published(self, event_type):
"""
Check that a LibraryContentBlock analytics event was published by self.lc_block.
"""
assert self.publisher.called
assert len(self.publisher.call_args[0]) == 3 # pylint:disable=unsubscriptable-object
_, event_name, event_data = self.publisher.call_args[0] # pylint:disable=unsubscriptable-object
assert event_name == 'edx.librarycontentblock.content.{}'.format(event_type)
assert event_data['location'] == six.text_type(self.lc_block.location)
return event_data
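# Note (descriptive): the runtime's publish is called as
# publish(block, event_name, event_data), which is why call_args[0]
# above unpacks as a positional 3-tuple.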
def test_assigned_event(self):
"""
Test the "assigned" event emitted when a student is assigned specific blocks.
"""
# In the beginning was the lc_block and it assigned one child to the student:
child = self.lc_block.get_child_descriptors()[0]
child_lib_location, child_lib_version = self.store.get_block_original_usage(child.location)
assert isinstance(child_lib_version, ObjectId)
event_data = self._assert_event_was_published("assigned")
block_info = {
"usage_key": six.text_type(child.location),
"original_usage_key": six.text_type(child_lib_location),
"original_usage_version": six.text_type(child_lib_version),
"descendants": [],
}
assert event_data ==\
{'location': six.text_type(self.lc_block.location),
'added': [block_info],
'result': [block_info],
'previous_count': 0, 'max_count': 1}
self.publisher.reset_mock()
# Now increase max_count so that one more child will be added:
self.lc_block.max_count = 2
children = self.lc_block.get_child_descriptors()
assert len(children) == 2
child, new_child = children if children[0].location == child.location else reversed(children)
event_data = self._assert_event_was_published("assigned")
assert event_data['added'][0]['usage_key'] == six.text_type(new_child.location)
assert len(event_data['result']) == 2
assert event_data['previous_count'] == 1
assert event_data['max_count'] == 2
def test_assigned_event_published(self):
"""
Same as test_assigned_event but uses the published branch
"""
self.store.publish(self.course.location, self.user_id)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
self.lc_block.xmodule_runtime.publish = self.publisher
self.test_assigned_event()
def test_assigned_descendants(self):
"""
Test the "assigned" event emitted includes descendant block information.
"""
# Replace the blocks in the library with a block that has descendants:
with self.store.bulk_operations(self.library.location.library_key):
self.library.children = []
main_vertical = self.make_block("vertical", self.library)
inner_vertical = self.make_block("vertical", main_vertical)
html_block = self.make_block("html", inner_vertical)
problem_block = self.make_block("problem", inner_vertical)
self.lc_block.refresh_children()
# Reload lc_block and set it up for a student:
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
self.lc_block.xmodule_runtime.publish = self.publisher
# Get the keys of each of our blocks, as they appear in the course:
course_usage_main_vertical = self.lc_block.children[0]
course_usage_inner_vertical = self.store.get_item(course_usage_main_vertical).children[0]
inner_vertical_in_course = self.store.get_item(course_usage_inner_vertical)
course_usage_html = inner_vertical_in_course.children[0]
course_usage_problem = inner_vertical_in_course.children[1]
# Trigger a publish event:
self.lc_block.get_child_descriptors()
event_data = self._assert_event_was_published("assigned")
for block_list in (event_data["added"], event_data["result"]):
assert len(block_list) == 1
# main_vertical is the only root block added, and is the only result.
assert block_list[0]['usage_key'] == six.text_type(course_usage_main_vertical)
# Check that "descendants" is a flat, unordered list of all of main_vertical's descendants:
descendants_expected = (
(inner_vertical.location, course_usage_inner_vertical),
(html_block.location, course_usage_html),
(problem_block.location, course_usage_problem),
)
descendant_data_expected = {}
for lib_key, course_usage_key in descendants_expected:
descendant_data_expected[six.text_type(course_usage_key)] = {
"usage_key": six.text_type(course_usage_key),
"original_usage_key": six.text_type(lib_key),
"original_usage_version": six.text_type(self.store.get_block_original_usage(course_usage_key)[1]),
}
assert len(block_list[0]['descendants']) == len(descendant_data_expected)
for descendant in block_list[0]["descendants"]:
assert descendant == descendant_data_expected.get(descendant['usage_key'])
def test_removed_overlimit(self):
"""
Test the "removed" event emitted when we un-assign blocks previously assigned to a student.
We go from one block assigned to none because max_count has been decreased.
"""
# Decrease max_count to 0, causing the block to be overlimit:
self.lc_block.get_child_descriptors() # This line is needed in the test environment or the change has no effect
self.publisher.reset_mock() # Clear the "assigned" event that was just published.
self.lc_block.max_count = 0
# Check that the event says that one block was removed, leaving no blocks left:
children = self.lc_block.get_child_descriptors()
assert len(children) == 0
event_data = self._assert_event_was_published("removed")
assert len(event_data['removed']) == 1
assert event_data['result'] == []
assert event_data['reason'] == 'overlimit'
def test_removed_invalid(self):
"""
Test the "removed" event emitted when we un-assign blocks previously assigned to a student.
We go from two blocks assigned to one, because the other has been deleted from the library.
"""
# Start by assigning two blocks to the student:
self.lc_block.get_child_descriptors() # This line is needed in the test environment or the change has no effect
self.lc_block.max_count = 2
initial_blocks_assigned = self.lc_block.get_child_descriptors()
assert len(initial_blocks_assigned) == 2
self.publisher.reset_mock() # Clear the "assigned" event that was just published.
# Now make sure that one of the assigned blocks will have to be un-assigned.
# To cause an "invalid" event, we delete all blocks from the content library
# except for one of the two already assigned to the student:
keep_block_key = initial_blocks_assigned[0].location
keep_block_lib_usage_key, keep_block_lib_version = self.store.get_block_original_usage(keep_block_key)
assert keep_block_lib_usage_key is not None
deleted_block_key = initial_blocks_assigned[1].location
self.library.children = [keep_block_lib_usage_key]
self.store.update_item(self.library, self.user_id)
self.lc_block.refresh_children()
# Check that the event says that one block was removed, leaving one block left:
children = self.lc_block.get_child_descriptors()
assert len(children) == 1
event_data = self._assert_event_was_published("removed")
assert event_data['removed'] ==\
[{'usage_key': six.text_type(deleted_block_key),
'original_usage_key': None,
'original_usage_version': None,
'descendants': []}]
assert event_data['result'] ==\
[{'usage_key': six.text_type(keep_block_key),
'original_usage_key': six.text_type(keep_block_lib_usage_key),
'original_usage_version': six.text_type(keep_block_lib_version), 'descendants': []}]
assert event_data['reason'] == 'invalid'
| stvstnfrd/edx-platform | common/lib/xmodule/xmodule/tests/test_library_content.py | Python | agpl-3.0 | 26,994 |
"""
Test the views of jurisdiction models
"""
# Django
from django.test import TestCase
# Third Party
from nose.tools import assert_is_not, eq_
# MuckRock
from muckrock.core.test_utils import http_get_response
from muckrock.jurisdiction import factories, views
class TestExemptionDetailView(TestCase):
"""The exemption detail view provides information about the exemption at a standalone url."""
def setUp(self):
self.view = views.ExemptionDetailView.as_view()
def test_ok(self):
"""The view should return a 200 OK status."""
exemption = factories.ExemptionFactory()
url = exemption.get_absolute_url()
kwargs = exemption.jurisdiction.get_slugs()
kwargs.update({"slug": exemption.slug, "pk": exemption.pk})
response = http_get_response(url, self.view, **kwargs)
eq_(response.status_code, 200)
def test_unique_for_jurisdiction(self):
"""Two exemptions may have the same name,
as long as they belong to different jurisdictions."""
exemption = factories.ExemptionFactory()
url = exemption.get_absolute_url()
kwargs = exemption.jurisdiction.get_slugs()
kwargs.update({"slug": exemption.slug, "pk": exemption.pk})
another_jurisdiction = factories.StateJurisdictionFactory(
parent=exemption.jurisdiction.parent
)
assert_is_not(exemption.jurisdiction, another_jurisdiction)
factories.ExemptionFactory(jurisdiction=another_jurisdiction)
response = http_get_response(url, self.view, **kwargs)
eq_(response.status_code, 200)
def test_local_exemptions(self):
"""An exemption at the local level should return 200."""
local = factories.LocalJurisdictionFactory()
exemption = factories.ExemptionFactory(jurisdiction=local)
url = exemption.get_absolute_url()
kwargs = exemption.jurisdiction.get_slugs()
kwargs.update({"slug": exemption.slug, "pk": exemption.pk})
response = http_get_response(url, self.view, **kwargs)
eq_(response.status_code, 200)
def test_state_exemptions(self):
"""An exemption at the state level should return 200."""
state = factories.StateJurisdictionFactory()
exemption = factories.ExemptionFactory(jurisdiction=state)
url = exemption.get_absolute_url()
kwargs = exemption.jurisdiction.get_slugs()
kwargs.update({"slug": exemption.slug, "pk": exemption.pk})
response = http_get_response(url, self.view, **kwargs)
eq_(response.status_code, 200)
def test_federal_exemptions(self):
"""An exemption at the federal level should return 200."""
fed = factories.FederalJurisdictionFactory()
exemption = factories.ExemptionFactory(jurisdiction=fed)
url = exemption.get_absolute_url()
kwargs = exemption.jurisdiction.get_slugs()
kwargs.update({"slug": exemption.slug, "pk": exemption.pk})
response = http_get_response(url, self.view, **kwargs)
eq_(response.status_code, 200)
| MuckRock/muckrock | muckrock/jurisdiction/tests/test_views.py | Python | agpl-3.0 | 3,078 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This class is a base class for nearly all configuration
elements like services, hosts or contacts.
"""
import time
import cPickle # for hashing compute
# Try to import md5 function
try:
from hashlib import md5
except ImportError:
from md5 import md5
from copy import copy
from shinken.graph import Graph
from shinken.commandcall import CommandCall
from shinken.property import StringProp, ListProp, BoolProp
from shinken.brok import Brok
from shinken.util import strip_and_uniq
from shinken.acknowledge import Acknowledge
from shinken.comment import Comment
from shinken.complexexpression import ComplexExpressionFactory
from shinken.log import logger
class Item(object):
properties = {
'imported_from': StringProp(default='unknown'),
'use': ListProp(default=''),
'name': StringProp(default=''),
# TODO: find why we can't uncomment this line below.
#'register': BoolProp(default='1'),
}
running_properties = {
# All errors and warning raised during the configuration parsing
# and that will raised real warning/errors during the is_correct
'configuration_warnings': ListProp(default=[]),
'configuration_errors': ListProp(default=[]),
'hash': StringProp(default=''),
# We save all template we asked us to load from
'tags': ListProp(default=set(), fill_brok=['full_status']),
}
macros = {
}
def __init__(self, params={}):
# We have our own id of My Class type :)
# use set attr for going into the slots
# instead of __dict__ :)
cls = self.__class__
self.id = cls.id
cls.id += 1
self.customs = {} # for custom variables
self.plus = {} # for value with a +
self.init_running_properties()
# params[key] starting with '+' -> additive ('plus') value
# key starting with '_' -> custom variable, stored in UPPER case
for key in params:
if len(params[key]) >= 1 and params[key][0] == '+':
# Special case: a _MACRO can be a plus. so add to plus
# but upper the key for the macro name
if key[0] == "_":
self.plus[key.upper()] = params[key][1:] # we remove the +
else:
self.plus[key] = params[key][1:] # we remove the +
elif key[0] == "_":
custom_name = key.upper()
self.customs[custom_name] = params[key]
else:
setattr(self, key, params[key])
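# Illustrative example (hypothetical values, not from a real config):
# params = {'use': 'generic-host', '_SNMP': 'public', 'contacts': '+admins'}
# would leave us with self.plus == {'contacts': 'admins'},
# self.customs == {'_SNMP': 'public'} and self.use == 'generic-host'.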
def init_running_properties(self):
for prop, entry in self.__class__.running_properties.items():
# Copy is slow, so we check the type first:
# types with __iter__ are list, dict or tuple.
# Each item needs its own list, so we copy mutables
val = entry.default
if hasattr(val, '__iter__'):
setattr(self, prop, copy(val))
else:
setattr(self, prop, val)
# each instance gets its own running props!
def copy(self):
""" Return a copy of the item, but give him a new id """
cls = self.__class__
i = cls({}) # Dummy item but with its own running properties
for prop in cls.properties:
if hasattr(self, prop):
val = getattr(self, prop)
setattr(i, prop, val)
# Also copy the customs tab
i.customs = copy(self.customs)
return i
def clean(self):
""" Clean useless things not requested once item has been fully initialized&configured.
Like temporary attributes such as "imported_from", etc.. """
for name in ('imported_from', 'use', 'plus', 'templates',):
try:
delattr(self, name)
except AttributeError:
pass
def __str__(self):
return str(self.__dict__) + '\n'
def is_tpl(self):
""" Return if the elements is a template """
try:
return self.register == '0'
except Exception, exp:
return False
# If a prop is absent and is not required, put the default value
def fill_default(self):
""" Fill missing properties if they are missing """
cls = self.__class__
for prop, entry in cls.properties.items():
if not hasattr(self, prop) and entry.has_default:
setattr(self, prop, entry.default)
# We load every useful parameter so no need to access global conf later
# Must be called after a change in a global conf parameter
def load_global_conf(cls, conf):
""" Used to put global values in the sub Class like
hosts or services """
# conf has properties; e.g. for 'enable_notifications':
# { [...] 'class_inherit': [(Host, None), (Service, None),
# (Contact, None)]}
# If the second element is None, the property keeps its own name
# on the target class; otherwise it is stored under that name
for prop, entry in conf.properties.items():
# If we have a class_inherit, and the arbiter really send us it
# if 'class_inherit' in entry and hasattr(conf, prop):
if hasattr(conf, prop):
for (cls_dest, change_name) in entry.class_inherit:
if cls_dest == cls: # ok, we've got something to get
value = getattr(conf, prop)
if change_name is None:
setattr(cls, prop, value)
else:
setattr(cls, change_name, value)
# Make this method a classmethod
load_global_conf = classmethod(load_global_conf)
# Used to turn raw string values into python properties
def pythonize(self):
cls = self.__class__
for prop, tab in cls.properties.items():
try:
new_val = tab.pythonize(getattr(self, prop))
setattr(self, prop, new_val)
except AttributeError, exp:
#print exp
pass # Will be caught at the is_correct step
except KeyError, exp:
#print "Missing prop value", exp
err = "the property '%s' of '%s' do not have value" % (prop, self.get_name())
self.configuration_errors.append(err)
except ValueError, exp:
err = "incorrect type for property '%s' of '%s'" % (prop, self.get_name())
self.configuration_errors.append(err)
# Compute a hash of this element's values. Should be launched
# once we have all our values, but before linking with other objects
def compute_hash(self):
# The ID will always change between runs, so we remove it
# for the hash computation
i = self.id
del self.id
m = md5()
tmp = cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL)
m.update(tmp)
self.hash = m.digest()
# and put again our id
self.id = i
def get_templates(self):
if hasattr(self, 'use') and self.use != '':
return self.use.split(',')
else:
return []
# We fill in properties from our templates if needed
def get_property_by_inheritance(self, items, prop):
# If I have the prop, I take mine but I check if I must
# add a plus property
if hasattr(self, prop):
value = getattr(self, prop)
# Maybe this value is 'null'. If so, we should NOT inherit
# and just delete this entry, and hope of course.
# Keep "null" values, because in "inheritance chaining" they must
# be passed from one level to the next.
#if value == 'null':
# delattr(self, prop)
# return None
# Manage the additive inheritance for the property,
# if property is in plus, add or replace it
# Templates should keep the '+' at the beginning of the chain
if self.has_plus(prop):
value = self.get_plus_and_delete(prop) + ',' + value
if self.is_tpl():
value = '+' + value
return value
# Ok, I do not have the prop. Maybe my templates do?
# Same story for plus
for i in self.templates:
value = i.get_property_by_inheritance(items, prop)
if value is not None:
# If our template gives us a '+' value, we should continue to loop
still_loop = False
if value.startswith('+'):
# Templates should keep their + inherited from their parents
if not self.is_tpl():
value = value[1:]
still_loop = True
# Maybe in the previous loop, we set a value, use it too
if hasattr(self, prop):
# If the current value is strong, it will simplify the problem
if value.startswith('+'):
# In this case we can remove the + from our current
# tpl because our value will be final
value = ','.join([getattr(self, prop), value[1:]])
else: # If not, we should keep the + sign if needed
value = ','.join([getattr(self, prop), value])
# Ok, we can set it
setattr(self, prop, value)
# If we only got some '+' values, we must still loop
# for an end value without it
if not still_loop:
# And set my own value in the end if needed
if self.has_plus(prop):
value = ','.join([getattr(self, prop), self.get_plus_and_delete(prop)])
# Templates should keep their '+'
if self.is_tpl() and not value.startswith('+'):
value = '+' + value
setattr(self, prop, value)
return value
# Maybe templates only give us + values, so we didn't quit, but we already got a
# self.prop value after all
template_with_only_plus = hasattr(self, prop)
# I do not have an ending prop, and my templates don't either... Maybe a plus?
# warning: if all my templates gave me '+' values, do not forget to
# add the already set self.prop value
if self.has_plus(prop):
if template_with_only_plus:
value = ','.join([getattr(self, prop), self.get_plus_and_delete(prop)])
else:
value = self.get_plus_and_delete(prop)
# Templates should keep their '+' chain.
# We must say it's a '+' value, so our child will know that it must
# still loop
if self.is_tpl() and not value.startswith('+'):
value = '+' + value
setattr(self, prop, value)
return value
# Ok so in the end, we give the value we got if we have one, or None
# Not even a plus... so None :)
return getattr(self, prop, None)
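# Illustrative example of the additive inheritance above (hypothetical
# objects): a template defining contacts='admins' and an item using it
# with contacts='+ops' leave the item with both 'admins' and 'ops' in
# its final contacts value.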
# We fill in custom properties from our templates if needed
def get_customs_properties_by_inheritance(self, items):
for i in self.templates:
tpl_cv = i.get_customs_properties_by_inheritance(items)
if tpl_cv:
for prop in tpl_cv:
if prop not in self.customs:
value = tpl_cv[prop]
else:
value = self.customs[prop]
if self.has_plus(prop):
value = self.get_plus_and_delete(prop) + ',' + value
self.customs[prop] = value
for prop in self.customs:
value = self.customs[prop]
if self.has_plus(prop):
value = self.get_plus_and_delete(prop) + ',' + value
self.customs[prop] = value
# We can get custom properties in plus, so we need to get all
# entries and put them into customs
cust_in_plus = self.get_all_plus_and_delete()
for prop in cust_in_plus:
self.customs[prop] = cust_in_plus[prop]
return self.customs
def has_plus(self, prop):
try:
self.plus[prop]
except:
return False
return True
def get_all_plus_and_delete(self):
res = {}
props = self.plus.keys() # we delete entries, so no for ... in ...
for prop in props:
res[prop] = self.get_plus_and_delete(prop)
return res
def get_plus_and_delete(self, prop):
val = self.plus[prop]
del self.plus[prop]
return val
# Check if required props are set:
# templates are always correct
def is_correct(self):
state = True
properties = self.__class__.properties
# Raise all previously seen errors, like unknown contacts and co
if self.configuration_errors != []:
state = False
for err in self.configuration_errors:
logger.error("[item::%s] %s" % (self.get_name(), err))
for prop, entry in properties.items():
if not hasattr(self, prop) and entry.required:
logger.warning("[item::%s] %s property is missing" % (self.get_name(), prop))
state = False
return state
# This function is used by services and hosts
# to transform Nagios2 parameters into Nagios3
# ones, like normal_check_interval to
# check_interval. There is an old_properties tab
# in the classes that lists such renamings.
def old_properties_names_to_new(self):
old_properties = self.__class__.old_properties
for old_name, new_name in old_properties.items():
# Ok, if we got old_name and NO new name,
# we switch the name
if hasattr(self, old_name) and not hasattr(self, new_name):
value = getattr(self, old_name)
setattr(self, new_name, value)
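# Illustrative example (hypothetical entry): with
# old_properties = {'normal_check_interval': 'check_interval'},
# an object defining only normal_check_interval=5 ends up
# with check_interval=5 as well.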
# The arbiter is asking for our raw values before any explode or linking
def get_raw_import_values(self):
r = {}
properties = self.__class__.properties.keys()
# Register is not by default in the properties
if not 'register' in properties:
properties.append('register')
for prop in properties:
if hasattr(self, prop):
v = getattr(self, prop)
#print prop, ":", v
r[prop] = v
return r
def add_downtime(self, downtime):
self.downtimes.append(downtime)
def del_downtime(self, downtime_id):
d_to_del = None
for dt in self.downtimes:
if dt.id == downtime_id:
d_to_del = dt
dt.can_be_deleted = True
if d_to_del is not None:
self.downtimes.remove(d_to_del)
def add_comment(self, comment):
self.comments.append(comment)
def del_comment(self, comment_id):
c_to_del = None
for c in self.comments:
if c.id == comment_id:
c_to_del = c
c.can_be_deleted = True
if c_to_del is not None:
self.comments.remove(c_to_del)
def acknowledge_problem(self, sticky, notify, persistent, author, comment, end_time=0):
if self.state != self.ok_up:
if notify:
self.create_notifications('ACKNOWLEDGEMENT')
self.problem_has_been_acknowledged = True
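# Nagios-style external commands encode sticky as an integer:
# 2 means the ack survives changes between non-OK states; any other
# value does not (see unacknowledge_problem_if_not_sticky below)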
if sticky == 2:
sticky = True
else:
sticky = False
a = Acknowledge(self, sticky, notify, persistent, author, comment, end_time=end_time)
self.acknowledgement = a
if self.my_type == 'host':
comment_type = 1
else:
comment_type = 2
c = Comment(self, persistent, author, comment,
comment_type, 4, 0, False, 0)
self.add_comment(c)
self.broks.append(self.get_update_status_brok())
# Look if we got an ack that is too old, with an expire date, and should
# be deleted
def check_for_expire_acknowledge(self):
if self.acknowledgement and self.acknowledgement.end_time != 0 and self.acknowledgement.end_time < time.time():
self.unacknowledge_problem()
# Delete the acknowledgement object and reset the flag
# but do not remove the associated comment.
def unacknowledge_problem(self):
if self.problem_has_been_acknowledged:
logger.debug("[item::%s] deleting acknowledge of %s" % (self.get_name(), self.get_dbg_name()))
self.problem_has_been_acknowledged = False
# Should not be deleted, a None is Good
self.acknowledgement = None
# del self.acknowledgement
# find non-persistent ack-comments and delete them too
for c in self.comments:
if c.entry_type == 4 and not c.persistent:
self.del_comment(c.id)
self.broks.append(self.get_update_status_brok())
# Check if we have an acknowledgement and if this is marked as sticky.
# This is needed when a non-ok state changes
def unacknowledge_problem_if_not_sticky(self):
if hasattr(self, 'acknowledgement') and self.acknowledgement is not None:
if not self.acknowledgement.sticky:
self.unacknowledge_problem()
# Will flatten some parameters tagged by the 'conf_send_preparation'
# property because they are too "linked" to be sent like that (like realms)
def prepare_for_conf_sending(self):
cls = self.__class__
for prop, entry in cls.properties.items():
# Does this property need preparation for sending?
if entry.conf_send_preparation is not None:
f = entry.conf_send_preparation
if f is not None:
val = f(getattr(self, prop))
setattr(self, prop, val)
if hasattr(cls, 'running_properties'):
for prop, entry in cls.running_properties.items():
# Does this property need preparation for sending?
if entry.conf_send_preparation is not None:
f = entry.conf_send_preparation
if f is not None:
val = f(getattr(self, prop))
setattr(self, prop, val)
# Get the property for an object, with good value
# and brok_transformation applied if needed
def get_property_value_for_brok(self, prop, tab):
entry = tab[prop]
# Get the current value, or the default if need
value = getattr(self, prop, entry.default)
# Apply brok_transformation if needed:
# look if we must preprocess the value first
pre_op = entry.brok_transformation
if pre_op is not None:
value = pre_op(self, value)
return value
# Fill data with info of item by looking at brok_type
# in props of properties or running_properties
def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in cls.properties.items():
# Is this property intended for broking?
if brok_type in entry.fill_brok:
data[prop] = self.get_property_value_for_brok(prop, cls.properties)
# Maybe the class do not have running_properties
if hasattr(cls, 'running_properties'):
# We've got prop in running_properties too
for prop, entry in cls.running_properties.items():
#if 'fill_brok' in cls.running_properties[prop]:
if brok_type in entry.fill_brok:
data[prop] = self.get_property_value_for_brok(prop, cls.running_properties)
# Get a brok with initial status
def get_initial_status_brok(self):
cls = self.__class__
my_type = cls.my_type
data = {'id': self.id}
self.fill_data_brok_from(data, 'full_status')
b = Brok('initial_' + my_type + '_status', data)
return b
# Get a brok with update item status
def get_update_status_brok(self):
cls = self.__class__
my_type = cls.my_type
data = {'id': self.id}
self.fill_data_brok_from(data, 'full_status')
b = Brok('update_' + my_type + '_status', data)
return b
# Get a brok with check_result
def get_check_result_brok(self):
cls = self.__class__
my_type = cls.my_type
data = {}
self.fill_data_brok_from(data, 'check_result')
b = Brok(my_type + '_check_result', data)
return b
# Get brok about the new schedule (next_check)
def get_next_schedule_brok(self):
cls = self.__class__
my_type = cls.my_type
data = {}
self.fill_data_brok_from(data, 'next_schedule')
b = Brok(my_type + '_next_schedule', data)
return b
# Link one command property to a class (for globals like oc*p_command)
def linkify_one_command_with_commands(self, commands, prop):
if hasattr(self, prop):
command = getattr(self, prop).strip()
if command != '':
if hasattr(self, 'poller_tag'):
cmdCall = CommandCall(commands, command,
poller_tag=self.poller_tag)
elif hasattr(self, 'reactionner_tag'):
cmdCall = CommandCall(commands, command,
reactionner_tag=self.reactionner_tag)
else:
cmdCall = CommandCall(commands, command)
setattr(self, prop, cmdCall)
else:
setattr(self, prop, None)
# We look at the 'trigger' prop and we create a trigger for it
def explode_trigger_string_into_triggers(self, triggers):
src = getattr(self, 'trigger', '')
if src:
# Change on the fly the characters
src = src.replace(r'\n', '\n').replace(r'\t', '\t')
t = triggers.create_trigger(src, 'inner-trigger-' + self.__class__.my_type + '' + str(self.id))
if t:
# Maybe the trigger factory gives me an already existing trigger,
# so my name can be dropped
self.triggers.append(t.get_name())
# Link with triggers. Can be with a "in source" trigger, or a file name
def linkify_with_triggers(self, triggers):
# Get our trigger string and trigger names in the same list
self.triggers.extend(self.trigger_name)
#print "I am linking my triggers", self.get_full_name(), self.triggers
new_triggers = []
for tname in self.triggers:
t = triggers.find_by_name(tname)
if t:
new_triggers.append(t)
else:
self.configuration_errors.append('the %s %s has an unknown trigger_name "%s"' % (self.__class__.my_type, self.get_full_name(), tname))
self.triggers = new_triggers
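# A minimal usage sketch of Item (hypothetical subclass, for illustration only):
#
# class Host(Item):
# my_type = 'host'
# properties = Item.properties.copy()
# properties['host_name'] = StringProp()
#
# h = Host({'host_name': 'srv1', '_OS': 'linux'})
# h.fill_default() # fills 'imported_from', 'use' and 'name' with their defaults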
class Items(object):
def __init__(self, items):
self.items = {}
self.configuration_warnings = []
self.configuration_errors = []
for i in items:
self.items[i.id] = i
self.templates = {}
# We should keep a graph of templates relations
self.templates_graph = Graph()
def __iter__(self):
return self.items.itervalues()
def __len__(self):
return len(self.items)
def __delitem__(self, key):
try:
del self.items[key]
except KeyError: # we don't want it, we do not have it. All is perfect
pass
def __setitem__(self, key, value):
self.items[key] = value
def __getitem__(self, key):
return self.items[key]
def __contains__(self, key):
return key in self.items
def compute_hash(self):
for i in self:
i.compute_hash()
# We create the reversed list so searches will be faster.
# We also create a twins list with the ids of twins (not the original,
# just the others, i.e. the later duplicates)
def create_reversed_list(self):
self.reversed_list = {}
self.twins = []
name_property = self.__class__.name_property
for id in self.items:
if hasattr(self.items[id], name_property):
name = getattr(self.items[id], name_property)
if name not in self.reversed_list:
self.reversed_list[name] = id
else:
self.twins.append(id)
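# Illustrative example (hypothetical items): two hosts both named 'srv1'
# leave reversed_list['srv1'] pointing at the first one's id, while the
# second one's id lands in self.twins and is reported by is_correct()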
def find_id_by_name(self, name):
if hasattr(self, 'reversed_list'):
if name in self.reversed_list:
return self.reversed_list[name]
else:
return None
else: # ok, an early ask, with no reversed list for now...
name_property = self.__class__.name_property
for i in self:
if hasattr(i, name_property):
i_name = getattr(i, name_property)
if i_name == name:
return i.id
return None
def find_by_name(self, name):
id = self.find_id_by_name(name)
if id is not None:
return self.items[id]
else:
return None
# prepare_for_conf_sending to flatten some properties
def prepare_for_sending(self):
for i in self:
i.prepare_for_conf_sending()
# It's used to change old Nagios2 names to
# Nagios3 ones
def old_properties_names_to_new(self):
for i in self:
i.old_properties_names_to_new()
def pythonize(self):
for id in self.items:
self.items[id].pythonize()
def create_tpl_list(self):
for id in self.items:
i = self.items[id]
if i.is_tpl():
self.templates[id] = i
def find_tpl_by_name(self, name):
for i in self.templates.values():
if hasattr(i, 'name') and i.name == name:
return i
return None
# We will link all templates, and create the template
# graph too
def linkify_templates(self):
# First we create a list of all templates
self.create_tpl_list()
for i in self:
tpls = i.get_templates()
new_tpls = []
for tpl in tpls:
tpl = tpl.strip()
# We save this template in the 'tags' set
i.tags.add(tpl)
# Then we link it
t = self.find_tpl_by_name(tpl)
# If it's ok, add the template and update the
# template graph too
if t is not None:
# add the template object to us
new_tpls.append(t)
else: # not found? not good!
err = "the template '%s' defined for '%s' is unknown" % (tpl, i.get_name())
i.configuration_warnings.append(err)
i.templates = new_tpls
# Now we will create the template graph, so
# we look only for templates here. First we should declare our nodes
for tpl in self.templates.values():
self.templates_graph.add_node(tpl)
# And then really create our edge
for tpl in self.templates.values():
for father in tpl.templates:
self.templates_graph.add_edge(father, tpl)
def is_correct(self):
# we are ok at the beginning. Hope we are still ok at the end...
r = True
# Some classes do not have twins, because they do not have names
# (like servicedependencies)
twins = getattr(self, 'twins', None)
if twins is not None:
# Ok, look for twins (duplicates are bad!)
for id in twins:
i = self.items[id]
logger.error("[items] %s.%s is duplicated from %s" %\
(i.__class__.my_type, i.get_name(), getattr(i, 'imported_from', "unknown source")))
r = False
# Then look if we have some errors in the conf
# Just print warnings, but raise errors
for err in self.configuration_warnings:
logger.warning("[items] %s" % err)
for err in self.configuration_errors:
logger.error("[items] %s" % err)
r = False
# Then look for individual ok
for i in self:
# Alias and display_name hook
prop_name = getattr(self.__class__, 'name_property', None)
if prop_name and not hasattr(i, 'alias') and hasattr(i, prop_name):
setattr(i, 'alias', getattr(i, prop_name))
if prop_name and getattr(i, 'display_name', '') == '' and hasattr(i, prop_name):
setattr(i, 'display_name', getattr(i, prop_name))
# Now other checks
if not i.is_correct():
n = getattr(i, 'imported_from', "unknown source")
logger.error("[items] In %s is incorrect ; from %s" % (i.get_name(), n))
r = False
return r
def remove_templates(self):
""" Remove useless templates (& properties) of our items ; otherwise we could get errors on config.is_correct() """
tpls = [i for i in self if i.is_tpl()]
for i in tpls:
del self[i.id]
del self.templates
del self.templates_graph
def clean(self):
""" Request to remove the unnecessary attributes/others from our items """
for i in self:
i.clean()
Item.clean(self)
# If a prop is absent and is not required, put the default value
def fill_default(self):
for i in self:
i.fill_default()
def __str__(self):
s = ''
cls = self.__class__
for id in self.items:
s = s + str(cls) + ':' + str(id) + str(self.items[id]) + '\n'
return s
# Inheritance for just a property
def apply_partial_inheritance(self, prop):
for i in self:
i.get_property_by_inheritance(self, prop)
if not i.is_tpl():
# If a "null" attribute was inherited, delete it
try:
if getattr(i, prop) == 'null':
delattr(i, prop)
except:
pass
def apply_inheritance(self):
# For each class property, we check if the item has it;
# if not, we check all its templates for a value
cls = self.inner_class
for prop in cls.properties:
self.apply_partial_inheritance(prop)
for i in self:
i.get_customs_properties_by_inheritance(self)
# We remove twins.
# Remember: item ids respect the order of the conf, so if an item
# is defined multiple times,
# we want to keep the first one.
# Services are also managed here but they are special:
# we remove twin services with the same host_name/service_description.
# Remember: host services are taken into account before hostgroup services.
# Ids of host services are lower than hostgroup ones, so the latter are
# in self.twins_services
# and we can remove them.
def remove_twins(self):
for id in self.twins:
i = self.items[id]
type = i.__class__.my_type
logger.warning("[items] %s.%s is already defined '%s'" % (type, i.get_name(), getattr(i, 'imported_from', "unknown source")))
del self[id] # bye bye
# do not delete self.twins, we may still look at it; just empty it
self.twins = []
#del self.twins #no more need
# We've got a contacts property with comma-separated contact names
# and we want a list of Contacts
def linkify_with_contacts(self, contacts):
for i in self:
if hasattr(i, 'contacts'):
contacts_tab = i.contacts.split(',')
contacts_tab = strip_and_uniq(contacts_tab)
new_contacts = []
for c_name in contacts_tab:
if c_name != '':
c = contacts.find_by_name(c_name)
if c is not None:
new_contacts.append(c)
# Else: Add in the errors tab.
# will be raised at is_correct
else:
err = "the contact '%s' defined for '%s' is unknown" % (c_name, i.get_name())
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.contacts = list(set(new_contacts))
# Make link between an object and its escalations
def linkify_with_escalations(self, escalations):
for i in self:
if hasattr(i, 'escalations'):
escalations_tab = i.escalations.split(',')
escalations_tab = strip_and_uniq(escalations_tab)
new_escalations = []
for es_name in [e for e in escalations_tab if e != '']:
es = escalations.find_by_name(es_name)
if es is not None:
new_escalations.append(es)
else: # Escalation not found, not good!
err = "the escalation '%s' defined for '%s' is unknown" % (es_name, i.get_name())
i.configuration_errors.append(err)
i.escalations = new_escalations
# Make link between item and its resultmodulations
def linkify_with_resultmodulations(self, resultmodulations):
for i in self:
if hasattr(i, 'resultmodulations'):
resultmodulations_tab = i.resultmodulations.split(',')
resultmodulations_tab = strip_and_uniq(resultmodulations_tab)
new_resultmodulations = []
for rm_name in resultmodulations_tab:
rm = resultmodulations.find_by_name(rm_name)
if rm is not None:
new_resultmodulations.append(rm)
else:
err = "the result modulation '%s' defined on the %s '%s' do not exist" % (rm_name, i.__class__.my_type, i.get_name())
i.configuration_errors.append(err)
continue
i.resultmodulations = new_resultmodulations
# Make link between item and its business_impact_modulations
def linkify_with_business_impact_modulations(self, business_impact_modulations):
for i in self:
if hasattr(i, 'business_impact_modulations'):
business_impact_modulations_tab = i.business_impact_modulations.split(',')
business_impact_modulations_tab = strip_and_uniq(business_impact_modulations_tab)
new_business_impact_modulations = []
for rm_name in business_impact_modulations_tab:
rm = business_impact_modulations.find_by_name(rm_name)
if rm is not None:
new_business_impact_modulations.append(rm)
else:
err = "the business impact modulation '%s' defined on the %s '%s' do not exist" % (rm_name, i.__class__.my_type, i.get_name())
i.configuration_errors.append(err)
continue
i.business_impact_modulations = new_business_impact_modulations
# If we've got a contact_groups property, we search for all
# these groups, ask them for their contacts, and then add them
# all into our contacts property
def explode_contact_groups_into_contacts(self, contactgroups):
for i in self:
if hasattr(i, 'contact_groups'):
cgnames = i.contact_groups.split(',')
cgnames = strip_and_uniq(cgnames)
for cgname in cgnames:
cg = contactgroups.find_by_name(cgname)
if cg is None:
err = "The contact group '%s' defined on the %s '%s' do not exist" % (cgname, i.__class__.my_type, i.get_name())
i.configuration_errors.append(err)
continue
cnames = contactgroups.get_members_by_name(cgname)
# We add contacts into our contacts
if cnames != []:
if hasattr(i, 'contacts'):
i.contacts += ',' + cnames
else:
i.contacts = cnames
# Link a timeperiod property (prop)
def linkify_with_timeperiods(self, timeperiods, prop):
for i in self:
if hasattr(i, prop):
tpname = getattr(i, prop).strip()
# some default values are '', so set None
if tpname == '':
setattr(i, prop, None)
continue
# Ok, get a real name, search for it
tp = timeperiods.find_by_name(tpname)
# If not found, it's an error
if tp is None:
err = "The %s of the %s '%s' named '%s' is unknown!" % (prop, i.__class__.my_type, i.get_name(), tpname)
i.configuration_errors.append(err)
continue
# Got a real one, just set it :)
setattr(i, prop, tp)
# Link one command property
def linkify_one_command_with_commands(self, commands, prop):
for i in self:
if hasattr(i, prop):
command = getattr(i, prop).strip()
if command != '':
if hasattr(i, 'poller_tag'):
cmdCall = CommandCall(commands, command,
poller_tag=i.poller_tag)
elif hasattr(i, 'reactionner_tag'):
cmdCall = CommandCall(commands, command,
reactionner_tag=i.reactionner_tag)
else:
cmdCall = CommandCall(commands, command)
# TODO: catch None?
setattr(i, prop, cmdCall)
else:
setattr(i, prop, None)
# Link a command list (comma-separated commands) into real CommandCalls
def linkify_command_list_with_commands(self, commands, prop):
for i in self:
if hasattr(i, prop):
coms = getattr(i, prop).split(',')
coms = strip_and_uniq(coms)
com_list = []
for com in coms:
if com != '':
if hasattr(i, 'poller_tag'):
cmdCall = CommandCall(commands, com,
poller_tag=i.poller_tag)
elif hasattr(i, 'reactionner_tag'):
cmdCall = CommandCall(commands, com,
reactionner_tag=i.reactionner_tag)
else:
cmdCall = CommandCall(commands, com)
# TODO: catch None?
com_list.append(cmdCall)
else: # TODO: catch?
pass
setattr(i, prop, com_list)
# Link with triggers. Can be with a "in source" trigger, or a file name
def linkify_with_triggers(self, triggers):
for i in self:
i.linkify_with_triggers(triggers)
# We've got a checkmodulations property with a list of names
# and we want a list of CheckModulation objects
def linkify_with_checkmodulations(self, checkmodulations):
for i in self:
if not hasattr(i, 'checkmodulations'):
continue
new_checkmodulations = []
for cw_name in i.checkmodulations:
cw = checkmodulations.find_by_name(cw_name)
if cw is not None:
new_checkmodulations.append(cw)
else:
err = "The checkmodulations of the %s '%s' named '%s' is unknown!" % (i.__class__.my_type, i.get_name(), cw_name)
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.checkmodulations = new_checkmodulations
# We've got a list of macromodulation names, and
# we want real objects
def linkify_with_macromodulations(self, macromodulations):
for i in self:
if not hasattr(i, 'macromodulations'):
continue
new_macromodulations = []
for cw_name in i.macromodulations:
cw = macromodulations.find_by_name(cw_name)
if cw is not None:
new_macromodulations.append(cw)
else:
err = "The macromodulations of the %s '%s' named '%s' is unknown!" % (i.__class__.my_type, i.get_name(), cw_name)
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.macromodulations = new_macromodulations
def evaluate_hostgroup_expression(self, expr, hosts, hostgroups, look_in='hostgroups'):
#print "\n"*10, "looking for expression", expr
if look_in=='hostgroups':
f = ComplexExpressionFactory(look_in, hostgroups, hosts)
else: # templates
f = ComplexExpressionFactory(look_in, hosts, hosts)
expr_tree = f.eval_cor_pattern(expr)
#print "RES of ComplexExpressionFactory"
#print expr_tree
#print "Try to resolve the Tree"
set_res = expr_tree.resolve_elements()
#print "R2d2 final is", set_res
# HOOK DBG
return list(set_res)
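# Illustrative example (hypothetical groups): an expression such as
# 'linux&web' is parsed by ComplexExpressionFactory into a tree whose
# resolve_elements() yields the host names present in both groups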
# If we've got a hostgroup_name property, we search for all
# these groups, ask them for their hosts, and then add them
# all into our host_name property
def explode_host_groups_into_hosts(self, hosts, hostgroups):
for i in self:
hnames_list = []
if hasattr(i, 'hostgroup_name'):
hnames_list.extend(self.evaluate_hostgroup_expression(i.hostgroup_name, hosts, hostgroups))
# Maybe there is no host in the groups, and it does not have any
# host_name either, so tag it as a template so it is not looked at
if hnames_list == [] and not hasattr(i, 'host_name'):
i.register = '0'
if hasattr(i, 'host_name'):
hst = i.host_name.split(',')
for h in hst:
h = h.strip()
# If the host starts with a !, it is to be removed from
# the list we got from the hostgroups
if h.startswith('!'):
hst_to_remove = h[1:].strip()
try:
hnames_list.remove(hst_to_remove)
# was not in it
except ValueError:
pass
# Else it's a host to add, but maybe it's ALL
elif h == '*':
for newhost in set(h.host_name for h in hosts.items.values() \
if getattr(h, 'host_name', '') != '' and not h.is_tpl()):
hnames_list.append(newhost)
#print "DBG in item.explode_host_groups_into_hosts , added '%s' to group '%s'" % (newhost, i)
else:
hnames_list.append(h)
i.host_name = ','.join(list(set(hnames_list)))
# Ok, even with all of it, if there is still no host, tag it as a template
if i.host_name == '':
i.register = '0'
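# Illustrative example (hypothetical names): hostgroup_name='linux' with
# host_name='web1,!web2' expands the 'linux' group members, adds 'web1'
# and removes 'web2' from the resulting list; a bare '*' instead pulls in
# every registered (non-template) host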
# Take our trigger strings and create true objects with it
def explode_trigger_string_into_triggers(self, triggers):
for i in self:
i.explode_trigger_string_into_triggers(triggers)
| wbsavage/shinken | shinken/objects/item.py | Python | agpl-3.0 | 45,311 |
"""
Tests for the CourseMode model, its helpers, and enrollment mode display.
"""
import itertools
from datetime import timedelta
import ddt
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from django.utils.timezone import now
from mock import patch
from opaque_keys.edx.locator import CourseLocator
from course_modes.helpers import enrollment_mode_display
from course_modes.models import CourseMode, Mode, invalidate_course_mode_cache, get_cosmetic_display_price
from course_modes.tests.factories import CourseModeFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
)
@ddt.ddt
class CourseModeModelTest(TestCase):
"""
Tests for the CourseMode model
"""
NOW = 'now'
DATES = {
NOW: now(),
None: None,
}
def setUp(self):
super(CourseModeModelTest, self).setUp()
self.course_key = CourseLocator('Test', 'TestCourse', 'TestCourseRun')
CourseMode.objects.all().delete()
def tearDown(self):
invalidate_course_mode_cache(sender=None)
def create_mode(
self,
mode_slug,
mode_name,
min_price=0,
suggested_prices='',
currency='usd',
expiration_datetime=None,
):
"""
Create a new course mode
"""
return CourseMode.objects.get_or_create(
course_id=self.course_key,
mode_display_name=mode_name,
mode_slug=mode_slug,
min_price=min_price,
suggested_prices=suggested_prices,
currency=currency,
_expiration_datetime=expiration_datetime,
)
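# Example (illustrative): self.create_mode('verified', 'Verified', 10)
# returns the (CourseMode, created) pair produced by get_or_create.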
def test_save(self):
""" Verify currency is always lowercase. """
cm, __ = self.create_mode('honor', 'honor', 0, '', 'USD')
self.assertEqual(cm.currency, 'usd')
cm.currency = 'GHS'
cm.save()
self.assertEqual(cm.currency, 'ghs')
def test_modes_for_course_empty(self):
"""
If we can't find any modes, we should get back the default mode
"""
# shouldn't be able to find a corresponding course
modes = CourseMode.modes_for_course(self.course_key)
self.assertEqual([CourseMode.get_default_course_mode()], modes)
def test_modes_for_course_single(self):
"""
Find the modes for a course with only one mode
"""
self.create_mode('verified', 'Verified Certificate', 10)
modes = CourseMode.modes_for_course(self.course_key)
mode = Mode(u'verified', u'Verified Certificate', 10, '', 'usd', None, None, None, None)
self.assertEqual([mode], modes)
modes_dict = CourseMode.modes_for_course_dict(self.course_key)
self.assertEqual(modes_dict['verified'], mode)
self.assertEqual(CourseMode.mode_for_course(self.course_key, 'verified'),
mode)
def test_modes_for_course_multiple(self):
"""
Finding the modes when there's multiple modes
"""
mode1 = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None, None, None, None)
mode2 = Mode(u'verified', u'Verified Certificate', 10, '', 'usd', None, None, None, None)
set_modes = [mode1, mode2]
for mode in set_modes:
self.create_mode(mode.slug, mode.name, mode.min_price, mode.suggested_prices)
modes = CourseMode.modes_for_course(self.course_key)
self.assertEqual(modes, set_modes)
self.assertEqual(mode1, CourseMode.mode_for_course(self.course_key, u'honor'))
self.assertEqual(mode2, CourseMode.mode_for_course(self.course_key, u'verified'))
self.assertIsNone(CourseMode.mode_for_course(self.course_key, 'DNE'))
def test_min_course_price_for_currency(self):
"""
Get the min course price for a course according to currency
"""
# no modes, should get 0
self.assertEqual(0, CourseMode.min_course_price_for_currency(self.course_key, 'usd'))
# create some modes
mode1 = Mode(u'honor', u'Honor Code Certificate', 10, '', 'usd', None, None, None, None)
mode2 = Mode(u'verified', u'Verified Certificate', 20, '', 'usd', None, None, None, None)
mode3 = Mode(u'honor', u'Honor Code Certificate', 80, '', 'cny', None, None, None, None)
set_modes = [mode1, mode2, mode3]
for mode in set_modes:
self.create_mode(mode.slug, mode.name, mode.min_price, mode.suggested_prices, mode.currency)
self.assertEqual(10, CourseMode.min_course_price_for_currency(self.course_key, 'usd'))
self.assertEqual(80, CourseMode.min_course_price_for_currency(self.course_key, 'cny'))
def test_modes_for_course_expired(self):
expired_mode, _status = self.create_mode('verified', 'Verified Certificate', 10)
expired_mode.expiration_datetime = now() + timedelta(days=-1)
expired_mode.save()
modes = CourseMode.modes_for_course(self.course_key)
self.assertEqual([CourseMode.DEFAULT_MODE], modes)
mode1 = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None, None, None, None)
self.create_mode(mode1.slug, mode1.name, mode1.min_price, mode1.suggested_prices)
modes = CourseMode.modes_for_course(self.course_key)
self.assertEqual([mode1], modes)
expiration_datetime = now() + timedelta(days=1)
expired_mode.expiration_datetime = expiration_datetime
expired_mode.save()
expired_mode_value = Mode(
u'verified',
u'Verified Certificate',
10,
'',
'usd',
expiration_datetime,
None,
None,
None
)
modes = CourseMode.modes_for_course(self.course_key)
self.assertEqual([expired_mode_value, mode1], modes)
modes = CourseMode.modes_for_course(CourseLocator('TestOrg', 'TestCourse', 'TestRun'))
self.assertEqual([CourseMode.DEFAULT_MODE], modes)
def test_verified_mode_for_course(self):
self.create_mode('verified', 'Verified Certificate', 10)
mode = CourseMode.verified_mode_for_course(self.course_key)
self.assertEqual(mode.slug, 'verified')
# verify that the professional mode is preferred
self.create_mode('professional', 'Professional Education Verified Certificate', 10)
mode = CourseMode.verified_mode_for_course(self.course_key)
self.assertEqual(mode.slug, 'professional')
def test_course_has_payment_options(self):
# Has no payment options.
honor, _ = self.create_mode('honor', 'Honor')
self.assertFalse(CourseMode.has_payment_options(self.course_key))
# Now we do have a payment option.
verified, _ = self.create_mode('verified', 'Verified', min_price=5)
self.assertTrue(CourseMode.has_payment_options(self.course_key))
# Remove the verified option.
verified.delete()
self.assertFalse(CourseMode.has_payment_options(self.course_key))
# Finally, give the honor mode payment options
honor.suggested_prices = '5, 10, 15'
honor.save()
self.assertTrue(CourseMode.has_payment_options(self.course_key))
def test_course_has_payment_options_with_no_id_professional(self):
# Has payment options.
self.create_mode('no-id-professional', 'no-id-professional', min_price=5)
self.assertTrue(CourseMode.has_payment_options(self.course_key))
@ddt.data(
([], True),
([("honor", 0), ("audit", 0), ("verified", 100)], True),
([("honor", 100)], False),
([("professional", 100)], False),
([("no-id-professional", 100)], False),
)
@ddt.unpack
def test_can_auto_enroll(self, modes_and_prices, can_auto_enroll):
# Create the modes and min prices
for mode_slug, min_price in modes_and_prices:
self.create_mode(mode_slug, mode_slug.capitalize(), min_price=min_price)
# Verify that we can or cannot auto enroll
self.assertEqual(CourseMode.can_auto_enroll(self.course_key), can_auto_enroll)
@ddt.data(
([], None),
(["honor", "audit", "verified"], "honor"),
(["honor", "audit"], "honor"),
(["audit", "verified"], "audit"),
(["professional"], None),
(["no-id-professional"], None),
(["credit", "audit", "verified"], "audit"),
(["credit"], None),
)
@ddt.unpack
def test_auto_enroll_mode(self, modes, result):
# Verify that the proper auto enroll mode is returned
self.assertEqual(CourseMode.auto_enroll_mode(self.course_key, modes), result)
def test_all_modes_for_courses(self):
now_dt = now()
future = now_dt + timedelta(days=1)
past = now_dt - timedelta(days=1)
# Unexpired, no expiration date
CourseModeFactory.create(
course_id=self.course_key,
mode_display_name="Honor No Expiration",
mode_slug="honor_no_expiration",
expiration_datetime=None
)
# Unexpired, expiration date in future
CourseModeFactory.create(
course_id=self.course_key,
mode_display_name="Honor Not Expired",
mode_slug="honor_not_expired",
expiration_datetime=future
)
# Expired
CourseModeFactory.create(
course_id=self.course_key,
mode_display_name="Verified Expired",
mode_slug="verified_expired",
expiration_datetime=past
)
# We should get all of these back when querying for *all* course modes,
# including ones that have expired.
other_course_key = CourseLocator(org="not", course="a", run="course")
all_modes = CourseMode.all_modes_for_courses([self.course_key, other_course_key])
self.assertEqual(len(all_modes[self.course_key]), 3)
self.assertEqual(all_modes[self.course_key][0].name, "Honor No Expiration")
self.assertEqual(all_modes[self.course_key][1].name, "Honor Not Expired")
self.assertEqual(all_modes[self.course_key][2].name, "Verified Expired")
# Check that we get a default mode for when no course mode is available
self.assertEqual(len(all_modes[other_course_key]), 1)
self.assertEqual(all_modes[other_course_key][0], CourseMode.get_default_course_mode())
@ddt.data('', 'no-id-professional', 'professional', 'verified')
def test_course_has_professional_mode(self, mode):
# check the professional mode.
self.create_mode(mode, 'course mode', 10)
modes_dict = CourseMode.modes_for_course_dict(self.course_key)
if mode in ['professional', 'no-id-professional']:
self.assertTrue(CourseMode.has_professional_mode(modes_dict))
else:
self.assertFalse(CourseMode.has_professional_mode(modes_dict))
@ddt.data('no-id-professional', 'professional', 'verified')
def test_course_is_professional_mode(self, mode):
        # check whether the mode tuple is recognized as a professional mode
course_mode, __ = self.create_mode(mode, 'course mode', 10)
if mode in ['professional', 'no-id-professional']:
self.assertTrue(CourseMode.is_professional_mode(course_mode.to_tuple()))
else:
self.assertFalse(CourseMode.is_professional_mode(course_mode.to_tuple()))
def test_course_is_professional_mode_with_invalid_tuple(self):
        # check that is_professional_mode handles a missing (None) mode tuple
self.assertFalse(CourseMode.is_professional_mode(None))
@ddt.data(
('no-id-professional', False),
('professional', True),
('verified', True),
('honor', False),
('audit', False)
)
@ddt.unpack
def test_is_verified_slug(self, mode_slug, is_verified):
# check that mode slug is verified or not
if is_verified:
self.assertTrue(CourseMode.is_verified_slug(mode_slug))
else:
self.assertFalse(CourseMode.is_verified_slug(mode_slug))
@ddt.data(*itertools.product(
(
CourseMode.HONOR,
CourseMode.AUDIT,
CourseMode.VERIFIED,
CourseMode.PROFESSIONAL,
CourseMode.NO_ID_PROFESSIONAL_MODE
),
(NOW, None),
))
@ddt.unpack
def test_invalid_mode_expiration(self, mode_slug, exp_dt_name):
exp_dt = self.DATES[exp_dt_name]
is_error_expected = CourseMode.is_professional_slug(mode_slug) and exp_dt is not None
try:
self.create_mode(mode_slug=mode_slug, mode_name=mode_slug.title(), expiration_datetime=exp_dt, min_price=10)
self.assertFalse(is_error_expected, "Expected a ValidationError to be thrown.")
except ValidationError as exc:
self.assertTrue(is_error_expected, "Did not expect a ValidationError to be thrown.")
self.assertEqual(
exc.messages,
[u"Professional education modes are not allowed to have expiration_datetime set."],
)
@ddt.data(
("verified", "verify_need_to_verify"),
("verified", "verify_submitted"),
("verified", "verify_approved"),
("verified", 'dummy'),
("verified", None),
('honor', None),
('honor', 'dummy'),
('audit', None),
('professional', None),
('no-id-professional', None),
('no-id-professional', 'dummy')
)
@ddt.unpack
def test_enrollment_mode_display(self, mode, verification_status):
        if mode == "verified":
            self.assertEqual(
                enrollment_mode_display(mode, verification_status, self.course_key),
                self._enrollment_display_modes_dicts(verification_status)
            )
        elif mode in ("honor", "audit", "professional"):
            self.assertEqual(
                enrollment_mode_display(mode, verification_status, self.course_key),
                self._enrollment_display_modes_dicts(mode)
            )
@ddt.data(
(['honor', 'verified', 'credit'], ['honor', 'verified']),
(['professional', 'credit'], ['professional']),
)
@ddt.unpack
def test_hide_credit_modes(self, available_modes, expected_selectable_modes):
# Create the course modes
for mode in available_modes:
CourseModeFactory.create(
course_id=self.course_key,
mode_display_name=mode,
mode_slug=mode,
)
# Check the selectable modes, which should exclude credit
selectable_modes = CourseMode.modes_for_course_dict(self.course_key)
self.assertItemsEqual(selectable_modes.keys(), expected_selectable_modes)
# When we get all unexpired modes, we should see credit as well
all_modes = CourseMode.modes_for_course_dict(self.course_key, only_selectable=False)
self.assertItemsEqual(all_modes.keys(), available_modes)
def _enrollment_display_modes_dicts(self, dict_type):
"""
Helper function to generate the enrollment display mode dict.
"""
dict_keys = ['enrollment_title', 'enrollment_value', 'show_image', 'image_alt', 'display_mode']
display_values = {
"verify_need_to_verify": ["Your verification is pending", "Verified: Pending Verification", True,
'ID verification pending', 'verified'],
"verify_approved": ["You're enrolled as a verified student", "Verified", True, 'ID Verified Ribbon/Badge',
'verified'],
"verify_none": ["", "", False, '', 'audit'],
"honor": ["You're enrolled as an honor code student", "Honor Code", False, '', 'honor'],
"audit": ["", "", False, '', 'audit'],
"professional": ["You're enrolled as a professional education student", "Professional Ed", False, '',
'professional']
}
if dict_type in ['verify_need_to_verify', 'verify_submitted']:
return dict(zip(dict_keys, display_values.get('verify_need_to_verify')))
elif dict_type is None or dict_type == 'dummy':
return dict(zip(dict_keys, display_values.get('verify_none')))
else:
return dict(zip(dict_keys, display_values.get(dict_type)))
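    # Illustrative sanity check of the helper above (values taken from the
    # display_values table; doctest-style, not executed by the test runner):
    #
    #     >>> self._enrollment_display_modes_dicts('honor')
    #     {'enrollment_title': "You're enrolled as an honor code student",
    #      'enrollment_value': 'Honor Code', 'show_image': False,
    #      'image_alt': '', 'display_mode': 'honor'}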
def test_expiration_datetime_explicitly_set(self):
""" Verify that setting the expiration_date property sets the explicit flag. """
verified_mode, __ = self.create_mode('verified', 'Verified Certificate', 10)
now_dt = now()
verified_mode.expiration_datetime = now_dt
self.assertTrue(verified_mode.expiration_datetime_is_explicit)
self.assertEqual(verified_mode.expiration_datetime, now_dt)
def test_expiration_datetime_not_explicitly_set(self):
""" Verify that setting the _expiration_date property does not set the explicit flag. """
verified_mode, __ = self.create_mode('verified', 'Verified Certificate', 10)
now_dt = now()
verified_mode._expiration_datetime = now_dt # pylint: disable=protected-access
self.assertFalse(verified_mode.expiration_datetime_is_explicit)
self.assertEqual(verified_mode.expiration_datetime, now_dt)
def test_expiration_datetime_explicitly_set_to_none(self):
""" Verify that setting the _expiration_date property does not set the explicit flag. """
verified_mode, __ = self.create_mode('verified', 'Verified Certificate', 10)
self.assertFalse(verified_mode.expiration_datetime_is_explicit)
verified_mode.expiration_datetime = None
self.assertFalse(verified_mode.expiration_datetime_is_explicit)
self.assertIsNone(verified_mode.expiration_datetime)
@ddt.data(
(CourseMode.AUDIT, False),
(CourseMode.HONOR, False),
(CourseMode.VERIFIED, True),
(CourseMode.CREDIT_MODE, True),
(CourseMode.PROFESSIONAL, True),
(CourseMode.NO_ID_PROFESSIONAL_MODE, True),
)
@ddt.unpack
def test_eligible_for_cert(self, mode_slug, expected_eligibility):
"""Verify that non-audit modes are eligible for a cert."""
self.assertEqual(CourseMode.is_eligible_for_certificate(mode_slug), expected_eligibility)
@ddt.data(
(CourseMode.AUDIT, False),
(CourseMode.HONOR, False),
(CourseMode.VERIFIED, True),
(CourseMode.CREDIT_MODE, False),
(CourseMode.PROFESSIONAL, True),
(CourseMode.NO_ID_PROFESSIONAL_MODE, False),
)
@ddt.unpack
def test_verified_min_price(self, mode_slug, is_error_expected):
"""Verify that verified modes have a price."""
try:
self.create_mode(mode_slug=mode_slug, mode_name=mode_slug.title(), min_price=0)
except ValidationError:
self.assertTrue(is_error_expected, "Did not expect a ValidationError to be thrown.")
else:
self.assertFalse(is_error_expected, "Expected a ValidationError to be thrown.")
class TestDisplayPrices(ModuleStoreTestCase):
@override_settings(PAID_COURSE_REGISTRATION_CURRENCY=["USD", "$"])
def test_get_cosmetic_display_price(self):
"""
Check that get_cosmetic_display_price() returns the correct price given its inputs.
"""
course = CourseFactory.create()
registration_price = 99
course.cosmetic_display_price = 10
with patch('course_modes.models.CourseMode.min_course_price_for_currency', return_value=registration_price):
# Since registration_price is set, it overrides the cosmetic_display_price and should be returned
self.assertEqual(get_cosmetic_display_price(course), "$99")
registration_price = 0
with patch('course_modes.models.CourseMode.min_course_price_for_currency', return_value=registration_price):
# Since registration_price is not set, cosmetic_display_price should be returned
self.assertEqual(get_cosmetic_display_price(course), "$10")
course.cosmetic_display_price = 0
# Since both prices are not set, there is no price, thus "Free"
self.assertEqual(get_cosmetic_display_price(course), "Free")
| proversity-org/edx-platform | common/djangoapps/course_modes/tests/test_models.py | Python | agpl-3.0 | 21,323 |
# Copyright 2009-2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
__all__ = [
'BranchRevision',
]
from storm.locals import (
Int,
Reference,
Storm,
)
from zope.interface import implements
from lp.code.interfaces.branchrevision import IBranchRevision
class BranchRevision(Storm):
"""See `IBranchRevision`."""
__storm_table__ = 'BranchRevision'
__storm_primary__ = ("branch_id", "revision_id")
implements(IBranchRevision)
branch_id = Int(name='branch', allow_none=False)
branch = Reference(branch_id, 'Branch.id')
revision_id = Int(name='revision', allow_none=False)
revision = Reference(revision_id, 'Revision.id')
sequence = Int(name='sequence', allow_none=True)
def __init__(self, branch, revision, sequence=None):
self.branch = branch
self.revision = revision
self.sequence = sequence
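# Construction sketch (illustrative; `branch` and `revision` stand for existing
# model objects). The composite (branch_id, revision_id) primary key declared
# above means at most one row per revision per branch:
#
#     BranchRevision(branch, revision, sequence=1)  # position 1 on the mainline
#     BranchRevision(branch, revision2)             # sequence=None, e.g. a revision
#                                                   # reachable only through a merge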
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/code/model/branchrevision.py | Python | agpl-3.0 | 984 |
from gavel import app
from gavel.models import *
from gavel.constants import *
import gavel.settings as settings
import gavel.utils as utils
from flask import (
redirect,
render_template,
request,
url_for,
)
import urllib.parse
@app.route('/admin/')
@utils.requires_auth
def admin():
annotators = Annotator.query.order_by(Annotator.id).all()
items = Item.query.order_by(Item.id).all()
decisions = Decision.query.all()
counts = {}
item_counts = {}
for d in decisions:
a = d.annotator_id
w = d.winner_id
l = d.loser_id
counts[a] = counts.get(a, 0) + 1
item_counts[w] = item_counts.get(w, 0) + 1
item_counts[l] = item_counts.get(l, 0) + 1
viewed = {i.id: {a.id for a in i.viewed} for i in items}
skipped = {}
for a in annotators:
for i in a.ignore:
if a.id not in viewed[i.id]:
skipped[i.id] = skipped.get(i.id, 0) + 1
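    # Illustrative reading of the loop above (hypothetical ids): if annotator 9
    # has item 3 on its ignore list but 9 is not in viewed[3], the item was
    # skipped outright rather than seen and then ignored, so skipped[3] += 1.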
# settings
setting_closed = Setting.value_of(SETTING_CLOSED) == SETTING_TRUE
return render_template(
'admin.html',
annotators=annotators,
counts=counts,
item_counts=item_counts,
skipped=skipped,
items=items,
votes=len(decisions),
setting_closed=setting_closed,
)
@app.route('/admin/item', methods=['POST'])
@utils.requires_auth
def item():
action = request.form['action']
if action == 'Submit':
csv = request.form['data']
data = utils.data_from_csv_string(csv)
for row in data:
_item = Item(*row)
db.session.add(_item)
db.session.commit()
elif action == 'Prioritize' or action == 'Cancel':
item_id = request.form['item_id']
target_state = action == 'Prioritize'
Item.by_id(item_id).prioritized = target_state
db.session.commit()
elif action == 'Disable' or action == 'Enable':
item_id = request.form['item_id']
target_state = action == 'Enable'
Item.by_id(item_id).active = target_state
db.session.commit()
elif action == 'Delete':
item_id = request.form['item_id']
try:
db.session.execute(ignore_table.delete(ignore_table.c.item_id == item_id))
Item.query.filter_by(id=item_id).delete()
db.session.commit()
except IntegrityError as e:
return render_template('error.html', message=str(e))
return redirect(url_for('admin'))
@app.route('/admin/item_patch', methods=['POST'])
@utils.requires_auth
def item_patch():
item = Item.by_id(request.form['item_id'])
if not item:
return render_template('error.html', message='Item not found.')
if 'location' in request.form:
item.location = request.form['location']
if 'name' in request.form:
item.name = request.form['name']
if 'description' in request.form:
item.description = request.form['description']
db.session.commit()
return redirect(url_for('item_detail', item_id=item.id))
@app.route('/admin/annotator', methods=['POST'])
@utils.requires_auth
def annotator():
action = request.form['action']
if action == 'Submit':
csv = request.form['data']
data = utils.data_from_csv_string(csv)
added = []
for row in data:
annotator = Annotator(*row)
added.append(annotator)
db.session.add(annotator)
db.session.commit()
try:
email_invite_links(added)
except Exception as e:
return render_template('error.html', message=str(e))
elif action == 'Email':
annotator_id = request.form['annotator_id']
try:
email_invite_links(Annotator.by_id(annotator_id))
except Exception as e:
return render_template('error.html', message=str(e))
elif action == 'Disable' or action == 'Enable':
annotator_id = request.form['annotator_id']
target_state = action == 'Enable'
Annotator.by_id(annotator_id).active = target_state
db.session.commit()
elif action == 'Delete':
annotator_id = request.form['annotator_id']
try:
db.session.execute(ignore_table.delete(ignore_table.c.annotator_id == annotator_id))
Annotator.query.filter_by(id=annotator_id).delete()
db.session.commit()
except IntegrityError as e:
return render_template('error.html', message=str(e))
return redirect(url_for('admin'))
@app.route('/admin/setting', methods=['POST'])
@utils.requires_auth
def setting():
key = request.form['key']
if key == 'closed':
action = request.form['action']
new_value = SETTING_TRUE if action == 'Close' else SETTING_FALSE
Setting.set(SETTING_CLOSED, new_value)
db.session.commit()
return redirect(url_for('admin'))
@app.route('/admin/item/<item_id>/')
@utils.requires_auth
def item_detail(item_id):
item = Item.by_id(item_id)
if not item:
return render_template('error.html', message='Item not found.')
else:
assigned = Annotator.query.filter(Annotator.next == item).all()
viewed_ids = {i.id for i in item.viewed}
if viewed_ids:
skipped = Annotator.query.filter(
Annotator.ignore.contains(item) & ~Annotator.id.in_(viewed_ids)
)
else:
skipped = Annotator.query.filter(Annotator.ignore.contains(item))
return render_template(
'admin_item.html',
item=item,
assigned=assigned,
skipped=skipped
)
@app.route('/admin/annotator/<annotator_id>/')
@utils.requires_auth
def annotator_detail(annotator_id):
annotator = Annotator.by_id(annotator_id)
if not annotator:
return render_template('error.html', message='Annotator not found.')
else:
seen = Item.query.filter(Item.viewed.contains(annotator)).all()
ignored_ids = {i.id for i in annotator.ignore}
if ignored_ids:
skipped = Item.query.filter(
Item.id.in_(ignored_ids) & ~Item.viewed.contains(annotator)
)
else:
skipped = []
return render_template(
'admin_annotator.html',
annotator=annotator,
seen=seen,
skipped=skipped
)
def email_invite_links(annotators):
if settings.DISABLE_EMAIL or annotators is None:
return
if not isinstance(annotators, list):
annotators = [annotators]
emails = []
for annotator in annotators:
link = urllib.parse.urljoin(settings.BASE_URL, '/login/%s' % annotator.secret)
raw_body = settings.EMAIL_BODY.format(name=annotator.name, link=link)
body = '\n\n'.join(utils.get_paragraphs(raw_body))
emails.append((annotator.email, settings.EMAIL_SUBJECT, body))
utils.send_emails(emails)
| atagh/gavel-clone | gavel/controllers/admin.py | Python | agpl-3.0 | 6,919 |
import os
import subprocess
import shutil
karma = os.path.join(os.path.dirname(__file__), '../node_modules/.bin/karma')
def javascript_tests():
if not shutil.which('nodejs'):
print("W: nodejs not available, skipping javascript tests")
return 0
elif os.path.exists(karma):
chrome_exec = shutil.which('chromium') or shutil.which('chromium-browser')
if chrome_exec:
os.environ["CHROME_BIN"] = chrome_exec
else:
print("Please install a chromium browser package in order"
"to run javascript unit tests.")
return 2
return subprocess.call(
[karma, "start", "test/karma.conf.js", "--single-run"]
)
else:
print("I: skipping javascript test (karma not available)")
return 0
if __name__ == "__main__":
import sys
sys.exit(javascript_tests())
| Linaro/squad | test/javascript.py | Python | agpl-3.0 | 895 |
# coding: utf-8
# Copyright (C) 2017 Jaime Bemarás
# See LICENSE.txt
from IPy import IP
import re
ASN = re.compile(r'AS\d+', re.IGNORECASE)
def validate(resources):
outcome = True
for resource in resources:
        if ASN.match(resource):
            continue
        try:
            IP(resource)
        except ValueError as error:
            outcome = False
            print('\t~ ERROR:', error.args[0])
return outcome
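# Illustrative usage (hypothetical resources; not part of the original module):
#
#     >>> validate(['AS65000', '192.0.2.0/24', '2001:db8::/32'])
#     True
#     >>> validate(['999.0.2.0/24'])  # prints '\t~ ERROR: ...' for the bad IP
#     False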
| synte/ec-ripe-api | resource_validator/validator.py | Python | agpl-3.0 | 479 |
"""
Studio editing view for OpenAssessment XBlock.
"""
import copy
import logging
from uuid import uuid4
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy
from voluptuous import MultipleInvalid
from xblock.fields import List, Scope
from xblock.core import XBlock
from web_fragments.fragment import Fragment
from openassessment.xblock.data_conversion import (
create_rubric_dict,
make_django_template_key,
update_assessments_format
)
from openassessment.xblock.defaults import DEFAULT_EDITOR_ASSESSMENTS_ORDER, DEFAULT_RUBRIC_FEEDBACK_TEXT
from openassessment.xblock.resolve_dates import resolve_dates, parse_date_value, DateValidationError, InvalidDateFormat
from openassessment.xblock.schema import EDITOR_UPDATE_SCHEMA
from openassessment.xblock.validation import validator
from openassessment.xblock.editor_config import AVAILABLE_EDITORS
from openassessment.xblock.load_static import LoadStatic
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class StudioMixin:
"""
Studio editing view for OpenAssessment XBlock.
"""
DEFAULT_CRITERIA = [
{
'label': '',
'options': [
{
'label': ''
},
]
}
]
NECESSITY_OPTIONS = {
"required": ugettext_lazy("Required"),
"optional": ugettext_lazy("Optional"),
"": ugettext_lazy("None")
}
# Build editor options from AVAILABLE_EDITORS
AVAILABLE_EDITOR_OPTIONS = {
key: val.get('display_name', key) for key, val in AVAILABLE_EDITORS.items()
}
STUDIO_EDITING_TEMPLATE = 'openassessmentblock/edit/oa_edit.html'
BASE_EDITOR_ASSESSMENTS_ORDER = copy.deepcopy(DEFAULT_EDITOR_ASSESSMENTS_ORDER)
# Since the XBlock problem definition contains only assessment
# modules that are enabled, we need to keep track of the order
# that the user left assessments in the editor, including
# the ones that were disabled. This allows us to keep the order
# that the user specified.
editor_assessments_order = List(
default=DEFAULT_EDITOR_ASSESSMENTS_ORDER,
scope=Scope.content,
help="The order to display assessments in the editor."
)
def studio_view(self, context=None): # pylint: disable=unused-argument
"""
Render the OpenAssessment XBlock for editing in Studio.
Args:
context: Not actively used for this view.
Returns:
(Fragment): An HTML fragment for editing the configuration of this XBlock.
"""
rendered_template = get_template(
self.STUDIO_EDITING_TEMPLATE
).render(self.editor_context())
fragment = Fragment(rendered_template)
fragment.add_javascript_url(LoadStatic.get_url('openassessment-studio.js'))
js_context_dict = {
"ALLOWED_IMAGE_EXTENSIONS": self.ALLOWED_IMAGE_EXTENSIONS,
"ALLOWED_FILE_EXTENSIONS": self.ALLOWED_FILE_EXTENSIONS,
"FILE_EXT_BLACK_LIST": self.FILE_EXT_BLACK_LIST,
}
fragment.initialize_js('OpenAssessmentEditor', js_context_dict)
return fragment
def editor_context(self):
"""
Update the XBlock's XML.
Returns:
dict with keys
'rubric' (unicode), 'prompt' (unicode), 'title' (unicode),
'submission_start' (unicode), 'submission_due' (unicode),
'assessments (dict)
"""
# In the authoring GUI, date and time fields should never be null.
# Therefore, we need to resolve all "default" dates to datetime objects
# before displaying them in the editor.
try:
__, __, date_ranges = resolve_dates( # pylint: disable=redeclared-assigned-name
self.start, self.due,
[
(self.submission_start, self.submission_due)
] + [
(asmnt.get('start'), asmnt.get('due'))
for asmnt in self.valid_assessments
],
self._
)
except (DateValidationError, InvalidDateFormat):
# If the dates are somehow invalid, we still want users to be able to edit the ORA,
# so just present the dates as they are.
def _parse_date_safe(date):
try:
return parse_date_value(date, self._)
except InvalidDateFormat:
return ''
date_ranges = [
(_parse_date_safe(self.submission_start), _parse_date_safe(self.submission_due))
] + [
(_parse_date_safe(asmnt.get('start')), _parse_date_safe(asmnt.get('due')))
for asmnt in self.valid_assessments
]
submission_start, submission_due = date_ranges[0]
assessments = self._assessments_editor_context(date_ranges[1:])
self.editor_assessments_order = self._editor_assessments_order_context()
# Every rubric requires one criterion. If there is no criteria
# configured for the XBlock, return one empty default criterion, with
# an empty default option.
criteria = copy.deepcopy(self.rubric_criteria_with_labels)
if not criteria:
criteria = self.DEFAULT_CRITERIA
# To maintain backwards compatibility, if there is no
# feedback_default_text configured for the xblock, use the default text
feedback_default_text = copy.deepcopy(self.rubric_feedback_default_text)
if not feedback_default_text:
feedback_default_text = DEFAULT_RUBRIC_FEEDBACK_TEXT
course_id = self.location.course_key if hasattr(self, 'location') else None
# If allowed file types haven't been explicitly set, load from a preset
white_listed_file_types = self.get_allowed_file_types_or_preset()
white_listed_file_types_string = ','.join(white_listed_file_types) if white_listed_file_types else ''
# If rubric reuse is enabled, include information about the other ORAs in this course
rubric_reuse_data = {}
if self.is_rubric_reuse_enabled:
rubric_reuse_data = self.get_other_ora_blocks_for_rubric_editor_context()
return {
'prompts': self.prompts,
'prompts_type': self.prompts_type,
'title': self.title,
'submission_due': submission_due,
'submission_start': submission_start,
'assessments': assessments,
'criteria': criteria,
'feedbackprompt': self.rubric_feedback_prompt,
'feedback_default_text': feedback_default_text,
'text_response': self.text_response if self.text_response else '',
'text_response_editor': self.text_response_editor if self.text_response_editor else 'text',
'file_upload_response': self.file_upload_response if self.file_upload_response else '',
'necessity_options': self.NECESSITY_OPTIONS,
'available_editor_options': self.AVAILABLE_EDITOR_OPTIONS,
'file_upload_type': self.file_upload_type,
'allow_multiple_files': self.allow_multiple_files,
'white_listed_file_types': white_listed_file_types_string,
'allow_latex': self.allow_latex,
'leaderboard_show': self.leaderboard_show,
'editor_assessments_order': [
make_django_template_key(asmnt)
for asmnt in self.editor_assessments_order
],
'teams_feature_enabled': self.team_submissions_enabled,
'teams_enabled': self.teams_enabled,
'base_asset_url': self._get_base_url_path_for_course_assets(course_id),
'is_released': self.is_released(),
'teamsets': self.get_teamsets(course_id),
'selected_teamset_id': self.selected_teamset_id,
'show_rubric_during_response': self.show_rubric_during_response,
'rubric_reuse_enabled': self.is_rubric_reuse_enabled,
'rubric_reuse_data': rubric_reuse_data,
'block_location': str(self.location),
}
@XBlock.json_handler
def update_editor_context(self, data, suffix=''): # pylint: disable=unused-argument
"""
Update the XBlock's configuration.
Args:
data (dict): Data from the request; should have the format described
in the editor schema.
Keyword Arguments:
suffix (str): Not used
Returns:
dict with keys 'success' (bool) and 'msg' (str)
"""
# Validate and sanitize the data using a schema
# If the data is invalid, this means something is wrong with
# our JavaScript, so we log an exception.
try:
data = EDITOR_UPDATE_SCHEMA(data)
except MultipleInvalid:
logger.exception('Editor context is invalid')
return {'success': False, 'msg': self._('Error updating XBlock configuration')}
# Check that the editor assessment order contains all the assessments.
current_order = set(data['editor_assessments_order'])
if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != current_order:
# Backwards compatibility: "staff-assessment" may not be present.
# If that is the only problem with this data, just add it manually and continue.
if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == current_order | {'staff-assessment'}:
data['editor_assessments_order'].append('staff-assessment')
logger.info('Backwards compatibility: editor_assessments_order now contains staff-assessment')
else:
logger.exception('editor_assessments_order does not contain all expected assessment types')
return {'success': False, 'msg': self._('Error updating XBlock configuration')}
if not data['text_response'] and not data['file_upload_response']:
return {
'success': False,
'msg': self._("Error: Text Response and File Upload Response cannot both be disabled")
}
if not data['text_response'] and data['file_upload_response'] == 'optional':
return {'success': False,
'msg': self._("Error: When Text Response is disabled, File Upload Response must be Required")}
if not data['file_upload_response'] and data['text_response'] == 'optional':
return {'success': False,
'msg': self._("Error: When File Upload Response is disabled, Text Response must be Required")}
# Backwards compatibility: We used to treat "name" as both a user-facing label
# and a unique identifier for criteria and options.
# Now we treat "name" as a unique identifier, and we've added an additional "label"
# field that we display to the user.
# If the JavaScript editor sends us a criterion or option without a "name"
# field, we should assign it a unique identifier.
for criterion in data['criteria']:
if 'name' not in criterion:
criterion['name'] = uuid4().hex
for option in criterion['options']:
if 'name' not in option:
option['name'] = uuid4().hex
xblock_validator = validator(self, self._)
success, msg = xblock_validator(
create_rubric_dict(data['prompts'], data['criteria']),
data['assessments'],
submission_start=data['submission_start'],
submission_due=data['submission_due'],
leaderboard_show=data['leaderboard_show']
)
if not success:
return {'success': False, 'msg': self._('Validation error: {error}').format(error=msg)}
# At this point, all the input data has been validated,
# so we can safely modify the XBlock fields.
self.title = data['title']
self.display_name = data['title']
self.prompts = data['prompts']
self.prompts_type = data['prompts_type']
self.rubric_criteria = data['criteria']
self.rubric_assessments = data['assessments']
self.editor_assessments_order = data['editor_assessments_order']
self.rubric_feedback_prompt = data['feedback_prompt']
self.rubric_feedback_default_text = data['feedback_default_text']
self.submission_start = data['submission_start']
self.submission_due = data['submission_due']
self.text_response = data['text_response']
self.text_response_editor = data['text_response_editor']
self.file_upload_response = data['file_upload_response']
if data['file_upload_response']:
self.file_upload_type = data['file_upload_type']
self.white_listed_file_types_string = data['white_listed_file_types']
else:
self.file_upload_type = None
self.white_listed_file_types_string = None
self.allow_multiple_files = bool(data['allow_multiple_files'])
self.allow_latex = bool(data['allow_latex'])
self.leaderboard_show = data['leaderboard_show']
self.teams_enabled = bool(data.get('teams_enabled', False))
self.selected_teamset_id = data.get('selected_teamset_id', '')
self.show_rubric_during_response = data.get('show_rubric_during_response', False)
return {'success': True, 'msg': self._('Successfully updated OpenAssessment XBlock')}
@XBlock.json_handler
def check_released(self, data, suffix=''): # pylint: disable=unused-argument
"""
Check whether the problem has been released.
Args:
data (dict): Not used
Keyword Arguments:
suffix (str): Not used
Returns:
dict with keys 'success' (bool), 'message' (unicode), and 'is_released' (bool)
"""
# There aren't currently any server-side error conditions we report to the client,
# but we send success/msg values anyway for consistency with other handlers.
return {
'success': True, 'msg': '',
'is_released': self.is_released()
}
def _assessments_editor_context(self, assessment_dates):
"""
Transform the rubric assessments list into the context
we will pass to the Django template.
Args:
assessment_dates: List of assessment date ranges (tuples of start/end datetimes).
Returns:
dict
"""
assessments = {}
for asmnt, date_range in zip(self.rubric_assessments, assessment_dates):
# Django Templates cannot handle dict keys with dashes, so we'll convert
# the dashes to underscores.
template_name = make_django_template_key(asmnt['name'])
assessments[template_name] = copy.deepcopy(asmnt)
assessments[template_name]['start'] = date_range[0]
assessments[template_name]['due'] = date_range[1]
# In addition to the data in the student training assessment, we need to include two additional
# pieces of information: a blank context to render the empty template with, and the criteria
# for each example (so we don't have any complicated logic within the template). Though this
# could be accomplished within the template, we are opting to remove logic from the template.
student_training_module = self.get_assessment_module('student-training')
student_training_template = {
'answer': {
'parts': [
{'text': ''} for _ in self.prompts
]
}
}
criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
for criterion in criteria_list:
criterion['option_selected'] = ""
student_training_template['criteria'] = criteria_list
if student_training_module:
student_training_module = update_assessments_format([student_training_module])[0]
example_list = []
# Adds each example to a modified version of the student training module dictionary.
for example in student_training_module['examples']:
criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
# Equivalent to a Join Query, this adds the selected option to the Criterion's dictionary, so that
# it can be easily referenced in the template without searching through the selected options.
for criterion in criteria_list:
for option_selected in example['options_selected']:
if option_selected['criterion'] == criterion['name']:
criterion['option_selected'] = option_selected['option']
example_list.append({
'answer': example['answer'],
'criteria': criteria_list,
})
assessments['training'] = {'examples': example_list, 'template': student_training_template}
# If we don't have student training enabled, we still need to render a single (empty, or default) example
else:
assessments['training'] = {'examples': [student_training_template], 'template': student_training_template}
return assessments
def _editor_assessments_order_context(self):
"""
Create a list of assessment names in the order
the user last set in the editor, including
assessments that are not currently enabled.
Returns:
list of assessment names
"""
# Start with the default order, to pick up any assessment types that have been added
# since the user last saved their ordering.
effective_order = copy.deepcopy(self.BASE_EDITOR_ASSESSMENTS_ORDER)
# Account for changes the user has made to the default order
user_order = copy.deepcopy(self.editor_assessments_order)
effective_order = self._subset_in_relative_order(effective_order, user_order)
# Account for inconsistencies between the user's order and the problems
# that are currently enabled in the problem (These cannot be changed)
enabled_assessments = [asmnt['name'] for asmnt in self.valid_assessments]
enabled_ordered_assessments = [
assessment for assessment in enabled_assessments if assessment in user_order
]
effective_order = self._subset_in_relative_order(effective_order, enabled_ordered_assessments)
return effective_order
def _subset_in_relative_order(self, superset, subset):
"""
Returns a copy of superset, with entries that appear in subset being reordered to match
their relative ordering in subset.
"""
superset_indices = [superset.index(item) for item in subset]
sorted_superset_indices = sorted(superset_indices)
if superset_indices != sorted_superset_indices:
for index, superset_index in enumerate(sorted_superset_indices):
superset[superset_index] = subset[index]
return superset
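    # Worked example of the reordering above (illustrative values):
    #
    #     >>> self._subset_in_relative_order(['a', 'b', 'c', 'd'], ['c', 'a'])
    #     ['c', 'b', 'a', 'd']
    #
    # 'c' and 'a' keep the slots they occupied in the superset but swap into
    # the relative order given by the subset; 'b' and 'd' are untouched.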
def _get_base_url_path_for_course_assets(self, course_key):
"""
Returns base url path for course assets
"""
if course_key is None:
return None
placeholder_id = uuid4().hex
# create a dummy asset location with a fake but unique name. strip off the name, and return it
url_path = str(course_key.make_asset_key('asset', placeholder_id).for_branch(None))
if not url_path.startswith('/'):
url_path = '/' + url_path
return url_path.replace(placeholder_id, '')
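    # For a course key such as course-v1:Org+Course+Run the result is roughly
    # '/asset-v1:Org+Course+Run+type@asset+block@' (illustrative; the exact
    # serialization is defined by the opaque-keys library, not by this module).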
def get_team_configuration(self, course_id):
"""
Returns a dict with team configuration settings.
"""
configuration_service = self.runtime.service(self, 'teams_configuration')
team_configuration = configuration_service.get_teams_configuration(course_id)
if not team_configuration:
return None
return team_configuration
def get_teamsets(self, course_id):
"""
Wrapper around get_team_configuration that returns team names only for display
"""
team_configuration = self.get_team_configuration(course_id)
if not team_configuration:
return None
return team_configuration.teamsets
| edx/edx-ora2 | openassessment/xblock/studio_mixin.py | Python | agpl-3.0 | 20,504 |
# Copyright 2018 Silvio Gregorini ([email protected])
# Copyright (c) 2018 Openforce Srls Unipersonale (www.openforce.it)
# Copyright (c) 2019 Matteo Bilotta
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import models
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
def group_by_account_and_tax(self):
grouped_lines = {}
for line in self:
group_key = (line.account_id, line.tax_line_id)
if group_key not in grouped_lines:
grouped_lines.update({group_key: []})
grouped_lines[group_key].append(line)
return grouped_lines
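    # Sketch of the returned mapping (illustrative; assumes line_1 and line_2
    # share the same account and tax line, while line_3 differs):
    #
    #     {
    #         (account_a, tax_x): [line_1, line_2],
    #         (account_b, tax_y): [line_3],
    #     }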
| OCA/l10n-italy | l10n_it_vat_statement_split_payment/models/account.py | Python | agpl-3.0 | 670 |
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.conf.urls.defaults import *
from videos.views import rpc_router
urlpatterns = patterns(
'videos.views',
url(r'^watch/$', 'watch_page', name='watch_page'),
url(r'^watch/featured/$', 'featured_videos', name='featured_videos'),
url(r'^watch/latest/$', 'latest_videos', name='latest_videos'),
url(r'^watch/popular/$', 'popular_videos', name='popular_videos'),
# temporarily commented: see https://www.pivotaltracker.com/story/show/17619883
# url(r'^volunteer/$', 'volunteer_page', name='volunteer_page'),
url(r'^volunteer/(?P<category>\w+)/$', 'volunteer_category', name='volunteer_category'),
url(r'^test_celery/$', 'test_celery'),
url(r'^test_celery_exception/$', 'test_celery_exception'),
url(r'^router/$', rpc_router, name='rpc_router'),
url(r'^router/api/$', rpc_router.api, name='rpc_api'),
url(r'^subscribe_to_updates/$', 'subscribe_to_updates', name='subscribe_to_updates'),
url(r'^feedback/$', 'feedback', name='feedback'),
url(r'^feedback/error/$', 'feedback', {'hide_captcha': True}, 'feedback_error'),
url(r'^upload_subtitles/$', 'upload_subtitles', name='upload_subtitles'),
url(r'^upload_transcription_file/$', 'upload_transcription_file', name='upload_transcription_file'),
url(r'^create/$', 'create', name='create'),
url(r'^create/feed/$', 'create_from_feed', name='create_from_feed'),
url(r'^email_friend/$', 'email_friend', name='email_friend'),
url(r'^activities/(?P<video_id>(\w|-)+)/$', 'actions_list', name='actions_list'),
url(r'^stop_notification/(?P<video_id>(\w|-)+)/$', 'stop_notification', name='stop_notification'),
url(r'^(?P<video_id>(\w|-)+/)?rollback/(?P<pk>\d+)/$', 'rollback', name='rollback'),
url(r'^(?P<video_id>(\w|-)+/)?diffing/(?P<pk>\d+)/(?P<second_pk>\d+)/$', 'diffing', name='diffing'),
url(r'^test/$', 'test_form_page', name='test_form_page'),
url(r'^video_url_make_primary/$', 'video_url_make_primary', name='video_url_make_primary'),
url(r'^video_url_create/$', 'video_url_create', name='video_url_create'),
url(r'^video_url_remove/$', 'video_url_remove', name='video_url_remove'),
url(r'^(?P<video_id>(\w|-)+)/debug/$', 'video_debug', name='video_debug'),
url(r'^(?P<video_id>(\w|-)+)/reset_metadata/$', 'reset_metadata', name='reset_metadata'),
url(r'^(?P<video_id>(\w|-)+)/set-original-language/$', 'set_original_language', name='set_original_language'),
url(r'^(?P<video_id>(\w|-)+)/$', 'redirect_to_video'),
url(r'^(?P<video_id>(\w|-)+)/info/$', 'video', name='video'),
url(r'^(?P<video_id>(\w|-)+)/info/(?P<title>[^/]+)/$', 'video', name='video_with_title'),
url(r'^(?P<video_id>(\w|-)+)/url/(?P<video_url>\d+)/$', 'video', name='video_url'),
url(r'^(?P<video_id>(\w|-)+)/(?P<lang>[\w\-]+)/(?P<lang_id>[\d]+)/$', 'language_subtitles', name='translation_history'),
url(r'^(?P<video_id>(\w|-)+)/(?P<lang>[\w\-]+)/(?P<lang_id>[\d]+)/(?P<version_id>[\d]+)/$', 'language_subtitles', name='subtitleversion_detail'),
url(r'^(?P<video_id>(\w|-)+)/(?P<lang>[\w\-]+)/$', 'legacy_history', name='translation_history_legacy'),
url(r'(?P<video_id>(\w|-)+)/staff/delete/$', 'video_staff_delete', name='video_staff_delete'),
)
| ujdhesa/unisubs | apps/videos/urls.py | Python | agpl-3.0 | 3,989 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from rest_framework import viewsets, routers
from voting_app.models import Topic
from voting_app.views import Vote
from voting_app.serializer import TopicSerializer
admin.autodiscover()
# ViewSets define the view behavior.
class TopicViewSet(viewsets.ModelViewSet):
model = Topic
serializer_class = TopicSerializer
queryset = Topic.objects.all().filter(hide=False)
router = routers.DefaultRouter()
router.register(r'topics', TopicViewSet)
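# With the registration above, DefaultRouter exposes the usual viewset routes
# (shown for illustration; the route names follow DRF's defaults for Topic):
#
#     /topics/       -> 'topic-list'   (GET list, POST create)
#     /topics/{pk}/  -> 'topic-detail' (GET retrieve, PUT/PATCH update, DELETE destroy)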
urlpatterns = patterns('',
url(r'^$', 'voting_app.views.index', name='index'),
url(r'^', include(router.urls)),
url(r'^vote/$', Vote.as_view()),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^admin/', include(admin.site.urls)),
)
| gc3-uzh-ch/django-simple-poll | voting/urls.py | Python | agpl-3.0 | 833 |
# This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
import pytest
from io import BytesIO
from rinoh.backend.pdf import cos
from rinoh.backend.pdf.reader import PDFObjectReader
def test_read_boolean():
def test_boolean(bytes_boolean, boolean):
reader = PDFObjectReader(BytesIO(bytes_boolean))
result = reader.next_item()
assert isinstance(result, cos.Boolean) and bool(result) == boolean
test_boolean(b'true', True)
test_boolean(b'false', False)
def test_read_integer():
def test_integer(bytes_integer, integer):
reader = PDFObjectReader(BytesIO(bytes_integer))
result = reader.next_item()
assert isinstance(result, cos.Integer) and result == integer
test_integer(b'123', 123)
test_integer(b'43445', 43445)
test_integer(b'+17', 17)
test_integer(b'-98', -98)
test_integer(b'0', 0)
def test_read_real():
def test_real(bytes_real, real):
reader = PDFObjectReader(BytesIO(bytes_real))
result = reader.next_item()
assert isinstance(result, cos.Real) and result == real
test_real(b'34.5', 34.5)
test_real(b'-3.62', -3.62)
test_real(b'+123.6', 123.6)
test_real(b'4.', 4.0)
test_real(b'-.002', -.002)
test_real(b'0.0', 0.0)
def test_read_name():
def test_name(bytes_name, unicode_name):
reader = PDFObjectReader(BytesIO(bytes_name))
result = reader.next_item()
assert isinstance(result, cos.Name) and str(result) == unicode_name
test_name(b'/Adobe#20Green', 'Adobe Green')
test_name(b'/PANTONE#205757#20CV', 'PANTONE 5757 CV')
test_name(b'/paired#28#29parentheses', 'paired()parentheses')
test_name(b'/The_Key_of_F#23_Minor', 'The_Key_of_F#_Minor')
test_name(b'/A#42', 'AB')
def test_read_dictionary():
input = b"""
<< /Type /Example
/Subtype /DictionaryExample
/Version 0.01
/IntegerItem 12
/StringItem (a string)
/Subdictionary << /Item1 0.4
/Item2 true
/LastItem (not!)
/VeryLastItem (OK)
>>
>>"""
reader = PDFObjectReader(BytesIO(input))
result = reader.next_item()
expected = cos.Dictionary([('Type', cos.Name('Example')),
('Subtype', cos.Name('DictionaryExample')),
('Version', cos.Real(0.01)),
('IntegerItem', cos.Integer(12)),
('StringItem', cos.String('a string')),
('Subdictionary', cos.Dictionary(
[('Item1', cos.Real(0.4)),
('Item2', cos.Boolean(True)),
('LastItem', cos.String('not!')),
('VeryLastItem', cos.String('OK'))]))])
assert isinstance(result, cos.Dictionary)
assert dict(result) == dict(expected)
| brechtm/rinohtype | tests/test_pdf_reader.py | Python | agpl-3.0 | 3,238 |
# Yum plugin to re-patch container rootfs after a yum update is done
#
# Copyright (C) 2012 Oracle
#
# Authors:
# Dwight Engen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import os
from fnmatch import fnmatch
from yum.plugins import TYPE_INTERACTIVE
from yum.plugins import PluginYumExit
requires_api_version = '2.0'
plugin_type = (TYPE_INTERACTIVE,)
def posttrans_hook(conduit):
pkgs = []
patch_required = False
# If we aren't root, we can't have updated anything
if os.geteuid():
return
# See what packages have files that were patched
confpkgs = conduit.confString('main', 'packages')
if not confpkgs:
return
tmp = confpkgs.split(",")
for confpkg in tmp:
pkgs.append(confpkg.strip())
conduit.info(2, "lxc-patch: checking if updated pkgs need patching...")
ts = conduit.getTsInfo()
for tsmem in ts.getMembers():
for pkg in pkgs:
if fnmatch(pkg, tsmem.po.name):
patch_required = True
if patch_required:
conduit.info(2, "lxc-patch: patching container...")
os.spawnlp(os.P_WAIT, "lxc-patch", "lxc-patch", "--patch", "/")
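# A minimal plugin configuration sketch (hypothetical package list; the
# 'packages' key is the comma-separated glob list read in posttrans_hook):
#
#     # /etc/yum/pluginconf.d/lxc-patch.conf
#     [main]
#     enabled=1
#     packages=dhclient,initscripts,iptables,sudo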
| czchen/debian-lxc | config/yum/lxc-patch.py | Python | lgpl-2.1 | 1,850 |
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2015
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
"""
This package contains all python modules implementing the DROP
Manager concepts, including their external interface, a web UI and a client
"""
| steve-ord/daliuge | daliuge-engine/dlg/manager/__init__.py | Python | lgpl-2.1 | 1,112 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Copyright (c) 2010-2019 Denis Machard
# This file is part of the extensive automation project
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
# -------------------------------------------------------------------
import threading
import time
from ea.libs.NetLayerLib import ServerAgent as NetLayerLib
from ea.libs.NetLayerLib import Messages as Messages
from ea.libs.NetLayerLib import FifoCallBack as FifoCallBack
from ea.serverinterfaces import EventServerInterface as ESI
from ea.serverinterfaces import AgentServerInterface as ASI
from ea.libs import Logger, Settings
class TestServerInterface(Logger.ClassLogger, NetLayerLib.ServerAgent):
def __init__(self, listeningAddress, agentName='TSI', context=None):
"""
Constructs TCP Server Inferface
@param listeningAddress:
@type listeningAddress:
"""
NetLayerLib.ServerAgent.__init__(self, listeningAddress=listeningAddress,
agentName=agentName,
keepAliveInterval=Settings.getInt(
'Network', 'keepalive-interval'),
inactivityTimeout=Settings.getInt(
'Network', 'inactivity-timeout'),
responseTimeout=Settings.getInt(
'Network', 'response-timeout'),
selectTimeout=Settings.get(
'Network', 'select-timeout'),
pickleVer=Settings.getInt(
'Network', 'pickle-version')
)
self.context = context
self.__mutex__ = threading.RLock()
self.__fifoThread = None
        self.tests = {}  # test register: maps task-id -> True if the test runs in background
self.testsConnected = {} # all tests connected
def startFifo(self):
"""
Start the fifo
"""
self.__fifoThread = FifoCallBack.FifoCallbackThread()
self.__fifoThread.start()
self.trace("TSI: fifo started.")
def stopFifo(self):
"""
Stop the fifo
"""
self.__fifoThread.stop()
self.__fifoThread.join()
self.trace("TSI: fifo stopped.")
def registerTest(self, id, background):
"""
Register the test on the server
@param id:
@type id:
@param background:
@type background: boolean
"""
try:
self.tests[str(id)] = bool(background)
self.trace(
'Test=%s registered, running in Background=%s' %
(id, background))
except Exception as e:
self.error(err=e)
return False
return True
def onConnection(self, client):
"""
Called on connection of the test
@param client:
@type client:
"""
NetLayerLib.ServerAgent.onConnection(self, client)
self.testsConnected[client.client_address] = {'connected-at': time.time(),
'probes': [], 'agents': []}
self.trace('test is starting: %s' % str(client.client_address))
def onDisconnection(self, client):
"""
Called on disconnection of test
@param client:
@type client:
"""
NetLayerLib.ServerAgent.onDisconnection(self, client)
self.trace('test is endding: %s' % str(client.client_address))
def resetRunningAgent(self, client):
"""
Reset all running agents used by the client passed as argument
@param client:
@type client:
"""
self.trace('Trying to cleanup active agents')
for p in client['agents']:
# we can reset only agent in ready state (ready message received)
if 'agent-name' in p:
agent = ASI.instance().getAgent(aname=p['agent-name'])
if agent is not None:
self.trace('Reset Agent=%s for Script=%s and Adapter=%s' % (p['agent-name'],
p['script-id'],
p['source-adapter']))
data = {'event': 'agent-reset', 'script_id': p['script-id'],
'source-adapter': p['source-adapter'], 'uuid': p['uuid']}
ASI.instance().notify(client=agent['address'], data=data)
def onRequest(self, client, tid, request):
"""
Reimplemented from ServerAgent
Called on incoming request
@param client:
@type client:
@param tid:
@type tid:
@param request:
@type request:
"""
self.__mutex__.acquire()
try:
_body_ = request['body']
if client not in self.testsConnected:
self.__mutex__.release()
return
self.testsConnected[client]['task-id'] = _body_['task-id']
# handle notify and save some statistics on the database
if request['cmd'] == Messages.RSQ_NOTIFY:
try:
if _body_['event'] in ['agent-data', 'agent-notify', 'agent-init',
'agent-reset', 'agent-alive', 'agent-ready']:
if _body_['event'] == 'agent-ready':
self.testsConnected[client]['agents'].append(
{
'agent-name': _body_['destination-agent'],
'script-id': _body_['script_id'],
'uuid': _body_['uuid'],
'source-adapter': _body_['source-adapter']
}
)
ASI.instance().notifyAgent(client, tid, data=_body_)
except Exception as e:
self.error('unable to handle notify for agent: %s' % e)
if _body_['event'] == 'testcase-stopped':
# reset agents
self.resetRunningAgent(client=self.testsConnected[client])
if _body_['task-id'] in self.tests:
if not self.tests[_body_['task-id']]:
# check connected time of the associated user and test
# if connected-at of the user > connected-at of the test
# then not necessary to send events
userFounded = self.context.getUser(
login=_body_['from'])
if userFounded is not None:
if client not in self.testsConnected:
self.error(
'unknown test from %s' % str(client))
else:
if userFounded['connected-at'] < self.testsConnected[client]['connected-at']:
if _body_['channel-id']:
ESI.instance().notify(body=('event', _body_),
toAddress=_body_['channel-id'])
else:
ESI.instance().notify(body=('event', _body_))
else:
self.error('test unknown: %s' % _body_['task-id'])
if _body_['event'] == 'script-stopped':
# reset agents
self.resetRunningAgent(client=self.testsConnected[client])
if _body_['task-id'] in self.tests:
self.tests.pop(_body_['task-id'])
else:
self.error('task-id unknown: %s' % _body_['task-id'])
if client in self.testsConnected:
self.testsConnected.pop(client)
else:
self.error('test unknown: %s' % str(client))
# handle requests
elif request['cmd'] == Messages.RSQ_CMD:
self.trace("cmd received: %s" % _body_['cmd'])
if 'cmd' in _body_:
# handle interact command
if _body_['cmd'] == Messages.CMD_INTERACT:
self.trace('interact called')
if _body_['task-id'] in self.tests:
if not self.tests[_body_['task-id']]:
# check connected time of the associated user and test
# if connected-at of the user > connected-at of
# the test then not necessary to send events
userFounded = self.context.getUser(
login=_body_['from'])
if userFounded is not None:
if client not in self.testsConnected:
self.error(
'unknown test from %s' %
str(client))
else:
if userFounded['connected-at'] < self.testsConnected[client]['connected-at']:
self.__fifoThread.putItem(lambda: self.onInteract(client, tid,
bodyReq=_body_,
timeout=_body_['timeout']))
else:
self.error('test unknown: %s' % _body_['task-id'])
else:
self.error('cmd unknown %s' % _body_['cmd'])
rsp = {'cmd': _body_['cmd'], 'res': Messages.CMD_ERROR}
NetLayerLib.ServerAgent.failed(
self, client, tid, body=rsp)
else:
self.error('cmd is missing')
# handle other request
else:
self.trace('%s received ' % request['cmd'])
except Exception as e:
self.error("unable to handle incoming request: %s" % e)
self.__mutex__.release()
def onInteract(self, client, tid, bodyReq, timeout=0.0):
"""
Called on interact
"""
inter = Interact(client, tid, bodyReq, timeout=timeout)
testThread = threading.Thread(target=lambda: inter.run())
testThread.start()
def trace(self, txt):
"""
Trace message
"""
if Settings.instance() is not None:
if Settings.get('Trace', 'debug-level') == 'VERBOSE':
Logger.ClassLogger.trace(self, txt=txt)
class Interact(object):
def __init__(self, client, tid, bodyReq, timeout=0.0):
"""
Interact object, not blocking
"""
self.client = client
self.tid = tid
self.bodyReq = bodyReq
self.timeout = timeout
def run(self):
"""
On run
"""
rsp = ESI.instance().interact(body=self.bodyReq, timeout=self.timeout)
_data_ = {'cmd': Messages.CMD_INTERACT}
if rsp is None:
_data_['rsp'] = None
else:
_data_['rsp'] = rsp['body']
instance().ok(self.client, self.tid, body=_data_)
TSI = None
def instance():
"""
Returns the singleton
@return:
@rtype:
"""
return TSI
def initialize(listeningAddress, context):
"""
Instance creation
@param listeningAddress:
@type listeningAddress:
"""
global TSI
TSI = TestServerInterface(listeningAddress=listeningAddress,
context=context)
TSI.startFifo()
def finalize():
"""
Destruction of the singleton
"""
global TSI
if TSI:
TSI.stopFifo()
TSI.stopSA()
TSI = None
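# Lifecycle sketch (illustrative; the real address and context come from the
# server bootstrap code, not from this module):
#
#     initialize(listeningAddress=('0.0.0.0', 8090), context=ctx)
#     tsi = instance()  # singleton is now live, fifo thread started
#     ...
#     finalize()        # stops the fifo thread and the server agent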
| dmachard/extensive-testing | src/ea/serverinterfaces/TestServerInterface.py | Python | lgpl-2.1 | 13,189 |
# fMBT, free Model Based Testing tool
# Copyright (c) 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
# Import this to test step implementations written in Python
# in order to enable logging.
# fmbtlog writes given message to the fmbt log (XML)
# messages can be viewed using format $al of
# fmbt-log -f '$al' logfile
#
# adapterlog writes given message to the adapter log (plain text)
# written by remote_python or remote_pyaal, for instance.
# Log function implementations are provided by the adapter
# component such as remote_python or remote_pyaal.
import datetime
import inspect
import os
import sys
import time
import urllib
_g_fmbt_adapterlogtimeformat="%s.%f"
_g_actionName = "undefined"
_g_testStep = -1
_g_simulated_actions = []
def _fmbt_call_helper(func,param = ""):
if simulated():
return ""
sys.stdout.write("fmbt_call %s.%s\n" % (func,param))
sys.stdout.flush()
response = sys.stdin.readline().rstrip()
magic,code = response.split(" ")
if magic == "fmbt_call":
if code[0] == "1":
return urllib.unquote(code[1:])
return ""
def formatTime(timeformat="%s", timestamp=None):
    if timestamp is None:
timestamp = datetime.datetime.now()
# strftime on Windows does not support conversion to epoch (%s).
# Calculate it here, if needed.
if os.name == "nt":
if "%s" in timeformat:
epoch_time = time.mktime(timestamp.timetuple())
timeformat = timeformat.replace("%s", str(int(epoch_time)))
if "%F" in timeformat:
timeformat = timeformat.replace("%F", "%Y-%m-%d")
if "%T" in timeformat:
timeformat = timeformat.replace("%T", "%H:%M:%S")
return timestamp.strftime(timeformat)
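# Illustrative examples (actual output depends on the current time):
#
#     >>> formatTime("%F %T")  # e.g. '2012-06-01 14:03:05'
#     >>> formatTime("%s")     # seconds since the epoch, e.g. '1338552185'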
def heuristic():
return _fmbt_call_helper("heuristic.get")
def setHeuristic(heuristic):
return _fmbt_call_helper("heuristic.set",heuristic)
def coverage():
return _fmbt_call_helper("coverage.get")
def setCoverage(coverage):
return _fmbt_call_helper("coverage.set",coverage)
def coverageValue():
return _fmbt_call_helper("coverage.getValue")
def fmbtlog(msg, flush=True):
try: file("/tmp/fmbt.fmbtlog", "a").write("%s\n" % (msg,))
except: pass
def adapterlog(msg, flush=True):
try:
_adapterlogWriter(file("/tmp/fmbt.adapterlog", "a"),
formatAdapterLogMessage(msg,))
except: pass
def setAdapterLogWriter(func):
"""
Override low-level adapter log writer with the given function. The
function should take two parameters: a file-like object and a log
message. The message is formatted and ready to be written to the
file. The default is
lambda fileObj, formattedMsg: fileObj.write(formattedMsg)
"""
global _adapterlogWriter
_adapterlogWriter = func
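# Example (sketch): mirror adapter log messages to stderr instead of the file:
#   import sys
#   setAdapterLogWriter(lambda fileObj, formattedMsg: sys.stderr.write(formattedMsg))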
def adapterLogWriter():
"""
Return current low-level adapter log writer function.
"""
global _adapterlogWriter
return _adapterlogWriter
def reportOutput(msg):
try: file("/tmp/fmbt.reportOutput", "a").write("%s\n" % (msg,))
except: pass
def setAdapterLogTimeFormat(strftime_format):
"""
Use given time format string in timestamping adapterlog messages
"""
global _g_fmbt_adapterlogtimeformat
_g_fmbt_adapterlogtimeformat = strftime_format
def formatAdapterLogMessage(msg, fmt="%s %s\n"):
"""
Return timestamped adapter log message
"""
return fmt % (formatTime(_g_fmbt_adapterlogtimeformat), msg)
def getActionName():
"""deprecated, use actionName()"""
return _g_actionName
def actionName():
"""
Return the name of currently executed action (input or output).
"""
return _g_actionName
def getTestStep():
"""deprecated, use testStep()"""
return _g_testStep
def testStep():
"""
Return the number of currently executed test step.
"""
return _g_testStep
def simulated():
"""
Returns True if fMBT is simulating execution of an action (guard
or body block) instead of really executing it.
"""
return len(_g_simulated_actions) > 0
def _adapterlogWriter(fileObj, formattedMsg):
fileObj.write(formattedMsg)
def funcSpec(func):
"""
Return function name and args as they could have been defined
based on function object.
"""
argspec = inspect.getargspec(func)
if argspec.defaults:
kwarg_count = len(argspec.defaults)
else:
kwarg_count = 0
arg_count = len(argspec.args) - kwarg_count
arglist = [str(arg) for arg in argspec.args[:arg_count]]
kwargs = argspec.args[arg_count:]
for index, kwarg in enumerate(kwargs):
arglist.append("%s=%s" % (kwarg, repr(argspec.defaults[index])))
if argspec.varargs:
arglist.append("*%s" % (argspec.varargs,))
if argspec.keywords:
arglist.append("**%s" % (argspec.keywords,))
try:
funcspec = "%s(%s)" % (func.func_name, ", ".join(arglist))
except:
funcspec = "%s(fmbt.funcSpec error)" % (func.func_name,)
return funcspec
_g_debug_socket = None
_g_debug_conn = None
def debug(session=0):
"""
Start debugging with fmbt-debug from the point where this function
was called. Execution will stop until connection to fmbt-debug
[session] has been established.
Parameters:
session (integer, optional):
debug session that identifies which fmbt-debug should
connect to this process. The default is 0.
Example:
- execute on command line "fmbt-debug 42"
- add fmbt.debug(42) in your Python code
- run the Python code so that it will call fmbt.debug(42)
- when done the debugging on the fmbt-debug prompt, enter "c"
for continue.
"""
import bdb
import inspect
import pdb
import socket
global _g_debug_conn, _g_debug_socket
if not _g_debug_socket:
PORTBASE = 0xf4bd # 62653, fMBD
host = "127.0.0.1" # accept local host only, by default
port = PORTBASE + session
_g_debug_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
_g_debug_socket.bind((host, port))
_g_debug_socket.listen(1)
while True:
(_g_debug_conn, addr) = _g_debug_socket.accept()
_g_debug_conn.sendall("fmbt.debug\n")
msg = _g_debug_conn.recv(len("fmbt-debug\n"))
if msg.startswith("fmbt-debug"):
break
_g_debug_conn.close()
except socket.error:
# already in use, perhaps fmbt-debug is already listening to
# the socket and waiting for this process to connect
try:
_g_debug_socket.connect((host, port))
_g_debug_conn = _g_debug_socket
whos_there = _g_debug_conn.recv(len("fmbt-debug\n"))
if not whos_there.startswith("fmbt-debug"):
_g_debug_conn.close()
_g_debug_socket = None
_g_debug_conn = None
raise ValueError(
'unexpected answer "%s", fmbt-debug expected' %
(whos_there.strip(),))
_g_debug_conn.sendall("fmbt.debug\n")
except socket.error:
raise ValueError('debugger cannot listen or connect to %s:%s' % (host, port))
if not _g_debug_conn:
fmbtlog("debugger waiting for connection at %s:%s" % (host, port))
# socket.makefile does not work due to buffering issues
# therefore, use our own socket-to-file converter
class SocketToFile(object):
def __init__(self, socket_conn):
self._conn = socket_conn
def read(self, bytes=-1):
msg = []
rv = ""
try:
c = self._conn.recv(1)
except KeyboardInterrupt:
self._conn.close()
raise
while c and not rv:
msg.append(c)
if c == "\r":
rv = "".join(msg)
elif c == "\n":
rv = "".join(msg)
elif len(msg) == bytes:
rv = "".join(msg)
else:
c = self._conn.recv(1)
return rv
def readline(self):
return self.read()
def write(self, msg):
self._conn.sendall(msg)
def flush(self):
pass
connfile = SocketToFile(_g_debug_conn)
debugger = pdb.Pdb(stdin=connfile, stdout=connfile)
debugger.set_trace(inspect.currentframe().f_back)
| pombreda/fMBT | utils/fmbt.py | Python | lgpl-2.1 | 9,267 |
# Copyright (C) 2013-2017 Chris Lalancette <[email protected]>
# Copyright (C) 2013 Ian McLeod <[email protected]>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
RHEL-7 installation
"""
import os
import oz.ozutil
import oz.RedHat
import oz.OzException
class RHEL7Guest(oz.RedHat.RedHatLinuxCDYumGuest):
"""
Class for RHEL-7 installation
"""
def __init__(self, tdl, config, auto, output_disk=None, netdev=None,
diskbus=None, macaddress=None):
oz.RedHat.RedHatLinuxCDYumGuest.__init__(self, tdl, config, auto,
output_disk, netdev, diskbus,
True, True, "cpio", macaddress,
True)
self.virtio_channel_name = 'org.fedoraproject.anaconda.log.0'
def _modify_iso(self):
"""
Method to modify the ISO for autoinstallation.
"""
self._copy_kickstart(os.path.join(self.iso_contents, "ks.cfg"))
initrdline = " append initrd=initrd.img ks=cdrom:/dev/cdrom:/ks.cfg"
if self.tdl.installtype == "url":
initrdline += " repo=" + self.url + "\n"
else:
# RHEL6 dropped this command line directive due to an Anaconda bug
# that has since been fixed. Note that this used to be "method="
# but that has been deprecated for some time.
initrdline += " repo=cdrom:/dev/cdrom"
self._modify_isolinux(initrdline)
def get_auto_path(self):
"""
Method to create the correct path to the RHEL 7 kickstart file.
"""
return oz.ozutil.generate_full_auto_path("RHEL7.auto")
def get_class(tdl, config, auto, output_disk=None, netdev=None, diskbus=None,
macaddress=None):
"""
Factory method for RHEL-7 installs.
"""
if tdl.update.isdigit():
if netdev is None:
netdev = 'virtio'
if diskbus is None:
diskbus = 'virtio'
return RHEL7Guest(tdl, config, auto, output_disk, netdev, diskbus,
macaddress)
def get_supported_string():
"""
Return supported versions as a string.
"""
return "RHEL 7"
| imcleod/oz | oz/RHEL_7.py | Python | lgpl-2.1 | 2,885 |
import telepathy
from telepathy.interfaces import CONN_MGR_INTERFACE
import dbus
def parse_account(s):
lines = s.splitlines()
pairs = []
manager = None
protocol = None
for line in lines:
if not line.strip():
continue
k, v = line.split(':', 1)
k = k.strip()
v = v.strip()
if k == 'manager':
manager = v
elif k == 'protocol':
protocol = v
else:
if k not in ("account", "password"):
if v.lower() == "false":
v = False
elif v.lower() == "true":
v = True
else:
try:
v = dbus.UInt32(int(v))
except:
pass
pairs.append((k, v))
assert manager
assert protocol
return manager, protocol, dict(pairs)
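# Example account file (sketch) understood by parse_account()/read_account():
#   manager: gabble
#   protocol: jabber
#   account: [email protected]
#   password: secret
#   ignore-ssl-errors: true
# Values other than "account" and "password" are coerced: "true"/"false"
# become booleans, integers become dbus.UInt32, everything else stays a string.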
def read_account(path):
return parse_account(file(path).read())
def connect(manager, protocol, account, ready_handler=None):
reg = telepathy.client.ManagerRegistry()
reg.LoadManagers()
mgr = reg.GetManager(manager)
conn_bus_name, conn_object_path = \
mgr[CONN_MGR_INTERFACE].RequestConnection(protocol, account)
return telepathy.client.Connection(conn_bus_name, conn_object_path,
ready_handler=ready_handler)
def connection_from_file(path, ready_handler=None):
manager, protocol, account = read_account(path)
return connect(manager, protocol, account, ready_handler=ready_handler)
| epage/telepathy-python | examples/account.py | Python | lgpl-2.1 | 1,551 |
#
# Copyright 2015-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import collections
import logging
import threading
Callback = collections.namedtuple('Callback',
['conn', 'dom', 'body', 'opaque'])
def _null_cb(*args, **kwargs):
pass
_NULL = Callback(None, None, _null_cb, tuple())
class Handler(object):
_log = logging.getLogger('convirt.event')
_null = [_NULL]
def __init__(self, name=None, parent=None):
self._name = id(self) if name is None else name
self._parent = parent
self._lock = threading.Lock()
self.events = collections.defaultdict(list)
def register(self, event_id, conn, dom, func, opaque=None):
with self._lock:
# TODO: weakrefs?
cb = Callback(conn, dom, func, opaque)
# TODO: debug?
self._log.info('[%s] %i -> %s', self._name, event_id, cb)
self.events[event_id].append(cb)
def fire(self, event_id, dom, *args):
for cb in self.get_callbacks(event_id):
arguments = list(args)
if cb.opaque is not None:
arguments.append(cb.opaque)
domain = cb.dom
if dom is not None:
domain = dom
self._log.debug('firing: %s(%s, %s, %s)',
cb.body, cb.conn, domain, arguments)
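            # note: only the first matching callback is invoked; its
            # return value becomes the result of fire()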
return cb.body(cb.conn, domain, *arguments)
def get_callbacks(self, event_id):
with self._lock:
callback = self.events.get(event_id, None)
if callback is not None:
return callback
if self._parent is not None:
self._log.warning('[%s] unknown event %r',
self._name, event_id)
return self._parent.get_callbacks(event_id)
# TODO: debug?
self._log.warning('[%s] unhandled event %r', self._name, event_id)
return self._null
@property
def registered(self):
with self._lock:
return tuple(self.events.keys())
# for testing purposes
def clear(self):
with self._lock:
self.events.clear()
root = Handler(name='root')
def fire(event_id, dom, *args):
global root
root.fire(event_id, dom, *args)
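# Usage sketch (hypothetical event id and callback signature):
#   EVENT_ID = 0
#   def on_event(conn, dom, detail, opaque):
#       print(conn, dom, detail, opaque)
#   root.register(EVENT_ID, conn=None, dom='vm0', func=on_event, opaque='tag')
#   fire(EVENT_ID, None, 'detail')   # -> on_event(None, 'vm0', 'detail', 'tag')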
| mojaves/convirt | convirt/events.py | Python | lgpl-2.1 | 3,067 |
#!/bin/python
# -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# Hexa : Creation d'hexaedres
import hexablock
import os
#---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
doc = hexablock.addDocument ("default")
vx = doc.addVector (1,0,0)
vy = doc.addVector (0,1,0)
vz = doc.addVector (0,0,1)
vxy = doc.addVector (1,1,0)
nbr_files = 0
# ======================================================= save_vtk
def save_vtk () :
global nbr_files
nom = "monica%d.vtk" % nbr_files
nbr_files += 1
doc.saveVtk (nom)
# ======================================================= carre
def carre (x) :
return x*x
# ======================================================= get_center
def get_center (quad) :
px = 0
py = 0
pz = 0
for nv in range (4) :
vertex = quad.getVertex (nv)
px += vertex.getX() / 4
py += vertex.getY() / 4
pz += vertex.getZ() / 4
return [ px, py, pz ]
# ======================================================= nearest
def nearest (grid, vertex) :
nbre = grid.countVertex()
dmin = 1e+6
result = None
px = vertex.getX()
py = vertex.getY()
pz = vertex.getZ()
for nro in range (nbre) :
v1 = grid.getVertex (nro)
d2 = carre(px-v1.getX()) + carre(py-v1.getY()) + carre(pz-v1.getZ())
if (d2 < dmin) :
result = v1
dmin = d2
print vertex.getName () , px, py, pz, " -> ", result.getName()
return result
# ======================================================= nearest_quad
def nearest_quad (grid, quad) :
dmin = 1e+16
result = None
[ox, oy, oz] = get_center (quad)
nbre = grid.countQuad ()
for nro in range (nbre) :
q1 = grid.getQuad (nro)
if q1 != None :
[px, py, pz] = get_center (q1)
d2 = carre(px-ox) + carre(py-oy) + carre(pz-oz)
if (d2 < dmin) :
result = q1
dmin = d2
print quad.getName () , px, py, pz, " -> ", result.getName()
return result
# ======================================================= insert_cylinder
def insert_cylinder (plaque, nx, ny) :
hexa = plaque.getHexaIJK (nx, ny, 0)
xmin = 666 ; ymin = xmin ; zmin = xmin
xmax = -666 ; ymax = xmax ; zmax = xmax
tabv1 = []
for nv in range (8) :
node = hexa.getVertex (nv)
xmin = min (xmin, node.getX()) ; xmax = max (xmax, node.getX())
ymin = min (ymin, node.getY()) ; ymax = max (ymax, node.getY())
zmin = min (zmin, node.getZ()) ; zmax = max (zmax, node.getZ())
tabv1.append (node)
doc.removeHexa (hexa)
save_vtk ()
dx = (xmax - xmin)/2
dz = (zmax - zmin)/2
xorig = (xmin + xmax)/2
yorig = (ymin + ymax)/2
zorig = (zmin + zmax)/2 - 3*dz
orig = doc.addVertex (xorig, yorig, zorig)
nr = 1
na = 4
nh = 3
rext = dx
rint = rext/3
haut = 3
angle = 360
pipe = doc.makePipeUni (orig, vxy,vz, rint,rext,angle,haut, nr,na,nh)
hexablock.what ()
tabquad = []
tabv0 = []
for nq in range (4) :
quad = pipe.getQuadJK (1, nq, 1)
tabquad.append (quad)
print " .. tabquad[0] = ", tabquad[0].getName ()
cible = nearest_quad (plaque, tabquad[0])
tabquad[0]. setColor (5)
cible . setColor (5)
save_vtk ()
va1 = tabquad[0].getVertex (0)
va2 = tabquad[0].getVertex (1)
vb1 = cible.nearestVertex (va1)
vb2 = cible.nearestVertex (va2)
doc.setLevel (1)
doc.joinQuadsUni (tabquad, cible, va1, vb1, va2, vb2, 1)
hexablock.what ()
save_vtk ()
return
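    # NOTE: everything below this early return is intentionally disabled and
    # unreachable (tabv0 is never populated, so the merge loop cannot run)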
doc.setLevel (1)
for nv in range (8) :
ier = doc.mergeVertices (tabv0[nv], tabv1[nv])
print "ier = ", ier
save_vtk ()
# ======================================================= test_monica
def test_monica () :
orig = doc.addVertex (0,0,0)
lx = 1
ly = lx
lz = lx
nx = 3
ny = nx
nz = 1
plaque = doc.makeCartesianUni (orig, vx,vy,vz, lx, ly, lz, nx,ny,nz)
save_vtk ()
insert_cylinder (plaque, 1, 1)
## hexa = plaque.getHexaIJK (1,1,0)
## doc.removeHexa (hexa)
return doc
# ================================================================= Begin
doc = test_monica ()
law = doc.addLaw("Uniform", 4)
for j in range(doc.countPropagation()):
propa = doc.getPropagation(j)
propa.setLaw(law)
mesh_hexas = hexablock.mesh (doc)
| FedoraScientific/salome-hexablock | src/TEST_PY/test_v6/monica.py | Python | lgpl-2.1 | 5,353 |
#!/usr/bin/env python
#
# rpclib - Copyright (C) Rpclib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
#
# Most of the service tests are performed through the interop tests.
#
import datetime
import unittest
from lxml import etree
from rpclib.application import Application
from rpclib.auxproc.thread import ThreadAuxProc
from rpclib.auxproc.sync import SyncAuxProc
from rpclib.decorator import rpc
from rpclib.decorator import srpc
from rpclib.interface.wsdl import Wsdl11
from rpclib.model.complex import Array
from rpclib.model.complex import ComplexModel
from rpclib.model.primitive import DateTime
from rpclib.model.primitive import Float
from rpclib.model.primitive import Integer
from rpclib.model.primitive import String
from rpclib.protocol.soap import Soap11
from rpclib.protocol.http import HttpRpc
from rpclib.server.null import NullServer
from rpclib.server.wsgi import WsgiApplication
from rpclib.service import ServiceBase
Application.transport = 'test'
class Address(ComplexModel):
__namespace__ = "TestService"
street = String
city = String
zip = Integer
since = DateTime
    latitude = Float
longitude = Float
class Person(ComplexModel):
__namespace__ = "TestService"
name = String
birthdate = DateTime
age = Integer
addresses = Array(Address)
titles = Array(String)
class Request(ComplexModel):
__namespace__ = "TestService"
param1 = String
param2 = Integer
class Response(ComplexModel):
__namespace__ = "TestService"
param1 = Float
class TypeNS1(ComplexModel):
__namespace__ = "TestService.NS1"
s = String
i = Integer
class TypeNS2(ComplexModel):
__namespace__ = "TestService.NS2"
d = DateTime
f = Float
class MultipleNamespaceService(ServiceBase):
@rpc(TypeNS1, TypeNS2)
def a(ctx, t1, t2):
return "OK"
class TestService(ServiceBase):
@rpc(String, _returns=String)
def aa(ctx, s):
return s
@rpc(String, Integer, _returns=DateTime)
def a(ctx, s, i):
return datetime.datetime.now()
@rpc(Person, String, Address, _returns=Address)
def b(ctx, p, s, a):
return Address()
@rpc(Person, isAsync=True)
def d(ctx, Person):
pass
@rpc(Person, isCallback=True)
def e(ctx, Person):
pass
@rpc(String, String, String, _returns=String,
_in_variable_names={'_from': 'from', '_self': 'self',
'_import': 'import'},
_out_variable_name="return")
def f(ctx, _from, _self, _import):
return '1234'
class MultipleReturnService(ServiceBase):
@rpc(String, _returns=(String, String, String))
def multi(ctx, s):
return s, 'a', 'b'
class TestSingle(unittest.TestCase):
def setUp(self):
self.app = Application([TestService], 'tns', Soap11(), Soap11())
self.app.transport = 'null.rpclib'
self.srv = TestService()
wsdl = Wsdl11(self.app.interface)
wsdl.build_interface_document('URL')
self.wsdl_str = wsdl.get_interface_document()
self.wsdl_doc = etree.fromstring(self.wsdl_str)
def test_portypes(self):
porttype = self.wsdl_doc.find('{http://schemas.xmlsoap.org/wsdl/}portType')
self.assertEquals(
len(self.srv.public_methods), len(porttype.getchildren()))
    def test_override_param_names(self):
        for n in ['self', 'import', 'return', 'from']:
            assert n in self.wsdl_str, '"%s" not in self.wsdl_str' % n
class TestMultiple(unittest.TestCase):
def setUp(self):
self.app = Application([MultipleReturnService], 'tns', Soap11(), Soap11())
self.app.transport = 'none'
self.wsdl = Wsdl11(self.app.interface)
self.wsdl.build_interface_document('URL')
def test_multiple_return(self):
message_class = list(MultipleReturnService.public_methods.values())[0].out_message
message = message_class()
self.assertEquals(len(message._type_info), 3)
sent_xml = etree.Element('test')
self.app.out_protocol.to_parent_element(message_class, ('a', 'b', 'c'),
MultipleReturnService.get_tns(), sent_xml)
sent_xml = sent_xml[0]
print(etree.tostring(sent_xml, pretty_print=True))
response_data = self.app.out_protocol.from_element(message_class, sent_xml)
self.assertEquals(len(response_data), 3)
self.assertEqual(response_data[0], 'a')
self.assertEqual(response_data[1], 'b')
self.assertEqual(response_data[2], 'c')
class MultipleMethods1(ServiceBase):
@srpc(String)
def multi(s):
return "%r multi 1" % s
class MultipleMethods2(ServiceBase):
@srpc(String)
def multi(s):
return "%r multi 2" % s
class TestMultipleMethods(unittest.TestCase):
def test_single_method(self):
try:
app = Application([MultipleMethods1,MultipleMethods2], 'tns', Soap11(), Soap11())
except ValueError:
pass
else:
raise Exception('must fail.')
def test_simple_aux_nullserver(self):
data = []
class Service(ServiceBase):
@srpc(String)
def call(s):
data.append(s)
class AuxService(ServiceBase):
__aux__ = SyncAuxProc()
@srpc(String)
def call(s):
data.append(s)
app = Application([Service, AuxService], 'tns', Soap11(), Soap11())
server = NullServer(app)
server.service.call("hey")
assert data == ['hey', 'hey']
def test_simple_aux_wsgi(self):
data = []
class Service(ServiceBase):
@srpc(String, _returns=String)
def call(s):
data.append(s)
class AuxService(ServiceBase):
__aux__ = SyncAuxProc()
@srpc(String, _returns=String)
def call(s):
data.append(s)
def start_response(code, headers):
print code, headers
app = Application([Service, AuxService], 'tns', HttpRpc(), HttpRpc())
server = WsgiApplication(app)
server({
'QUERY_STRING': 's=hey',
'PATH_INFO': '/call',
'REQUEST_METHOD': 'GET',
}, start_response, "http://null")
assert data == ['hey', 'hey']
def test_thread_aux_wsgi(self):
import logging
logging.basicConfig(level=logging.DEBUG)
data = set()
class Service(ServiceBase):
@srpc(String, _returns=String)
def call(s):
data.add(s)
class AuxService(ServiceBase):
__aux__ = ThreadAuxProc()
@srpc(String, _returns=String)
def call(s):
data.add(s + "aux")
def start_response(code, headers):
print code, headers
app = Application([Service, AuxService], 'tns', HttpRpc(), HttpRpc())
server = WsgiApplication(app)
server({
'QUERY_STRING': 's=hey',
'PATH_INFO': '/call',
'REQUEST_METHOD': 'GET',
}, start_response, "http://null")
import time
time.sleep(1)
assert data == set(['hey', 'heyaux'])
def test_mixing_primary_and_aux_methods(self):
try:
class Service(ServiceBase):
@srpc(String, _returns=String, _aux=ThreadAuxProc())
def call(s):
pass
@srpc(String, _returns=String)
def mall(s):
pass
except Exception:
pass
else:
raise Exception("must fail with 'Exception: you can't mix aux and non-aux methods in a single service definition.'")
if __name__ == '__main__':
unittest.main()
| martijnvermaat/rpclib | src/rpclib/test/test_service.py | Python | lgpl-2.1 | 8,487 |
from leapp.topics import Topic
class ApiTestTopic(Topic):
name = 'api_test'
| leapp-to/prototype | tests/data/actor-api-tests/topics/apitest.py | Python | lgpl-2.1 | 82 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
class Vtk(CMakePackage):
"""The Visualization Toolkit (VTK) is an open-source, freely
available software system for 3D computer graphics, image
processing and visualization. """
homepage = "http://www.vtk.org"
url = "https://www.vtk.org/files/release/9.0/VTK-9.0.0.tar.gz"
list_url = "http://www.vtk.org/download/"
maintainers = ['chuckatkins', 'danlipsa']
version('9.0.0', sha256='15def4e6f84d72f82386617fe595ec124dda3cbd13ea19a0dcd91583197d8715')
version('8.2.0', sha256='34c3dc775261be5e45a8049155f7228b6bd668106c72a3c435d95730d17d57bb')
version('8.1.2', sha256='0995fb36857dd76ccfb8bb07350c214d9f9099e80b1e66b4a8909311f24ff0db')
version('8.1.1', sha256='71a09b4340f0a9c58559fe946dc745ab68a866cf20636a41d97b6046cb736324')
version('8.1.0', sha256='6e269f07b64fb13774f5925161fb4e1f379f4e6a0131c8408c555f6b58ef3cb7')
version('8.0.1', sha256='49107352923dea6de05a7b4c3906aaf98ef39c91ad81c383136e768dcf304069')
version('7.1.0', sha256='5f3ea001204d4f714be972a810a62c0f2277fbb9d8d2f8df39562988ca37497a')
version('7.0.0', sha256='78a990a15ead79cdc752e86b83cfab7dbf5b7ef51ba409db02570dbdd9ec32c3')
version('6.3.0', sha256='92a493354c5fa66bea73b5fc014154af5d9f3f6cee8d20a826f4cd5d4b0e8a5e')
version('6.1.0', sha256='bd7df10a479606d529a8b71f466c44a2bdd11fd534c62ce0aa44fad91883fa34')
# VTK7 defaults to OpenGL2 rendering backend
variant('opengl2', default=True, description='Enable OpenGL2 backend')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('python', default=False, description='Enable Python support')
variant('qt', default=False, description='Build with support for Qt')
variant('xdmf', default=False, description='Build XDMF file support')
variant('ffmpeg', default=False, description='Build with FFMPEG support')
variant('mpi', default=True, description='Enable MPI support')
patch('gcc.patch', when='@6.1.0')
# At the moment, we cannot build with both osmesa and qt, but as of
# VTK 8.1, that should change
conflicts('+osmesa', when='+qt')
extends('python', when='+python')
# Acceptable python versions depend on vtk version
# We need vtk at least 8.0.1 for python@3,
# and at least 9.0 for [email protected]
depends_on('[email protected]:2.9', when='@:8.0 +python', type=('build', 'run'))
depends_on('[email protected]:3.7.9', when='@8.0.1:8.9 +python',
type=('build', 'run'))
depends_on('[email protected]:', when='@9.0: +python', type=('build', 'run'))
    # We need mpi4py if building python wrappers and using MPI
depends_on('py-mpi4py', when='+python+mpi', type='run')
# python3.7 compatibility patch backported from upstream
# https://gitlab.kitware.com/vtk/vtk/commit/706f1b397df09a27ab8981ab9464547028d0c322
patch('python3.7-const-char.patch', when='@7.0.0:8.1.1 ^[email protected]:')
# The use of the OpenGL2 backend requires at least OpenGL Core Profile
# version 3.2 or higher.
depends_on('[email protected]:', when='+opengl2')
depends_on('[email protected]:', when='~opengl2')
if sys.platform != 'darwin':
depends_on('glx', when='~osmesa')
depends_on('libxt', when='~osmesa')
# Note: it is recommended to use mesa+llvm, if possible.
# mesa default is software rendering, llvm makes it faster
depends_on('mesa+osmesa', when='+osmesa')
# VTK will need Qt5OpenGL, and qt needs '-opengl' for that
depends_on('qt+opengl', when='+qt')
depends_on('boost', when='+xdmf')
depends_on('boost+mpi', when='+xdmf +mpi')
depends_on('ffmpeg', when='+ffmpeg')
depends_on('mpi', when='+mpi')
depends_on('expat')
depends_on('freetype')
depends_on('glew')
# set hl variant explicitly, similar to issue #7145
depends_on('hdf5+hl')
depends_on('jpeg')
depends_on('jsoncpp')
depends_on('libxml2')
depends_on('lz4')
depends_on('netcdf-c~mpi', when='~mpi')
depends_on('netcdf-c+mpi', when='+mpi')
depends_on('netcdf-cxx')
depends_on('libpng')
depends_on('libtiff')
depends_on('zlib')
depends_on('eigen', when='@8.2.0:')
depends_on('double-conversion', when='@8.2.0:')
depends_on('sqlite', when='@8.2.0:')
# For finding Fujitsu-MPI wrapper commands
patch('find_fujitsu_mpi.patch', when='@:8.2.0%fj')
def url_for_version(self, version):
url = "http://www.vtk.org/files/release/{0}/VTK-{1}.tar.gz"
return url.format(version.up_to(2), version)
def setup_build_environment(self, env):
# VTK has some trouble finding freetype unless it is set in
# the environment
env.set('FREETYPE_DIR', self.spec['freetype'].prefix)
def cmake_args(self):
spec = self.spec
opengl_ver = 'OpenGL{0}'.format('2' if '+opengl2' in spec else '')
cmake_args = [
'-DBUILD_SHARED_LIBS=ON',
'-DVTK_RENDERING_BACKEND:STRING={0}'.format(opengl_ver),
# In general, we disable use of VTK "ThirdParty" libs, preferring
# spack-built versions whenever possible
'-DVTK_USE_SYSTEM_LIBRARIES:BOOL=ON',
# However, in a few cases we can't do without them yet
'-DVTK_USE_SYSTEM_GL2PS:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBHARU=OFF',
'-DNETCDF_DIR={0}'.format(spec['netcdf-c'].prefix),
'-DNETCDF_C_ROOT={0}'.format(spec['netcdf-c'].prefix),
'-DNETCDF_CXX_ROOT={0}'.format(spec['netcdf-cxx'].prefix),
# Allow downstream codes (e.g. VisIt) to override VTK's classes
'-DVTK_ALL_NEW_OBJECT_FACTORY:BOOL=ON',
# Disable wrappers for other languages.
'-DVTK_WRAP_JAVA=OFF',
'-DVTK_WRAP_TCL=OFF',
]
# Some variable names have changed
if spec.satisfies('@8.2.0:'):
cmake_args.extend([
'-DVTK_USE_SYSTEM_OGG:BOOL=OFF',
'-DVTK_USE_SYSTEM_THEORA:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBPROJ:BOOL=OFF',
'-DVTK_USE_SYSTEM_PUGIXML:BOOL=OFF',
])
else:
cmake_args.extend([
'-DVTK_USE_SYSTEM_OGGTHEORA:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBPROJ4:BOOL=OFF',
])
if '+mpi' in spec:
if spec.satisfies('@:8.2.0'):
cmake_args.extend([
'-DVTK_Group_MPI:BOOL=ON',
'-DVTK_USE_SYSTEM_DIY2:BOOL=OFF'
])
else:
cmake_args.extend([
'-DVTK_USE_MPI=ON'
])
if '+ffmpeg' in spec:
cmake_args.extend(['-DModule_vtkIOFFMPEG:BOOL=ON'])
# Enable/Disable wrappers for Python.
if '+python' in spec:
cmake_args.extend([
'-DVTK_WRAP_PYTHON=ON',
'-DPYTHON_EXECUTABLE={0}'.format(spec['python'].command.path),
])
if '+mpi' in spec:
cmake_args.append('-DVTK_USE_SYSTEM_MPI4PY:BOOL=ON')
if spec.satisfies('@9.0.0: ^python@3:'):
cmake_args.append('-DVTK_PYTHON_VERSION=3')
else:
cmake_args.append('-DVTK_WRAP_PYTHON=OFF')
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DCMAKE_MACOSX_RPATH=ON'
])
if '+qt' in spec:
qt_ver = spec['qt'].version.up_to(1)
qt_bin = spec['qt'].prefix.bin
qmake_exe = os.path.join(qt_bin, 'qmake')
cmake_args.extend([
# Enable Qt support here.
'-DVTK_QT_VERSION:STRING={0}'.format(qt_ver),
'-DQT_QMAKE_EXECUTABLE:PATH={0}'.format(qmake_exe),
'-DVTK_Group_Qt:BOOL=ON',
])
# NOTE: The following definitions are required in order to allow
# VTK to build with qt~webkit versions (see the documentation for
# more info: http://www.vtk.org/Wiki/VTK/Tutorials/QtSetup).
if '~webkit' in spec['qt']:
cmake_args.extend([
'-DVTK_Group_Qt:BOOL=OFF',
'-DModule_vtkGUISupportQt:BOOL=ON',
'-DModule_vtkGUISupportQtOpenGL:BOOL=ON',
])
if '+xdmf' in spec:
if spec.satisfies('^[email protected]:'):
# This policy exists only for CMake >= 3.12
cmake_args.extend(["-DCMAKE_POLICY_DEFAULT_CMP0074=NEW"])
cmake_args.extend([
# Enable XDMF Support here
"-DModule_vtkIOXdmf2:BOOL=ON",
"-DModule_vtkIOXdmf3:BOOL=ON",
"-DBOOST_ROOT={0}".format(spec['boost'].prefix),
"-DBOOST_LIBRARY_DIR={0}".format(spec['boost'].prefix.lib),
"-DBOOST_INCLUDE_DIR={0}".format(spec['boost'].prefix.include),
"-DBOOST_NO_SYSTEM_PATHS:BOOL=ON",
# This is needed because VTK has multiple FindBoost
# and they stick to system boost if there's a system boost
# installed with CMake
"-DBoost_NO_BOOST_CMAKE:BOOL=ON",
"-DHDF5_ROOT={0}".format(spec['hdf5'].prefix),
# The xdmf project does not export any CMake file...
"-DVTK_USE_SYSTEM_XDMF3:BOOL=OFF",
"-DVTK_USE_SYSTEM_XDMF2:BOOL=OFF"
])
if '+mpi' in spec:
cmake_args.extend(["-DModule_vtkIOParallelXdmf3:BOOL=ON"])
cmake_args.append('-DVTK_RENDERING_BACKEND:STRING=' + opengl_ver)
if spec.satisfies('@:8.1.0'):
cmake_args.append('-DVTK_USE_SYSTEM_GLEW:BOOL=ON')
if '+osmesa' in spec:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DVTK_USE_COCOA:BOOL=OFF',
'-DVTK_OPENGL_HAS_OSMESA:BOOL=ON'])
else:
cmake_args.append('-DVTK_OPENGL_HAS_OSMESA:BOOL=OFF')
if spec.satisfies('@:7.9.9'):
# This option is gone in VTK 8.1.2
cmake_args.append('-DOpenGL_GL_PREFERENCE:STRING=LEGACY')
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DVTK_USE_COCOA:BOOL=ON'])
elif 'linux' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=ON',
'-DVTK_USE_COCOA:BOOL=OFF'])
if spec.satisfies('@:6.1.0'):
cmake_args.extend([
'-DCMAKE_C_FLAGS=-DGLX_GLXEXT_LEGACY',
'-DCMAKE_CXX_FLAGS=-DGLX_GLXEXT_LEGACY'
])
# VTK 6.1.0 (and possibly earlier) does not use
# NETCDF_CXX_ROOT to detect NetCDF C++ bindings, so
# NETCDF_CXX_INCLUDE_DIR and NETCDF_CXX_LIBRARY must be
# used instead to detect these bindings
netcdf_cxx_lib = spec['netcdf-cxx'].libs.joined()
cmake_args.extend([
'-DNETCDF_CXX_INCLUDE_DIR={0}'.format(
spec['netcdf-cxx'].prefix.include),
'-DNETCDF_CXX_LIBRARY={0}'.format(netcdf_cxx_lib),
])
# Garbage collection is unsupported in Xcode starting with
# version 5.1; if the Apple clang version of the compiler
# is 5.1.0 or later, unset the required Objective-C flags
# to remove the garbage collection flags. Versions of VTK
# after 6.1.0 set VTK_REQUIRED_OBJCXX_FLAGS to the empty
# string. This fix was recommended on the VTK mailing list
# in March 2014 (see
# https://public.kitware.com/pipermail/vtkusers/2014-March/083368.html)
if self.spec.satisfies('%[email protected]:'):
cmake_args.extend(['-DVTK_REQUIRED_OBJCXX_FLAGS='])
# A bug in tao pegtl causes build failures with intel compilers
if '%intel' in spec and spec.version >= Version('8.2'):
cmake_args.append(
'-DVTK_MODULE_ENABLE_VTK_IOMotionFX:BOOL=OFF')
return cmake_args
| rspavel/spack | var/spack/repos/builtin/packages/vtk/package.py | Python | lgpl-2.1 | 12,385 |
# python
# This file is generated by a program (mib2py).
import HOST_RESOURCES_MIB
OIDMAP = {
'1.3.6.1.2.1.25': HOST_RESOURCES_MIB.host,
'1.3.6.1.2.1.25.1': HOST_RESOURCES_MIB.hrSystem,
'1.3.6.1.2.1.25.2': HOST_RESOURCES_MIB.hrStorage,
'1.3.6.1.2.1.25.2.1': HOST_RESOURCES_MIB.hrStorageTypes,
'1.3.6.1.2.1.25.3': HOST_RESOURCES_MIB.hrDevice,
'1.3.6.1.2.1.25.3.1': HOST_RESOURCES_MIB.hrDeviceTypes,
'1.3.6.1.2.1.25.3.9': HOST_RESOURCES_MIB.hrFSTypes,
'1.3.6.1.2.1.25.4': HOST_RESOURCES_MIB.hrSWRun,
'1.3.6.1.2.1.25.5': HOST_RESOURCES_MIB.hrSWRunPerf,
'1.3.6.1.2.1.25.6': HOST_RESOURCES_MIB.hrSWInstalled,
'1.3.6.1.2.1.25.7': HOST_RESOURCES_MIB.hrMIBAdminInfo,
'1.3.6.1.2.1.25.7.1': HOST_RESOURCES_MIB.hostResourcesMibModule,
'1.3.6.1.2.1.25.7.2': HOST_RESOURCES_MIB.hrMIBCompliances,
'1.3.6.1.2.1.25.7.3': HOST_RESOURCES_MIB.hrMIBGroups,
'1.3.6.1.2.1.25.1.1': HOST_RESOURCES_MIB.hrSystemUptime,
'1.3.6.1.2.1.25.1.2': HOST_RESOURCES_MIB.hrSystemDate,
'1.3.6.1.2.1.25.1.3': HOST_RESOURCES_MIB.hrSystemInitialLoadDevice,
'1.3.6.1.2.1.25.1.4': HOST_RESOURCES_MIB.hrSystemInitialLoadParameters,
'1.3.6.1.2.1.25.1.5': HOST_RESOURCES_MIB.hrSystemNumUsers,
'1.3.6.1.2.1.25.1.6': HOST_RESOURCES_MIB.hrSystemProcesses,
'1.3.6.1.2.1.25.1.7': HOST_RESOURCES_MIB.hrSystemMaxProcesses,
'1.3.6.1.2.1.25.2.2': HOST_RESOURCES_MIB.hrMemorySize,
'1.3.6.1.2.1.25.4.1': HOST_RESOURCES_MIB.hrSWOSIndex,
'1.3.6.1.2.1.25.6.1': HOST_RESOURCES_MIB.hrSWInstalledLastChange,
'1.3.6.1.2.1.25.6.2': HOST_RESOURCES_MIB.hrSWInstalledLastUpdateTime,
'1.3.6.1.2.1.25.2.3.1.1': HOST_RESOURCES_MIB.hrStorageIndex,
'1.3.6.1.2.1.25.2.3.1.2': HOST_RESOURCES_MIB.hrStorageType,
'1.3.6.1.2.1.25.2.3.1.3': HOST_RESOURCES_MIB.hrStorageDescr,
'1.3.6.1.2.1.25.2.3.1.4': HOST_RESOURCES_MIB.hrStorageAllocationUnits,
'1.3.6.1.2.1.25.2.3.1.5': HOST_RESOURCES_MIB.hrStorageSize,
'1.3.6.1.2.1.25.2.3.1.6': HOST_RESOURCES_MIB.hrStorageUsed,
'1.3.6.1.2.1.25.2.3.1.7': HOST_RESOURCES_MIB.hrStorageAllocationFailures,
'1.3.6.1.2.1.25.3.2.1.1': HOST_RESOURCES_MIB.hrDeviceIndex,
'1.3.6.1.2.1.25.3.2.1.2': HOST_RESOURCES_MIB.hrDeviceType,
'1.3.6.1.2.1.25.3.2.1.3': HOST_RESOURCES_MIB.hrDeviceDescr,
'1.3.6.1.2.1.25.3.2.1.4': HOST_RESOURCES_MIB.hrDeviceID,
'1.3.6.1.2.1.25.3.2.1.5': HOST_RESOURCES_MIB.hrDeviceStatus,
'1.3.6.1.2.1.25.3.2.1.6': HOST_RESOURCES_MIB.hrDeviceErrors,
'1.3.6.1.2.1.25.3.3.1.1': HOST_RESOURCES_MIB.hrProcessorFrwID,
'1.3.6.1.2.1.25.3.3.1.2': HOST_RESOURCES_MIB.hrProcessorLoad,
'1.3.6.1.2.1.25.3.4.1.1': HOST_RESOURCES_MIB.hrNetworkIfIndex,
'1.3.6.1.2.1.25.3.5.1.1': HOST_RESOURCES_MIB.hrPrinterStatus,
'1.3.6.1.2.1.25.3.5.1.2': HOST_RESOURCES_MIB.hrPrinterDetectedErrorState,
'1.3.6.1.2.1.25.3.6.1.1': HOST_RESOURCES_MIB.hrDiskStorageAccess,
'1.3.6.1.2.1.25.3.6.1.2': HOST_RESOURCES_MIB.hrDiskStorageMedia,
'1.3.6.1.2.1.25.3.6.1.3': HOST_RESOURCES_MIB.hrDiskStorageRemoveble,
'1.3.6.1.2.1.25.3.6.1.4': HOST_RESOURCES_MIB.hrDiskStorageCapacity,
'1.3.6.1.2.1.25.3.7.1.1': HOST_RESOURCES_MIB.hrPartitionIndex,
'1.3.6.1.2.1.25.3.7.1.2': HOST_RESOURCES_MIB.hrPartitionLabel,
'1.3.6.1.2.1.25.3.7.1.3': HOST_RESOURCES_MIB.hrPartitionID,
'1.3.6.1.2.1.25.3.7.1.4': HOST_RESOURCES_MIB.hrPartitionSize,
'1.3.6.1.2.1.25.3.7.1.5': HOST_RESOURCES_MIB.hrPartitionFSIndex,
'1.3.6.1.2.1.25.3.8.1.1': HOST_RESOURCES_MIB.hrFSIndex,
'1.3.6.1.2.1.25.3.8.1.2': HOST_RESOURCES_MIB.hrFSMountPoint,
'1.3.6.1.2.1.25.3.8.1.3': HOST_RESOURCES_MIB.hrFSRemoteMountPoint,
'1.3.6.1.2.1.25.3.8.1.4': HOST_RESOURCES_MIB.hrFSType,
'1.3.6.1.2.1.25.3.8.1.5': HOST_RESOURCES_MIB.hrFSAccess,
'1.3.6.1.2.1.25.3.8.1.6': HOST_RESOURCES_MIB.hrFSBootable,
'1.3.6.1.2.1.25.3.8.1.7': HOST_RESOURCES_MIB.hrFSStorageIndex,
'1.3.6.1.2.1.25.3.8.1.8': HOST_RESOURCES_MIB.hrFSLastFullBackupDate,
'1.3.6.1.2.1.25.3.8.1.9': HOST_RESOURCES_MIB.hrFSLastPartialBackupDate,
'1.3.6.1.2.1.25.4.2.1.1': HOST_RESOURCES_MIB.hrSWRunIndex,
'1.3.6.1.2.1.25.4.2.1.2': HOST_RESOURCES_MIB.hrSWRunName,
'1.3.6.1.2.1.25.4.2.1.3': HOST_RESOURCES_MIB.hrSWRunID,
'1.3.6.1.2.1.25.4.2.1.4': HOST_RESOURCES_MIB.hrSWRunPath,
'1.3.6.1.2.1.25.4.2.1.5': HOST_RESOURCES_MIB.hrSWRunParameters,
'1.3.6.1.2.1.25.4.2.1.6': HOST_RESOURCES_MIB.hrSWRunType,
'1.3.6.1.2.1.25.4.2.1.7': HOST_RESOURCES_MIB.hrSWRunStatus,
'1.3.6.1.2.1.25.5.1.1.1': HOST_RESOURCES_MIB.hrSWRunPerfCPU,
'1.3.6.1.2.1.25.5.1.1.2': HOST_RESOURCES_MIB.hrSWRunPerfMem,
'1.3.6.1.2.1.25.6.3.1.1': HOST_RESOURCES_MIB.hrSWInstalledIndex,
'1.3.6.1.2.1.25.6.3.1.2': HOST_RESOURCES_MIB.hrSWInstalledName,
'1.3.6.1.2.1.25.6.3.1.3': HOST_RESOURCES_MIB.hrSWInstalledID,
'1.3.6.1.2.1.25.6.3.1.4': HOST_RESOURCES_MIB.hrSWInstalledType,
'1.3.6.1.2.1.25.6.3.1.5': HOST_RESOURCES_MIB.hrSWInstalledDate,
'1.3.6.1.2.1.25.7.3.1': HOST_RESOURCES_MIB.hrSystemGroup,
'1.3.6.1.2.1.25.7.3.2': HOST_RESOURCES_MIB.hrStorageGroup,
'1.3.6.1.2.1.25.7.3.3': HOST_RESOURCES_MIB.hrDeviceGroup,
'1.3.6.1.2.1.25.7.3.4': HOST_RESOURCES_MIB.hrSWRunGroup,
'1.3.6.1.2.1.25.7.3.5': HOST_RESOURCES_MIB.hrSWRunPerfGroup,
'1.3.6.1.2.1.25.7.3.6': HOST_RESOURCES_MIB.hrSWInstalledGroup,
}
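# Usage sketch: map a numeric OID from an SNMP response back to its MIB node:
#   node = OIDMAP.get('1.3.6.1.2.1.25.1.1')   # -> HOST_RESOURCES_MIB.hrSystemUptime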
| xiangke/pycopia | mibs/pycopia/mibs/HOST_RESOURCES_MIB_OID.py | Python | lgpl-2.1 | 5,000 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Distutils installer for PyJack
# Test for Jack2
#---------------------------------------------------#
import os
if os.path.exists("/usr/local/include/jack/jack.h"):
path = "/usr/local/include/jack/jack.h"
elif os.path.exists("/usr/include/jack/jack.h"):
path = "/usr/include/jack/jack.h"
else:
print("You don't seem to have the jack headers installed.\nPlease install them first")
exit(-1)
test = open(path).read()
pyjack_macros=[]
if ("jack_get_version_string" in test):
pyjack_macros+=[('JACK2', '1')]
else:
pyjack_macros+=[('JACK1', '1')]
#----------------------------------------------------#
from distutils.core import setup, Extension
import numpy.distutils.misc_util
numpy_include_dirs = numpy.distutils.misc_util.get_numpy_include_dirs()
setup(
name = "pyjack",
version = "0.5.1",
description = "Python bindings for the Jack Audio Server",
author = "Andrew W. Schmeder, falkTX, IOhannes m zmölnig",
author_email = "[email protected]",
url = "http://sourceforge.net/projects/py-jack",
long_description = '''PyJack is a module written in C which exposes the Jack API to Python.
For information about Jack see http://jackaudio.org. This
enables a Python program to connect to and interact with pro-audio
applications which use the Jack Audio Server''',
license = "GNU LGPL2.1",
ext_modules = [Extension("jack",
["pyjack.c"],
libraries=["jack", "dl"],
include_dirs=numpy_include_dirs,
define_macros=pyjack_macros,
)],
)
| kylerbrown/pyjack | setup.py | Python | lgpl-2.1 | 1,661 |
#!/usr/bin/python
from gi.repository import Gdk
from xml.etree.ElementTree import ElementTree, Element
import re
import sys
ESCAPE_PATTERN = re.compile(r'\\u\{([0-9A-Fa-f]+?)\}')
ISO_PATTERN = re.compile(r'[A-E]([0-9]+)')
def parse_single_key(value):
key = Element('key')
uc = 0
    if hasattr(__builtins__, 'unichr'):
        def unescape(m):
            return unichr(int(m.group(1), 16))
    else:
        def unescape(m):
            return chr(int(m.group(1), 16))
value = ESCAPE_PATTERN.sub(unescape, value)
if len(value) > 1:
key.set('text', value)
uc = ord(value[0])
keyval = Gdk.unicode_to_keyval(uc)
name = Gdk.keyval_name(keyval)
key.set('name', name)
return key
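# Example (illustrative): a CLDR value such as "\u{0259}" is unescaped to the
# single character it denotes before the lookup, so the resulting element is
# <key name="..."/> with whatever name Gdk.keyval_name() reports for it.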
def convert(source, tree):
root = Element('layout')
for index, keymap in enumerate(tree.iter('keyMap')):
level = Element('level')
rows = {}
root.append(level)
level.set('name', 'level%d' % (index+1))
# FIXME: heuristics here
modifiers = keymap.get('modifiers')
if not modifiers:
mode = 'default'
elif 'shift' in modifiers.split(' ') or 'lock' in modifiers.split(' '):
mode = 'latched'
else:
mode = 'locked'
level.set('mode', mode)
for _map in keymap.iter('map'):
value = _map.get('to')
key = parse_single_key(value)
iso = _map.get('iso')
if not ISO_PATTERN.match(iso):
sys.stderr.write('invalid ISO key name: %s\n' % iso)
continue
if not iso[0] in rows:
rows[iso[0]] = []
rows[iso[0]].append((int(iso[1:]), key))
# add attribute to certain keys
name = key.get('name')
if name == 'space':
key.set('align', 'center')
key.set('width', '6.0')
if name in ('space', 'BackSpace'):
key.set('repeatable', 'yes')
# add subkeys
longPress = _map.get('longPress')
if longPress:
for value in longPress.split(' '):
subkey = parse_single_key(value)
key.append(subkey)
for k, v in sorted(list(rows.items()), key=lambda x: x[0], reverse=True):
row = Element('row')
        for key in sorted(v, key=lambda x: x[0]):
row.append(key[1])
level.append(row)
return root
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
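        # 'elem' is deliberately rebound to each child; after the loop it is
        # the last child, whose tail dedents back to the parent's level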
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print("supply a CLDR keyboard file")
sys.exit(1)
source = sys.argv[-1]
itree = ElementTree()
itree.parse(source)
root = convert(source, itree)
indent(root)
otree = ElementTree(root)
if hasattr(sys.stdout, 'buffer'):
out = sys.stdout.buffer
else:
out = sys.stdout
otree.write(out, xml_declaration=True, encoding='UTF-8')
| GNOME/caribou | tools/convert_cldr.py | Python | lgpl-2.1 | 3,381 |
# coding: utf-8
import os
import shutil
from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_PREFIX, \
DOWNLOAD_TMP_FILE_SUFFIX
from nxdrive.engine.processor import Processor as OldProcessor
from nxdrive.logging_config import get_logger
log = get_logger(__name__)
class Processor(OldProcessor):
def __init__(self, engine, item_getter, name=None):
super(Processor, self).__init__(engine, item_getter, name)
def acquire_state(self, row_id):
log.warning("acquire...")
result = super(Processor, self).acquire_state(row_id)
if result is not None and self._engine.get_local_watcher().is_pending_scan(result.local_parent_path):
self._dao.release_processor(self._thread_id)
# Postpone pair for watcher delay
self._engine.get_queue_manager().postpone_pair(result, self._engine.get_local_watcher().get_scan_delay())
return None
log.warning("Acquired: %r", result)
return result
def _get_partial_folders(self):
local_client = self._engine.get_local_client()
if not local_client.exists('/.partials'):
local_client.make_folder('/', '.partials')
return local_client.abspath('/.partials')
def _download_content(self, local_client, remote_client, doc_pair, file_path):
# TODO Should share between threads
file_out = os.path.join(self._get_partial_folders(), DOWNLOAD_TMP_FILE_PREFIX +
doc_pair.remote_digest + str(self._thread_id) + DOWNLOAD_TMP_FILE_SUFFIX)
# Check if the file is already on the HD
pair = self._dao.get_valid_duplicate_file(doc_pair.remote_digest)
if pair:
shutil.copy(local_client.abspath(pair.local_path), file_out)
return file_out
        tmp_file = remote_client.stream_content(doc_pair.remote_ref, file_path,
                                                parent_fs_item_id=doc_pair.remote_parent_ref,
                                                file_out=file_out)
self._update_speed_metrics()
return tmp_file
def _update_remotely(self, doc_pair, local_client, remote_client, is_renaming):
log.warning("_update_remotely")
os_path = local_client.abspath(doc_pair.local_path)
if is_renaming:
new_os_path = os.path.join(os.path.dirname(os_path), doc_pair.remote_name)
log.debug("Replacing local file '%s' by '%s'.", os_path, new_os_path)
else:
new_os_path = os_path
log.debug("Updating content of local file '%s'.", os_path)
tmp_file = self._download_content(local_client, remote_client, doc_pair, new_os_path)
# Delete original file and rename tmp file
local_client.delete_final(doc_pair.local_path)
rel_path = local_client.get_path(tmp_file)
local_client.set_remote_id(rel_path, doc_pair.remote_ref)
# Move rename
updated_info = local_client.move(rel_path, doc_pair.local_parent_path, doc_pair.remote_name)
doc_pair.local_digest = updated_info.get_digest()
self._dao.update_last_transfer(doc_pair.id, "download")
self._refresh_local_state(doc_pair, updated_info)
def _create_remotely(self, local_client, remote_client, doc_pair, parent_pair, name):
local_parent_path = parent_pair.local_path
# TODO Shared this locking system / Can have concurrent lock
self._unlock_readonly(local_client, local_parent_path)
tmp_file = None
try:
if doc_pair.folderish:
log.debug("Creating local folder '%s' in '%s'", name,
local_client.abspath(parent_pair.local_path))
# Might want do temp name to original
path = local_client.make_folder(local_parent_path, name)
else:
path, os_path, name = local_client.get_new_file(local_parent_path,
name)
tmp_file = self._download_content(local_client, remote_client, doc_pair, os_path)
log.debug("Creating local file '%s' in '%s'", name,
local_client.abspath(parent_pair.local_path))
# Move file to its folder - might want to split it in two for events
local_client.move(local_client.get_path(tmp_file),local_parent_path, name)
self._dao.update_last_transfer(doc_pair.id, "download")
finally:
self._lock_readonly(local_client, local_parent_path)
# Clean .nxpart if needed
if tmp_file is not None and os.path.exists(tmp_file):
os.remove(tmp_file)
return path
| ssdi-drive/nuxeo-drive | nuxeo-drive-client/nxdrive/engine/next/processor.py | Python | lgpl-2.1 | 4,670 |
import os
import subprocess
from pathlib import Path
import pyinstaller_versionfile
import tomli
packaging_path = Path(__file__).resolve().parent
def get_version() -> str:
project_dir = Path(__file__).resolve().parent.parent
f = project_dir / "pyproject.toml"
return str(tomli.loads(f.read_text())["tool"]["poetry"]["version"])
def make_gaphor_script():
pyproject_toml = packaging_path.parent / "pyproject.toml"
with open(pyproject_toml, "rb") as f:
toml = tomli.load(f)
gaphor_script = packaging_path / "gaphor-script.py"
with open(gaphor_script, "w") as file:
# https://github.com/pyinstaller/pyinstaller/issues/6100
# On one Windows computer, PyInstaller was adding a ; to
# end of the path, this removes it if it exists
file.write("import os\n")
file.write("if os.environ['PATH'][-1] == ';':\n")
file.write(" os.environ['PATH'] = os.environ['PATH'][:-1]\n")
# Check for and remove two semicolons in path
file.write("os.environ['PATH'] = os.environ['PATH'].replace(';;', ';')\n")
plugins = toml["tool"]["poetry"]["plugins"]
for cat in plugins.values():
for entrypoint in cat.values():
file.write(f"import {entrypoint.split(':')[0]}\n")
file.write("from gaphor.ui import main\n")
file.write("import sys\n")
file.write("main(sys.argv)\n")
def make_file_version_info():
win_packaging_path = packaging_path / "windows"
metadata = win_packaging_path / "versionfile_metadata.yml"
file_version_out = win_packaging_path / "file_version_info.txt"
version = get_version()
if "dev" in version:
version = version[: version.rfind(".dev")]
pyinstaller_versionfile.create_versionfile_from_input_file(
output_file=file_version_out,
input_file=metadata,
version=version,
)
def make_pyinstaller():
os.chdir(packaging_path)
subprocess.run(["pyinstaller", "-y", "gaphor.spec"])
| amolenaar/gaphor | packaging/make-script.py | Python | lgpl-2.1 | 2,017 |
#!/usr/bin/env python
from nose.tools import *
from utilities import execution_path, save_data, contains_word
import os, mapnik
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
def test_dataraster_coloring():
srs = '+init=epsg:32630'
lyr = mapnik.Layer('dataraster')
lyr.datasource = mapnik.Gdal(
file = '../data/raster/dataraster.tif',
band = 1,
)
lyr.srs = srs
_map = mapnik.Map(256,256, srs)
style = mapnik.Style()
rule = mapnik.Rule()
sym = mapnik.RasterSymbolizer()
# Assigning a colorizer to the RasterSymbolizer tells the later
# that it should use it to colorize the raw data raster
sym.colorizer = mapnik.RasterColorizer(mapnik.COLORIZER_DISCRETE, mapnik.Color("transparent"))
for value, color in [
( 0, "#0044cc"),
( 10, "#00cc00"),
( 20, "#ffff00"),
( 30, "#ff7f00"),
( 40, "#ff0000"),
( 50, "#ff007f"),
( 60, "#ff00ff"),
( 70, "#cc00cc"),
( 80, "#990099"),
( 90, "#660066"),
( 200, "transparent"),
]:
sym.colorizer.add_stop(value, mapnik.Color(color))
rule.symbols.append(sym)
style.rules.append(rule)
_map.append_style('foo', style)
lyr.styles.append('foo')
_map.layers.append(lyr)
_map.zoom_to_box(lyr.envelope())
im = mapnik.Image(_map.width,_map.height)
mapnik.render(_map, im)
# save a png somewhere so we can see it
save_data('test_dataraster_coloring.png', im.tostring('png'))
imdata = im.tostring()
# we have some values in the [20,30) interval so check that they're colored
assert contains_word('\xff\xff\x00\xff', imdata)
def test_dataraster_query_point():
srs = '+init=epsg:32630'
lyr = mapnik.Layer('dataraster')
lyr.datasource = mapnik.Gdal(
file = '../data/raster/dataraster.tif',
band = 1,
)
lyr.srs = srs
_map = mapnik.Map(256,256, srs)
_map.layers.append(lyr)
# point inside raster extent with valid data
x, y = 427417, 4477517
features = _map.query_point(0,x,y).features
assert len(features) == 1
feat = features[0]
center = feat.envelope().center()
assert center.x==x and center.y==y, center
value = feat['value']
assert value == 21.0, value
# point outside raster extent
features = _map.query_point(0,-427417,4477517).features
assert len(features) == 0
# point inside raster extent with nodata
features = _map.query_point(0,126850,4596050).features
assert len(features) == 0
def test_load_save_map():
map = mapnik.Map(256,256)
in_map = "../data/good_maps/raster_symbolizer.xml"
mapnik.load_map(map, in_map)
out_map = mapnik.save_map_to_string(map)
assert 'RasterSymbolizer' in out_map
assert 'RasterColorizer' in out_map
assert 'stop' in out_map
def test_raster_with_alpha_blends_correctly_with_background():
WIDTH = 500
HEIGHT = 500
map = mapnik.Map(WIDTH, HEIGHT)
WHITE = mapnik.Color(255, 255, 255)
map.background = WHITE
style = mapnik.Style()
rule = mapnik.Rule()
symbolizer = mapnik.RasterSymbolizer()
#XXX: This fixes it, see http://trac.mapnik.org/ticket/759#comment:3
# (and remove comment when this test passes)
#symbolizer.scaling="bilinear_old"
rule.symbols.append(symbolizer)
style.rules.append(rule)
map.append_style('raster_style', style)
map_layer = mapnik.Layer('test_layer')
filepath = '../data/raster/white-alpha.png'
map_layer.datasource = mapnik.Gdal(file=filepath)
map_layer.styles.append('raster_style')
map.layers.append(map_layer)
map.zoom_all()
mim = mapnik.Image(WIDTH, HEIGHT)
mapnik.render(map, mim)
save_data('test_raster_with_alpha_blends_correctly_with_background.png',
mim.tostring('png'))
imdata = mim.tostring()
# All white is expected
assert contains_word('\xff\xff\xff\xff', imdata)
def test_raster_warping():
lyrSrs = "+init=epsg:32630"
mapSrs = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
lyr = mapnik.Layer('dataraster', lyrSrs)
lyr.datasource = mapnik.Gdal(
file = '../data/raster/dataraster.tif',
band = 1,
)
sym = mapnik.RasterSymbolizer()
sym.colorizer = mapnik.RasterColorizer(mapnik.COLORIZER_DISCRETE, mapnik.Color(255,255,0))
rule = mapnik.Rule()
rule.symbols.append(sym)
style = mapnik.Style()
style.rules.append(rule)
_map = mapnik.Map(256,256, mapSrs)
_map.append_style('foo', style)
lyr.styles.append('foo')
_map.layers.append(lyr)
prj_trans = mapnik.ProjTransform(mapnik.Projection(mapSrs),
mapnik.Projection(lyrSrs))
_map.zoom_to_box(prj_trans.backward(lyr.envelope()))
im = mapnik.Image(_map.width,_map.height)
mapnik.render(_map, im)
# save a png somewhere so we can see it
save_data('test_raster_warping.png', im.tostring('png'))
imdata = im.tostring()
assert contains_word('\xff\xff\x00\xff', imdata)
def test_raster_warping_does_not_overclip_source():
lyrSrs = "+init=epsg:32630"
mapSrs = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
lyr = mapnik.Layer('dataraster', lyrSrs)
lyr.datasource = mapnik.Gdal(
file = '../data/raster/dataraster.tif',
band = 1,
)
sym = mapnik.RasterSymbolizer()
sym.colorizer = mapnik.RasterColorizer(mapnik.COLORIZER_DISCRETE, mapnik.Color(255,255,0))
rule = mapnik.Rule()
rule.symbols.append(sym)
style = mapnik.Style()
style.rules.append(rule)
_map = mapnik.Map(256,256, mapSrs)
_map.background=mapnik.Color('white')
_map.append_style('foo', style)
lyr.styles.append('foo')
_map.layers.append(lyr)
_map.zoom_to_box(mapnik.Box2d(3,42,4,43))
im = mapnik.Image(_map.width,_map.height)
mapnik.render(_map, im)
# save a png somewhere so we can see it
save_data('test_raster_warping_does_not_overclip_source.png',
im.tostring('png'))
assert im.view(0,200,1,1).tostring()=='\xff\xff\x00\xff'
if __name__ == "__main__":
setup()
[eval(run)() for run in dir() if 'test_' in run]
| mojodna/debian-mapnik | tests/python_tests/raster_symbolizer_test.py | Python | lgpl-2.1 | 6,314 |
# -*- coding: UTF-8 -*-
from datetime import date
import re
import pytest
from pyopenmensa.feed import LazyBuilder
@pytest.fixture
def canteen():
return LazyBuilder()
def test_date_converting(canteen):
day = date(2013, 3, 7)
assert canteen.dayCount() == 0
canteen.setDayClosed('2013-03-07')
assert canteen.dayCount() == 1
canteen.setDayClosed(day)
assert canteen.dayCount() == 1
canteen.setDayClosed('07.03.2013')
assert canteen.dayCount() == 1
def test_has_meals_for(canteen):
day = date(2013, 3, 7)
assert canteen.hasMealsFor(day) is False
canteen._days[day] = {'Hausgericht': ('Gulash', [], {})}
assert canteen.hasMealsFor(day) is True
canteen.setDayClosed(day)
assert canteen.hasMealsFor(day) is False
def test_add_meal(canteen):
day = date(2013, 3, 7)
canteen.addMeal(day, 'Hauptgericht', 'Gulasch')
assert canteen.hasMealsFor(day)
def test_too_long_meal_name(canteen):
    day = date(2013, 3, 7)
    canteen.addMeal(day, 'Hauptgericht', 'Y'*251)
    assert canteen.hasMealsFor(day)
def test_caseinsensitive_notes(canteen):
day = date(2013, 3, 7)
canteen.legendKeyFunc = lambda v: v.lower()
canteen.setLegendData(legend={'f': 'Note'})
canteen.addMeal(day, 'Test', 'Essen(F)')
assert canteen._days[day]['Test'][0] == ('Essen', ['Note'], {})
def test_notes_regex(canteen):
day = date(2013, 3, 7)
canteen.extra_regex = re.compile('_([0-9]{1,3})_(?:: +)?', re.UNICODE)
canteen.setLegendData(legend={'2': 'Found Note'})
canteen.addMeal(day, 'Test', '_2_: Essen _a_, _2,2_, (2)')
assert canteen._days[day]['Test'][0] == ('Essen _a_, _2,2_, (2)', ['Found Note'], {})
| mswart/pyopenmensa | tests/feed/test_lazy_canteen.py | Python | lgpl-3.0 | 1,685 |
'''
Puck: FreeBSD virtualization guest configuration server
Copyright (C) 2011 The Hotel Communication Network inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os.path
import cherrypy
from libs.controller import *
import models
from models import Users
class RootController(Controller):
crumbs = [Crumb("/", "Home")]
def __init__(self, lookup):
Controller.__init__(self, lookup)
self._lookup = lookup
self._routes = {}
@cherrypy.expose
@cherrypy.tools.myauth()
def index(self):
return self.render("index.html", self.crumbs[:-1])
@cherrypy.expose
def login(self, **post):
if post:
self._login(post)
return self.render("login.html", self.crumbs[:-1])
@cherrypy.expose
def logout(self, **post):
cherrypy.session.delete()
raise cherrypy.HTTPRedirect("/login")
def add(self, route, cls):
self._routes[route] = cls
    def load(self):
        for route in self._routes:
            setattr(self, route, self._routes[route](self._lookup))
def _login(self, post):
fields = ['user.username', 'user.password']
for f in fields:
if not f in post:
cherrypy.session['flash'] = "Invalid form data."
return False
hash_password = Users.hash_password(post['user.password'])
user = Users.first(username=post['user.username'], password=hash_password)
if not user:
cherrypy.session['flash'] = 'Invalid username or password.'
return False
creds = user.generate_auth()
cherrypy.session['user.id'] = user.id
cherrypy.session['user.group'] = user.user_group
cherrypy.session['credentials'] = creds
raise cherrypy.HTTPRedirect('/index')
| masom/Puck | server/controllers/root.py | Python | lgpl-3.0 | 2,450 |
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Math.Vector import Vector
from UM.Math.Float import Float
class Plane:
"""Plane representation using normal and distance."""
def __init__(self, normal = Vector(), distance = 0.0):
super().__init__()
self._normal = normal
self._distance = distance
@property
def normal(self):
return self._normal
@property
def distance(self):
return self._distance
def intersectsRay(self, ray):
w = ray.origin - (self._normal * self._distance)
nDotR = self._normal.dot(ray.direction)
nDotW = -self._normal.dot(w)
if Float.fuzzyCompare(nDotR, 0.0):
return False
t = nDotW / nDotR
if t < 0:
return False
return t
def __repr__(self):
return "Plane(normal = {0}, distance = {1})".format(self._normal, self._distance)
| Ultimaker/Uranium | UM/Math/Plane.py | Python | lgpl-3.0 | 975 |
from __future__ import absolute_import
import logging
import os
import sys
import datetime
import psutil
from six import StringIO
from twisted.web import http, resource
from Tribler.Core.Utilities.instrumentation import WatchDog
import Tribler.Core.Utilities.json_util as json
HAS_MELIAE = True
try:
from meliae import scanner
except ImportError:
HAS_MELIAE = False
class MemoryDumpBuffer(StringIO):
"""
Meliae expects its file handle to support write(), flush() and __call__().
The StringIO class does not support __call__(), therefore we provide this subclass.
"""
def __call__(self, s):
StringIO.write(self, s)
class DebugEndpoint(resource.Resource):
"""
This endpoint is responsible for handing requests regarding debug information in Tribler.
"""
def __init__(self, session):
resource.Resource.__init__(self)
child_handler_dict = {"circuits": DebugCircuitsEndpoint, "open_files": DebugOpenFilesEndpoint,
"open_sockets": DebugOpenSocketsEndpoint, "threads": DebugThreadsEndpoint,
"cpu": DebugCPUEndpoint, "memory": DebugMemoryEndpoint,
"log": DebugLogEndpoint, "profiler": DebugProfilerEndpoint}
for path, child_cls in child_handler_dict.iteritems():
self.putChild(path, child_cls(session))
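# Wiring sketch (hypothetical; Tribler's own startup code does this elsewhere):
# serving this resource tree with Twisted matches the curl examples below.
#
#   from twisted.internet import reactor
#   from twisted.web import server
#   reactor.listenTCP(8085, server.Site(DebugEndpoint(session)))
#   reactor.run()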
class DebugCircuitsEndpoint(resource.Resource):
"""
This class handles requests regarding the tunnel community debug information.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
self.putChild("slots", DebugCircuitSlotsEndpoint(session))
def render_GET(self, request):
"""
.. http:get:: /debug/circuits
A GET request to this endpoint returns information about the built circuits in the tunnel community.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/circuits
**Example response**:
.. sourcecode:: javascript
{
"circuits": [{
"id": 1234,
"state": "EXTENDING",
"goal_hops": 4,
"bytes_up": 45,
"bytes_down": 49,
"created": 1468176257,
"hops": [{
"host": "unknown"
}, {
"host": "39.95.147.20:8965"
}],
...
}, ...]
}
"""
tunnel_community = self.session.lm.tunnel_community
if not tunnel_community:
request.setResponseCode(http.NOT_FOUND)
return json.dumps({"error": "tunnel community not found"})
circuits_json = []
for circuit_id, circuit in tunnel_community.circuits.iteritems():
item = {'id': circuit_id, 'state': str(circuit.state), 'goal_hops': circuit.goal_hops,
'bytes_up': circuit.bytes_up, 'bytes_down': circuit.bytes_down, 'created': circuit.creation_time}
hops_array = []
for hop in circuit.hops:
hops_array.append({'host': 'unknown' if 'UNKNOWN HOST' in hop.host else '%s:%s' % (hop.host, hop.port)})
item['hops'] = hops_array
circuits_json.append(item)
return json.dumps({'circuits': circuits_json})
class DebugCircuitSlotsEndpoint(resource.Resource):
"""
This class handles requests for information about slots in the tunnel overlay.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/circuits/slots
A GET request to this endpoint returns information about the slots in the tunnel overlay.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/circuits/slots
**Example response**:
.. sourcecode:: javascript
            {
                "slots": {
                    "random": [...],
                    "competing": [...]
                }
            }
"""
return json.dumps({
"slots": {
"random": self.session.lm.tunnel_community.random_slots,
"competing": self.session.lm.tunnel_community.competing_slots
}
})
class DebugOpenFilesEndpoint(resource.Resource):
"""
This class handles request for information about open files.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/open_files
A GET request to this endpoint returns information about files opened by Tribler.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/open_files
**Example response**:
.. sourcecode:: javascript
{
"open_files": [{
"path": "path/to/open/file.txt",
"fd": 33,
}, ...]
}
"""
my_process = psutil.Process()
return json.dumps({
"open_files": [{"path": open_file.path, "fd": open_file.fd} for open_file in my_process.open_files()]})
class DebugOpenSocketsEndpoint(resource.Resource):
"""
This class handles request for information about open sockets.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/open_sockets
A GET request to this endpoint returns information about open sockets.
**Example request**:
.. sourcecode:: none
                curl -X GET http://localhost:8085/debug/open_sockets
**Example response**:
.. sourcecode:: javascript
{
"open_sockets": [{
"family": 2,
"status": "ESTABLISHED",
"laddr": "0.0.0.0:0",
"raddr": "0.0.0.0:0",
"type": 30
}, ...]
}
"""
my_process = psutil.Process()
sockets = []
for open_socket in my_process.connections():
sockets.append({
"family": open_socket.family,
"status": open_socket.status,
"laddr": ("%s:%d" % open_socket.laddr) if open_socket.laddr else "-",
"raddr": ("%s:%d" % open_socket.raddr) if open_socket.raddr else "-",
"type": open_socket.type
})
return json.dumps({"open_sockets": sockets})
class DebugThreadsEndpoint(resource.Resource):
"""
This class handles request for information about threads.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/threads
A GET request to this endpoint returns information about running threads.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/threads
**Example response**:
.. sourcecode:: javascript
{
"threads": [{
"thread_id": 123456,
"thread_name": "my_thread",
"frames": ["my_frame", ...]
}, ...]
}
"""
watchdog = WatchDog()
return json.dumps({"threads": watchdog.get_threads_info()})
class DebugCPUEndpoint(resource.Resource):
"""
This class handles request for information about CPU.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.putChild("history", DebugCPUHistoryEndpoint(session))
class DebugCPUHistoryEndpoint(resource.Resource):
"""
This class handles request for information about CPU usage history.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/cpu/history
A GET request to this endpoint returns information about CPU usage history in the form of a list.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/cpu/history
**Example response**:
.. sourcecode:: javascript
{
"cpu_history": [{
"time": 1504015291214,
"cpu": 3.4,
}, ...]
}
"""
history = self.session.lm.resource_monitor.get_cpu_history_dict() if self.session.lm.resource_monitor else {}
return json.dumps({"cpu_history": history})
class DebugMemoryEndpoint(resource.Resource):
"""
This class handles request for information about memory.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.putChild("history", DebugMemoryHistoryEndpoint(session))
if HAS_MELIAE:
self.putChild("dump", DebugMemoryDumpEndpoint(session))
class DebugMemoryHistoryEndpoint(resource.Resource):
"""
This class handles request for information about memory usage history.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/memory/history
A GET request to this endpoint returns information about memory usage history in the form of a list.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/memory/history
**Example response**:
.. sourcecode:: javascript
{
"memory_history": [{
"time": 1504015291214,
"mem": 324324,
}, ...]
}
"""
history = self.session.lm.resource_monitor.get_memory_history_dict() if self.session.lm.resource_monitor else {}
return json.dumps({"memory_history": history})
class DebugMemoryDumpEndpoint(resource.Resource):
"""
This class handles request for dumping memory contents.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/memory/dump
A GET request to this endpoint returns a Meliae-compatible dump of the memory contents.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/memory/dump
**Example response**:
The content of the memory dump file.
"""
content = ""
if sys.platform == "win32":
            # On Windows, meliae (especially older versions) can segfault when writing to a file
dump_buffer = MemoryDumpBuffer()
try:
scanner.dump_all_objects(dump_buffer)
except OverflowError as e:
# https://bugs.launchpad.net/meliae/+bug/569947
logging.error("meliae dump failed (your version may be too old): %s", str(e))
content = dump_buffer.getvalue()
dump_buffer.close()
else:
# On other platforms, simply writing to file is much faster
dump_file_path = os.path.join(self.session.config.get_state_dir(), 'memory_dump.json')
scanner.dump_all_objects(dump_file_path)
with open(dump_file_path, 'r') as dump_file:
content = dump_file.read()
date_str = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
request.setHeader(b'content-type', 'application/json')
request.setHeader(b'Content-Disposition', 'attachment; filename=tribler_memory_dump_%s.json' % date_str)
return content
class DebugLogEndpoint(resource.Resource):
"""
This class handles the request for displaying the logs.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/log?process=<core|gui>&max_lines=<max_lines>
A GET request to this endpoint returns a json with content of core or gui log file & max_lines requested
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/log?process=core&max_lines=5
**Example response**:
A JSON with content of the log file & max_lines requested, for eg.
{
"max_lines" : 5,
"content" :"INFO 1506675301.76 sqlitecachedb:181 Reading database version...
INFO 1506675301.76 sqlitecachedb:185 Current database version is 29
INFO 1506675301.76 sqlitecachedb:203 Beginning the first transaction...
INFO 1506675301.76 upgrade:93 tribler is in the latest version,...
INFO 1506675302.08 LaunchManyCore:254 lmc: Starting Dispersy..."
}
"""
# First, flush all the logs to make sure it is written to file
for handler in logging.getLogger().handlers:
handler.flush()
# Get the location of log file
        param_process = request.args['process'][0] if 'process' in request.args else 'core'
log_file_name = os.path.join(self.session.config.get_log_dir(), 'tribler-%s-info.log' % param_process)
# Default response
response = {'content': '', 'max_lines': 0}
# Check if log file exists and return last requested 'max_lines' of log
if os.path.exists(log_file_name):
try:
max_lines = int(request.args['max_lines'][0])
with open(log_file_name, 'r') as log_file:
response['content'] = self.tail(log_file, max_lines)
response['max_lines'] = max_lines
except ValueError:
with open(log_file_name, 'r') as log_file:
response['content'] = self.tail(log_file, 100) # default 100 lines
response['max_lines'] = 0
return json.dumps(response)
def tail(self, file_handler, lines=1):
"""Tail a file and get X lines from the end"""
# place holder for the lines found
lines_found = []
byte_buffer = 1024
# block counter will be multiplied by buffer
# to get the block size from the end
block_counter = -1
# loop until we find X lines
while len(lines_found) < lines:
try:
file_handler.seek(block_counter * byte_buffer, os.SEEK_END)
except IOError: # either file is too small, or too many lines requested
file_handler.seek(0)
lines_found = file_handler.readlines()
break
lines_found = file_handler.readlines()
# we found enough lines, get out
if len(lines_found) > lines:
break
# decrement the block counter to get the
# next X bytes
block_counter -= 1
return ''.join(lines_found[-lines:])
class DebugProfilerEndpoint(resource.Resource):
"""
This class handles requests for the profiler.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/profiler
A GET request to this endpoint returns information about the state of the profiler.
This state is either STARTED or STOPPED.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/profiler
**Example response**:
.. sourcecode:: javascript
{
"state": "STARTED"
}
"""
monitor_enabled = self.session.config.get_resource_monitor_enabled()
state = "STARTED" if (monitor_enabled and self.session.lm.resource_monitor.profiler_running) else "STOPPED"
return json.dumps({"state": state})
def render_PUT(self, request):
"""
.. http:put:: /debug/profiler
A PUT request to this endpoint starts the profiler.
**Example request**:
.. sourcecode:: none
curl -X PUT http://localhost:8085/debug/profiler
**Example response**:
.. sourcecode:: javascript
{
"success": "true"
}
"""
self.session.lm.resource_monitor.start_profiler()
return json.dumps({"success": True})
def render_DELETE(self, request):
"""
.. http:delete:: /debug/profiler
            A DELETE request to this endpoint stops the profiler.
**Example request**:
.. sourcecode:: none
curl -X DELETE http://localhost:8085/debug/profiler
**Example response**:
.. sourcecode:: javascript
{
"success": "true"
}
"""
file_path = self.session.lm.resource_monitor.stop_profiler()
return json.dumps({"success": True, "profiler_file": file_path})
| Captain-Coder/tribler | Tribler/Core/Modules/restapi/debug_endpoint.py | Python | lgpl-3.0 | 18,160 |
# RABDAM
# Copyright (C) 2020 Garman Group, University of Oxford
# This file is part of RABDAM.
# RABDAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# RABDAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
# An outer layer to the pipeline scripts. Depending upon the flags specified
# in the command line input, this script will run either the complete / a
# subsection of the pipeline.
# python -m unittest tests/test_bnet_calculation.py
import os
import unittest
from rabdam.Subroutines.CalculateBDamage import rabdam
class TestClass(unittest.TestCase):
def test_bnet_values(self):
"""
Checks that RABDAM calculates expected Bnet values for a selection of
PDB entries
"""
import os
import requests
import shutil
import pandas as pd
exp_bnet_dict = {'2O2X': 3.300580966,
'4EZF': 3.193514624,
'4MWU': 3.185476349,
'4MOV': 3.144130191,
'3NBM': 3.141821366,
'1GW1': 3.105626889,
'4EWE': 3.08241654,
'3F1P': 3.060628186,
'3IV0': 3.054440912,
'4ZWV': 3.017330004,
'1T2I': 3.004830448,
'3LX3': 2.962424378,
'5P4N': 2.916582486,
'5MAA': 2.91219352,
'1E73': 2.850203561,
'1YKI': 2.797739814,
'4WA4': 2.720540993,
'3V2J': 2.669599635,
'3CUI': 2.666605946,
'4XLA': 2.624366813,
'4DUK': 2.854175949,
'3V38': 2.500984382,
'1VJF': 2.496374854,
'5IO2': 2.467587911,
'5CM7': 2.44869046,
'2EHU': 2.448290431,
'5JOW': 2.439619791,
'2C54': 2.379224017,
'4GZK': 2.349526276,
'2NUM': 2.326904729,
'5FYO': 2.319618192,
'4ODK': 2.304354685,
'6EV4': 2.302433369,
'5P5U': 2.288966997,
'3VHV': 2.285877338,
'4JCK': 2.27150332,
'5EKM': 2.258574341,
'3H4O': 2.231817033,
'5JIG': 2.247664542,
'2H5S': 2.206850226,
'4M5I': 2.169405117,
'1Y59': 2.138787261,
'4C45': 2.131256276,
'5F90': 2.11287042,
'4NI3': 2.088735516,
'4Z6N': 2.083743584,
'5M2G': 2.06566475,
'5ER6': 2.05707889,
'4R0X': 2.006996308,
'5LLG': 1.981501196,
'1FCX': 1.976990791,
'5M90': 1.96542442,
'3NJK': 1.955577757,
'5CWG': 1.949818624,
'2P7O': 1.921138477,
'5SZC': 1.962633169,
'2I0K': 1.901555841,
'4RDK': 1.886900766,
'5MA0': 1.877853781,
'4C1E': 1.877575448,
'5EJ3': 1.875439995,
'2WUG': 1.87334953,
'4MPY': 1.842338963,
'4OTZ': 1.835716553,
'4IOO': 1.828349113,
'4Z6O': 1.800528596,
'4ZOT': 1.799163077,
'5PHB': 1.783879628,
'3UJC': 1.747894856,
'4FR8': 1.738876799,
'5PH8': 1.736825591,
'5UPM': 1.736663507,
'3MWX': 1.733132746,
'4KDX': 1.729650659,
'3WH5': 1.717975404,
'4P04': 1.714107945,
'5Y90': 1.695283923,
'4H31': 1.674014779,
'5HJE': 1.662869176,
'4YKK': 1.653894709,
'1Q0F': 1.646880018,
'5JP6': 1.629246723,
'1X7Y': 1.618817315,
'4ZC8': 1.60606196,
'5EPE': 1.604407869,
'4ZS9': 1.582398487,
'5VNX': 1.543824945,
'5IHV': 1.542271159,
'5J90': 1.526469901,
'4K6W': 1.520316883,
'3PBC': 1.512738972,
'5CMB': 1.504620762,
'4PSC': 1.491796934,
'5UPN': 1.477252783,
'4XLZ': 1.473298738,
'4XGY': 1.465885549,
'5M4G': 1.400219288,
'3A54': 1.319587779}
if not os.path.isdir('tests/temp_files/'):
os.mkdir('tests/temp_files/')
for code, exp_bnet in exp_bnet_dict.items():
# Checks cif file
cif_text = requests.get('https://files.rcsb.org/view/%s.cif' % code)
with open('tests/temp_files/%s.cif' % code, 'w') as f:
f.write(cif_text.text)
rabdam_run = rabdam(
pathToInput='%s/tests/temp_files/%s.cif' % (os.getcwd(), code),
outputDir='%s/tests/temp_files/' % os.getcwd(),
batchRun=True,
overwrite=True,
PDT=7,
windowSize=0.02,
protOrNA='protein',
HETATM=False,
removeAtoms=[],
addAtoms=[],
highlightAtoms=[],
createOrigpdb=False,
createAUpdb=False,
createUCpdb=False,
createAUCpdb=False,
createTApdb=False
)
rabdam_run.rabdam_dataframe(test=True)
rabdam_run.rabdam_analysis(
output_options=['csv', 'pdb', 'cif', 'kde', 'bnet', 'summary']
)
bnet_df = pd.read_pickle('tests/temp_files/Logfiles/Bnet_protein.pkl')
act_bnet_cif = bnet_df['Bnet'].tolist()[-1]
self.assertEqual(round(exp_bnet, 7), round(act_bnet_cif, 7))
os.remove('tests/temp_files/%s.cif' % code)
os.remove('tests/temp_files/Logfiles/Bnet_protein.pkl')
# Checks PDB file
pdb_text = requests.get('https://files.rcsb.org/view/%s.pdb' % code)
with open('tests/temp_files/%s.pdb' % code, 'w') as f:
f.write(pdb_text.text)
rabdam_run = rabdam(
pathToInput='%s/tests/temp_files/%s.pdb' % (os.getcwd(), code),
outputDir='%s/tests/temp_files/' % os.getcwd(),
batchRun=True,
overwrite=True,
PDT=7,
windowSize=0.02,
protOrNA='protein',
HETATM=False,
removeAtoms=[],
addAtoms=[],
highlightAtoms=[],
createOrigpdb=False,
createAUpdb=False,
createUCpdb=False,
createAUCpdb=False,
createTApdb=False
)
rabdam_run.rabdam_dataframe(test=True)
rabdam_run.rabdam_analysis(
output_options=['csv', 'pdb', 'cif', 'kde', 'bnet', 'summary']
)
bnet_df = pd.read_pickle(
'%s/tests/temp_files/Logfiles/Bnet_protein.pkl' % os.getcwd()
)
act_bnet_pdb = bnet_df['Bnet'].tolist()[-1]
self.assertEqual(round(exp_bnet, 7), round(act_bnet_pdb, 7))
os.remove('tests/temp_files/%s.pdb' % code)
os.remove('tests/temp_files/Logfiles/Bnet_protein.pkl')
shutil.rmtree('tests/temp_files/')
| GarmanGroup/RABDAM | tests/test_bnet_calculation.py | Python | lgpl-3.0 | 8,856 |
from cqparts.constraint import Mate, Coincident
from .base import Fastener
from ..screws import Screw
from ..utils import VectorEvaluator, Selector, Applicator
class ScrewFastener(Fastener):
"""
Screw fastener assembly.
Example usage can be found here: :ref:`cqparts_fasteners.built-in.screw`
"""
Evaluator = VectorEvaluator
class Selector(Selector):
ratio = 0.8
def get_components(self):
end_effect = self.evaluator.eval[-1]
end_point = end_effect.start_point + (end_effect.end_point - end_effect.start_point) * self.ratio
return {'screw': Screw(
head=('countersunk', {
'diameter': 9.5,
'height': 3.5,
}),
neck_length=abs(self.evaluator.eval[-1].start_point - self.evaluator.eval[0].start_point),
# only the length after the neck is threaded
length=abs(end_point - self.evaluator.eval[0].start_point),
#length=abs(self.evaluator.eval[-1].end_point - self.evaluator.eval[0].start_point),
)}
def get_constraints(self):
# bind fastener relative to its anchor; the part holding it in.
anchor_part = self.evaluator.eval[-1].part # last effected part
return [Coincident(
self.components['screw'].mate_origin,
Mate(anchor_part, self.evaluator.eval[0].start_coordsys - anchor_part.world_coords)
)]
class Applicator(Applicator):
def apply_alterations(self):
screw = self.selector.components['screw']
cutter = screw.make_cutter() # cutter in local coords
for effect in self.evaluator.eval:
relative_coordsys = screw.world_coords - effect.part.world_coords
local_cutter = relative_coordsys + cutter
effect.part.local_obj = effect.part.local_obj.cut(local_cutter)
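# Usage sketch (hedged: assumes the cqparts_fasteners convention of building a
# Fastener with the parts it joins, e.g. inside an Assembly's make_components;
# `plate` and `base` are illustrative Part instances):
#
#   fastener = ScrewFastener(parts=[plate, base])
#   # Evaluator probes through plate and base, Selector sizes the Screw to the
#   # probed depth, and Applicator cuts the screw hole out of both parts.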
| jmwright/cadquery-freecad-module | ThirdParty/cqparts_fasteners/fasteners/screw.py | Python | lgpl-3.0 | 1,975 |
VERSION = (1, 2, 21)
def get_version():
    return '%d.%d.%d' % VERSION
__author__ = 'Marinho Brandao'
#__date__ = '$Date: 2008-07-26 14:04:51 -0300 (Ter, 26 Fev 2008) $'[7:-2]
__license__ = 'GNU Lesser General Public License (LGPL)'
__url__ = 'http://django-plus.googlecode.com'
__version__ = get_version()
def get_dynamic_template(slug, context=None):
from models import DynamicTemplate
return DynamicTemplate.objects.get(slug=slug).render(context or {})
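# Usage sketch (hedged: assumes a DynamicTemplate row with slug 'welcome'
# exists in the database):
#
#   html = get_dynamic_template('welcome', {'user': some_user})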
| marinho/django-plus | djangoplus/__init__.py | Python | lgpl-3.0 | 468 |
# Generated by Django 2.2.6 on 2019-10-31 08:31
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('categories', multiselectfield.db.fields.MultiSelectField(choices=[(1, 'Handbooks and manuals by discipline'), (2, 'Business books'), (3, 'Books of literary criticism'), (4, 'Books about literary theory'), (5, 'Books about literature')], default=1, max_length=9)),
('tags', multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('sex', 'Sex'), ('work', 'Work'), ('happy', 'Happy'), ('food', 'Food'), ('field', 'Field'), ('boring', 'Boring'), ('interesting', 'Interesting'), ('huge', 'Huge'), ('nice', 'Nice')], max_length=54, null=True)),
('published_in', multiselectfield.db.fields.MultiSelectField(choices=[('Canada - Provinces', (('AB', 'Alberta'), ('BC', 'British Columbia'))), ('USA - States', (('AK', 'Alaska'), ('AL', 'Alabama'), ('AZ', 'Arizona')))], max_length=2, verbose_name='Province or State')),
('chapters', multiselectfield.db.fields.MultiSelectField(choices=[(1, 'Chapter I'), (2, 'Chapter II')], default=1, max_length=3)),
],
),
]
| goinnn/django-multiselectfield | example/app/migrations/0001_initial.py | Python | lgpl-3.0 | 1,535 |
#!/usr/local/bin/python
# check python version
import sys
ver_info = sys.version_info
# parse the command line (argparse is only available from Python 2.7 on)
if ver_info[0] < 3 and ver_info[1] < 7:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", help="input log file", metavar="LOG_FILE")
# parser.add_option("-d", "--directory", dest="dirname", help="input directory with log files", metavar="LOG_DIR")
parser.add_option("-t", "--dbtype", dest="dbtype", help="database type", default="mongodb", metavar="DB_TYPE")
(options, args) = parser.parse_args();
else:
import argparse
parser = argparse.ArgumentParser(description="Log to database ingester")
parser.add_argument("-f, --file", dest="filename", help="input log file", metavar="LOG_FILE")
# parser.add_argument("-d, --directory", dest="dirname", help="input directory with log files", metavar="LOG_DIR")
parser.add_argument("-t, --dbtype", dest="dbtype", help="database type", default="mongodb", metavar="DB_TYPE")
options = parser.parse_args()
print "file {0} ".format(options.filename)
# print "dirname {0} ".format(options.dirname)
print "dbtype {0}".format(options.dbtype)
if options.dbtype == "mongodb":
from DBDriver.MongoDBDriver import MongoDBDriver
dbingester = MongoDBDriver();
elif options.dbtype == "cassandra":
from DBDriver.CassandraDBDriver import CassandraDBDriver
dbingester = CassandraDBDriver();
else:
print "ERROR: unsupported db type {0}".format(options.dbtype);
sys.exit(2);
import re
# open the file and iterate
with open(options.filename) as f:
# read the first line
line = f.readline()
if re.match("v2.1", line):
from LogParser.LogParsers import LogParserV2_1
lparser = LogParserV2_1(options.filename)
elif re.match("v2", line):
        from LogParser.LogParsers import LogParserV2
        lparser = LogParserV2(options.filename)
else:
print "UNSUPPORTED LOG VERSION: {0}".format(line)
sys.exit(1)
for line in f:
lparser.parseLine(line, dbingester)
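# Invocation sketch (file name is illustrative):
#   python LogDBIngester.py -f run.log -t mongodb
#   python LogDBIngester.py -f run.log -t cassandra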
| EmoryUniversity/PIAT | src/common/log-analysis/python-discard/LogDBIngester.py | Python | lgpl-3.0 | 2,170 |
from vertebra.actor import actor
class test_00_actor:
def test_00_instantiate(self):
"""actor: can instantiate a base actor"""
a = actor()
assert isinstance(a,actor), "instantiated actor is actually an actor"
| jvantuyl/vertebra-py | tests/test_20_actor.py | Python | lgpl-3.0 | 224 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2011-2014 German Aerospace Center DLR
(Deutsches Zentrum fuer Luft- und Raumfahrt e.V.),
Institute of System Dynamics and Control
and BAUSCH-GALL GmbH, Munich
All rights reserved.
This file is licensed under the "BSD New" license
(see also http://opensource.org/licenses/BSD-3-Clause):
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the German Aerospace Center nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import zipfile
import collections
import os
from operator import itemgetter
import pyMtsf
from ...Simulator.FMUSimulator.FMIDescription1 import FMIDescription
StandardSeriesForFmi = [pyMtsf.Series('Fixed', None, 'constant', 1),
                        pyMtsf.Series('Continuous', 'Time', 'linear', 100),
                        pyMtsf.Series('Discrete', 'TimeDiscrete', 'constant', 10)]
def convertFromFmi(fmuFilename, fmi=None):
''' Returns data to initialize an MTSF result file from an FMU.
The call to initialize an MTSF result file is
pyMtsf.MTSF(resultFileName, modelDescription, modelVariables, experimentSetup, simpleTypes, units, enumerationsMatrix)
The missing data is resultFileName and experimentSetup to be specified before initializing the MTSF object.
Inputs Type:
fmuFilename String
fmi FMIDescription [optional]
if fmi is given, then fmuFilename is ignored. Otherwise the FMI description is loaded from the given file.
Outputs
modelDescription pyMtsf.ModelDescription
modelVariables pyMtsf.ModelVariables
simpleTypes list of pyMtsf.SimpleType
units list of pyMtsf.Unit
enumerationsMatrix list of pyMtsf.Enumeration
'''
def _None2Str(x):
if x is None:
return ''
else:
return x
# Load FMIDescription if necessary
if fmi is None:
        fmuFile = zipfile.ZipFile(os.path.join(os.getcwd(), fmuFilename + u'.fmu'), 'r')
fmi = FMIDescription(fmuFile.open('modelDescription.xml'))
# Prepare some variables
allSeriesNames = [x.name for x in StandardSeriesForFmi]
variable = collections.OrderedDict()
simpleTypes = []
units = []
enumerationsMatrix = []
variable['Time'] = pyMtsf.ScalarModelVariable('Continuous Time', 'input', 0, 'continuous', allSeriesNames.index('Continuous'), pyMtsf.StandardCategoryNames.index(pyMtsf.CategoryMapping['Real']), None, 0)
variable['TimeDiscrete'] = pyMtsf.ScalarModelVariable('Discrete Time at events', 'input', 0, 'discrete', allSeriesNames.index('Discrete'), pyMtsf.StandardCategoryNames.index(pyMtsf.CategoryMapping['Real']), None, 0)
# Alias
for var in fmi.scalarVariables.values():
if var.alias is None or var.alias.lower() == "noalias":
var.alias = 'NOAlias' # To guarantee that this variable is the first
# one in sorted order
referenceList = [(x, y.valueReference, y.alias) for x, y in fmi.scalarVariables.iteritems()]
referenceList.sort(key=itemgetter(2))
referenceList.sort(key=itemgetter(1))
for index in xrange(len(referenceList)):
variableName = referenceList[index][0]
if referenceList[index][2] in ['alias', 'negatedAlias']:
valueReference = referenceList[index][1]
prevValueReference = referenceList[index - 1][1]
if prevValueReference != valueReference:
raise ValueError("No original variable found for alias " + variableName)
if referenceList[index - 1][2] == "NOAlias":
originName = referenceList[index - 1][0]
else:
originName = fmi.scalarVariables[referenceList[index - 1][0]].aliasName
fmi.scalarVariables[variableName].aliasName = originName
else:
fmi.scalarVariables[variableName].aliasName = None
# Types and display units
uniqueSimpleType = []
for fmiVariableName, fmiVariable in fmi.scalarVariables.iteritems():
type = fmiVariable.type
unitList = [_None2Str(type.unit) + _None2Str(type.displayUnit)]
if fmi.units.has_key(type.unit):
for displayUnitName, displayUnit in fmi.units[type.unit].iteritems():
if displayUnitName != type.unit:
unitList.append(displayUnitName + '{:.16e}'.format(displayUnit.gain) + '{:.16e}'.format(displayUnit.offset))
# unitList.sort()
dataType = type.type
enumerations = ''
if dataType == 'Enumeration':
enumerations = ''.join([_None2Str(x[0]) + _None2Str(x[1]) for x in type.item])
uniqueSimpleType.append((fmiVariableName, type, _None2Str(type.name) + str(pyMtsf.DataType[dataType]) + _None2Str(type.quantity) + str(type.relativeQuantity), ''.join(unitList), enumerations))
# Simple Types
uniqueSimpleType.sort(key=itemgetter(3))
uniqueSimpleType.sort(key=itemgetter(2))
lastUniqueStr = ''
rowIndex = dict()
lastIndex = -1
uniqueDisplayUnit = []
uniqueEnumerations = []
for s in uniqueSimpleType:
fmiVariableName = s[0]
type = s[1]
uniqueStr = s[2] + s[3] + s[4]
if uniqueStr == lastUniqueStr:
rowIndex[fmiVariableName] = lastIndex
else:
lastUniqueStr = uniqueStr
lastIndex += 1
rowIndex[fmiVariableName] = lastIndex
uniqueDisplayUnit.append((type, lastIndex, s[3]))
uniqueEnumerations.append((type, lastIndex, s[4]))
dataType = type.type
simpleTypes.append(pyMtsf.SimpleType(type.name, pyMtsf.DataType[dataType], type.quantity, type.relativeQuantity, -1, type.description))
# Units
uniqueDisplayUnit.sort(key=itemgetter(2))
lastUniqueStr = ''
startRow = -1
for s in uniqueDisplayUnit:
type = s[0]
k = s[1]
uniqueStr = s[2]
if uniqueStr == lastUniqueStr:
simpleTypes[k].unitOrEnumerationRow = startRow
else:
lastUniqueStr = uniqueStr
if uniqueStr != '': # There is a unit definition
startRow = len(units)
units.append(pyMtsf.Unit(type.unit, 1.0, 0.0, 0))
if fmi.units.has_key(type.unit):
for displayUnitName, displayUnit in fmi.units[type.unit].iteritems():
if displayUnitName != type.unit:
if type.displayUnit is not None and type.displayUnit != '' and type.displayUnit == displayUnitName:
mode = 2 # DefaultDisplayUnit
else:
mode = 1 # DisplayUnit
units.append(pyMtsf.Unit(displayUnitName, displayUnit.gain, displayUnit.offset, mode))
simpleTypes[k].unitOrEnumerationRow = startRow
else:
startRow = -1
# Enumerations
uniqueEnumerations.sort(key=itemgetter(2))
lastUniqueStr = ''
startRow = -1
for s in uniqueEnumerations:
type = s[0]
k = s[1]
uniqueStr = s[2]
if uniqueStr != '':
if uniqueStr == lastUniqueStr:
simpleTypes[k].unitOrEnumerationRow = startRow
else:
lastUniqueStr = uniqueStr
startRow = len(enumerationsMatrix)
j = 0
for enum in type.item:
j += 1
if j == 1:
firstEntry = 1
else:
firstEntry = 0
enumerationsMatrix.append(pyMtsf.Enumeration(enum[0], j, enum[1], firstEntry))
simpleTypes[k].unitOrEnumerationRow = startRow
# Iterate over all fmi-variables
for fmiVariableName, fmiVariable in fmi.scalarVariables.iteritems():
variableType = fmiVariable.type.type
if variableType != "String": # Do not support strings
variability = fmiVariable.variability
aliasNegated = 0
aliasName = fmiVariable.aliasName
if aliasName is not None:
if fmiVariable.alias == 'negatedAlias':
aliasNegated = 1
# Due to possibly insufficient information in xml-file
variability = fmi.scalarVariables[aliasName].variability
categoryIndex = pyMtsf.StandardCategoryNames.index(pyMtsf.CategoryMapping[variableType])
if variability in ['constant', 'parameter']:
seriesIndex = allSeriesNames.index('Fixed')
elif variability == 'discrete':
seriesIndex = allSeriesNames.index('Discrete')
else:
seriesIndex = allSeriesNames.index('Continuous')
causality = fmiVariable.causality
# Due to FMI 1.0; in vers. 2.0 this should not be necessary
if causality is None:
causality = 'local'
if variability == 'parameter':
causality = 'parameter'
variability = 'fixed'
if causality in ['internal', 'none']:
causality = 'local'
simpleTypeRow = rowIndex[fmiVariableName]
variable[fmiVariableName] = pyMtsf.ScalarModelVariable(fmiVariable.description,
causality,
simpleTypeRow,
variability,
seriesIndex, categoryIndex,
aliasName, aliasNegated)
# Some basics for independent time variables
startRow = len(units)
units.append(pyMtsf.Unit('s', 1.0, 0.0, 0))
units.append(pyMtsf.Unit('ms', 0.001, 0.0, 1))
units.append(pyMtsf.Unit('min', 60.0, 0.0, 1))
units.append(pyMtsf.Unit('h', 3600.0, 0.0, 1))
units.append(pyMtsf.Unit('d', 86400.0, 0.0, 1))
simpleTypes.append(pyMtsf.SimpleType('Time', pyMtsf.DataType["Real"], 'Time', False, startRow, ''))
variable['Time'].simpleTypeRow = len(simpleTypes) - 1
variable['TimeDiscrete'].simpleTypeRow = len(simpleTypes) - 1
modelDescription = pyMtsf.ModelDescription(fmi.modelName, fmi.description, fmi.author, fmi.version, fmi.generationTool, fmi.generationDateAndTime, fmi.variableNamingConvention)
modelVariables = pyMtsf.ModelVariables(variable, StandardSeriesForFmi, pyMtsf.StandardCategoryNames)
return modelDescription, modelVariables, simpleTypes, units, enumerationsMatrix
if __name__ == '__main__':
import time
import numpy
nPoints = 60
BlockSize = 100
# Prepare information from FMU
name_fmu_file = u'Examples/fullRobot'
(modelDescription, modelVariables, simpleTypes, units, enumerations) = convertFromFmi(name_fmu_file)
modelVariables.allSeries[1].initialRows = nPoints * BlockSize # Continuous
# Phase 1 of result file generation
resultFileName = name_fmu_file + unicode(nPoints) + u'.mtsf'
experimentSetup = pyMtsf.ExperimentSetup(startTime=0.0, stopTime=4.78, algorithm="Dassl",
relativeTolerance=1e-7, author="", description="Test experiment",
generationDateAndTime=time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime()),
generationTool="Python", machine=os.getenv('COMPUTERNAME'),
cpuTime="")
startTime = time.clock()
# Create result object
mtsf = pyMtsf.MTSF(resultFileName, modelDescription, modelVariables, experimentSetup, simpleTypes, units, enumerations)
# Some aliases
realParameter = mtsf.results.series['Fixed'].category[pyMtsf.CategoryMapping['Real']]
# integerParameter = mtsf.results.series['Fixed'].category[CategoryMapping['Integer']]
booleanParameter = mtsf.results.series['Fixed'].category[pyMtsf.CategoryMapping['Boolean']]
realContinuous = mtsf.results.series['Continuous'].category[pyMtsf.CategoryMapping['Real']]
realDiscrete = mtsf.results.series['Discrete'].category[pyMtsf.CategoryMapping['Real']]
# integerDiscrete = mtsf.results.series['Discrete'].category[CategoryMapping['Integer']]
booleanDiscrete = mtsf.results.series['Discrete'].category[pyMtsf.CategoryMapping['Boolean']]
# *************************************
# Phase 2 of result file generation
print "Write Data ..."
realParameter.writeData(numpy.random.rand(1, realParameter.nColumn) * 2e5 - 1e5)
# integerParameter.writeData(numpy.floor(0.5+numpy.random.rand(1,integerParameter.nColumn)*2e5-1e5).astype(int))
booleanParameter.writeData(numpy.floor(0.5 + numpy.random.rand(1, booleanParameter.nColumn)).astype(int))
for i in range(nPoints):
# write continuous
realContinuous.writeData(numpy.random.rand(BlockSize, realContinuous.nColumn) * 2e5 - 1e5)
# write discrete
# booleanDiscrete.writeData(numpy.floor(0.5+numpy.random.rand(2, booleanDiscrete.nColumn)).astype(int))
# realDiscrete.writeData(numpy.random.rand(2, realDiscrete.nColumn)*2e5 - 1e5)
# integerDiscrete.writeData(numpy.floor(0.5+numpy.random.rand(2, integerDiscrete.nColumn)*2e5-1e5).astype(int))
# write String
# mtsf.series['Continuous'].categories['H5T_C_S1'].writeData(numpy.ones((1,k_str),dtype=numpy.str_))
# Write times:
# realContinuous.member[0].dataset[:,0] = numpy.linspace(0,1,realContinuous.member[0].dataset.shape[0])
# realDiscrete.member[0].dataset[:,0] = numpy.linspace(0,1,realDiscrete.member[0].dataset.shape[0])
print "Data written."
# ****************************************
# Phase 3 of result file generation
mtsf.close()
print "Elapsed time = " + format(time.clock() - startTime, '0.2f') + " s."
| PySimulator/PySimulator | PySimulator/Plugins/SimulationResult/Mtsf/MtsfFmi.py | Python | lgpl-3.0 | 15,300 |
#!/usr/bin/env python2
# Copyright (C) 2011-2012 by Imperial College London
# Copyright (C) 2013 University of Oxford
# Copyright (C) 2014 University of Edinburgh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import copy
import dolfin
import numpy
import ufl
from caches import *
from equation_solvers import *
from exceptions import *
from fenics_overrides import *
from fenics_utils import *
from pre_assembled_forms import *
from statics import *
from time_levels import *
from time_functions import *
__all__ = \
[
"AdjointVariableMap",
"PAAdjointSolvers",
"TimeFunctional"
]
class AdjointVariableMap(object):
"""
A map between forward and adjoint variables. Indexing into the
AdjointVariableMap with a forward Function yields an associated adjoint
Function, and similarly indexing into the AdjointVariableMap with an adjoint
Function yields an associated forward Function. Allocates adjoint Function s
as required.
"""
def __init__(self):
self.__a_tfns = {}
self.__f_tfns = OrderedDict()
self.__a_fns = {}
self.__f_fns = OrderedDict()
return
def __getitem__(self, key):
return self.__add(key)
def __add(self, var):
if isinstance(var, TimeFunction):
if not var in self.__a_tfns:
f_tfn = var
a_tfn = AdjointTimeFunction(f_tfn)
self.__a_tfns[f_tfn] = a_tfn
self.__f_tfns[a_tfn] = f_tfn
for level in f_tfn.all_levels():
self.__a_fns[f_tfn[level]] = a_tfn[level]
self.__f_fns[a_tfn[level]] = f_tfn[level]
return self.__a_tfns[var]
elif isinstance(var, AdjointTimeFunction):
if not var in self.__f_tfns:
f_tfn = var.forward()
a_tfn = var
self.__a_tfns[f_tfn] = a_tfn
self.__f_tfns[a_tfn] = f_tfn
for level in f_tfn.all_levels():
self.__a_fns[f_tfn[level]] = a_tfn[level]
self.__f_fns[a_tfn[level]] = f_tfn[level]
return self.__f_tfns[var]
elif isinstance(var, dolfin.Function):
if is_static_coefficient(var):
return var
elif hasattr(var, "_time_level_data"):
return self.__add(var._time_level_data[0])[var._time_level_data[1]]
elif hasattr(var, "_adjoint_data"):
if not var in self.__f_fns:
self.__a_fns[var._adjoint_data[0]] = var
self.__f_fns[var] = var._adjoint_data[0]
return var._adjoint_data[0]
else:
if not var in self.__a_fns:
a_fn = dolfin.Function(name = "%s_adjoint" % var.name(), *[var.function_space()])
a_fn._adjoint_data = [var]
self.__a_fns[var] = a_fn
self.__f_fns[a_fn] = var
return self.__a_fns[var]
elif isinstance(var, dolfin.Constant):
return var
else:
raise InvalidArgumentException("Argument must be an AdjointTimeFunction, TimeFunction, Function, or Constant")
def zero_adjoint(self):
"""
Zero all adjoint Function s,
"""
for a_fn in self.__f_fns:
if not hasattr(a_fn, "_time_level_data"):
a_fn.vector().zero()
for a_tfn in self.__f_tfns:
a_tfn.zero()
return
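# Usage sketch (hedged: `u` is any non-static forward Function):
#
#   a_map = AdjointVariableMap()
#   u_adj = a_map[u]           # allocates u_adjoint on first access
#   assert a_map[u_adj] is u   # the map works in both directions
#   a_map.zero_adjoint()       # zero every allocated adjoint Function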
class TimeFunctional(object):
"""
A template for a functional with an explicit time dependence.
"""
def __init__(self):
return
def initialise(self, val = 0.0):
"""
Initialise, with an initial functional value of val.
"""
raise AbstractMethodException("initialise method not overridden")
def addto(self, s):
"""
Add to the functional at the end of timestep number s.
"""
raise AbstractMethodException("addto method not overridden")
def value(self):
"""
Return the functional value.
"""
raise AbstractMethodException("value method not overridden")
def dependencies(self, s = None, non_symbolic = False):
"""
Return the functional dependencies at the end of timestep number s. If
non_symbolic is true, also return any other dependencies on which the value
of the functional could depend at the end of timestep number s.
"""
raise AbstractMethodException("dependencies method not overridden")
def derivative(self, parameter, s):
"""
Return the derivative of the functional with respect to the specified
Constant of Function at the end of the timestep number s.
"""
raise AbstractMethodException("derivative method not overridden")
class PAAdjointSolvers(object):
"""
Defines a set of solves for adjoint equations, applying pre-assembly and
linear solver caching optimisations. Expects as input a list of earlier
forward equations and a list of later forward equations. If the earlier
equations solve for {x_1, x_2, ...}, then the Function s on which the later
equations depend should all be static or in the {x_1, x_2, ...}, although the
failure of this requirement is not treated as an error.
Constructor arguments:
f_solves_a: Earlier time forward equations, as a list of AssignmentSolver s
or EquationSolver s.
f_solves_b: Later time forward equations, as a list of AssignmentSolver s
or EquationSolver s.
a_map: The AdjointVariableMap used to convert between forward and adjoint
Function s.
"""
def __init__(self, f_solves_a, f_solves_b, a_map):
if not isinstance(f_solves_a, list):
raise InvalidArgumentException("f_solves_a must be a list of AssignmentSolver s or EquationSolver s")
for f_solve in f_solves_a:
if not isinstance(f_solve, (AssignmentSolver, EquationSolver)):
raise InvalidArgumentException("f_solves_a must be a list of AssignmentSolver s or EquationSolver s")
if not isinstance(f_solves_b, list):
raise InvalidArgumentException("f_solves_b must be a list of AssignmentSolver s or EquationSolver s")
for f_solve in f_solves_b:
if not isinstance(f_solve, (AssignmentSolver, EquationSolver)):
raise InvalidArgumentException("f_solves_b must be a list of AssignmentSolver s or EquationSolver s")
if not isinstance(a_map, AdjointVariableMap):
raise InvalidArgumentException("a_map must be an AdjointVariableMap")
# Reverse causality
f_solves_a = copy.copy(f_solves_a); f_solves_a.reverse()
f_solves_b = copy.copy(f_solves_b); f_solves_b.reverse()
la_a_forms = []
la_x = []
la_L_forms = []
la_L_as = []
la_bcs = []
la_solver_parameters = []
la_pre_assembly_parameters = []
la_keys = {}
# Create an adjoint solve for each forward solve in f_solves_a, and add
# the adjoint LHS
for f_solve in f_solves_a:
f_x = f_solve.x()
a_x = a_map[f_x]
a_space = a_x.function_space()
assert(not a_x in la_keys)
if isinstance(f_solve, AssignmentSolver):
la_a_forms.append(None)
la_bcs.append([])
la_solver_parameters.append(None)
la_pre_assembly_parameters.append(dolfin.parameters["timestepping"]["pre_assembly"].copy())
else:
assert(isinstance(f_solve, EquationSolver))
f_a = f_solve.tangent_linear()[0]
f_a_rank = form_rank(f_a)
if f_a_rank == 2:
a_test, a_trial = dolfin.TestFunction(a_space), dolfin.TrialFunction(a_space)
a_a = adjoint(f_a, adjoint_arguments = (a_test, a_trial))
la_a_forms.append(a_a)
la_bcs.append(f_solve.hbcs())
la_solver_parameters.append(copy.deepcopy(f_solve.adjoint_solver_parameters()))
else:
assert(f_a_rank == 1)
a_a = f_a
la_a_forms.append(a_a)
la_bcs.append(f_solve.hbcs())
la_solver_parameters.append(None)
la_pre_assembly_parameters.append(f_solve.pre_assembly_parameters().copy())
la_x.append(a_x)
la_L_forms.append(None)
la_L_as.append([])
la_keys[a_x] = len(la_x) - 1
# Add adjoint RHS terms corresponding to terms in each forward solve in
# f_solves_a and f_solves_b
for f_solve in f_solves_a + f_solves_b:
f_x = f_solve.x()
a_dep = a_map[f_x]
if isinstance(f_solve, AssignmentSolver):
f_rhs = f_solve.rhs()
if isinstance(f_rhs, ufl.expr.Expr):
# Adjoin an expression assignment RHS
for f_dep in ufl.algorithms.extract_coefficients(f_rhs):
if isinstance(f_dep, dolfin.Function):
a_x = a_map[f_dep]
a_rhs = differentiate_expr(f_rhs, f_dep) * a_dep
if a_x in la_keys and not isinstance(a_rhs, ufl.constantvalue.Zero):
la_L_as[la_keys[a_x]].append(a_rhs)
else:
# Adjoin a linear combination assignment RHS
for alpha, f_dep in f_rhs:
a_x = a_map[f_dep]
if a_x in la_keys:
la_L_as[la_keys[a_x]].append((alpha, a_dep))
else:
# Adjoin an equation RHS
assert(isinstance(f_solve, EquationSolver))
a_trial = dolfin.TrialFunction(a_dep.function_space())
f_a_od = f_solve.tangent_linear()[1]
for f_dep in f_a_od:
a_x = a_map[f_dep]
if a_x in la_keys:
a_test = dolfin.TestFunction(a_x.function_space())
a_key = la_keys[a_x]
a_form = -action(adjoint(f_a_od[f_dep], adjoint_arguments = (a_test, a_trial)), a_dep)
if la_L_forms[a_key] is None:
la_L_forms[a_key] = a_form
else:
la_L_forms[a_key] += a_form
self.__a_map = a_map
self.__a_a_forms = la_a_forms
self.__a_x = la_x
self.__a_L_forms = la_L_forms
self.__a_L_as = la_L_as
self.__a_bcs = la_bcs
self.__a_solver_parameters = la_solver_parameters
self.__a_pre_assembly_parameters = la_pre_assembly_parameters
self.__a_keys = la_keys
self.__functional = None
self.reassemble()
return
def reassemble(self, *args):
"""
Reassemble the adjoint solvers. If no arguments are supplied then all
equations are re-assembled. Otherwise, only the LHSs or RHSs which depend
upon the supplied Constant s or Function s are reassembled. Note that this
does not clear the assembly or linear solver caches -- hence if a static
Constant, Function, or DirichletBC is modified then one should clear the
caches before calling reassemble on the PAAdjointSolvers.
"""
def assemble_lhs(i):
if self.__a_a_forms[i] is None:
a_a = None
a_solver = None
else:
a_a_rank = form_rank(self.__a_a_forms[i])
if a_a_rank == 2:
static_bcs = n_non_static_bcs(self.__a_bcs[i]) == 0
static_form = is_static_form(self.__a_a_forms[i])
if len(self.__a_bcs[i]) > 0 and static_bcs and static_form:
a_a = assembly_cache.assemble(self.__a_a_forms[i],
bcs = self.__a_bcs[i], symmetric_bcs = self.__a_pre_assembly_parameters[i]["equations"]["symmetric_boundary_conditions"],
compress = self.__a_pre_assembly_parameters[i]["bilinear_forms"]["compress_matrices"])
a_solver = linear_solver_cache.linear_solver(self.__a_a_forms[i],
self.__a_solver_parameters[i],
bcs = self.__a_bcs[i], symmetric_bcs = self.__a_pre_assembly_parameters[i]["equations"]["symmetric_boundary_conditions"],
a = a_a)
a_solver.set_operator(a_a)
elif len(self.__a_bcs[i]) == 0 and static_form:
a_a = assembly_cache.assemble(self.__a_a_forms[i],
compress = self.__a_pre_assembly_parameters[i]["bilinear_forms"]["compress_matrices"])
a_solver = linear_solver_cache.linear_solver(self.__a_a_forms[i],
self.__a_solver_parameters[i],
a = a_a)
a_solver.set_operator(a_a)
else:
a_a = PABilinearForm(self.__a_a_forms[i], pre_assembly_parameters = self.__a_pre_assembly_parameters[i]["bilinear_forms"])
a_solver = linear_solver_cache.linear_solver(self.__a_a_forms[i],
self.__a_solver_parameters[i], self.__a_pre_assembly_parameters[i]["bilinear_forms"],
static = a_a.is_static() and static_bcs,
bcs = self.__a_bcs[i], symmetric_bcs = self.__a_pre_assembly_parameters[i]["equations"]["symmetric_boundary_conditions"])
else:
assert(a_a_rank == 1)
assert(self.__a_solver_parameters[i] is None)
a_a = PALinearForm(self.__a_a_forms[i], pre_assembly_parameters = self.__a_pre_assembly_parameters[i]["linear_forms"])
a_solver = None
return a_a, a_solver
def assemble_rhs(i):
if self.__a_L_forms[i] is None:
return None
else:
return PALinearForm(self.__a_L_forms[i], pre_assembly_parameters = self.__a_pre_assembly_parameters[i]["linear_forms"])
if len(args) == 0:
la_a, la_solvers = [], []
la_L = []
for i in xrange(len(self.__a_x)):
a_a, a_solver = assemble_lhs(i)
a_L = assemble_rhs(i)
la_a.append(a_a)
la_solvers.append(a_solver)
la_L.append(a_L)
self.set_functional(self.__functional)
else:
la_a, la_solvers = copy.copy(self.__a_a), copy.copy(self.__a_solvers)
la_L = copy.copy(self.__a_L)
for i in xrange(len(self.__a_x)):
for dep in args:
if not self.__a_a_forms[i] is None and dep in ufl.algorithms.extract_coefficients(self.__a_a_forms[i]):
la_a[i], la_solvers[i] = assemble_lhs(i)
break
for dep in args:
if not self.__a_L_forms[i] is None and dep in ufl.algorithms.extract_coefficients(self.__a_L_forms[i]):
la_L[i] = assemble_rhs(i)
break
if isinstance(self.__functional, ufl.form.Form):
for dep in args:
if dep in ufl.algorithms.extract_coefficients(self.__functional):
self.set_functional(self.__functional)
break
else:
self.set_functional(self.__functional)
self.__a_a, self.__a_solvers = la_a, la_solvers
self.__a_L = la_L
return
def a_x(self):
"""
Return the adjoint Function s being solved for.
"""
return self.__a_x
def solve(self):
"""
Solve all adjoint equations.
"""
for i in xrange(len(self.__a_x)):
a_a = self.__a_a[i]
a_x = self.__a_x[i]
a_L = self.__a_L[i]
a_L_as = self.__a_L_as[i]
a_L_rhs = self.__a_L_rhs[i]
a_bcs = self.__a_bcs[i]
a_solver = self.__a_solvers[i]
def evaluate_a_L_as(i):
if isinstance(a_L_as[i], ufl.expr.Expr):
if is_r0_function(a_x):
L = evaluate_expr(a_L_as[i], copy = False)
if isinstance(L, dolfin.GenericVector):
l_L = L.sum()
else:
assert(isinstance(L, float))
l_L = L
L = a_x.vector().copy()
L[:] = l_L
else:
L = evaluate_expr(a_L_as[i], copy = True)
if isinstance(L, float):
l_L = L
L = a_x.vector().copy()
L[:] = l_L
else:
assert(isinstance(L, dolfin.GenericVector))
else:
L = float(a_L_as[i][0]) * a_L_as[i][1].vector()
return L
def add_a_L_as(i, L):
if isinstance(a_L_as[i], ufl.expr.Expr):
l_L = evaluate_expr(a_L_as[i], copy = False)
if is_r0_function(a_x):
if isinstance(l_L, dolfin.GenericVector):
l_L = l_L.sum()
else:
assert(isinstance(l_L, float))
if isinstance(l_L, dolfin.GenericVector):
L += l_L
else:
L.add_local(l_L * numpy.ones(L.local_range(0)[1] - L.local_range(0)[0]))
L.apply("insert")
else:
L.axpy(float(a_L_as[i][0]), a_L_as[i][1].vector())
return
if a_L_rhs is None:
if len(a_L_as) == 0:
if a_L is None:
if a_a is None or len(a_bcs) == 0:
a_x.vector().zero()
continue
else:
L = a_x.vector().copy()
L.zero()
else:
L = assemble(a_L, copy = len(a_bcs) > 0)
else:
L = evaluate_a_L_as(0)
for i in xrange(1, len(a_L_as)):
add_a_L_as(i, L)
if not a_L is None:
L += assemble(a_L, copy = False)
else:
if isinstance(a_L_rhs, PAForm):
L = assemble(a_L_rhs, copy = len(a_bcs) > 0 or not a_L is None or len(a_L_as) > 0)
else:
L = assemble(a_L_rhs)
if not a_L is None:
L += assemble(a_L, copy = False)
for i in xrange(len(a_L_as)):
add_a_L_as(i, L)
if a_a is None:
assert(len(a_bcs) == 0)
assert(a_solver is None)
a_x.vector()[:] = L
elif a_solver is None:
assert(a_a.rank() == 1)
a_a = assemble(a_a, copy = False)
assert(L.local_range() == a_a.local_range())
a_x.vector().set_local(L.array() / a_a.array())
a_x.vector().apply("insert")
enforce_bcs(a_x.vector(), a_bcs)
else:
if isinstance(a_a, dolfin.GenericMatrix):
enforce_bcs(L, a_bcs)
else:
a_a = assemble(a_a, copy = len(a_bcs) > 0)
apply_bcs(a_a, a_bcs, L = L, symmetric_bcs = self.__a_pre_assembly_parameters[i]["equations"]["symmetric_boundary_conditions"])
a_solver.set_operator(a_a)
a_solver.solve(a_x.vector(), L)
return
def set_functional(self, functional):
"""
Set a functional, defining associated adjoint RHS terms.
"""
if functional is None:
self.__a_L_rhs = [None for i in xrange(len(self.__a_x))]
self.__functional = None
elif isinstance(functional, ufl.form.Form):
if not form_rank(functional) == 0:
raise InvalidArgumentException("functional must be rank 0")
a_rhs = OrderedDict()
for f_dep in ufl.algorithms.extract_coefficients(functional):
if is_static_coefficient(f_dep):
pass
elif isinstance(f_dep, dolfin.Function):
a_x = self.__a_map[f_dep]
a_rhs[a_x] = derivative(functional, f_dep)
elif isinstance(f_dep, (dolfin.Constant, dolfin.Expression)):
pass
else:
raise DependencyException("Invalid dependency")
self.__a_L_rhs = [None for i in xrange(len(self.__a_x))]
for i, a_x in enumerate(a_rhs):
if a_x in self.__a_keys:
self.__a_L_rhs[self.__a_keys[a_x]] = PALinearForm(a_rhs[a_x], pre_assembly_parameters = self.__a_pre_assembly_parameters[i]["linear_forms"])
self.__functional = functional
elif isinstance(functional, TimeFunctional):
self.__a_L_rhs = [None for i in xrange(len(self.__a_x))]
self.__functional = functional
else:
raise InvalidArgumentException("functional must be a Form or a TimeFunctional")
return
def update_functional(self, s):
"""
Update the adjoint RHS associated with the functional at the end of timestep
s.
"""
if not isinstance(s, int) or s < 0:
raise InvalidArgumentException("s must be a non-negative integer")
if not isinstance(self.__functional, TimeFunctional):
return
a_rhs = OrderedDict()
for f_dep in self.__functional.dependencies(s):
if is_static_coefficient(f_dep):
pass
elif isinstance(f_dep, dolfin.Function):
a_x = self.__a_map[f_dep]
a_rhs[a_x] = self.__functional.derivative(f_dep, s)
elif isinstance(f_dep, dolfin.Constant):
pass
else:
raise DependencyException("Invalid dependency")
self.__a_L_rhs = [None for i in xrange(len(self.__a_x))]
for a_x in a_rhs:
if not a_x in self.__a_keys:
dolfin.warning("Missing functional dependency %s" % a_x.name())
else:
self.__a_L_rhs[self.__a_keys[a_x]] = a_rhs[a_x]
return
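# Usage sketch (hedged: fs_prev / fs_next are lists of earlier / later forward
# AssignmentSolver s or EquationSolver s and J is a rank-0 Form or a
# TimeFunctional; names are illustrative):
#
#   a_map = AdjointVariableMap()
#   a_solvers = PAAdjointSolvers(fs_prev, fs_next, a_map)
#   a_solvers.set_functional(J)
#   for s in reversed(xrange(n_steps)):
#       a_solvers.update_functional(s)  # only acts on a TimeFunctional
#       a_solvers.solve()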
| pf4d/dolfin-adjoint | timestepping/python/timestepping/pre_assembled_adjoint.py | Python | lgpl-3.0 | 23,635 |
import datetime
from django.db import models
from django.core import validators
from django.utils.translation import ugettext_lazy as _
from nmadb_contacts.models import Municipality, Human
class School(models.Model):
""" Information about school.
School types retrieved from `AIKOS
<http://www.aikos.smm.lt/aikos/svietimo_ir_mokslo_institucijos.htm>`_
"""
SCHOOL_TYPES = (
(1, _(u'primary')),
(2, _(u'basic')),
(3, _(u'secondary')),
(4, _(u'gymnasium')),
(5, _(u'progymnasium')),
)
title = models.CharField(
max_length=80,
unique=True,
verbose_name=_(u'title'),
)
school_type = models.PositiveSmallIntegerField(
choices=SCHOOL_TYPES,
blank=True,
null=True,
verbose_name=_(u'type'),
)
email = models.EmailField(
max_length=128,
unique=True,
blank=True,
null=True,
verbose_name=_(u'email'),
)
municipality = models.ForeignKey(
Municipality,
blank=True,
null=True,
verbose_name=_(u'municipality'),
)
class Meta(object):
ordering = [u'title',]
verbose_name=_(u'school')
verbose_name_plural=_(u'schools')
def __unicode__(self):
return unicode(self.title)
class Student(Human):
""" Information about student.
"""
school_class = models.PositiveSmallIntegerField(
validators=[
validators.MinValueValidator(6),
validators.MaxValueValidator(12),
],
verbose_name=_(u'class'),
)
school_year = models.IntegerField(
validators=[
validators.MinValueValidator(2005),
validators.MaxValueValidator(2015),
],
verbose_name=_(u'class update year'),
help_text=_(
u'This field value shows, at which year January 3 day '
u'student was in school_class.'
),
)
comment = models.TextField(
blank=True,
null=True,
verbose_name=_(u'comment'),
)
schools = models.ManyToManyField(
School,
through='StudyRelation',
)
parents = models.ManyToManyField(
Human,
through='ParentRelation',
related_name='children',
)
def current_school_class(self):
""" Returns current school class or 13 if finished.
"""
today = datetime.date.today()
school_class = self.school_class + today.year - self.school_year
if today.month >= 9:
school_class += 1
if school_class > 12:
return 13
else:
return school_class
current_school_class.short_description = _(u'current class')
def current_school(self):
""" Returns current school.
"""
study = StudyRelation.objects.filter(
student=self).order_by('entered')[0]
return study.school
current_school.short_description = _(u'current school')
def change_school(self, school, date=None):
""" Marks, that student from ``date`` study in ``school``.
.. note::
Automatically saves changes.
``date`` defaults to ``today()``. If student already studies in
some school, than marks, that he had finished it day before
``date``.
"""
if date is None:
date = datetime.date.today()
try:
old_study = StudyRelation.objects.filter(
student=self).order_by('entered')[0]
except IndexError:
pass
else:
if not old_study.finished:
old_study.finished = date - datetime.timedelta(1)
old_study.save()
study = StudyRelation()
study.student = self
study.school = school
study.entered = date
study.save()
class Meta(object):
verbose_name=_(u'student')
verbose_name_plural=_(u'students')
class StudyRelation(models.Model):
""" Relationship between student and school.
"""
student = models.ForeignKey(
Student,
verbose_name=_(u'student'),
)
school = models.ForeignKey(
School,
verbose_name=_(u'school'),
)
entered = models.DateField(
verbose_name=_(u'entered'),
)
finished = models.DateField(
blank=True,
null=True,
verbose_name=_(u'finished'),
)
class Meta(object):
ordering = [u'student', u'entered',]
verbose_name=_(u'study relation')
verbose_name_plural=_(u'study relations')
def __unicode__(self):
return u'{0.school} ({0.entered}; {0.finished})'.format(self)
# FIXME: Diploma should belong to academic, not student.
class Diploma(models.Model):
""" Information about the diploma that the student has received,
when he finished, if any.
"""
DIPLOMA_TYPE = (
(u'N', _(u'nothing')),
(u'P', _(u'certificate')),
(u'D', _(u'diploma')),
(u'DP', _(u'diploma with honour')),
)
student = models.OneToOneField(
Student,
verbose_name=_(u'student'),
)
tasks_solved = models.PositiveSmallIntegerField(
blank=True,
null=True,
verbose_name=_(u'how many tasks solved'),
)
hours = models.DecimalField(
blank=True,
null=True,
max_digits=6,
decimal_places=2,
verbose_name=_(u'hours'),
)
diploma_type = models.CharField(
max_length=3,
choices=DIPLOMA_TYPE,
verbose_name=_(u'type'),
)
number = models.PositiveSmallIntegerField(
verbose_name=_(u'number'),
)
class Meta(object):
verbose_name=_(u'diploma')
verbose_name_plural=_(u'diplomas')
class Alumni(models.Model):
""" Information about alumni.
"""
INTEREST_LEVEL = (
# Not tried to contact.
( 0, _(u'not tried to contact')),
# Tried to contact, no response.
(11, _(u'no response')),
# Tried to contact, responded.
(21, _(u'not interested')),
(22, _(u'friend')),
(23, _(u'helpmate')),
(24, _(u'regular helpmate')),
)
student = models.OneToOneField(
Student,
verbose_name=_(u'student'),
)
activity_fields = models.TextField(
blank=True,
null=True,
verbose_name=_(u'fields'),
help_text=_(
u'Alumni reported that he can help in these activity '
u'fields.'
),
)
interest_level = models.PositiveSmallIntegerField(
blank=True,
null=True,
choices=INTEREST_LEVEL,
verbose_name=_(u'interest level'),
)
abilities = models.TextField(
blank=True,
null=True,
verbose_name=_(u'abilities'),
help_text=_(u'Main abilities and interests.')
)
university = models.CharField(
max_length=128,
blank=True,
null=True,
verbose_name=_(u'university'),
help_text=_(u'Or work place.'),
)
study_field = models.CharField(
max_length=64,
blank=True,
null=True,
verbose_name=_(u'study field'),
help_text=_(u'Or employment field.'),
)
info_change_year = models.IntegerField(
blank=True,
null=True,
verbose_name=_(u'info change year'),
help_text=_(
u'Year when the information about studies '
u'will become invalid.'
),
)
notes = models.TextField(
blank=True,
null=True,
verbose_name=_(u'notes'),
)
information_received_timestamp = models.DateTimeField(
blank=True,
null=True,
verbose_name=_(u'information received timestamp'),
)
class Meta(object):
verbose_name=_(u'alumni')
        verbose_name_plural=_(u'alumni')
def contactable(self):
""" If the alumni agreed to receive information.
"""
        return self.interest_level >= 22
class StudentMark(models.Model):
""" Mark student with some mark.
"""
student = models.ForeignKey(
Student,
verbose_name=_(u'student'),
)
start = models.DateField(
verbose_name=_(u'start'),
)
end = models.DateField(
blank=True,
null=True,
verbose_name=_(u'end'),
)
def __unicode__(self):
return unicode(self.student)
class Meta(object):
abstract = True
class SocialDisadvantageMark(StudentMark):
""" Mark student as socially disadvantaged.
"""
class Meta(object):
verbose_name=_(u'social disadvantage mark')
verbose_name_plural=_(u'social disadvantage marks')
class DisabilityMark(StudentMark):
""" Mark student as having disability.
"""
disability = models.CharField(
max_length=128,
verbose_name=_(u'disability'),
)
class Meta(object):
verbose_name=_(u'disability mark')
verbose_name_plural=_(u'disability marks')
class ParentRelation(models.Model):
""" Relationship between student and his parent.
"""
RELATION_TYPE = (
(u'P', _(u'parent')),
(u'T', _(u'tutor')),
)
child = models.ForeignKey(
Student,
related_name='+',
verbose_name=_(u'child'),
)
parent = models.ForeignKey(
Human,
verbose_name=_(u'parent'),
)
relation_type = models.CharField(
max_length=2,
choices=RELATION_TYPE,
verbose_name=_(u'type'),
)
def __unicode__(self):
return u'{0.parent} -> {0.child}'.format(self)
class Meta(object):
verbose_name=_(u'parent relation')
verbose_name_plural=_(u'parent relations')
| vakaras/nmadb-students | src/nmadb_students/models.py | Python | lgpl-3.0 | 10,676 |
"""
This test illustrates how to generate an XML Mapnik style sheet from a pycnik
style sheet written in Python.
"""
import os
from pycnik import pycnik
import artefact
actual_xml_style_sheet = 'artefacts/style_sheet.xml'
expected_xml_style_sheet = 'style_sheet.xml'
class TestPycnik(artefact.TestCaseWithArtefacts):
def test_pycnik(self):
python_style_sheet = pycnik.import_style('style_sheet.py')
pycnik.translate(python_style_sheet, actual_xml_style_sheet)
with open(actual_xml_style_sheet) as actual, \
open(expected_xml_style_sheet) as expected:
            self.assertEqual(actual.read(), expected.read())
| Mappy/pycnikr | tests/test_pycnik.py | Python | lgpl-3.0 | 660 |
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""This module contains some mixins for the different nodes.
"""
from .exceptions import (AstroidBuildingException, InferenceError,
NotFoundError)
class BlockRangeMixIn(object):
"""override block range """
def set_line_info(self, lastchild):
self.fromlineno = self.lineno
self.tolineno = lastchild.tolineno
self.blockstart_tolineno = self._blockstart_toline()
def _elsed_block_range(self, lineno, orelse, last=None):
"""handle block line numbers range for try/finally, for, if and while
statements
"""
if lineno == self.fromlineno:
return lineno, lineno
if orelse:
if lineno >= orelse[0].fromlineno:
return lineno, orelse[-1].tolineno
return lineno, orelse[0].fromlineno - 1
return lineno, last or self.tolineno
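    # Worked example (hedged, hypothetical line numbers): for an ``if``
    # statement with fromlineno 10 whose ``else`` body spans lines 14-16,
    # _elsed_block_range(10, orelse) -> (10, 10) (the test line itself),
    # _elsed_block_range(12, orelse) -> (12, 13) (body ends before ``else``),
    # _elsed_block_range(15, orelse) -> (15, 16) (inside the ``else`` body).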
class FilterStmtsMixin(object):
"""Mixin for statement filtering and assignment type"""
def _get_filtered_stmts(self, _, node, _stmts, mystmt):
"""method used in _filter_stmts to get statemtents and trigger break"""
if self.statement() is mystmt:
# original node's statement is the assignment, only keep
# current node (gen exp, list comp)
return [node], True
return _stmts, False
def ass_type(self):
return self
class AssignTypeMixin(object):
def ass_type(self):
return self
def _get_filtered_stmts(self, lookup_node, node, _stmts, mystmt):
"""method used in filter_stmts"""
if self is mystmt:
return _stmts, True
if self.statement() is mystmt:
# original node's statement is the assignment, only keep
# current node (gen exp, list comp)
return [node], True
return _stmts, False
class ParentAssignTypeMixin(AssignTypeMixin):
def ass_type(self):
return self.parent.ass_type()
class FromImportMixIn(FilterStmtsMixin):
"""MixIn for From and Import Nodes"""
def _infer_name(self, frame, name):
return name
def do_import_module(self, modname):
"""return the ast for a module whose name is <modname> imported by <self>
"""
# handle special case where we are on a package node importing a module
# using the same name as the package, which may end in an infinite loop
# on relative imports
# XXX: no more needed ?
mymodule = self.root()
        level = getattr(self, 'level', None) # Import has no level
        # XXX we should investigate deeper if we really want to check
        # importing itself: modname and mymodule.name may be relative or absolute
if mymodule.relative_to_absolute_name(modname, level) == mymodule.name:
# FIXME: we used to raise InferenceError here, but why ?
return mymodule
try:
return mymodule.import_module(modname, level=level)
except AstroidBuildingException:
raise InferenceError(modname)
except SyntaxError, ex:
raise InferenceError(str(ex))
def real_name(self, asname):
"""get name from 'as' name"""
for name, _asname in self.names:
if name == '*':
return asname
if not _asname:
name = name.split('.', 1)[0]
_asname = name
if asname == _asname:
return name
raise NotFoundError(asname)
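    # Example: for ``import numpy as np`` (names == [('numpy', 'np')]),
    # real_name('np') returns 'numpy'; for ``from os import path``
    # (names == [('path', None)]), real_name('path') returns 'path'.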
| lukaszpiotr/pylama_with_gjslint | pylama/checkers/pylint/astroid/mixins.py | Python | lgpl-3.0 | 4,313 |
# created based on
# https://python-packaging.readthedocs.io/en/latest/minimal.html
# But instead of python setup.py register sdist upload,
# use https://pypi.org/p/twine/
#
from setuptools import setup
import sys
import os
import re
sys.path.append("src")
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return "File '%s' not found.\n" % fname
def readVersion():
txt = read("src/moddy/version.py")
ver = re.findall(r"([0-9]+)", txt)
print("ver=%s" % ver)
return ver[0] + "." + ver[1] + "." + ver[2]
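# Hedged note: readVersion() assumes the major, minor and patch numbers are
# the first three integers appearing in src/moddy/version.py, e.g. something
# like ``VERSION = (1, 10, 0)``; the exact variable name is an assumption,
# only the order of the digits matters to the regex above.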
setup(
name="moddy",
install_requires=["svgwrite"],
version=readVersion(),
description="A discrete event simulator generating sequence diagrams",
long_description=read("README.rst"),
url="https://github.com/KlausPopp/Moddy",
project_urls={
"Documentation": "https://klauspopp.github.io/Moddy/",
"Source Code": "https://github.com/KlausPopp/Moddy/",
},
keywords="simulation modelling",
author="Klaus Popp",
author_email="[email protected]",
license="LGPL-3.0",
platforms="OS Independent",
package_dir={"": "src"},
packages=[
"moddy",
"moddy.seq_diag_interactive_viewer",
"moddy.lib",
"moddy.lib.net",
],
package_data={"moddy.seq_diag_interactive_viewer": ["*.css", "*.js"]},
)
| KlausPopp/Moddy | setup.py | Python | lgpl-3.0 | 1,397 |
#!/usr/bin/env python3
# coding: utf8
"""
Unit tests for module PySetTrie (see settrie.py).
Author: Márton Miháltz
https://sites.google.com/site/mmihaltz/
"""
import unittest
from settrie import SetTrie, SetTrieMap, SetTrieMultiMap
class TestSetTrie(unittest.TestCase):
"""
UnitTest for SetTrie class
"""
def setUp(self):
self.t = SetTrie([{1, 3}, {1, 3, 5}, {1, 4}, {1, 2, 4}, {2, 4}, {2, 3, 5}])
def test_print(self):
expected = """None
1
2
4#
3#
5#
4#
2
3
5#
4#
"""
from io import StringIO
outp = StringIO()
self.t.printtree(stream=outp)
self.assertEqual(outp.getvalue(), expected)
def test_iter(self):
a = []
for s in self.t:
a.append(s)
self.assertEqual(a, [{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}, {2, 3, 5}, {2, 4}])
def test_iter2(self):
it = iter(self.t)
for s in it:
pass
self.assertRaises(StopIteration, it.__next__)
def test_iter3(self):
t2 = SetTrie()
it = iter(t2)
self.assertRaises(StopIteration, it.__next__)
def test_aslist(self):
self.assertEqual(self.t.aslist(), [{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}, {2, 3, 5}, {2, 4}])
def test_str(self):
self.assertEqual(str(self.t), "[{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}, {2, 3, 5}, {2, 4}]")
def test_contains(self):
self.assertTrue(self.t.contains( {1, 3} ))
self.assertFalse(self.t.contains( {1} ))
self.assertTrue(self.t.contains( {1, 3, 5} ))
self.assertFalse(self.t.contains( {1, 3, 5, 7} ))
def test_in(self):
self.assertTrue({1, 3} in self.t)
self.assertFalse({1} in self.t)
self.assertTrue({1, 3, 5} in self.t)
self.assertFalse({1, 3, 5, 7} in self.t)
def test_hassuperset(self):
self.assertTrue(self.t.hassuperset({3, 5}))
self.assertFalse(self.t.hassuperset({6}))
self.assertTrue(self.t.hassuperset({1, 2, 4}))
self.assertFalse(self.t.hassuperset({2, 4, 5} ))
def test_supersets(self):
self.assertEqual(self.t.supersets({3, 5}), [{1, 3, 5}, {2, 3, 5}])
self.assertEqual(self.t.supersets({1, 4}), [{1, 2, 4}, {1, 4}])
self.assertEqual(self.t.supersets({1, 3, 5}), [{1, 3, 5}])
self.assertEqual(self.t.supersets({2}), [{1, 2, 4}, {2, 3, 5}, {2, 4}])
self.assertEqual(self.t.supersets({1}), [{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}])
self.assertEqual(self.t.supersets({1, 2, 5}), [])
self.assertEqual(self.t.supersets({1, 2, 4, 5}), [])
self.assertEqual(self.t.supersets({6}), [])
def test_hassubset(self):
self.assertTrue(self.t.hassubset({1, 2, 3}))
self.assertTrue(self.t.hassubset({2, 3, 4, 5}))
self.assertTrue(self.t.hassubset({1, 4}))
self.assertTrue(self.t.hassubset({2, 3, 5}))
self.assertFalse(self.t.hassubset({3, 4, 5}))
self.assertFalse(self.t.hassubset({6, 7, 8, 9, 1000}))
def test_subsets(self):
self.assertEqual(self.t.subsets({1, 2, 4, 11}), [{1, 2, 4}, {1, 4}, {2, 4}])
self.assertEqual(self.t.subsets({1, 2, 4}), [{1, 2, 4}, {1, 4}, {2, 4}])
self.assertEqual(self.t.subsets({1, 2}), [])
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}), [{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}, {2, 3, 5}, {2, 4}])
self.assertEqual(self.t.subsets({0, 1, 3, 5}), [{1, 3}, {1, 3, 5}])
self.assertEqual(self.t.subsets({1, 2, 5}), [])
self.assertEqual(self.t.subsets({1, 4}), [{1, 4}]) # :)
self.assertEqual(self.t.subsets({1, 3, 5}), [{1, 3}, {1, 3, 5}])
self.assertEqual(self.t.subsets({1, 3, 5, 111}), [{1, 3}, {1, 3, 5}])
self.assertEqual(self.t.subsets({1, 4, 8}), [{1, 4}])
self.assertEqual(self.t.subsets({2, 3, 4, 5}), [{2, 3, 5}, {2, 4}])
self.assertEqual(self.t.subsets({2, 3, 5, 6}), [{2, 3, 5}])
class TestSetTrieMap(unittest.TestCase):
"""
UnitTest for SetTrieMap class
"""
def setUp(self):
self.t = SetTrieMap([({1, 3}, 'A'), ({1, 3, 5}, 'B'), ({1, 4}, 'C'),
({1, 2, 4}, 'D'), ({2, 4}, 'E'), ({2, 3, 5}, 'F')])
#self.t.printtree()
def test_print(self):
expected = """None
1
2
4: 'D'
3: 'A'
5: 'B'
4: 'C'
2
3
5: 'F'
4: 'E'
"""
from io import StringIO
outp = StringIO()
self.t.printtree(stream=outp)
self.assertEqual(outp.getvalue(), expected)
def test_contains(self):
self.assertTrue(self.t.contains( {1, 3} ))
self.assertFalse(self.t.contains( {1} ))
self.assertTrue(self.t.contains( {1, 3, 5} ))
self.assertFalse(self.t.contains( {1, 3, 5, 7} ))
def test_in(self):
self.assertTrue({1, 3} in self.t)
self.assertFalse({1} in self.t)
self.assertTrue({1, 3, 5} in self.t)
self.assertFalse({1, 3, 5, 7} in self.t)
def test_get(self):
self.assertEqual(self.t.get({1, 3}), 'A')
self.assertEqual(self.t.get({1, 3, 5}), 'B')
self.assertEqual(self.t.get({1, 4}), 'C')
self.assertEqual(self.t.get({1, 2, 4}), 'D')
self.assertEqual(self.t.get({2, 4}), 'E')
self.assertEqual(self.t.get({2, 3, 5}), 'F')
self.assertEqual(self.t.get({1, 2, 3}), None)
self.assertEqual(self.t.get({100, 101, 102}, 0xDEADBEEF), 0xDEADBEEF)
self.assertEqual(self.t.get({}), None)
def test_assign(self):
self.assertEqual(self.t.get({1, 3}), 'A')
self.t.assign({1, 3}, 'AAA')
self.assertEqual(self.t.get({1, 3}), 'AAA')
self.assertEqual(self.t.get({100, 200}), None)
self.t.assign({100, 200}, 'FOO')
self.assertEqual(self.t.get({100, 200}), 'FOO')
self.setUp()
def test_hassuperset(self):
self.assertTrue(self.t.hassuperset({3, 5}))
self.assertFalse(self.t.hassuperset({6}))
self.assertTrue(self.t.hassuperset({1, 2, 4}))
self.assertFalse(self.t.hassuperset({2, 4, 5} ))
def test_supersets(self):
self.assertEqual(self.t.supersets({3, 5}), [({1, 3, 5}, 'B'), ({2, 3, 5}, 'F')])
self.assertEqual(self.t.supersets({1}), [({1, 2, 4}, 'D'), ({1, 3}, 'A'), ({1, 3, 5}, 'B'), ({1, 4}, 'C')])
self.assertEqual(self.t.supersets({1, 2, 5}), [])
self.assertEqual(self.t.supersets({3, 5}, mode='keys'), [{1, 3, 5}, {2, 3, 5}])
self.assertEqual(self.t.supersets({1}, mode='keys'), [{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}])
self.assertEqual(self.t.supersets({1, 2, 5}, mode='keys'), [])
self.assertEqual(self.t.supersets({3, 5}, mode='values'), ['B', 'F'])
self.assertEqual(self.t.supersets({1}, mode='values'), ['D', 'A', 'B', 'C'])
self.assertEqual(self.t.supersets({1, 2, 5}, mode='values'), [])
def test_hassubset(self):
self.assertTrue(self.t.hassubset({1, 2, 3}))
self.assertTrue(self.t.hassubset({2, 3, 4, 5}))
self.assertTrue(self.t.hassubset({1, 4}))
self.assertTrue(self.t.hassubset({2, 3, 5}))
self.assertFalse(self.t.hassubset({3, 4, 5}))
self.assertFalse(self.t.hassubset({6, 7, 8, 9, 1000}))
def test_subsets(self):
self.assertEqual(self.t.subsets({1, 2, 4, 11}), [({1, 2, 4}, 'D'), ({1, 4}, 'C'), ({2, 4}, 'E')])
self.assertEqual(self.t.subsets({1, 2, 4}), [({1, 2, 4}, 'D'), ({1, 4}, 'C'), ({2, 4}, 'E')])
self.assertEqual(self.t.subsets({1, 2}), [])
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}), [({1, 2, 4}, 'D'),
({1, 3}, 'A'),
({1, 3, 5}, 'B'),
({1, 4}, 'C'),
({2, 3, 5}, 'F'),
({2, 4}, 'E')] )
self.assertEqual(self.t.subsets({0, 1, 3, 5}), [({1, 3}, 'A'), ({1, 3, 5}, 'B')])
self.assertEqual(self.t.subsets({1, 2, 5}), [])
self.assertEqual(self.t.subsets({1, 2, 4, 11}, mode='keys'), [{1, 2, 4}, {1, 4}, {2, 4}])
self.assertEqual(self.t.subsets({1, 2, 4}, mode='keys'), [{1, 2, 4}, {1, 4}, {2, 4}])
self.assertEqual(self.t.subsets({1, 2}, mode='keys'), [])
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}, mode='keys'), [{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}, {2, 3, 5}, {2, 4}])
self.assertEqual(self.t.subsets({0, 1, 3, 5}, mode='keys'), [{1, 3}, {1, 3, 5}])
self.assertEqual(self.t.subsets({1, 2, 5}, mode='keys'), [])
self.assertEqual(self.t.subsets({1, 2, 4, 11}, mode='values'), ['D', 'C', 'E'])
self.assertEqual(self.t.subsets({1, 2, 4}, mode='values'), ['D', 'C', 'E'])
self.assertEqual(self.t.subsets({1, 2}, mode='values'), [])
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}, mode='values'), ['D', 'A', 'B', 'C', 'F', 'E'])
self.assertEqual(self.t.subsets({0, 1, 3, 5}, mode='values'), ['A', 'B'])
self.assertEqual(self.t.subsets({1, 2, 5}, mode='values'), [])
self.assertEqual(self.t.subsets({1, 4}), [({1, 4}, 'C')])
self.assertEqual(self.t.subsets({1, 3, 5}), [({1, 3}, 'A'), ({1, 3, 5}, 'B')])
self.assertEqual(self.t.subsets({1, 3, 5, 111}), [({1, 3}, 'A'), ({1, 3, 5}, 'B')])
self.assertEqual(self.t.subsets({1, 4, 8}), [({1, 4}, 'C')])
self.assertEqual(self.t.subsets({2, 3, 4, 5}), [({2, 3, 5}, 'F'), ({2, 4}, 'E')])
self.assertEqual(self.t.subsets({2, 3, 5, 6}), [({2, 3, 5}, 'F')])
def test_iters(self):
self.assertEqual(self.t.aslist(),
[({1, 2, 4}, 'D'), ({1, 3}, 'A'), ({1, 3, 5}, 'B'), ({1, 4}, 'C'), ({2, 3, 5}, 'F'), ({2, 4}, 'E')] )
self.assertEqual(list(self.t.keys()), [{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}, {2, 3, 5}, {2, 4}] )
self.assertEqual(list(self.t.values()), ['D', 'A', 'B', 'C', 'F', 'E'] )
self.assertEqual(list(self.t.__iter__()), list(self.t.keys()))
class TestSetTrieMultiMap(unittest.TestCase):
"""
UnitTest for SetTrieMultiMap class
"""
def setUp(self):
self.t = SetTrieMultiMap([({1, 3}, 'A'), ({1, 3}, 'AA'), ({1, 3, 5}, 'B'), ({1, 4}, 'C'), ({1, 4}, 'CC'),
({1, 2, 4}, 'D'), ({1, 2, 4}, 'DD'), ({2, 4}, 'E'), ({2, 3, 5}, 'F'),
({2, 3, 5}, 'FF'), ({2, 3, 5}, 'FFF')])
def test_aslist(self):
self.assertEqual(self.t.aslist(),
[({1, 2, 4}, 'D'), ({1, 2, 4}, 'DD'), ({1, 3}, 'A'), ({1, 3}, 'AA'), ({1, 3, 5}, 'B'),
({1, 4}, 'C'), ({1, 4}, 'CC'), ({2, 3, 5}, 'F'), ({2, 3, 5}, 'FF'), ({2, 3, 5}, 'FFF'), ({2, 4}, 'E')] )
def test_assign_returned_value(self):
x = SetTrieMultiMap()
self.assertEqual(x.assign({1, 3}, 'A'), 1)
self.assertEqual(x.assign({1, 3}, 'AA'), 2)
self.assertEqual(x.assign({1, 3}, 'A'), 3)
self.assertEqual(x.assign({2, 4, 5}, 'Y'), 1)
def test_count(self):
self.assertEqual(self.t.count({1, 3}), 2)
self.assertEqual(self.t.count({1, 3, 5}), 1)
self.assertEqual(self.t.count({1, 3, 4}), 0)
self.assertEqual(self.t.count({111, 222}), 0)
self.assertEqual(self.t.count({2, 3, 5}), 3)
def test_iterget(self):
self.assertEqual(list(self.t.iterget({1, 3})), ['A', 'AA'])
self.assertEqual(list(self.t.iterget({1, 3, 4})), [])
def test_get(self):
self.assertEqual(self.t.get({1, 3}), ['A', 'AA'])
self.assertEqual(self.t.get({1, 2, 4}), ['D', 'DD'])
self.assertEqual(self.t.get({1, 3, 5}), ['B'])
self.assertEqual(self.t.get({2, 3, 5}), ['F', 'FF', 'FFF'])
self.assertEqual(self.t.get({2, 4}), ['E'])
self.assertEqual(self.t.get({1, 3, 4}), None)
self.assertEqual(self.t.get({44}, []), [])
def test_hassuperset(self):
self.assertTrue(self.t.hassuperset({3, 5}))
self.assertFalse(self.t.hassuperset({6}))
self.assertTrue(self.t.hassuperset({1, 2, 4}))
self.assertFalse(self.t.hassuperset({2, 4, 5} ))
def test_supersets(self):
self.assertEqual(self.t.supersets({3, 5}), [({1, 3, 5}, 'B'), ({2, 3, 5}, 'F'), ({2, 3, 5}, 'FF'), ({2, 3, 5}, 'FFF')])
self.assertEqual(self.t.supersets({3, 5}, mode='values'), ['B', 'F', 'FF', 'FFF'])
self.assertEqual(self.t.supersets({3, 5}, mode='keys'), [{1, 3, 5}, {2, 3, 5}])
self.assertEqual(self.t.supersets({1}), [({1, 2, 4}, 'D'), ({1, 2, 4}, 'DD'), ({1, 3}, 'A'),
({1, 3}, 'AA'), ({1, 3, 5}, 'B'), ({1, 4}, 'C'), ({1, 4}, 'CC')] )
self.assertEqual(self.t.supersets({1}, mode='keys'), [{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}])
self.assertEqual(self.t.supersets({1}, mode='values'), ['D', 'DD', 'A', 'AA', 'B', 'C', 'CC'])
self.assertEqual(self.t.supersets({1, 2, 5}), [])
self.assertEqual(self.t.supersets({1, 2, 5}, mode='keys'), [])
self.assertEqual(self.t.supersets({1, 2, 5}, mode='values'), [])
def test_hassubset(self):
self.assertTrue(self.t.hassubset({1, 2, 3}))
self.assertTrue(self.t.hassubset({2, 3, 4, 5}))
self.assertTrue(self.t.hassubset({1, 4}))
self.assertTrue(self.t.hassubset({2, 3, 5}))
self.assertFalse(self.t.hassubset({3, 4, 5}))
self.assertFalse(self.t.hassubset({6, 7, 8, 9, 1000}))
def test_subsets(self):
self.assertEqual(self.t.subsets({1, 2, 4, 11}), [({1, 2, 4}, 'D'), ({1, 2, 4}, 'DD'), ({1, 4}, 'C'),
({1, 4}, 'CC'), ({2, 4}, 'E')] )
self.assertEqual(self.t.subsets({1, 2, 4, 11}, mode='keys'), [{1, 2, 4}, {1, 4}, {2, 4}])
self.assertEqual(self.t.subsets({1, 2, 4, 11}, mode='values'), ['D', 'DD', 'C', 'CC', 'E'])
self.assertEqual(self.t.subsets({1, 2, 4}), [({1, 2, 4}, 'D'), ({1, 2, 4}, 'DD'), ({1, 4}, 'C'), ({1, 4}, 'CC'),
({2, 4}, 'E')])
self.assertEqual(self.t.subsets({1, 2, 4}, mode='keys'), [{1, 2, 4}, {1, 4}, {2, 4}])
self.assertEqual(self.t.subsets({1, 2, 4}, mode='values'), ['D', 'DD', 'C', 'CC', 'E'])
self.assertEqual(self.t.subsets({1, 2}), [])
self.assertEqual(self.t.subsets({1, 2}, mode='keys'), [])
self.assertEqual(self.t.subsets({1, 2}, mode='values'), [])
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}),
[({1, 2, 4}, 'D'), ({1, 2, 4}, 'DD'), ({1, 3}, 'A'), ({1, 3}, 'AA'), ({1, 3, 5}, 'B'),
({1, 4}, 'C'), ({1, 4}, 'CC'), ({2, 3, 5}, 'F'), ({2, 3, 5}, 'FF'), ({2, 3, 5}, 'FFF'), ({2, 4}, 'E')] )
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}), self.t.aslist())
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}, mode='keys'), list(self.t.keys()))
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}, mode='keys'), [{1, 2, 4}, {1, 3}, {1, 3, 5}, {1, 4}, {2, 3, 5}, {2, 4}])
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}, mode='values'),
['D', 'DD', 'A', 'AA', 'B', 'C', 'CC', 'F', 'FF', 'FFF', 'E'])
self.assertEqual(self.t.subsets({1, 2, 3, 4, 5}, mode='values'), list(self.t.values()))
self.assertEqual(self.t.subsets({0, 1, 3, 5}), [({1, 3}, 'A'), ({1, 3}, 'AA'), ({1, 3, 5}, 'B')])
self.assertEqual(self.t.subsets({0, 1, 3, 5}, mode='keys'), [{1, 3}, {1, 3, 5}])
self.assertEqual(self.t.subsets({0, 1, 3, 5}, mode='values'), ['A', 'AA', 'B'])
self.assertEqual(self.t.subsets({1, 2, 5}), [])
self.assertEqual(self.t.subsets({1, 2, 5}, mode='keys'), [])
self.assertEqual(self.t.subsets({1, 2, 5}, mode='values'), [])
self.assertEqual(self.t.subsets({1, 4}), [({1, 4}, 'C'), ({1, 4}, 'CC')])
self.assertEqual(self.t.subsets({1, 3, 5}), [({1, 3}, 'A'), ({1, 3}, 'AA'), ({1, 3, 5}, 'B')])
self.assertEqual(self.t.subsets({1, 3, 5, 111}), [({1, 3}, 'A'), ({1, 3}, 'AA'), ({1, 3, 5}, 'B')])
self.assertEqual(self.t.subsets({1, 4, 8}), [({1, 4}, 'C'), ({1, 4}, 'CC')])
self.assertEqual(self.t.subsets({2, 3, 4, 5}), [({2, 3, 5}, 'F'), ({2, 3, 5}, 'FF'), ({2, 3, 5}, 'FFF'), ({2, 4}, 'E')])
self.assertEqual(self.t.subsets({2, 3, 5, 6}), [({2, 3, 5}, 'F'), ({2, 3, 5}, 'FF'), ({2, 3, 5}, 'FFF')])
# - - - - - - -
# If module is executed from command line, perform tests:
if __name__ == "__main__":
unittest.main(verbosity=2)
| mmihaltz/pysettrie | tests/test_settrie.py | Python | lgpl-3.0 | 15,405 |
big = 2000000 # B = the number below which primes are summed
p = [True] * big # P = whether a number is prime, all are initially true and will later be falsified
print("running sieve...")
s = 0 # S = the sum of primes less than big which begins as 0
for a in range(2, big): # loop A over all candidates less than BIG
if p[a]: # if A is prime
s += a # then add A to S
        for b in range(a * a, big, a): # loop over multiples of A starting at A*A (smaller multiples were already marked by smaller primes), less than BIG, inc. by A
p[b] = False # the multiple isn't prime
print(s)
| rck109d/projectEuler | src/euler/p10_sieve.py | Python | lgpl-3.0 | 748 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from rbnics.utils.decorators import ABCMeta, AbstractBackend, abstractmethod
@AbstractBackend
class ReducedVertices(object, metaclass=ABCMeta):
def __init__(self, space):
pass
@abstractmethod
def append(self, vertex_and_component):
pass
@abstractmethod
def save(self, directory, filename):
pass
@abstractmethod
def load(self, directory, filename):
pass
@abstractmethod
def __getitem__(self, key):
pass
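# A minimal concrete sketch (hedged; DictReducedVertices is hypothetical): a
# backend subclasses ReducedVertices and implements every abstract method,
# along the lines of
#
#   class DictReducedVertices(ReducedVertices):
#       def __init__(self, space):
#           self._vertices = []
#       def append(self, vertex_and_component):
#           self._vertices.append(vertex_and_component)
#       def save(self, directory, filename): ...
#       def load(self, directory, filename): ...
#       def __getitem__(self, key):
#           return self._vertices[key]
#
# Persistence details for save/load are backend specific and omitted here.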
| mathLab/RBniCS | rbnics/backends/abstract/reduced_vertices.py | Python | lgpl-3.0 | 613 |
#
# GdbLib - A Gdb python library.
# Copyright (C) 2012 Fernando Castillo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
def change(values):
    # No-op stub; ``values`` is currently ignored.
    pass
def removeFile(path):
    """Return ``path`` with its final component (the file name) stripped."""
    index = path.rfind(os.sep)
    return path[:index]
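# Usage example: removeFile('/home/user/project/main.c') returns
# '/home/user/project' on POSIX systems. Note the edge case: if ``path``
# contains no os.sep, rfind() returns -1 and path[:-1] is returned instead.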
| skibyte/gdblib | gdblib/util.py | Python | lgpl-3.0 | 864 |
import string
import uuid
import random
from util import iDict
import exception
# Nothing is validating the Redir section in the xml. I may get nonexistent
# params.
#
__all__ = ['RedirectionManager', 'LocalRedirection', 'RemoteRedirection']
def str2bool(x):
if x is None:
return False
if x.lower() in ('true', 't', '1'):
return True
else:
return False
def strip_xpath(x):
if not x:
return x
if x.startswith('/'):
return ""
else:
return x
def is_identifier(x):
if not x:
return False
else:
if x[0] in string.digits:
return False
else:
return True
def rand_port():
RAND_PORT_MIN = 1025
RAND_PORT_MAX = 65535
return str(random.randint(RAND_PORT_MIN, RAND_PORT_MAX))
class LocalRedirection(object):
"""
listenaddr: Name of parameter to give the IP addr of listening tunnel on local LP
listenport: Name of parameter to give the port of the listening tunnel on local LP
destaddr: Name of parameter that contains the IP of the target computer
destport: Name of parameter that contains the port on the target computer
"""
def __init__(self, protocol, listenaddr, listenport, destaddr, destport,
closeoncompletion="false", name="", srcport=None, srcportlist=None,
srcportrange=None, *trashargs, **trashkargs):
self.protocol = protocol
# XXX - The redirection section really shouldn't have XPath in it.
self.listenaddr = strip_xpath(listenaddr)
self.listenport = strip_xpath(listenport)
self.destaddr = strip_xpath(destaddr)
self.destport = strip_xpath(destport)
self.closeoncompletion = str2bool(closeoncompletion)
self.name = name
self.srcport = strip_xpath(srcport)
self.srcportlist = srcportlist
self.srcportrange = srcportrange
def __repr__(self):
return str(self.__dict__)
class RemoteRedirection(object):
def __init__(self, protocol, listenaddr, destport, closeoncompletion="false",
name="", listenport=None, listenportlist=None,
listenportrange=None, listenportcount=None, destaddr="0.0.0.0",
*trashargs, **trashkargs):
self.protocol = protocol
self.listenaddr = strip_xpath(listenaddr)
self.listenport = strip_xpath(listenport)
self.destaddr = strip_xpath(destaddr)
self.destport = strip_xpath(destport)
self.closeoncompletion = str2bool(closeoncompletion)
self.name = name
# Need conversion?
self.listenportlist = listenportlist
self.listenportrange = listenportrange
self.listenportcount = listenportcount
def __repr__(self):
return str(self.__dict__)
class RedirectionManager:
"""This is something of a misnomer. This is really a redirection manager rather than
a redirection object. This is responsible for taking the defined tunnels in the
plug-in's XML and 'swapping out' the parameters as they pertain to redirection.
A sample redirection section appears as follows:
<redirection>
<!-- (1) The "throwing" tunnel -->
<local name="Launch"
protocol="TCP"
listenaddr="TargetIp" # IP of redirector
listenport="TargetPort" # Port of redirector
destaddr="TargetIp" # IP of target computer
destport="TargetPort" # Port of target computer
closeoncompletion="true"/>
<!-- (2) The "Callin" tunnel -->
<local name="Callin"
protocol="TCP"
listenaddr="TargetIp" # IP on redirector
listenport="CallinPort" # Port on redirector
destaddr="TargetIp" # IP of target callin
destport="ListenPort" # Port of target callin
closeoncompletion="false"/>
<!-- (3) The "callback" tunnel -->
<remote name="Callback"
protocol="TCP"
listenaddr="CallbackIp" # IP opened by egg (last redirector)
listenport="CallbackPort" # Port opened by egg (last redirector)
destport="CallbackLocalPort" # Port for throwing side to listen
closeoncompletion="false"/>
</redirection>
For the "throwing" (launch) tunnel, we:
1: Ask for/retrieve the "Destination IP" and "Destination Port", which default to
the "TargetIp" and "TargetPort" parameters
2: Ask for the "Listen IP" (listenaddr) and "Listen Port" (listenport)
and then swap them in "TargetIp" and "TargetPort"
3: After execution, restore the proper session parameters
* (listenaddr, listenport) = l(destaddr, destport)
For the "callin" tunnel, we:
1: Ask for/retrieve the "Destination IP" and Destination Port", which default to
the "TargetIp" and the "ListenPort" parameters
2: Ask for the "Listen IP" (listenaddr) and "Listen Port" (listenport) and
then swap them into "TargetIp" and "CallinPort" parameters
3: After execution, restore the proper session parameters
* (listenaddr, listenport) = l(destaddr, destport)
For the "callback" tunnel, we:
1: Ask for the Listen IP and Listen Port for which the payload will callback.
This is most likely the last hop redirector IP and a port on it
2: Ask for the Destination IP and Destination Port, which will likely be the
operator workstation. Store the Destination port as "CallbackLocalPort",
basically ignoring the DestinationIp
3: After execution, restore the proper session parameters
* (destaddr, destport) = l(listenaddr, listenport)
"""
def __init__(self, io):
self.io = io
self.active = False
# A place to store parameters for the session. We push the info, run the plug-in
# (with redirection), and then pop the info to restore it
self.session_cache = {}
def on(self):
self.active = True
def off(self):
self.active = False
def is_active(self):
return self.active
def get_status(self):
if self.active:
return "ON"
else:
return "OFF"
def get_session(self, id):
return self.session_cache.get(id)
def pre_exec(self, plugin):
if not plugin.canRedirect():
return 0
if self.is_active():
self.io.print_msg("Redirection ON")
return self.config_redirect(plugin, True)
else:
self.io.print_msg("Redirection OFF")
return self.config_redirect(plugin, False)
def post_exec(self, plugin, id):
if id == 0:
return
# if plugin doesn't do redir, return
try:
stored_session_data = self.session_cache.pop(id)
except KeyError:
return
# Restore the old information to the session
for key,val in stored_session_data['params'].items():
plugin.set(key, val)
def print_session(self, id):
try:
session = self.session_cache[id]
except KeyError:
return
self.io.print_global_redir(session)
"""
Pre plugin execution
"""
def conv_param(self, val, params, session_data={}):
"""Resolve a value from one of session, params, or the hard value"""
try:
# First try to find the session parameter
if val in session_data:
return session_data[val]
# Then try to find the context-specific parameter
if is_identifier(val):
return params[val]
except:
return None
# If it is neither a session or context parameter, return the value as is
return val
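    # Resolution order example: conv_param("TargetPort", params, session) first
    # checks session["TargetPort"], then params["TargetPort"]; a hard value
    # such as conv_param("443", params) is returned as-is, because a string
    # starting with a digit is not treated as an identifier.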
def prompt_redir_fake(self, msg, default):
done = None
while not done:
try:
line = self.io.prompt_user(msg, default)
except exception.PromptHelp, err:
self.io.print_warning('No help available')
except exception.PromptErr, err:
raise
except exception.CmdErr, err:
self.io.print_error(err.getErr())
if line:
return line
def prompt_redir(self, plugin, var, msg, default):
"""Prompt for a redirect value and set it in Truantchild"""
done = None
while not done:
try:
line = self.io.prompt_user(msg, default)
plugin.set(var, line)
done = plugin.hasValidValue(var)
except exception.PromptHelp, err:
self.io.print_warning('No help available')
except exception.PromptErr, err:
raise
except exception.CmdErr, err:
self.io.print_error(err.getErr())
return plugin.get(var)
def straight_remote(self, r, plugin):
params = iDict(plugin.getParameters())
lport = self.conv_param(r.listenport, params)
dport = self.conv_param(r.destport, params)
laddr = self.conv_param(r.listenaddr, params)
if None in (lport, dport, laddr):
return
# Do we need to choose a random local port?
# XXX - This won't happen unless lport is 0
if not lport or lport == "0":
lport = rand_port()
# Store off the old values so that we can restore them after the
# plug-in executes
cache = {r.listenaddr : plugin.get(r.listenaddr),
r.listenport : plugin.get(r.listenport),
r.destport : plugin.get(r.destport)}
self.io.print_success("Remote Tunnel - %s" % r.name)
try:
# Modify the plugin and report success
callbackip = self.prompt_redir(plugin, r.listenaddr, 'Listen IP', laddr)
callbackport = self.prompt_redir(plugin, r.listenport, 'Listen Port', lport)
plugin.set(r.destport, callbackport)
self.io.print_success("(%s) Remote %s:%s" % (r.protocol, callbackip, callbackport))
except exception.PromptErr:
self.io.print_error("Aborted by user")
for (var,val) in cache.items():
try:
plugin.set(var, val)
except:
self.io.print_error("Error setting '%s' - May be in an inconsistent state" % var)
raise
def straight_local(self, l, plugin):
"""Effectively just print the straight path to the target"""
params = iDict(plugin.getParameters())
laddr = self.conv_param(l.listenaddr, params)
lport = self.conv_param(l.listenport, params)
if not laddr or not lport:
return
# HACK HACK
# The logic here was previously wrong, which meant that people didn't have to be careful
# about their redirection sections. Until we get them fixed, we need a hack that will
# allow these invalid redirection sections if we try it the valid way and fail
enable_hack = False
try:
cache = {l.destaddr : plugin.get(l.destaddr),
l.destport : plugin.get(l.destport)}
laddr = self.conv_param(l.destaddr, params)
lport = self.conv_param(l.destport, params)
except exception.CmdErr:
enable_hack = True
cache = {l.destaddr : plugin.get(l.listenaddr),
l.destport : plugin.get(l.listenport)}
self.io.print_success("Local Tunnel - %s" % l.name)
try:
if not enable_hack:
targetip = self.prompt_redir(plugin, l.destaddr, 'Destination IP', laddr)
targetport = self.prompt_redir(plugin, l.destport, 'Destination Port', lport)
self.io.print_success("(%s) Local %s:%s" % (l.protocol, targetip, targetport))
else:
targetip = self.prompt_redir(plugin, l.listenaddr, 'Destination IP', laddr)
targetport = self.prompt_redir(plugin, l.listenport, 'Destination Port', lport)
self.io.print_success("(%s) Local %s:%s" % (l.protocol, targetip, targetport))
except exception.PromptErr:
self.io.print_error("Aborted by user")
for (var,val) in cache.items():
try:
plugin.set(var, val)
except:
self.io.print_error("Error setting '%s' - May be in an inconsistent state" % var)
raise
except Exception as e:
self.io.print_error("Error: {0}".format(str(type(e))))
def redirect_remote(self, r, plugin, session_data):
"""(destaddr, destport) = r-xform(listenaddr, listenport)
* Each of the identifiers above specifies a variable for the plug-in
(1) Prompt for Listen IP - Likely the ultimate redirector's IP
(2) Prompt for Listen Port - Likely the ultimate redirector's port
(3) Prompt for Destination - Likely 0.0.0.0
(4) Prompt for Destination Port - Likely a local port
Lookup the variables specified by listenaddr and listenport, transform them with
a given transform function, and substitute the resulting values into the
variables specified by destaddr and destport.
The plug-in will then have to open a port to listen on using the variables
specified by the destnation IP and destination port
"""
params = iDict(plugin.getParameters())
lport = self.conv_param(r.listenport, params, session_data['params'])
dport = self.conv_param(r.destport, params, session_data['params'])
laddr = self.conv_param(r.listenaddr, params, session_data['params'])
daddr = self.conv_param(r.destaddr, params, session_data['params'])
if None in (lport, dport, laddr, daddr):
for p,n in (laddr, r.listenaddr), (lport, r.listenport), (daddr, r.destaddr), (dport, r.destport):
if p == None:
self.io.print_warning("Parameter %s referenced by tunnel %s not found. This tunnel will "
"be ignored" % (n, r.name))
return
if not lport or lport == "0":
lport = rand_port()
self.io.print_success("Remote Tunnel - %s" % r.name)
#
# Prompt the user for the listenaddr and listenport
#
callbackip = self.prompt_redir(plugin, r.listenaddr, 'Listen IP', laddr)
callbackport = self.prompt_redir(plugin, r.listenport, 'Listen Port', lport)
#
# Do the substitution
#
session_data['params'][r.listenaddr] = callbackip
session_data['params'][r.listenport] = callbackport
# Get the other end of the tunnel, where the connection will eventually be made.
# This will likely be, but does not have to be, the local workstation
callbacklocalip = self.prompt_redir_fake('Destination IP', daddr)
if not dport:
dport = callbackport
callbacklocalport = self.prompt_redir(plugin, r.destport, 'Destination Port', dport)
session_data['params'][r.destport] = callbacklocalport
session_data['remote'].append(RemoteRedirection(r.protocol,
callbackip,
callbacklocalport,
listenport=callbackport,
destaddr=callbacklocalip,
name=r.name))
self.io.print_success("(%s) Remote %s:%s -> %s:%s" %
(r.protocol, callbackip, callbackport,
callbacklocalip, callbacklocalport))
def redirect_local(self, l, plugin, session_data):
"""
targetip = Destination IP (on the target)
targetport = Destination Port (on the target)
redirip = IP of the LP
redirport = Port on the LP
"""
# listenaddr - name of variable containing the LP IP
# listenport - name of variable containing the LP Port
# destaddr - name of variable containing the Target IP
# destport - name of variable containing the Target Port
# targetip - IP of the target
# targetport - Port of the target
# redirip - IP of the LP
# redirport - Port of the LP
params = iDict(plugin.getParameters())
# Get the defaults for the user prompt
laddr = self.conv_param(l.listenaddr, params, session_data['params'])
lport = self.conv_param(l.listenport, params, session_data['params'])
daddr = self.conv_param(l.destaddr, params, session_data['params'])
dport = self.conv_param(l.destport, params, session_data['params'])
if None in (laddr, lport, daddr, dport):
for p,n in (laddr, l.listenaddr), (lport, l.listenport), (daddr, l.destaddr), (dport, l.destport):
if p == None:
self.io.print_warning("Parameter %s referenced by tunnel %s not found. This tunnel will "
"be ignored" % (n, l.name))
return
self.io.print_success("Local Tunnel - %s" % l.name)
#
# Get the destination IP and port for the target
#
targetip = self.prompt_redir_fake('Destination IP', daddr)
targetport = self.prompt_redir_fake('Destination Port', dport)
#
# Get the redirection addresses
#
redirip = self.prompt_redir(plugin, l.listenaddr, 'Listen IP', '127.0.0.1')
if not dport:
dport = targetport
redirport = self.prompt_redir(plugin, l.listenport, 'Listen Port', lport)
#
#
#
session_data['params'][l.listenaddr] = targetip
session_data['params'][l.listenport] = targetport
#
# Record the redirection tunnel
#
session_data['local'].append(LocalRedirection(l.protocol, redirip,
redirport, targetip,
targetport, name=l.name))
self.io.print_success("(%s) Local %s:%s -> %s:%s" %
(l.protocol, redirip, redirport, targetip, targetport))
def config_redirect(self, plugin, do_redir):
"""Configure whether the plug-in should perform redirection
plugin - An instance of a plugin
do_redir - Should we do redirection? (True or False)"""
redir = plugin.getRedirection()
# Make a new session dictionary here
session_data = {
'params' : {}, #
'remote' : [], #
'local' : [] #
}
if do_redir:
id = uuid.uuid4()
else:
id = 0
try:
self.io.newline()
self.io.print_success("Configure Plugin Local Tunnels")
for l in redir['local']:
if do_redir:
self.redirect_local(l, plugin, session_data)
else:
self.straight_local(l, plugin)
self.io.newline()
self.io.print_success("Configure Plugin Remote Tunnels")
for r in redir['remote']:
if do_redir:
self.redirect_remote(r, plugin, session_data)
else:
self.straight_remote(r, plugin)
except exception.PromptErr:
for key,val in session_data['params'].items():
plugin.set(key, val)
raise
self.io.newline()
# Store info into the cache so that we can restore it in post_exec
if id:
self.session_cache[id] = session_data
return id
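    # A minimal lifecycle sketch (hedged; ``mgr`` and ``plugin`` are
    # hypothetical instances):
    #   mgr.on()                     # enable redirection
    #   id = mgr.pre_exec(plugin)    # swap in tunnel params, returns session id
    #   ...                          # execute the plug-in
    #   mgr.post_exec(plugin, id)    # restore the cached session parameters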
| DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/fuzzbunch/redirection.py | Python | unlicense | 20,247 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Mendel's First Law
Usage:
IPRB.py <input>
IPRB.py (--help | --version)
Options:
-h --help show this help message and exit
-v --version show version and exit
"""
problem_description = """Mendel's First Law
Problem
Probability is the mathematical study of randomly occurring phenomena. We will
model such a phenomenon with a random variable, which is simply a variable that
can take a number of different distinct outcomes depending on the result of an
underlying random process.
For example, say that we have a bag containing 3 red balls and 2 blue balls. If
we let X represent the random variable corresponding to the color of a drawn
ball, then the probability of each of the two outcomes is given by Pr(X=red)=3/5
and Pr(X=blue)=2/5.
Random variables can be combined to yield new random variables. Returning to the
ball example, let Y model the color of a second ball drawn from the bag (without
replacing the first ball). The probability of Y being red depends on whether the
first ball was red or blue. To represent all outcomes of X and Y, we therefore
use a probability tree diagram. This branching diagram represents all possible
individual probabilities for X and Y, with outcomes at the endpoints ("leaves")
of the tree. The probability of any outcome is given by the product of
probabilities along the path from the beginning of the tree; see Figure 2 for an
illustrative example.
An event is simply a collection of outcomes. Because outcomes are distinct, the
probability of an event can be written as the sum of the probabilities of its
constituent outcomes. For our colored ball example, let A be the event "Y is
blue." Pr(A) is equal to the sum of the probabilities of two different outcomes:
Pr(X=blue and Y=blue)+Pr(X=red and Y=blue), or 3/10+1/10=2/5 (see Figure 2 above).
Given: Three positive integers k, m, and n, representing a population containing
k+m+n organisms: k individuals are homozygous dominant for a factor, m are
heterozygous, and n are homozygous recessive.
Return: The probability that two randomly selected mating organisms will produce
an individual possessing a dominant allele (and thus displaying the dominant
phenotype). Assume that any two organisms can mate.
Sample Dataset
2 2 2
Sample Output
0.78333
"""
from docopt import docopt
def get_k_m_n(inp_file):
with open(inp_file, 'r') as inp:
k, m, n = inp.readline().strip().split(' ')
return float(k), float(m), float(n)
#TODO: Write elegant, general solution to "marbles-in-jar problem"
def calculate(k, m, n):
first_pop = k + m + n
second_pop = first_pop - 1
kk = k / first_pop * (k - 1) / second_pop
km = k / first_pop * (m) / second_pop + m / first_pop * (k) / second_pop
kn = k / first_pop * (n) / second_pop + n / first_pop * (k) / second_pop
mm = m / first_pop * (m - 1) / second_pop
mn = m / first_pop * (n) / second_pop + n / first_pop * (m) / second_pop
nn = n / first_pop * (n - 1) / second_pop
return kk, km, kn, mm, mn, nn
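# Worked example for the sample dataset k=m=n=2 (population of 6):
# kk = 2/6 * 1/5 = 1/15, km = kn = mn = 8/30 = 4/15, mm = nn = 1/15, so the
# final answer is (1/15 + 4/15 + 4/15) + 0.75*(1/15) + 0.5*(4/15) = 0.78333.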
def main():
k, m, n = get_k_m_n(arguments['<input>'])
#k is homozygous dominant, m heterozygous, and n is homozygous recessive
#There are 6 possible combinations of parentage, though some may not be
#possible if there are only 1 individuals of a type
kk, km, kn, mm, mn, nn = calculate(k, m, n)
certain = kk + km + kn
three_quarter = 0.75 * mm
half = 0.5 * mn
print(certain + three_quarter + half)
if __name__ == '__main__':
arguments = docopt(__doc__, version='0.0.1')
main()
| SavinaRoja/challenges | Rosalind/Heredity/IPRB.py | Python | unlicense | 3,585 |
import scrapy
import re
from research.items import ResearchItem
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class CaltechSpider(scrapy.Spider):
name = "WISC"
allowed_domains = ["cs.wisc.edu"]
start_urls = ["https://www.cs.wisc.edu/research/groups"]
def parse(self, response):
item = ResearchItem()
for sel in response.xpath('//table[@class="views-table cols-2"]'):
item['groupname'] = sel.xpath('caption/text()').extract()[0]
item['proflist'] = []
for selp in sel.xpath('.//div[@class="views-field views-field-name-1"]/span/a'):
tmpname = selp.xpath('text()').extract()
print str(tmpname)
item['proflist'].append(tmpname)
yield item
| doge-search/webdoge | liqian/WISC/research/research/spiders/WISCSpider.py | Python | unlicense | 678 |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import pandas as pd
class TerrplantFunctions(object):
"""
    Function class for Terrplant.
"""
def __init__(self):
"""Class representing the functions for Sip"""
super(TerrplantFunctions, self).__init__()
def run_dry(self):
"""
EEC for runoff for dry areas
"""
self.out_run_dry = (self.application_rate / self.incorporation_depth) * self.runoff_fraction
return self.out_run_dry
def run_semi(self):
"""
EEC for runoff to semi-aquatic areas
"""
self.out_run_semi = (self.application_rate / self.incorporation_depth) * self.runoff_fraction * 10
return self.out_run_semi
def spray(self):
"""
EEC for spray drift
"""
self.out_spray = self.application_rate * self.drift_fraction
return self.out_spray
def total_dry(self):
"""
EEC total for dry areas
"""
self.out_total_dry = self.out_run_dry + self.out_spray
return self.out_total_dry
def total_semi(self):
"""
EEC total for semi-aquatic areas
"""
self.out_total_semi = self.out_run_semi + self.out_spray
return self.out_total_semi
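    # Worked example (hedged, hypothetical inputs): with application_rate=1.0,
    # incorporation_depth=1.0, runoff_fraction=0.05 and drift_fraction=0.05,
    # run_dry = 0.05, run_semi = 0.5, spray = 0.05, giving total_dry = 0.10
    # and total_semi = 0.55.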
def nms_rq_dry(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a DRY area
"""
self.out_nms_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_dry
def loc_nms_dry(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a dry area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_dry]
self.out_nms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
# exceed_boolean = self.out_nms_rq_dry >= 1.0
# self.out_nms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_nms_loc_dry
def nms_rq_semi(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_nms_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_semi
def loc_nms_semi(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a semi-aquatic area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_semi]
self.out_nms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_semi >= 1.0
#self.out_nms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_nms_loc_semi
def nms_rq_spray(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nms_rq_spray = self.out_spray / self.out_min_nms_spray
return self.out_nms_rq_spray
def loc_nms_spray(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide via spray drift
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_spray]
self.out_nms_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_spray >= 1.0
#self.out_nms_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_nms_loc_spray
def lms_rq_dry(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a DRY areas
"""
self.out_lms_rq_dry = self.out_total_dry / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_dry
def loc_lms_dry(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide via runoff in a dry area
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_dry]
self.out_lms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_dry >= 1.0
#self.out_lms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_lms_loc_dry
def lms_rq_semi(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_lms_rq_semi = self.out_total_semi / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_semi
def loc_lms_semi(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_semi]
self.out_lms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_semi >= 1.0
#self.out_lms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_lms_loc_semi
def lms_rq_spray(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lms_rq_spray = self.out_spray / self.out_min_lms_spray
return self.out_lms_rq_spray
def loc_lms_spray(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_spray]
self.out_lms_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_spray >= 1.0
#self.out_lms_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_lms_loc_spray
def nds_rq_dry(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_nds_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_emergence_dicot
return self.out_nds_rq_dry
def loc_nds_dry(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_dry]
self.out_nds_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_dry >= 1.0
#self.out_nds_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_nds_loc_dry
def nds_rq_semi(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_nds_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_emergence_dicot
return self.out_nds_rq_semi
def loc_nds_semi(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_semi]
self.out_nds_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_semi >= 1.0
#self.out_nds_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_nds_loc_semi
def nds_rq_spray(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nds_rq_spray = self.out_spray / self.out_min_nds_spray
return self.out_nds_rq_spray
def loc_nds_spray(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_spray]
self.out_nds_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_spray >= 1.0
#self.out_nds_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_nds_loc_spray
def lds_rq_dry(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_lds_rq_dry = self.out_total_dry / self.noaec_listed_seedling_emergence_dicot
return self.out_lds_rq_dry
def loc_lds_dry(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_dry]
self.out_lds_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_dry >= 1.0
#self.out_lds_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_lds_loc_dry
def lds_rq_semi(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_lds_rq_semi = self.out_total_semi / self.noaec_listed_seedling_emergence_dicot
return self.out_lds_rq_semi
def loc_lds_semi(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_semi]
self.out_lds_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_semi >= 1.0
#self.out_lds_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_lds_loc_semi
def lds_rq_spray(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lds_rq_spray = self.out_spray / self.out_min_lds_spray
return self.out_lds_rq_spray
def loc_lds_spray(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_spray]
self.out_lds_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_spray >= 1.0
#self.out_lds_loc_spray = exceed_boolean.map(
# lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_lds_loc_spray
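# Note: the out_min_*_spray denominators used by the spray-drift RQ methods
# above are produced by the four min_* helpers below, which take the
# element-wise minimum of the seedling emergence and vegetative vigor
# endpoints for each plant group.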
def min_nms_spray(self):
"""
determine minimum toxicity concentration used for RQ spray drift values
non-listed monocot seedling emergence and vegetative vigor EC25 values
"""
s1 = pd.Series(self.ec25_nonlisted_seedling_emergence_monocot, name='seedling')
s2 = pd.Series(self.ec25_nonlisted_vegetative_vigor_monocot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_nms_spray = df.min(axis=1)
return self.out_min_nms_spray
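# Toy illustration of the element-wise minimum pattern used here and in the
# three min_* methods that follow (assumed values, doctest-style):
# >>> pd.concat([pd.Series([1.0, 5.0]), pd.Series([3.0, 2.0])], axis=1).min(axis=1)
# 0    1.0
# 1    2.0
# dtype: float64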
def min_lms_spray(self):
"""
determine minimum toxicity concentration used for RQ spray drift values
listed monocot seedling emergence and vegetative vigor NOAEC values
"""
s1 = pd.Series(self.noaec_listed_seedling_emergence_monocot, name='seedling')
s2 = pd.Series(self.noaec_listed_vegetative_vigor_monocot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_lms_spray = df.min(axis=1)
return self.out_min_lms_spray
def min_nds_spray(self):
"""
determine minimum toxicity concentration used for RQ spray drift values
non-listed dicot seedling emergence and vegetative vigor EC25 values
"""
s1 = pd.Series(self.ec25_nonlisted_seedling_emergence_dicot, name='seedling')
s2 = pd.Series(self.ec25_nonlisted_vegetative_vigor_dicot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_nds_spray = df.min(axis=1)
return self.out_min_nds_spray
def min_lds_spray(self):
"""
determine minimum toxicity concentration used for RQ spray drift values
listed dicot seedling emergence and vegetative vigor NOAEC values
"""
s1 = pd.Series(self.noaec_listed_seedling_emergence_dicot, name='seedling')
s2 = pd.Series(self.noaec_listed_vegetative_vigor_dicot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_lds_spray = df.min(axis=1)
return self.out_min_lds_spray | puruckertom/ubertool | ubertool/terrplant/terrplant_functions.py | Python | unlicense | 20,304 |
'''Convert video JSON data into a CSV file.
The JSON documents should be from
https://api.twitch.tv/kraken/videos/top?limit=20&offset=0&period=all
'''
import argparse
import csv
import glob
import json
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('directory')
arg_parser.add_argument('csv_filename')
args = arg_parser.parse_args()
with open(args.csv_filename, 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['id', 'url', 'date', 'views', 'length'])
for filename in sorted(glob.glob(args.directory + '/*.json')):
with open(filename) as json_file:
doc = json.load(json_file)
for video in doc['videos']:
writer.writerow([video['_id'], video['url'], video['recorded_at'], video['views'], video['length']])
if __name__ == '__main__':
main()
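# Usage sketch (directory and file names are hypothetical):
# python apivideos2csv.py dumps/ videos.csv
# Each dumps/*.json file is expected to hold one API response containing the
# fields read above, e.g.:
# {"videos": [{"_id": "v123", "url": "https://...", "recorded_at": "...",
# "views": 42, "length": 3600}]}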
| ArchiveTeam/twitchtv-items | utils/apivideos2csv.py | Python | unlicense | 931 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.is_forced_active'
db.add_column(u'projects_project', 'is_forced_active',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
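# Roughly the SQL South emits for the add_column above (backend-dependent;
# PostgreSQL-flavoured sketch, not verbatim):
# ALTER TABLE "projects_project"
# ADD COLUMN "is_forced_active" boolean NOT NULL DEFAULT false;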
def backwards(self, orm):
# Deleting field 'Project.is_forced_active'
db.delete_column(u'projects_project', 'is_forced_active')
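# Usage sketch (standard South workflow; assumes the preceding migration in
# this app is numbered 0011):
# python manage.py migrate projects # apply forwards()
# python manage.py migrate projects 0011 # roll back via backwards()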
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'projects.event': {
'Meta': {'object_name': 'Event'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'events'", 'blank': 'True', 'to': u"orm['projects.Organisation']"}),
'strategy': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'projects.member': {
'Meta': {'object_name': 'Member'},
'availability': ('django.db.models.fields.CharField', [], {'default': "'reader'", 'max_length': '20'}),
'available_after': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_frequency': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '2'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_paid_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_contacted_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'latest_answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'offered_help': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'projects_active': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'active_members'", 'blank': 'True', 'to': u"orm['projects.Project']"}),
'projects_interests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'interested_members'", 'blank': 'True', 'to': u"orm['projects.Project']"}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': u"orm['projects.Skill']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': u"orm['projects.MemberType']"}),
'update_from_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member'", 'null': 'True', 'to': u"orm['projects.User']"}),
'working_on': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'working_members'", 'null': 'True', 'to': u"orm['projects.Project']"})
},
u'projects.membertype': {
'Meta': {'object_name': 'MemberType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'projects.organisation': {
'Meta': {'object_name': 'Organisation'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'found_via': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'middlemen': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'middleman_organisations'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['projects.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'partnered_project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'to': u"orm['projects.Project']"}),
'provided_help': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'representatives': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'strategy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organisations'", 'blank': 'True', 'to': u"orm['projects.OrganisationType']"}),
'working_with': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'projects.organisationtype': {
'Meta': {'object_name': 'OrganisationType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'projects.project': {
'Meta': {'object_name': 'Project'},
'complimenting_color': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'cover_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'facebook_group': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'github_repo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_forced_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logo': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'logo_styled': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'logo_thumb': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pm_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'strategy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'projects.projectactivity': {
'Meta': {'object_name': 'ProjectActivity'},
'can_accomodate': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_organisational': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'activities'", 'null': 'True', 'to': u"orm['projects.Project']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'project_activities'", 'symmetrical': 'False', 'through': u"orm['projects.UserActivity']", 'to': u"orm['projects.User']"})
},
u'projects.projectmilestone': {
'Meta': {'object_name': 'ProjectMilestone'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_technical': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'percent': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '3'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'milestones'", 'to': u"orm['projects.Project']"}),
'target_date': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'projects.projectmotive': {
'Meta': {'object_name': 'ProjectMotive'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'motives'", 'to': u"orm['projects.Project']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'projects.projectusageexamplestep': {
'Meta': {'object_name': 'ProjectUsageExampleStep'},
'example_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'icon': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'example_steps'", 'to': u"orm['projects.Project']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'})
},
u'projects.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'skills'", 'blank': 'True', 'to': u"orm['projects.SkillGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'projects.skillgroup': {
'Meta': {'object_name': 'SkillGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'projects.task': {
'Meta': {'object_name': 'Task'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'to': u"orm['projects.ProjectActivity']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5', 'max_length': '4'})
},
u'projects.update': {
'Meta': {'object_name': 'Update'},
'change': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'projects.user': {
'Meta': {'object_name': 'User'},
'available_after': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'bio': ('django.db.models.fields.TextField', [], {}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'has_confirmed_data': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'profession': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'users'", 'blank': 'True', 'to': u"orm['projects.Skill']"}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'projects.useractivity': {
'Meta': {'unique_together': "(('person', 'project_activity'),)", 'object_name': 'UserActivity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_stopped_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'needs_replacement': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'to': u"orm['projects.User']"}),
'progress': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'project_activity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_activities'", 'to': u"orm['projects.ProjectActivity']"})
},
u'projects.userpointspending': {
'Meta': {'object_name': 'UserPointSpending'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'spendings'", 'to': u"orm['projects.User']"}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '10'}),
'product': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'projects.userprojectpause': {
'Meta': {'unique_together': "(('project', 'person'),)", 'object_name': 'UserProjectPause'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pauses'", 'to': u"orm['projects.User']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pauses'", 'to': u"orm['projects.Project']"})
}
}
complete_apps = ['projects'] | tochev/obshtestvo.bg | projects/migrations/0012_auto__add_field_project_is_forced_active.py | Python | unlicense | 19,324 |