code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int32, 2 to 1.05M) |
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
# This file contains a class to analyse text in giellatekno xml format
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2013-2014 Børre Gaup <[email protected]>
#
from __future__ import unicode_literals
import os
import unittest
import doctest
from lxml import etree
from lxml import doctestcompare
from corpustools import analyser
from corpustools import parallelize
from corpustools import util
here = os.path.dirname(__file__)
class TestAnalyser(unittest.TestCase):
def setUp(self):
self.a = analyser.Analyser(
'sme',
'xfst',
fst_file=os.path.join(here, 'analyser.xfst'),
disambiguation_analysis_file=os.path.join(here,
'disambiguation.cg3'),
function_analysis_file=os.path.join(here, 'functions.cg3'),
dependency_analysis_file=os.path.join(here, 'dependency.cg3'))
self.a.xml_file = parallelize.CorpusXMLFile(
os.path.join(here, 'smefile.xml'))
def assertXmlEqual(self, got, want):
"""Check if two stringified xml snippets are equal
"""
checker = doctestcompare.LXMLOutputChecker()
if not checker.check_output(want, got, 0):
message = checker.output_difference(
doctest.Example("", want), got, 0).encode('utf-8')
raise AssertionError(message)
def test_raise_on_None_file(self):
with self.assertRaises(TypeError):
analyser.Analyser('sme', 'xfst', None, None, None, None)
def test_raise_on_bad_file(self):
with self.assertRaises(util.ArgumentError):
analyser.Analyser('sme',
'xfst',
fst_file=os.path.join(here, 'notafile'),
disambiguation_analysis_file=os.path.join(here,
'notafile'),
function_analysis_file=os.path.join(here, 'notafile'),
dependency_analysis_file=os.path.join(here, 'notafile'))
def test_sme_ccat_output(self):
"""Test if the ccat output is what we expect it to be
"""
got = self.a.ccat()
want = (
'Muhto gaskkohagaid, ja erenoamážit dalle go lei buolaš, '
'de aggregáhta billánii. ¶\n')
self.assertEqual(got, want.encode('utf8'))
def test_sme_preprocess_output(self):
"""Test if the preprocess output is what we expect it to be
"""
got = self.a.preprocess()
want = (
'Muhto\ngaskkohagaid\n,\nja\nerenoamážit\ndalle go\nlei\n'
'buolaš\n,\nde\naggregáhta\nbillánii\n.\n¶\n')
self.assertEqual(got, want.encode('utf8'))
def test_sme_disambiguation_output(self):
"""Check if disambiguation analysis gives the expected output
"""
self.a.disambiguation_analysis()
got = self.a.get_disambiguation()
want = (
'"<Muhto>"\n\t"muhto" CC <sme> @CVP\n"<gaskkohagaid>"\n'
'\t"gaskkohagaid" Adv <sme>\n"<,>"\n\t"," CLB\n"<ja>"\n'
'\t"ja" CC <sme> @CNP\n"<erenoamážit>"\n'
'\t"erenoamážit" Adv <sme>\n"<dalle_go>"\n'
'\t"dalle_go" MWE CS <sme> @CVP\n"<lei>"\n'
'\t"leat" V <sme> IV Ind Prt Sg3 @+FMAINV\n"<buolaš>"\n'
'\t"buolaš" Sem/Wthr N <sme> Sg Nom\n"<,>"\n'
'\t"," CLB\n"<de>"\n\t"de" Adv <sme>\n"<aggregáhta>"\n'
'\t"aggregáhta" N <sme> Sg Nom\n"<billánii>"\n'
'\t"billánit" V <sme> IV Ind Prt Sg3 @+FMAINV\n"<.>"\n'
'\t"." CLB\n\n"<¶>"\n\t"¶" CLB\n\n')
self.assertEqual(got, want.encode('utf8'))
def test_sme_dependency_output(self):
"""Check if disambiguation analysis gives the expected output
"""
self.a.dependency_analysis()
got = self.a.get_dependency()
want = (
'"<Muhto>"\n\t"muhto" CC @CVP #1->1\n"<gaskkohagaid>"\n'
'\t"gaskkohagaid" Adv @ADVL> #2->12\n"<,>"\n'
'\t"," CLB #3->4\n"<ja>"\n\t"ja" CC @CNP #4->2\n"<erenoamážit>"\n'
'\t"erenoamážit" Adv @ADVL> #5->12\n"<dalle_go>"\n'
'\t"dalle_go" CS @CVP #6->7\n"<lei>"\n'
'\t"leat" V IV Ind Prt Sg3 @FS-ADVL> #7->12\n"<buolaš>"\n'
'\t"buolaš" N Sg Nom @<SPRED #8->7\n"<,>"\n'
'\t"," CLB #9->6\n"<de>"\n'
'\t"de" Adv @ADVL> #10->12\n"<aggregáhta>"\n'
'\t"aggregáhta" N Sg Nom @SUBJ> #11->12\n"<billánii>"\n'
'\t"billánit" V IV Ind Prt Sg3 @FS-ADVL> #12->0\n"<.>"\n'
'\t"." CLB #13->12\n\n"<¶>"\n\t"¶" CLB #1->1\n\n')
self.assertEqual(got, want.encode('utf8'))
def test_analysisXml(self):
"""Check if the xml is what it is supposed to be
"""
self.a.dependency_analysis()
self.a.get_analysis_xml()
got = self.a.xml_file.get_etree()
want = (
'<document xml:lang="sme" id="no_id">\n'
' <header>\n'
' <title>Internáhtta sosiálalaš giliguovddážin</title>\n'
' <genre code="facta"/>\n'
' <author>\n'
' <person firstname="Abba" lastname="Abbamar" sex="m" '
'born="1900" nationality="nor"/>\n'
' </author>\n'
' <translator>\n'
' <person firstname="Ibba" lastname="Ibbamar" sex="unknown" '
'born="" nationality=""/>\n'
' </translator>\n'
' <translated_from xml:lang="nob"/>\n'
' <year>2005</year>\n'
' <publChannel>\n'
' <publication>\n'
' <publisher>Almmuheaddji OS</publisher>\n'
' </publication>\n'
' </publChannel>\n'
' <wordcount>10</wordcount>\n'
' <availability>\n'
' <free/>\n'
' </availability>\n'
' <submitter name="Børre Gaup" '
'email="[email protected]"/>\n'
' <multilingual>\n'
' <language xml:lang="nob"/>\n'
' </multilingual>\n'
' <origFileName>aarseth_s.htm</origFileName>\n'
' <metadata>\n'
' <uncomplete/>\n'
' </metadata>\n'
' <version>XSLtemplate 1.9 ; file-specific xsl '
'$Revision: 1.3 $; common.xsl $Revision$; </version>\n'
' </header>\n'
' <body><disambiguation><![CDATA["<Muhto>"\n'
'\t"muhto" CC <sme> @CVP\n"<gaskkohagaid>"\n'
'\t"gaskkohagaid" Adv <sme>\n"<,>"\n\t"," CLB\n"<ja>"\n'
'\t"ja" CC <sme> @CNP\n"<erenoamážit>"\n'
'\t"erenoamážit" Adv <sme>\n"<dalle_go>"\n'
'\t"dalle_go" MWE CS <sme> @CVP\n"<lei>"\n'
'\t"leat" V <sme> IV Ind Prt Sg3 @+FMAINV\n"<buolaš>"\n'
'\t"buolaš" Sem/Wthr N <sme> Sg Nom\n"<,>"\n'
'\t"," CLB\n"<de>"\n\t"de" Adv <sme>\n"<aggregáhta>"\n'
'\t"aggregáhta" N <sme> Sg Nom\n"<billánii>"\n'
'\t"billánit" V <sme> IV Ind Prt Sg3 @+FMAINV\n"<.>"\n'
'\t"." CLB\n\n"<¶>"\n\t"¶" CLB\n\n]]></disambiguation>'
'<dependency><![CDATA["<Muhto>"\n'
'\t"muhto" CC @CVP #1->1\n"<gaskkohagaid>"\n'
'\t"gaskkohagaid" Adv @ADVL> #2->12\n"<,>"\n'
'\t"," CLB #3->4\n"<ja>"\n\t"ja" CC @CNP #4->2\n"<erenoamážit>"\n'
'\t"erenoamážit" Adv @ADVL> #5->12\n"<dalle_go>"\n'
'\t"dalle_go" CS @CVP #6->7\n"<lei>"\n'
'\t"leat" V IV Ind Prt Sg3 @FS-ADVL> #7->12\n"<buolaš>"\n'
'\t"buolaš" N Sg Nom @<SPRED #8->7\n"<,>"\n'
'\t"," CLB #9->6\n"<de>"\n'
'\t"de" Adv @ADVL> #10->12\n"<aggregáhta>"\n'
'\t"aggregáhta" N Sg Nom @SUBJ> #11->12\n"<billánii>"\n'
'\t"billánit" V IV Ind Prt Sg3 @FS-ADVL> #12->0\n"<.>"\n'
'\t"." CLB #13->12\n\n"<¶>"\n'
'\t"¶" CLB #1->1\n\n]]></dependency></body></document>')
self.maxDiff = None
self.assertEqual(etree.tostring(got, encoding='unicode'), want)
| unhammer/gt-CorpusTools | corpustools/test/test_analyser.py | Python | gpl-3.0 | 8,941 |
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
from pyglet.window.key import user_key
from pyglet.window.mouse import LEFT as MOUSE_LEFT, RIGHT as MOUSE_RIGHT
from mmfparser.player.common import PlayerChild
from mmfparser.player.eventdispatcher import EventDispatcher
DIRECTIONAL_CONTROLS = ('Up', 'Down', 'Left', 'Right')
KEY_LIST = ('Up', 'Down', 'Left', 'Right', 'Button1', 'Button2', 'Button3',
'Button4')
UP, DOWN, LEFT, RIGHT, BUTTON1, BUTTON2, BUTTON3, BUTTON4 = xrange(8)
class Player(PlayerChild, EventDispatcher):
name = ''
keys = None
keyList = None
keyNames = None
pressed_keys = None
lives = None
score = None
controls_ignored = False
use_mouse = False
def initialize(self, control):
controlType = control.getControlType()
if controlType != 'Keyboard':
raise NotImplementedError(
'control type %r unsupported' % controlType)
keys = control.keys
convert = self.player.keyboard.convert
self.keyList = keyList = []
self.keyNames = keyNames = []
for key in (keys.up, keys.down, keys.left, keys.right, keys.button1,
keys.button2, keys.button3, keys.button4):
keyList.append(convert(key.getValue()))
keyNames.append(key.getName())
self.keys = keys = {}
for index, key in enumerate(KEY_LIST):
keys[key] = keyList[index]
self.symbol_to_key = dict([(v, k) for k, v in keys.iteritems()])
self.reset()
self.player.window.push_handlers(
on_key_press = self.key_pressed,
on_key_release = self.key_released
)
self.player.mouse.push_handlers(
on_mouse_press = self.mouse_pressed,
on_mouse_release = self.mouse_released
)
def mouse_pressed(self, x, y, symbol, modifiers):
if self.controls_ignored or not self.use_mouse:
return
if symbol == MOUSE_LEFT:
self.dispatch_event('player_key_pressed', 'Button1')
elif symbol == MOUSE_RIGHT:
self.dispatch_event('player_key_pressed', 'Button2')
def mouse_released(self, x, y, symbol, modifiers):
if self.controls_ignored or not self.use_mouse:
return
if symbol == MOUSE_LEFT:
self.dispatch_event('player_key_released', 'Button1')
elif symbol == MOUSE_RIGHT:
self.dispatch_event('player_key_released', 'Button2')
def key_pressed(self, symbol, modifiers):
if self.controls_ignored:
return
try:
key = self.symbol_to_key[symbol]
if self.use_mouse and key in ('Button1', 'Button2'):
return
self.dispatch_event('player_key_pressed', key)
except KeyError:
pass
def key_released(self, symbol, modifiers):
if self.controls_ignored:
return
try:
key = self.symbol_to_key[symbol]
if self.use_mouse and key in ('Button1', 'Button2'):
return
self.dispatch_event('player_key_released', key)
except KeyError:
pass
def is_down(self, key):
if self.controls_ignored:
return False
if self.use_mouse:
if key == 'Button1':
return self.player.mouse.left
elif key == 'Button2':
return self.player.mouse.right
return self.player.keyboard[self.keys[key]]
def is_down_index(self, value):
if self.controls_ignored:
return False
if self.use_mouse:
if value == BUTTON1:
return self.player.mouse.left
elif value == BUTTON2:
return self.player.mouse.right
return self.player.keyboard[self.keyList[value]]
def set_key(self, index, key):
code = self.player.keyboard.convert(key.getValue())
name_key = KEY_LIST[index]
name = key.getName()
self.keyList[index] = code
self.keyNames[index] = name
self.keys[name_key] = code
self.symbol_to_key[code] = name_key
def set_score(self, value):
self.score = value
self.dispatch_event('score_changed', value)
def set_lives(self, value):
self.lives = value
self.dispatch_event('lives_changed', value)
def reset(self, frame = False):
self.controls_ignored = False
if frame:
return
header = self.player.gameData.header
self.lives = header.initialLives
self.score = header.initialScore
Player.register_event_type('player_key_pressed')
Player.register_event_type('player_key_released')
Player.register_event_type('score_changed')
Player.register_event_type('lives_changed')
class Players(PlayerChild):
items = None
def initialize(self):
header = self.player.gameData.header
self.items = items = []
for control in header.controls.items:
player = self.new(Player)
player.initialize(control)
items.append(player)
def reset(self, frame = False):
for player in self.items:
player.reset(frame) | joaormatos/anaconda | mmfparser/player/players.py | Python | gpl-3.0 | 6,005 |
import os
import logging.config
class MyLogger(object):
# set logging to both file and screen
def __init__(self):
logging.config.fileConfig('../config/logging.conf')
self.logger = logging.getLogger('scrapeforum')
self.logger.addHandler(logging.StreamHandler())
self.errorIndicated = False
def isErrorIndicated(self):
return self.errorIndicated
def debug(self, msg):
self.logger.debug(msg)
def info(self, msg):
self.logger.info(msg)
def warning(self, msg):
self.logger.warning(msg)
def error(self, msg):
self.logger.error(msg)
self.errorIndicated = True
def critical(self, msg):
self.logger.critical(msg)
self.errorIndicated = True
def fatal(self, msg):
self.logger.fatal(msg)
self.errorIndicated = True
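# Illustrative usage sketch (assumes ../config/logging.conf exists and
# defines the 'scrapeforum' logger configured in __init__ above):
#   log = MyLogger()
#   log.info("starting scrape")
#   log.error("request failed")   # error()/critical()/fatal() set the flag
#   if log.isErrorIndicated():
#       pass  # e.g. abort or send a notification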
| stackingfunctions/scrapeforum | python/src/mylogger.py | Python | gpl-3.0 | 862 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013-2014 by Łukasz Mierzwa
:contact: [email protected]
"""
from __future__ import unicode_literals
from setuptools import setup, find_packages
try:
from pip.req import parse_requirements
from pip.download import PipSession
required = {'install_requires': [str(r.req) for r in parse_requirements(
'requirements.txt', session=PipSession())]}
except ImportError:
required = {}
setup(
name='upaas-common',
version='0.3.2-dev',
license='GPLv3',
description='uPaaS common classes',
author='Łukasz Mierzwa',
author_email='[email protected]',
url='https://github.com/prymitive/upaas-common',
packages=find_packages(exclude=["tests"]),
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
platforms=['Linux'],
**required
)
| prymitive/upaas-common | setup.py | Python | gpl-3.0 | 1,130 |
from .. import NogginConstants
from . import PBConstants
from . import Formations
def sReady(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_READY)
Formations.fReady(team, workingPlay)
def sNoFieldPlayers(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_NO_FIELD_PLAYERS)
Formations.fNoFieldPlayers(team, workingPlay)
def sOneField(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_ONE_FIELD_PLAYER)
# no kickoff formation- would be identical to fOneField
# Formation for ball in our goal box
if shouldUseDubD(team):
Formations.fOneDubD(team, workingPlay)
elif useFinder(team):
Formations.fFinder(team, workingPlay)
else:
Formations.fOneField(team, workingPlay)
def sTwoField(team, workingPlay):
'''
This is our standard strategy. Based around the 2008.
'''
workingPlay.setStrategy(PBConstants.S_TWO_FIELD_PLAYERS)
# Kickoff Formations
if useKickoffFormation(team):
Formations.fKickoff(team, workingPlay)
# Formation for ball in our goal box
elif shouldUseDubD(team):
Formations.fTwoDubD(team, workingPlay)
# ball hasn't been seen by me or teammates in a while
elif useFinder(team):
Formations.fFinder(team, workingPlay)
else:
# Keep a defender and a chaser
Formations.fDefensiveTwoField(team, workingPlay)
def sThreeField(team, workingPlay):
'''
This is our pulled goalie strategy.
'''
workingPlay.setStrategy(PBConstants.S_THREE_FIELD_PLAYERS)
# Kickoff Formations
if useKickoffFormation(team):
Formations.fKickoff(team, workingPlay)
# Formation for ball in our goal box
elif shouldUseDubD(team):
Formations.fThreeDubD(team, workingPlay)
# ball hasn't been seen by me or teammates in a while
elif useFinder(team):
Formations.fFinder(team, workingPlay)
# Standard spread formation
else:
Formations.fThreeField(team, workingPlay)
def sTwoZone(team, workingPlay):
"""
We attempt to keep one robot forward and one back.
Whichever robot is closer to the ball becomes the chaser.
"""
sTwoField(team, workingPlay)
def sWin(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_WIN)
# Kickoff Formations
if useKickoffFormation(team):
Formations.fKickoff(team,workingPlay)
# Formation for ball in our goal box
elif shouldUseDubD(team):
Formations.fTwoDubD(team, workingPlay)
# ball hasn't been seen by me or teammates in a while
elif useFinder(team):
Formations.fFinder(team, workingPlay)
# Move the defender forward if the ball is close enough to opp goal, then become a middie
elif team.brain.ball.x > PBConstants.S_MIDDIE_DEFENDER_THRESH:
Formations.fNeutralDefenseTwoField(team, workingPlay)
else:
Formations.fDefensiveTwoField(team, workingPlay)
# Add strategies for testing various roles
def sTestDefender(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_TEST_DEFENDER)
Formations.fTestDefender(team, workingPlay)
def sTestOffender(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_TEST_OFFENDER)
Formations.fTestOffender(team, workingPlay)
def sTestMiddie(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_TEST_MIDDIE)
Formations.fTestMiddie(team, workingPlay)
def sTestChaser(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_TEST_CHASER)
# Game Ready Setup
if team.brain.gameController.currentState == 'gameReady' or\
team.brain.gameController.currentState =='gameSet':
# team is kicking off
Formations.fReady(team, workingPlay)
else:
Formations.fTestChaser(team, workingPlay)
#not sure this is the best place for these yet...
def useKickoffFormation(team):
if (team.brain.gameController.timeSincePlay() <
PBConstants.KICKOFF_FORMATION_TIME):
return True
else:
return False
def useFinder(team):
if (PBConstants.USE_FINDER and
team.brain.ball.timeSinceSeen() >
PBConstants.FINDER_TIME_THRESH):
return True
else:
return False
def shouldUseDubD(team):
if not PBConstants.USE_DUB_D:
return False
ballY = team.brain.ball.y
ballX = team.brain.ball.x
goalie = team.teammates[0]
return (
( ballY > NogginConstants.MY_GOALBOX_BOTTOM_Y + 5. and
ballY < NogginConstants.MY_GOALBOX_TOP_Y - 5. and
ballX < NogginConstants.MY_GOALBOX_RIGHT_X - 5.) or
( ballY > NogginConstants.MY_GOALBOX_TOP_Y - 5. and
ballY < NogginConstants.MY_GOALBOX_BOTTOM_Y + 5. and
ballX < NogginConstants.MY_GOALBOX_RIGHT_X + 5. and
goalie.isTeammateRole(PBConstants.CHASER) )
)
| northern-bites/nao-man | noggin/playbook/Strategies.py | Python | gpl-3.0 | 4,793 |
"""
Helcio Macedo
Checksum Verifier v1.0
https://github.com/neomacedo/ScriptsUteis
-----------------------------------------------------------
Script used to compare whether a local file is the same as its remote counterpart.
"""
import hashlib
import urllib2
import optparse
# Remote address to file
remote_url = 'https://raw.githubusercontent.com/neomacedo/Area51/master/arquivo_teste.txt'
# Local address to the file
local_url = '../../GitHub/Area51/arquivo_teste.txt'
# Returns the MD5 checksum of the local file
def get_local_md5_sum(url):
try:
return hashlib.md5(open(url, 'rb').read()).hexdigest()
except Exception as ex:
print 'Failed to get local file checksum! \n Exception: ' + str(ex.message)
# Returns the MD5 checksum of the remote file
def get_remote_md5_sum(url):
try:
# Parse Options
opt = optparse.OptionParser()
opt.add_option('--url', '-u', default=remote_url)
options, args = opt.parse_args()
remote = urllib2.urlopen(options.url)
md5hash = hashlib.md5()
data = remote.read()
md5hash.update(data)
return md5hash.hexdigest()
except Exception as ex:
print 'Failed to get remote file checksum! \n Exception: ' + str(ex.message)
# Main Method
if __name__ == '__main__':
print 'MD5 Local: ' + get_local_md5_sum(local_url)
print 'MD5 Remote: ' + get_remote_md5_sum(remote_url)
if get_local_md5_sum(local_url) == get_remote_md5_sum(remote_url):
print 'Local file is the same as the remote file'
# EOF
| neomacedo/ScriptsUteis | Python/checksum_comparator.py | Python | gpl-3.0 | 1,563 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#######################################################################
#
# VidCutter - media cutter & joiner
#
# copyright © 2018 Pete Alexandrou
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
import logging
import logging.handlers
import os
import shutil
import signal
import sys
import traceback
from typing import Callable, Optional
from PyQt5.QtCore import (pyqtSlot, QCommandLineOption, QCommandLineParser, QDir, QFileInfo, QProcess,
QProcessEnvironment, QSettings, QSize, QStandardPaths, QTimerEvent, Qt)
from PyQt5.QtGui import (QCloseEvent, QContextMenuEvent, QDragEnterEvent, QDropEvent, QGuiApplication, QMouseEvent,
QResizeEvent, QSurfaceFormat, qt_set_sequence_auto_mnemonic)
from PyQt5.QtWidgets import qApp, QMainWindow, QMessageBox, QSizePolicy
from vidcutter.videoconsole import ConsoleHandler, ConsoleWidget, VideoLogger
from vidcutter.videocutter import VideoCutter
from vidcutter.libs.singleapplication import SingleApplication
from vidcutter.libs.widgets import VCMessageBox
import vidcutter
import vidcutter.libs.mpv as mpv
if sys.platform == 'win32':
from vidcutter.libs.taskbarprogress import TaskbarProgress
# noinspection PyUnresolvedReferences
from PyQt5.QtWinExtras import QWinTaskbarButton
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
class MainWindow(QMainWindow):
EXIT_CODE_REBOOT = 666
TEMP_PROJECT_FILE = 'vidcutter_reboot.vcp'
WORKING_FOLDER = os.path.join(QDir.tempPath(), 'vidcutter')
def __init__(self):
super(MainWindow, self).__init__()
self.video, self.resizeTimer = '', 0
self.parse_cmdline()
self.init_settings()
self.init_logger()
self.init_scale()
self.init_cutter()
self.setWindowTitle(qApp.applicationName())
self.setContentsMargins(0, 0, 0, 0)
self.statusBar().showMessage('Ready')
self.statusBar().setStyleSheet('border: none; padding: 0; margin: 0;')
self.setAcceptDrops(True)
self.show()
if sys.platform == 'win32' and TaskbarProgress.isValidWinVer():
self.win_taskbar_button = QWinTaskbarButton(self)
self.win_taskbar_button.setWindow(self.windowHandle())
self.win_taskbar_button.progress().setVisible(True)
self.win_taskbar_button.progress().setValue(0)
self.console.setGeometry(int(self.x() - (self.width() / 2)), self.y() + int(self.height() / 3), 750, 300)
if not self.video and os.path.isfile(os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE)):
self.video = os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE)
if self.video:
self.file_opener(self.video)
def init_scale(self) -> None:
screen_size = qApp.desktop().availableGeometry(-1)
self.scale = 'LOW' if screen_size.width() <= 1024 else 'NORMAL'
self.setMinimumSize(self.get_size(self.scale))
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
@pyqtSlot(str)
def file_opener(self, filename: str) -> None:
try:
if QFileInfo(filename).suffix() == 'vcp':
self.cutter.openProject(project_file=filename)
if filename == os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE):
os.remove(os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE))
else:
self.cutter.loadMedia(filename)
except (FileNotFoundError, PermissionError):
QMessageBox.critical(self, 'Error loading file', sys.exc_info()[0])
logging.exception('Error loading file')
qApp.restoreOverrideCursor()
self.restart()
@staticmethod
def get_size(mode: str='NORMAL') -> QSize:
modes = {
'LOW': QSize(800, 425),
'NORMAL': QSize(930, 680),
'HIGH': QSize(1850, 1300)
}
return modes[mode]
def init_logger(self) -> None:
try:
log_path = self.get_app_config_path()
except AttributeError:
if sys.platform == 'win32':
log_path = os.path.join(QDir.homePath(), 'AppData', 'Local', qApp.applicationName().lower())
elif sys.platform == 'darwin':
log_path = os.path.join(QDir.homePath(), 'Library', 'Preferences', qApp.applicationName().lower())
else:
log_path = os.path.join(QDir.homePath(), '.config', qApp.applicationName().lower())
os.makedirs(log_path, exist_ok=True)
self.console = ConsoleWidget(self)
self.consoleLogger = ConsoleHandler(self.console)
handlers = [logging.handlers.RotatingFileHandler(os.path.join(log_path, '%s.log'
% qApp.applicationName().lower()),
maxBytes=1000000, backupCount=1),
self.consoleLogger]
if self.parser.isSet(self.debug_option) or self.verboseLogs:
# noinspection PyTypeChecker
handlers.append(logging.StreamHandler())
logging.setLoggerClass(VideoLogger)
logging.basicConfig(handlers=handlers,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M',
level=logging.INFO)
logging.captureWarnings(capture=True)
sys.excepthook = MainWindow.log_uncaught_exceptions
if os.getenv('DEBUG', False):
logging.info('appconfig folder: {}'.format(log_path))
def init_settings(self) -> None:
try:
settings_path = self.get_app_config_path()
except AttributeError:
if sys.platform == 'win32':
settings_path = os.path.join(QDir.homePath(), 'AppData', 'Local', qApp.applicationName().lower())
elif sys.platform == 'darwin':
settings_path = os.path.join(QDir.homePath(), 'Library', 'Preferences',
qApp.applicationName().lower())
else:
settings_path = os.path.join(QDir.homePath(), '.config', qApp.applicationName().lower())
os.makedirs(settings_path, exist_ok=True)
settings_file = '{}.ini'.format(qApp.applicationName().lower())
self.settings = QSettings(os.path.join(settings_path, settings_file), QSettings.IniFormat)
if self.settings.value('geometry') is not None:
self.restoreGeometry(self.settings.value('geometry'))
if self.settings.value('windowState') is not None:
self.restoreState(self.settings.value('windowState'))
self.theme = self.settings.value('theme', 'light', type=str)
self.startupvol = self.settings.value('volume', 100, type=int)
self.verboseLogs = self.settings.value('verboseLogs', 'off', type=str) in {'on', 'true'}
@staticmethod
def log_uncaught_exceptions(cls, exc, tb) -> None:
logging.critical(''.join(traceback.format_tb(tb)))
logging.critical('{0}: {1}'.format(cls, exc))
def parse_cmdline(self) -> None:
self.parser = QCommandLineParser()
self.parser.setApplicationDescription('\nVidCutter - the simplest + fastest media cutter & joiner')
self.parser.addPositionalArgument('video', 'Preload video file', '[video]')
self.parser.addPositionalArgument('project', 'Open VidCutter project file (.vcp)', '[project]')
self.debug_option = QCommandLineOption(['debug'], 'debug mode; verbose console output & logging. '
'This will basically output what is being logged to file to the '
'console stdout. Mainly useful for debugging problems with your '
'system video and/or audio stack and codec configuration.')
self.parser.addOption(self.debug_option)
self.parser.addVersionOption()
self.parser.addHelpOption()
self.parser.process(qApp)
self.args = self.parser.positionalArguments()
if self.parser.isSet(self.debug_option):
os.environ['DEBUG'] = '1'
if len(self.args) > 0:
file_path = QFileInfo(self.args[0]).absoluteFilePath()
if not os.path.exists(file_path):
sys.stderr.write('\nERROR: File not found: %s\n' % file_path)
self.close()
qApp.exit(1)
self.video = file_path
def init_cutter(self) -> None:
self.cutter = VideoCutter(self)
self.cutter.errorOccurred.connect(self.errorHandler)
self.setCentralWidget(self.cutter)
qApp.setWindowIcon(VideoCutter.getAppIcon(encoded=False))
@staticmethod
def get_bitness() -> int:
from struct import calcsize
return calcsize('P') * 8
@pyqtSlot()
def reboot(self) -> None:
if self.cutter.mediaAvailable:
self.cutter.saveProject(reboot=True)
self.save_settings()
qApp.exit(MainWindow.EXIT_CODE_REBOOT)
def save_settings(self) -> None:
self.settings.setValue('lastFolder', self.cutter.lastFolder)
self.settings.setValue('geometry', self.saveGeometry())
self.settings.setValue('windowState', self.saveState())
self.settings.sync()
@pyqtSlot(bool)
def lock_gui(self, locked: bool=True) -> None:
if locked:
qApp.setOverrideCursor(Qt.WaitCursor)
self.cutter.cliplist.setEnabled(False)
self.setEnabled(False)
else:
self.setEnabled(True)
self.cutter.cliplist.setEnabled(True)
qApp.restoreOverrideCursor()
qApp.processEvents()
@property
def flatpak(self) -> bool:
return sys.platform.startswith('linux') and QFileInfo(__file__).absolutePath().startswith('/app/')
def get_app_config_path(self) -> str:
if self.flatpak:
confpath = QProcessEnvironment.systemEnvironment().value('XDG_CONFIG_HOME', '')
if len(confpath):
return confpath
else:
return os.path.join(QDir.homePath(), '.var', 'app', vidcutter.__desktopid__, 'config')
return QStandardPaths.writableLocation(QStandardPaths.AppConfigLocation).replace(
qApp.applicationName(), qApp.applicationName().lower())
@staticmethod
def get_path(path: str=None, override: bool=False) -> str:
if override:
if getattr(sys, 'frozen', False) and getattr(sys, '_MEIPASS', False):
# noinspection PyProtectedMember, PyUnresolvedReferences
return os.path.join(sys._MEIPASS, path)
return os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), path)
return ':{}'.format(path)
@pyqtSlot(str)
def errorHandler(self, msg: str, title: str=None) -> None:
qApp.restoreOverrideCursor()
QMessageBox.critical(self, 'An error occurred' if title is None else title, msg, QMessageBox.Ok)
logging.error(msg)
@staticmethod
@pyqtSlot()
def cleanup():
shutil.rmtree(MainWindow.WORKING_FOLDER, ignore_errors=True)
def contextMenuEvent(self, event: QContextMenuEvent) -> None:
if event.reason() in {QContextMenuEvent.Mouse, QContextMenuEvent.Keyboard}:
self.cutter.appmenu.popup(event.globalPos())
super(MainWindow, self).contextMenuEvent(event)
def mousePressEvent(self, event: QMouseEvent) -> None:
if event.button() == Qt.LeftButton and self.cutter.mediaAvailable:
self.cutter.cliplist.clearSelection()
self.cutter.timeCounter.clearFocus()
self.cutter.frameCounter.clearFocus()
# noinspection PyBroadException
try:
if hasattr(self.cutter, 'notify'):
self.cutter.notify.close()
except BaseException:
pass
event.accept()
def dragEnterEvent(self, event: QDragEnterEvent) -> None:
if event.mimeData().hasUrls():
event.accept()
def dropEvent(self, event: QDropEvent) -> None:
filename = event.mimeData().urls()[0].toLocalFile()
self.file_opener(filename)
event.accept()
def resizeEvent(self, event: QResizeEvent) -> None:
try:
if self.isEnabled() and self.cutter.mediaAvailable and self.cutter.thumbnailsButton.isChecked():
if self.cutter.seekSlider.thumbnailsOn:
self.cutter.sliderWidget.setLoader(True)
self.cutter.sliderWidget.hideThumbs()
if self.resizeTimer:
self.killTimer(self.resizeTimer)
self.resizeTimer = self.startTimer(500)
except AttributeError:
pass
def timerEvent(self, event: QTimerEvent) -> None:
try:
self.cutter.seekSlider.reloadThumbs()
self.killTimer(self.resizeTimer)
self.resizeTimer = 0
except AttributeError:
pass
def closeEvent(self, event: QCloseEvent) -> Optional[Callable]:
event.accept()
try:
if not self.isEnabled():
exitwarn = VCMessageBox('Warning', 'Media is currently being processed',
'Are you sure you want to exit now?', parent=self)
exitwarn.addButton('Yes', QMessageBox.NoRole)
cancelbutton = exitwarn.addButton('No', QMessageBox.RejectRole)
exitwarn.exec_()
res = exitwarn.clickedButton()
if res == cancelbutton:
event.ignore()
return
noexit, callback = self.cutter.saveWarning()
if noexit:
event.ignore()
if callback is not None:
return callback()
else:
return
except AttributeError:
logging.exception('warning dialogs on app exit exception', exc_info=True)
self.console.deleteLater()
if hasattr(self, 'cutter'):
self.save_settings()
try:
if hasattr(self.cutter.videoService, 'smartcut_jobs'):
[
self.cutter.videoService.cleanup(job.files.values())
for job in self.cutter.videoService.smartcut_jobs
]
if hasattr(self.cutter, 'mpvWidget'):
self.cutter.mpvWidget.shutdown()
except AttributeError:
pass
try:
qApp.exit(0)
except mpv.MPVError:
pass
def main():
qt_set_sequence_auto_mnemonic(False)
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QGuiApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
if hasattr(Qt, 'AA_Use96Dpi'):
QGuiApplication.setAttribute(Qt.AA_Use96Dpi, True)
if hasattr(Qt, 'AA_ShareOpenGLContexts'):
fmt = QSurfaceFormat()
fmt.setDepthBufferSize(24)
QSurfaceFormat.setDefaultFormat(fmt)
QGuiApplication.setAttribute(Qt.AA_ShareOpenGLContexts, True)
# if sys.platform == 'darwin':
# qApp.setStyle('Fusion')
app = SingleApplication(vidcutter.__appid__, sys.argv)
app.setApplicationName(vidcutter.__appname__)
app.setApplicationVersion(vidcutter.__version__)
app.setDesktopFileName(vidcutter.__desktopid__)
app.setOrganizationDomain(vidcutter.__domain__)
app.setQuitOnLastWindowClosed(True)
win = MainWindow()
win.stylename = app.style().objectName().lower()
app.setActivationWindow(win)
app.messageReceived.connect(win.file_opener)
app.aboutToQuit.connect(MainWindow.cleanup)
exit_code = app.exec_()
if exit_code == MainWindow.EXIT_CODE_REBOOT:
if sys.platform == 'win32':
if hasattr(win.cutter, 'mpvWidget'):
win.close()
QProcess.startDetached('"{}"'.format(qApp.applicationFilePath()))
else:
QProcess.startDetached(' '.join(sys.argv))
sys.exit(exit_code)
if __name__ == '__main__':
main()
| ozmartian/vidcutter | vidcutter/__main__.py | Python | gpl-3.0 | 17,036 |
# This file is part of pyderiv. http://forre.st/pyderiv
#
# pyderiv is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyderiv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyderiv. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
def f():
a = noise(3)
b = 8 + noise(2)
return a, b, a + b, a /b, a*a, a*b, b*b
def go():
for x in v: print x
for x in v: print E(x)
print
for i in xrange(len(r)):
for j in xrange(len(r)):
print "%6.01f" % cov(r[i], r[j]),
print
from noise import noise, var, cov, cov_matrix, E
r = f()
v = r
go()
print cov_matrix(r)
print
print
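# What follows is an illustrative Monte-Carlo cross-check: noise(), E() and
# cov() are redefined in terms of random sampling, f() is drawn many times,
# and go() reprints the sample means and covariances for comparison with the
# analytic values computed above by the noise module.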
import random
import math
def noise(variance):
return random.gauss(0, math.sqrt(variance))
def avg(l):
l = list(l)
return sum(l)/len(l)
def E(x): return x
def cov(i, j):
return avg(r[i]*r[j] for r in samples) - avg(r[i] for r in samples)*avg(r[j] for r in samples)
def var(a): return cov(a, a)
samples = [f() for i in xrange(1000000)]
v = map(avg, zip(*samples))
r = xrange(len(samples[0]))
go()
| forrestv/pyderiv | noise_test.py | Python | gpl-3.0 | 1,536 |
# oppia/api/media.py
from django.conf import settings
from django.contrib.auth import authenticate
from django.http import HttpResponseRedirect, Http404, HttpResponse, JsonResponse
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
from oppia.api.publish import get_messages_array
from oppia.av.models import UploadedMedia
from oppia.av import handler
@csrf_exempt
def upload_view(request):
# get the messages to clear possible previous unprocessed messages
get_messages_array(request)
if request.method != 'POST':
return HttpResponse(status=405)
required = ['username', 'password']
validation_errors = []
for field in required:
if field not in request.POST:
validation_errors.append("field '{0}' missing".format(field))
# authenticate user
username = request.POST.get("username")
password = request.POST.get("password")
user = authenticate(username=username, password=password)
if user is None or not user.is_active:
messages.error(request, "Invalid username/password")
response_data = {
'message': _('Authentication errors'),
'messages': get_messages_array(request)
}
return JsonResponse(response_data, status=401)
if validation_errors:
return JsonResponse({'errors': validation_errors}, status=400, )
result = handler.upload(request, user)
if result['result'] == UploadedMedia.UPLOAD_STATUS_SUCCESS:
media = result['media']
embed_code = media.get_embed_code(request.build_absolute_uri(media.file.url))
return JsonResponse({'embed_code': embed_code}, status=201)
else:
response = {'messages': result['errors']}
return JsonResponse(response, status=400)
| DigitalCampus/django-nurhi-oppia | oppia/api/media.py | Python | gpl-3.0 | 1,856 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel (seodiv) by Hernan_Ar_c
# ------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host='http://www.seodiv.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Todos", action="todas", url=host,thumbnail='https://s32.postimg.org/544rx8n51/series.png', fanart='https://s32.postimg.org/544rx8n51/series.png'))
return itemlist
def todas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron ='<\/div><img src="([^"]+)".*?\/>.*?'
patron+='<div class="title-topic">([^<]+)<\/div>.*?'
patron +='<div class="sh-topic">([^<]+)<\/div><\/a>.*?'
patron +='<div class="read-more-top"><a href="([^"]+)" style='
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedplot, scrapedurl in matches:
url = host+scrapedurl
title = scrapedtitle.decode('utf-8')
thumbnail = scrapedthumbnail
fanart = 'https://s32.postimg.org/gh8lhbkb9/seodiv.png'
plot = scrapedplot
itemlist.append( Item(channel=item.channel, action="temporadas" ,title=title , url=url, thumbnail=thumbnail, fanart=fanart, plot= plot, contentSerieName=title, extra=''))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
templist = []
data = httptools.downloadpage(item.url).data
url_base= item.url
patron = '<a class="collapsed" data-toggle="collapse" data-parent="#accordion" href=.*? aria-expanded="false" aria-controls=.*?>([^<]+)<\/a>'
matches = re.compile(patron,re.DOTALL).findall(data)
temp=1
if 'Temporada' in str(matches):
for scrapedtitle in matches:
url = url_base
tempo = re.findall(r'\d+',scrapedtitle)
if tempo:
title ='Temporada'+' '+ tempo[0]
else:
title = scrapedtitle.lower()
thumbnail = item.thumbnail
plot = item.plot
fanart = scrapertools.find_single_match(data,'<img src="([^"]+)"/>.*?</a>')
itemlist.append( Item(channel=item.channel, action="episodiosxtemp" , title=title , fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot, fanart = fanart, temp=str(temp),contentSerieName=item.contentSerieName))
temp = temp+1
if config.get_library_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName, extra1 = item.extra1, temp=str(temp)))
return itemlist
else:
itemlist=episodiosxtemp(item)
if config.get_library_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName, extra1 = item.extra1, temp=str(temp)))
return itemlist
def episodios(item):
logger.debug('pelisalacarta.channels.seodiv episodios')
itemlist = []
templist = temporadas(item)
for tempitem in templist:
logger.debug(tempitem)
itemlist += episodiosxtemp(tempitem)
return itemlist
def episodiosxtemp(item):
logger.debug("pelisalacarta.channels.seodiv episodiosxtemp")
itemlist = []
data = httptools.downloadpage(item.url).data
tempo = item.title
if 'Temporada' in item.title:
item.title = item.title.replace('Temporada', 'temporada')
item.title = item.title.strip()
item.title = item.title.replace(' ','-')
patron ='<li><a href="([^"]+)">.*?(Capitulo|Pelicula).*?([\d]+)'
matches = re.compile(patron,re.DOTALL).findall(data)
idioma = scrapertools.find_single_match(data,' <p><span class="ah-lead-tit">Idioma:</span> <span id="l-vipusk">([^<]+)</span></p>')
for scrapedurl, scrapedtipo, scrapedtitle in matches:
url = host+scrapedurl
title =''
thumbnail = item.thumbnail
plot = item.plot
fanart=''
if 'temporada' in item.title and item.title in scrapedurl and scrapedtipo =='Capitulo' and item.temp !='':
title = item.contentSerieName+' '+item.temp+'x'+scrapedtitle+' ('+idioma+')'
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title, fulltitle=item.fulltitle, url=url, thumbnail=item.thumbnail, plot=plot))
if 'temporada' not in item.title and item.title not in scrapedurl and scrapedtipo =='Capitulo' and item.temp =='':
if item.temp == '': temp = '1'
title = item.contentSerieName+' '+temp+'x'+scrapedtitle+' ('+idioma+')'
if '#' not in scrapedurl:
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title, fulltitle=item.fulltitle, url=url, thumbnail=item.thumbnail, plot=plot))
if 'temporada' not in item.title and item.title not in scrapedurl and scrapedtipo =='Pelicula':
title = scrapedtipo +' '+scrapedtitle
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title, fulltitle=item.fulltitle, url=url, thumbnail=item.thumbnail, plot=plot))
return itemlist
| r0balo/pelisalacarta | python/main-classic/channels/seodiv.py | Python | gpl-3.0 | 5,982 |
import numpy as np
#numpy is used for later classifiers
#Note: this is just a template with all required methods
#text is the text represented as a string
#textName is optional, indicates the name of the text, used for debug
#args are additional arguments for the feature calculator
#debug indicates whether to display debug info
#f is features
class BaseFeature():
def __init__(self, text, textName="", args=[], debug=True):
self.text = text.lower()
self.args = args
self.debug = debug
self.textName = textName
#Features, not yet calculated
self.f = np.array([])
def debugStart(self):
if self.debug:
print "--BaseFeatures--"
def beginCalc(self):
if self.debug:
print "Feature calculation begining on " + self.textName
print "------"
def endCalc(self):
if self.debug:
print "Feature calculation finished on " + self.textName
print "Features Calculated:"
print self.f
print
def calc(self):
self.debugStart()
self.beginCalc()
#Calculations go here
self.endCalc()
return self.f
def getFeatures(self):
return self.f
def setText(self, text):
if self.debug:
print self.textName + "'s text set."
self.text = text.lower()
def setName(self, name):
self.textName = name
if self.debug:
print "Name set to: " + self.textName
| darksteelcode/authorship | features/base.py | Python | gpl-3.0 | 1,482 |
#!/usr/bin/env python
#
# json.py
# TurboHvZ
#
# Copyright (C) 2008 Ross Light
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
A JSON-based API.
Most rules would look like::
@jsonify.when("isinstance(obj, YourClass)")
def jsonify_yourclass(obj):
return [obj.val1, obj.val2]
@jsonify can convert your objects to following types: lists, dicts, numbers and
strings.
"""
__author__ = 'Ross Light'
__date__ = 'March 30, 2008'
__docformat__ = 'reStructuredText'
__all__ = []
from turbojson.jsonify import jsonify
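# Illustrative sketch only (not part of the original module); 'Player' and
# its attributes below are assumptions, shown to make the docstring's
# pattern concrete:
#
# @jsonify.when("isinstance(obj, Player)")
# def jsonify_player(obj):
#     return {'name': obj.name, 'is_zombie': obj.is_zombie}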
| zombiezen/turbohvz | hvz/json.py | Python | gpl-3.0 | 1,149 |
import mt, os, mimetypes
from time import strftime
class HTTPOut():
class mtEntry():
def __init__(self):
self.html = False
self.css = False
self.js = False
self.data = ""
self.target = ""
def __init__(self, session = None):
self.session = session
self.http_version = ""
self.status = ""
self.cookies = {}
self.headers = {}
self.mt_entrys = [];
self.text_entry = "";
self.binary_entry = "";
self.binary_start = 0;
self.binary_end = 0;
# CSS
def cssFile(self, filename): self.css(self._getFileContents(filename))
def css(self, data):
newEntry = HTTPOut.mtEntry()
newEntry.data = data
newEntry.css = True
self.mt_entrys.append(newEntry)
# Javascript
def jsFunction(self, funcName, *args):
processed_args = []
for arg in args:
if ( isinstance(arg, basestring) ): processed_args.append("\"" + arg.replace("\"", "\\\"") + "\"")
elif ( isinstance(arg, list) or isinstance(arg, dict) ): processed_args.append(str(arg))
else: processed_args.append(str(arg))
self.js(funcName + "(" + ", ".join(processed_args) + ");")
def jsFile(self, filename):
self.js(self._getFileContents(filename))
def js(self, data):
newEntry = HTTPOut.mtEntry()
newEntry.data = data
newEntry.js = True
self.mt_entrys.append(newEntry)
# HTML
def htmlFile(self, filename, target="", append=False): self.html(self._getFileContents(filename), target, append)
def html(self, data, target="", append = False):
newEntry = HTTPOut.mtEntry()
newEntry.data = data
if ( append ): newEntry.target = "+" + target
else: newEntry.target = target
newEntry.html = True
self.mt_entrys.append(newEntry)
def file(self, filepath):
self.binary_entry = filepath
def text(self, data):
self.text_entry += data
def _getFileContents(self, filepath):
if ( os.path.isfile(filepath) ):
f = open(filepath, "rb")
data = f.read()
f.close()
return str(data)
mt.log.error("404 Not Found: " + filepath)
self.status = "404 Not Found"
return None
def append(self, targ):
if ( targ == None ): return
self.cookies.update(targ.cookies)
self.headers.update(targ.headers)
self.mt_entrys.extend(targ.mt_entrys)
self.text_entry += targ.text_entry
if ( targ.http_version != "" ): self.http_version = targ.http_version
if ( targ.status != "" ): self.status = targ.status
if ( targ.binary_entry != "" ): self.binary_entry = targ.binary_entry
def send(self, socket, header_only = False):
self.headers["Date"] = strftime('%a, %d %b %Y %H:%M:%S GMT')
self.headers["Server"] = "metaTower/0.5"
content = ""
socket.settimeout(None)
if ( self.binary_entry != "" ):
if ( os.path.isfile(self.binary_entry) ):
self.status = "200 OK"
binary_size = os.path.getsize(self.binary_entry)
if ( self.binary_end == 0 ): self.binary_end = binary_size - 1
if ( self.binary_start != 0 ) or ( self.binary_end != binary_size - 1 ):
self.status = "206 Partial Content"
self.headers["Content-Range"] = "bytes " + str(self.binary_start) + "-" + str(self.binary_end) + "/" + str(binary_size)
self.headers["Accept-Ranges"] = "bytes"
self.headers["Content-Type"] = mimetypes.guess_type(self.binary_entry)[0]
self.headers["Content-Length"] = str(self.binary_end - self.binary_start + 1)
else:
mt.log.error("404 Not Found: " + self.binary_entry)
self.binary_entry = ""
self.status = "404 Not Found"
content = "404 Not Found."
self.headers["Content-Type"] = "text/plain"
self.headers["Content-Length"] = len(content)
elif ( len(self.mt_entrys) > 0 ):
self.headers["Cache-Control"] = "no-store"
locations = ""
data = ""
for entry in self.mt_entrys:
if ( entry.html ):
locations += "html:" + str(len(data)) + "," + str(len(entry.data)) + "," + entry.target + ";"
data += entry.data
if ( entry.js ):
locations += "js:" + str(len(data)) + "," + str(len(entry.data)) + ";"
data += entry.data
if ( entry.css ):
locations += "css:" + str(len(data)) + "," + str(len(entry.data)) + ";"
data += entry.data
content = "!mt:" + str(len(locations)) + ";" + locations + data
self.headers["Content-Type"] = "text/plain"
self.headers["Content-Length"] = len(content)
elif ( self.text_entry != "" ):
if ( not self.headers.has_key("Content-Type") ):
self.headers["Content-Type"] = "text/plain"
if ( not self.headers.has_key("Content-Length") ):
self.headers["Content-Length"] = len(self.text_entry)
content = self.text_entry
else:
if ( not self.headers.has_key("Content-Length") ):
self.headers["Content-Length"] = 0
# Generate and send the headers.
if ( self.http_version == "" ): self.http_version = "HTTP/1.1"
if ( self.status == "" ): self.status = "200 OK"
headers = self.http_version + " " + self.status + "\r\n"
for key in self.headers.keys():
headers += key + ": " + str(self.headers[key]) + "\r\n"
if ( len(self.cookies) > 0 ):
for key, value in self.cookies.items():
headers += "Set-Cookie: " + key + "=" + value + "\r\n"
headers += "\r\n"
socket.send(headers)
if ( header_only ): return
# send the content.
if ( self.binary_entry != "" ):
f = None
try:
f = open(self.binary_entry, "rb")
f.seek(self.binary_start)
while (self.binary_start <= self.binary_end):
chunk_size = 4096
if ( (self.binary_start+chunk_size) > (self.binary_end) ): chunk_size = (self.binary_end-self.binary_start)+1
chunk = f.read(chunk_size)
if not chunk: break
socket.send(chunk)
self.binary_start += len(chunk)
f.close()
f = None
except Exception as inst:
mt.log.error("Error reading file:" + str(inst))
finally:
if ( f != None ): f.close()
else:
socket.send(content)
| andr3wmac/metaTower | packages/http/HTTPOut.py | Python | gpl-3.0 | 7,153 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-sheet Analyser: Python library which allows to analyse time-sheets.
# Copyright (C) 2017 Carlos Serra Toro.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
import matplotlib.pyplot as plt
from matplotlib.dates import MONDAY
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter
from TimeSheetAnalyser.utils.misc import time_to_float_time, normalise_number,\
average_sequence
def daily_attendance_plot_presenter(time_sheet):
""" Represent a TimeSheet as a graphic view of all the attendance
entries separately, grouped by days, as a bar plot.
:param time_sheet: An object of type TimeSheet.
:return: True
"""
dates = time_sheet.get_dates(sort=True)
days_in_week = 5
min_hour = 24
max_hour = 0
start_times, end_times = [], []
for date in dates:
start_time = time_to_float_time(time_sheet[date][0].start_time)
end_time = time_to_float_time(time_sheet[date][-1].end_time)
start_times.append(start_time)
end_times.append(end_time)
if int(start_time) < min_hour:
min_hour = int(start_time)
if int(math.ceil(end_time)) > max_hour:
max_hour = int(math.ceil(end_time))
hours_range = [min_hour, min(24, max_hour)]
fig, axes = plt.subplots()
# Plots the starting and ending times for each day.
axes.plot_date(dates, start_times, fmt='og', visible=True)
axes.plot_date(dates, end_times, fmt='or', visible=True)
# Prints the time-spans for each day.
for date in dates:
for time_span in time_sheet[date]:
start_time = time_to_float_time(time_span.start_time)
end_time = time_to_float_time(time_span.end_time)
normalised_start_time = normalise_number(start_time,
input_range=hours_range)
normalised_end_time = normalise_number(end_time,
input_range=hours_range)
axes.axvline(x=date, ymin=normalised_start_time,
ymax=normalised_end_time, color='b')
# Plots the averaged starting & ending times.
average_line_fmt = {'fmt': 'm-', 'linewidth': 2, 'visible': True}
average_start_times = average_sequence(start_times, win_size=days_in_week)
axes.plot_date(dates, average_start_times, **average_line_fmt)
average_end_times = average_sequence(end_times, win_size=days_in_week)
axes.plot_date(dates, average_end_times, **average_line_fmt)
axes.grid(True)
axes.set_ylim(hours_range)
axes.xaxis.set_major_locator(MonthLocator())
axes.xaxis.set_minor_locator(WeekdayLocator(MONDAY))
axes.xaxis.set_major_formatter(DateFormatter("%B %Y"))
fig.autofmt_xdate()
hours_list = range(hours_range[0], hours_range[1] + 1)
plt.yticks(hours_list, [str(hour) for hour in hours_list],
rotation='horizontal')
plt.show()
return True
| serratoro/timesheet-analyser | TimeSheetAnalyser/presenters/daily_attendance_plot_presenter.py | Python | gpl-3.0 | 3,657 |
#!/usr/bin/env python3
from __future__ import absolute_import
import six.moves.SimpleHTTPServer
from six.moves.BaseHTTPServer import HTTPServer
from six.moves.socketserver import ForkingMixIn
import six.moves.urllib.request, six.moves.urllib.parse, six.moves.urllib.error
import six.moves.urllib.parse
import sys
import os
import os.path
import json
import re
import subprocess
import signal
import time
import errno
import traceback
import collections
import crossrefs
import identifiers
import codesearch
from logger import log
from six.moves import range
def index_path(tree_name):
return config['trees'][tree_name]['index_path']
# Simple globbing implementation, except ^ and $ are also allowed.
def parse_path_filter(filter):
filter = filter.replace('(', '\\(')
filter = filter.replace(')', '\\)')
filter = filter.replace('|', '\\|')
filter = filter.replace('.', '\\.')
def star_repl(m):
if m.group(0) == '*':
return '[^/]*'
else:
return '.*'
filter = re.sub(r'\*\*|\*', star_repl, filter)
filter = filter.replace('?', '.')
def repl(m):
s = m.group(1)
components = s.split(',')
s = '|'.join(components)
return '(' + s + ')'
filter = re.sub('{([^}]*)}', repl, filter)
return filter
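# Example (illustrative): parse_path_filter('dom/**/*.{cpp,h}') yields
# r'dom/.*/[^/]*\.(cpp|h)': '**' matches across directories, '*' stops at
# '/', '?' matches one character, and '{a,b}' becomes an alternation group.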
def escape_regex(searchString):
# a version of re.escape that doesn't escape every non-ASCII character,
# and therefore doesn't mangle utf-8 encoded characters.
# https://bugzilla.mozilla.org/show_bug.cgi?id=1446220
return re.sub(r"[(){}\[\].*?|^$\\+-]", r"\\\g<0>", searchString)
def parse_search(searchString):
pieces = searchString.split(' ')
result = {}
for i in range(len(pieces)):
if pieces[i].startswith('path:'):
result['pathre'] = parse_path_filter(pieces[i][len('path:'):])
elif pieces[i].startswith('pathre:'):
result['pathre'] = pieces[i][len('pathre:'):]
elif pieces[i].startswith('context:'):
# Clamp the context to an integer between 0 and 10.
try:
# This may throw.
context_lines = int(pieces[i][len('context:'):])
context_lines = max(0, context_lines)
context_lines = min(10, context_lines)
result['context_lines'] = context_lines
except:
pass
elif pieces[i].startswith('symbol:'):
result['symbol'] = ' '.join(pieces[i:])[len('symbol:'):].strip().replace('.', '#')
elif pieces[i].startswith('re:'):
result['re'] = (' '.join(pieces[i:]))[len('re:'):]
break
elif pieces[i].startswith('text:'):
result['re'] = escape_regex((' '.join(pieces[i:]))[len('text:'):])
break
elif pieces[i].startswith('id:'):
result['id'] = pieces[i][len('id:'):]
else:
result['default'] = escape_regex(' '.join(pieces[i:]))
break
return result
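# Example (illustrative): parse_search('path:*.cpp context:4 text:foo bar')
# returns {'pathre': r'[^/]*\.cpp', 'context_lines': 4, 're': 'foo bar'};
# the free-text tail after 'text:' is regex-escaped and consumes the rest
# of the query string.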
def is_trivial_search(parsed):
if 'symbol' in parsed:
return False
for k in parsed:
if k == 'context_lines':
continue
if len(parsed[k]) >= 3:
return False
return True
class SearchResults(object):
def __init__(self):
self.results = []
self.qualified_results = []
self.pathre = None
self.compiled = {}
def set_path_filter(self, path):
if not path or path == '.*':
self.pathre = None
return
try:
self.pathre = re.compile(path, re.IGNORECASE)
except re.error:
# In case the pattern is not a valid RE, treat it as literal string.
self.pathre = re.compile(re.escape(path), re.IGNORECASE)
def add_results(self, results):
self.results.append(results)
def add_qualified_results(self, qual, results, modifier):
self.qualified_results.append((qual, results, modifier))
max_count = 1000
max_work = 750
path_precedences = ['normal', 'thirdparty', 'test', 'generated']
key_precedences = ["Files", "IDL", "Definitions", "Assignments", "Uses", "Declarations", "Textual Occurrences"]
def categorize_path(self, path):
'''
Given a path, decide whether it's "normal"/"test"/"generated". These
are the 3 top-level groups by which results are categorized.
These are hardcoded heuristics that probably could be better defined
in the `config.json` metadata, with a means for trees like gecko to be
able to leverage in-tree build meta-information like moz.build and the
various mochitest.ini files, etc.
'''
def is_test(p):
# Except /unit/ and /androidTest/, all other paths contain the substring 'test', so we can exit early
# in case it is not present.
if '/unit/' in p or '/androidTest/' in p:
return True
if 'test' not in p:
return False
return ('/test/' in p or '/tests/' in p or '/mochitest/' in p or 'testing/' in p or
'/jsapi-tests/' in p or '/reftests/' in p or '/reftest/' in p or
'/crashtests/' in p or '/crashtest/' in p or
'/googletest/' in p or '/gtest/' in p or '/gtests/' in p or
'/imptests/' in p)
if '__GENERATED__' in path:
return 'generated'
elif path.startswith('third_party/'):
return "thirdparty"
elif is_test(path):
return 'test'
else:
return 'normal'
def compile_result(self, kind, qual, pathr, line_modifier):
'''
Given path-binned results of a specific analysis `kind` for a
pretty symbol (`qual`), categorize the path into generated/test/normal
and nest the results under a [pathkind, qkind, path] nested key
hierarchy where the values are an array of crossref.rs `SearchResult`
json results plus the line_modifier fixup hack.
Path filtering requested via `set_path_filter` is performed at this
stage.
line_modifier is a (closed-over) fixup function that was passed in to
add_qualified_results that's provided the given `line`. It's only ever
used by identifier_search in order to fixup "bounds" to compensate for
prefix searches.
'''
if qual:
qkind = '%s (%s)' % (kind, qual)
else:
qkind = kind
path = pathr['path']
lines = pathr['lines']
pathkind = self.categorize_path(path)
if self.pathre and not self.pathre.search(path):
return
# compiled is a map {pathkind: {qkind: {path: [(lines, line_modifier)]}}}
kind_results = self.compiled.setdefault(pathkind, collections.OrderedDict()).setdefault(qkind, {})
path_results = kind_results.setdefault(path, ([], line_modifier))
path_results[0].extend(lines)
def sort_compiled(self):
'''
Traverse the `compiled` state in `path_precedences` order, and then
its "qkind" children in their inherent order (which is derived from
the use of `key_precedences` by `get()`), transforming and propagating
the results, applying a `max_count` result limit.
Additional transformations that are performed:
- result de-duplication is performed so that a given (path, line) tuple
can only be emitted once. Because of the intentional order of
`key_precedences` this means that semantic matches should preclude
their results from being duplicated in the more naive text search
results.
- line_modifier's bounds fixups as mentioned in `compile_result` are
applied which helps the bolding logic in the display logic on the
(web) client.
'''
count = 0
line_hash = {}
result = collections.OrderedDict()
for pathkind in self.path_precedences:
for qkind in self.compiled.get(pathkind, []):
paths = list(self.compiled[pathkind][qkind].keys())
paths.sort()
for path in paths:
                    # See `compile_result` docs for line_modifier above.
(lines, line_modifier) = self.compiled[pathkind][qkind][path]
lines.sort(key=lambda l: l['lno'])
lines_out = []
for line in lines:
lno = line['lno']
key = (path, lno)
if key in line_hash:
continue
line_hash[key] = True
if line_modifier:
line_modifier(line)
lines_out.append(line)
count += 1
if count == self.max_count:
break
if lines_out or qkind == 'Files':
l = result.setdefault(pathkind, collections.OrderedDict()).setdefault(qkind, [])
l.append({'path': path, 'lines': lines_out})
if count == self.max_count:
break
if count == self.max_count:
break
if count == self.max_count:
break
return result
def get(self, work_limit):
'''
Work-limiting/result-bounding logic to process the returned results,
capping them based on some heuristics. Limiting is performed for each
"key" type (AKA analysis kind), with the harder result limit occurring
in `sort_compiled` where a hard result limit `max_count` is enforced.
See `compile_result` and `sort_compiled` for more info.
'''
# compile_result will categorize each path that it sees.
# It will build a list of paths indexed by pathkind, qkind.
# Later I'll iterate over this, remove dupes, sort, and keep the top ones.
self.qualified_results.sort(key=lambda x: x[0])
for kind in self.key_precedences:
work = 0
for (qual, results, line_modifier) in self.qualified_results:
if work > self.max_work and work_limit:
log('WORK LIMIT HIT')
break
for pathr in results.get(kind, []):
self.compile_result(kind, qual, pathr, line_modifier)
work += 1
for results in self.results:
for pathr in results.get(kind, []):
self.compile_result(kind, None, pathr, None)
work += 1
r = self.sort_compiled()
return r
def search_files(tree_name, path):
pathFile = os.path.join(index_path(tree_name), 'repo-files')
objdirFile = os.path.join(index_path(tree_name), 'objdir-files')
try:
# We set the locale to make grep much faster.
results = subprocess.check_output(['grep', '-Eih', path, pathFile, objdirFile], env={'LC_CTYPE': 'C'}, universal_newlines=True)
except subprocess.CalledProcessError:
return []
results = results.strip().split('\n')
results = [ {'path': f, 'lines': []} for f in results ]
return results[:1000]
def demangle(sym):
try:
return subprocess.check_output(['c++filt', '--no-params', sym], universal_newlines=True).strip()
except subprocess.CalledProcessError:
return sym
def identifier_search(search, tree_name, needle, complete, fold_case):
needle = re.sub(r'\\(.)', r'\1', needle)
pieces = re.split(r'\.|::', needle)
    # If the last segment of the search needle is too short, return no results,
    # because such a short prefix would match far too many identifiers.
if not complete and len(pieces[-1]) < 3:
return {}
# Fixup closure for use by add_qualified_results to reduce the range of the
# match's bounds to the prefix that was included in the search needle from
# the full bounds of the search result. (So if the search was "foo::bar"
# and we matched "foo::bartab" and "foo::barhat", the idea I guess is that
# only the "bar" portion would be highlighted assuming the bounds
# previously were referencing "bartab" and "barhat".)
def line_modifier(line):
if 'bounds' in line:
(start, end) = line['bounds']
end = start + len(pieces[-1])
line['bounds'] = [start, end]
ids = identifiers.lookup(tree_name, needle, complete, fold_case)
for (i, (qualified, sym)) in enumerate(ids):
if i > 500:
break
q = demangle(sym)
if q == sym:
q = qualified
results = crossrefs.lookup(tree_name, sym)
search.add_qualified_results(q, results, line_modifier)
def get_json_search_results(tree_name, query):
try:
search_string = query['q'][0]
except:
search_string = ''
try:
fold_case = query['case'][0] != 'true'
except:
fold_case = True
try:
regexp = query['regexp'][0] == 'true'
except:
regexp = False
try:
path_filter = query['path'][0]
except:
path_filter = ''
parsed = parse_search(search_string)
# Should we just be leaving this in parsed?
context_lines = 0
if 'context_lines' in parsed:
context_lines = parsed['context_lines']
if path_filter:
parsed['pathre'] = parse_path_filter(path_filter)
if regexp:
if 'default' in parsed:
del parsed['default']
if 're' in parsed:
del parsed['re']
parsed['re'] = search_string
if 'default' in parsed and len(parsed['default']) == 0:
del parsed['default']
if is_trivial_search(parsed):
results = {}
return json.dumps(results)
title = search_string
if not title:
title = 'Files ' + path_filter
search = SearchResults()
work_limit = False
hit_timeout = False
if 'symbol' in parsed:
search.set_path_filter(parsed.get('pathre'))
symbols = parsed['symbol']
title = 'Symbol ' + symbols
search.add_results(crossrefs.lookup(tree_name, symbols))
elif 're' in parsed:
path = parsed.get('pathre', '.*')
(substr_results, timed_out) = codesearch.search(parsed['re'], fold_case, path, tree_name, context_lines)
search.add_results({'Textual Occurrences': substr_results})
hit_timeout |= timed_out
elif 'id' in parsed:
search.set_path_filter(parsed.get('pathre'))
identifier_search(search, tree_name, parsed['id'], complete=True, fold_case=fold_case)
elif 'default' in parsed:
work_limit = True
path = parsed.get('pathre', '.*')
(substr_results, timed_out) = codesearch.search(parsed['default'], fold_case, path, tree_name, context_lines)
search.add_results({'Textual Occurrences': substr_results})
hit_timeout |= timed_out
if 'pathre' not in parsed:
file_results = search_files(tree_name, parsed['default'])
search.add_results({'Files': file_results})
identifier_search(search, tree_name, parsed['default'], complete=False, fold_case=fold_case)
elif 'pathre' in parsed:
path = parsed['pathre']
search.add_results({'Files': search_files(tree_name, path)})
else:
assert False
results = {}
results = search.get(work_limit)
results['*title*'] = title
results['*timedout*'] = hit_timeout
return json.dumps(results)
class Handler(six.moves.SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
pid = os.fork()
if pid:
# Parent process
log('request(handled by %d) %s', pid, self.path)
timedOut = [False]
def handler(signum, frame):
log('timeout %d, killing', pid)
timedOut[0] = True
os.kill(pid, signal.SIGKILL)
signal.signal(signal.SIGALRM, handler)
signal.alarm(15)
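            # The forked child gets 15 seconds to produce a response; on timeout
            # it is SIGKILLed and a 504 is returned below.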
t = time.time()
while True:
try:
(pid2, status) = os.waitpid(pid, 0)
break
except OSError as e:
if e.errno != errno.EINTR: raise e
failed = timedOut[0]
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
log('error pid %d - %f', pid, time.time() - t)
failed = True
else:
log('finish pid %d - %f', pid, time.time() - t)
if failed:
self.send_response(504)
self.end_headers()
else:
# Child process
try:
self.process_request()
os._exit(0)
except:
e = traceback.format_exc()
log('exception\n%s', e)
os._exit(1)
def log_request(self, *args):
pass
def process_request(self):
url = six.moves.urllib.parse.urlparse(self.path)
path_elts = url.path.split('/')
# Strip any extra slashes.
path_elts = [ elt for elt in path_elts if elt != '' ]
if len(path_elts) >= 2 and path_elts[1] == 'search':
tree_name = path_elts[0]
query = six.moves.urllib.parse.parse_qs(url.query)
j = get_json_search_results(tree_name, query)
if 'json' in self.headers.get('Accept', ''):
self.generateJson(j)
else:
j = j.replace("</", "<\\/").replace("<script", "<\\script").replace("<!", "<\\!")
template = os.path.join(index_path(tree_name), 'templates/search.html')
self.generateWithTemplate({'{{BODY}}': j, '{{TITLE}}': 'Search'}, template)
elif len(path_elts) >= 2 and path_elts[1] == 'define':
tree_name = path_elts[0]
query = six.moves.urllib.parse.parse_qs(url.query)
symbol = query['q'][0]
results = crossrefs.lookup(tree_name, symbol)
definition = results['Definitions'][0]
filename = definition['path']
lineno = definition['lines'][0]['lno']
url = '/' + tree_name + '/source/' + filename + '#' + str(lineno)
self.send_response(301)
self.send_header("Location", url)
self.end_headers()
else:
return six.moves.SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def generateJson(self, data):
databytes = data.encode('utf-8')
self.send_response(200)
self.send_header("Vary", "Accept")
self.send_header("Content-type", "application/json;charset=utf-8")
self.send_header("Content-Length", str(len(databytes)))
self.end_headers()
self.wfile.write(databytes)
def generateWithTemplate(self, replacements, templateFile):
output = open(templateFile).read()
for (k, v) in replacements.items():
output = output.replace(k, v)
databytes = output.encode('utf-8')
self.send_response(200)
self.send_header("Vary", "Accept")
self.send_header("Content-type", "text/html;charset=utf-8")
self.send_header("Content-Length", str(len(databytes)))
self.end_headers()
self.wfile.write(databytes)
config_fname = sys.argv[1]
status_fname = sys.argv[2]
config = json.load(open(config_fname))
os.chdir(config['mozsearch_path'])
crossrefs.load(config)
codesearch.load(config)
identifiers.load(config)
# We *append* to the status file because other server components
# also write to this file when they are done starting up, and we
# don't want to clobber those messages.
with open(status_fname, "a") as status_out:
status_out.write("router.py loaded\n")
class ForkingServer(ForkingMixIn, HTTPServer):
pass
server_address = ('', 8000)
httpd = ForkingServer(server_address, Handler)
httpd.serve_forever()
| bill-mccloskey/searchfox | router/router.py | Python | mpl-2.0 | 20,106 |
from sqlalchemy.schema import CreateTable
from community_share import store, config, setup
from community_share.models.user import UserReview
config.load_from_environment()
table_sql = CreateTable(UserReview.__table__).compile(store.engine)
print(table_sql)
| seanastephens/communityshare | update_scripts/make_sql.py | Python | mpl-2.0 | 261 |
# This file is part of the Simulation Manager project for VecNet.
# For copyright and licensing information about this project, see the
# NOTICE.txt and LICENSE.md files in its top-level directory; they are
# available at https://github.com/vecnet/simulation-manager
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Tests for the submit_group.py script.
"""
import random
import sys
from crc_nd.utils.test_io import WritesOutputFiles
from django.test import LiveServerTestCase
from mock import patch
from path import path
from vecnet.simulation import ExecutionRequest, sim_model, Simulation, SimulationGroup as SimGroup, submission_status
from .constants import TEST_OUTPUT_ROOT
from .mixins import UsesDatabaseApi
from sim_manager import scripts, working_dirs
from sim_manager.models import SimulationGroup
from sim_manager.scripts import api_urls, batch, input_files, submit_group
from sim_manager.scripts.batch import test_utils
from sim_manager.scripts.constants import SIMULATION_DEFINITION_FILENAME, SIMULATION_SCRIPT
class MainTests(LiveServerTestCase, UsesDatabaseApi, WritesOutputFiles):
"""
Tests for the script's main function.
"""
@classmethod
def setUpClass(cls):
super(MainTests, cls).setUpClass()
cls.setup_database_api_user()
cls.set_output_root(TEST_OUTPUT_ROOT)
working_dirs.TestingApi.use_testing_root()
        # Add the scripts package's directory to the module search path so that the
        # submit_group.py script can load the batch system. When the script is executed
        # at the command line, the package directory is added to the search path
        # automatically. But here in the test suite the package is imported, so its
        # directory is not added automatically. Therefore, we explicitly add it.
scripts_dir = path(scripts.__file__).dirname()
sys.path.append(scripts_dir)
cls.simulation_script = scripts_dir / SIMULATION_SCRIPT
@classmethod
def tearDownClass(cls):
cls.remove_database_api_user()
working_dirs.TestingApi.reset_root_to_default()
sys.path.pop()
@patch('sim_manager.scripts.submit_group.BATCH_SYSTEM', batch.MOCK)
def test_run_script(self):
group = SimulationGroup.objects.create(submitter=self.test_user)
self.group_id = group.id
self.assertEqual(group.script_status, submission_status.READY_TO_RUN)
self.sim_group = SimGroup()
simulation_1 = Simulation(model=sim_model.OPEN_MALARIA, model_version='32', id_on_client='349',
output_url='http://ingestor.example.com/output-files/')
simulation_1.input_files['scenario.xml'] = 'http://www.example.com/data/scenarios/1234/scenario.xml'
simulation_2 = Simulation(model=sim_model.EMOD, model_version='1.6', cmd_line_args=['--foo', 'bar'],
id_on_client='350', output_url=simulation_1.output_url)
simulation_2.input_files['config.json'] = 'https://files.vecnet.org/4710584372'
simulation_2.input_files['campaign.json'] = 'https://files.vecnet.org/678109'
self.sim_group.simulations = [simulation_1, simulation_2]
self.execution_request = ExecutionRequest(simulation_group=self.sim_group)
group.setup_working_dir(self.execution_request)
group_url = self.live_server_url + ('/api/v1/sim-groups/%s/' % group.id)
simulations_url = self.live_server_url + '/api/v1/simulations/'
api_urls.write_for_group(group.working_dir, group_url, simulations_url)
self.check_expected_state = self.expect_script_started
group.working_dir.chdir()
self.initialize_output_dir()
stdout = self.get_output_dir() / 'stdout.txt'
with stdout.open('w') as f:
exit_status = submit_group.main('foo', 'bar', stdout=f, test_callback=self.callback)
self.assertEqual(exit_status, 0)
group = SimulationGroup.objects.get(id=group.id)
self.assertEqual(group.script_status, submission_status.SCRIPT_DONE)
def callback(self):
if self.check_expected_state:
self.check_expected_state()
else:
self.fail('callback unexpectedly called')
def expect_script_started(self):
"""
Confirm that the submission script was started.
"""
self.assertGroupScriptStatus(submission_status.STARTED_SCRIPT)
self.check_expected_state = self.expect_cached_files
def expect_cached_files(self):
"""
Confirm that the submission script cached input files.
"""
self.assertGroupScriptStatus(submission_status.CACHING_FILES)
self.assertTrue(input_files.TestingApi.add_to_cache_mock.called)
args, kwargs = input_files.TestingApi.add_to_cache_mock.call_args
self.assertEqual((self.execution_request.input_files,), args)
self.check_expected_state = self.expect_simulation_created
self.simulations_created = 0
test_utils.Mocks.submit_job.reset_mock()
test_utils.Mocks.submit_job.return_value = generate_job_id()
def expect_simulation_created(self):
"""
Confirm that the submission script has created a new simulation in the database.
"""
self.assertGroupScriptStatus(submission_status.SUBMITTING_JOBS)
group = SimulationGroup.objects.get(id=self.group_id)
self.assertEqual(group.simulation_set.count(), self.simulations_created + 1)
self.simulations_created += 1
# Check that the working directory is set up properly for the simulation that was just created
simulation = group.simulation_set.order_by('created_when').last()
self.assertTrue(simulation.working_dir.isdir())
sim_definition_path = simulation.working_dir / SIMULATION_DEFINITION_FILENAME
self.assertTrue(sim_definition_path.isfile())
sim_definition = Simulation.read_json_file(sim_definition_path)
expected_sim_definition = self.sim_group.simulations[self.simulations_created - 1]
self.assertEqual(sim_definition.model, expected_sim_definition.model)
self.assertEqual(sim_definition.model_version, expected_sim_definition.model_version)
self.assertEqual(sim_definition.input_files, expected_sim_definition.input_files)
self.assertEqual(sim_definition.cmd_line_args, expected_sim_definition.cmd_line_args)
self.assertEqual(sim_definition.id_on_client, expected_sim_definition.id_on_client)
self.assertEqual(sim_definition.output_url, expected_sim_definition.output_url)
# Check that the simulation was submitted to the batch system.
self.assertTrue(test_utils.Mocks.submit_job.called)
args, kwargs = test_utils.Mocks.submit_job.call_args
executable, working_dir, cmd_args = args[0], args[1], args[2:]
self.assertEqual(executable, sys.executable)
self.assertEqual(working_dir, simulation.working_dir)
self.assertEqual(list(cmd_args), [self.simulation_script])
self.assertEqual(simulation.batch_job_id, test_utils.Mocks.submit_job.return_value)
test_utils.Mocks.submit_job.reset_mock()
if self.simulations_created < len(self.sim_group.simulations):
test_utils.Mocks.submit_job.return_value = generate_job_id()
else:
self.check_expected_state = None
def assertGroupScriptStatus(self, expected_status):
group = SimulationGroup.objects.get(id=self.group_id)
self.assertEqual(group.script_status, expected_status)
def generate_job_id():
return str(random.randint(1, 100000)) | vecnet/simulation-manager | sim_manager/tests/test_submit_group.py | Python | mpl-2.0 | 7,814 |
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""HTML Processors."""
from .processor import Processor
from .text import TextProcessor
import logging
import os
import xmltodict
import json
from xml.parsers.expat import ExpatError
from .html import HTMLProcessor
class XmlProcessor(HTMLProcessor):
"""Processor for XMLdocuments.
When processing, converts document to json one line including all attributes
Immediately processes with TextProcessor after processing.
"""
item_type = "xml"
text_processor = TextProcessor()
def handle_spider_item(self, data, url_object):
"""Immediately process the spider item."""
return self.process(data, url_object)
def handle_queue_item(self, item):
"""Immediately process the queue item."""
result = self.process_file(item.file_path, item.url)
if os.path.exists(item.file_path):
os.remove(item.file_path)
return result
def process(self, data, url_object):
"""Process XML data.
        Converts the document to JSON before processing with TextProcessor.
        If the XML is not well formed, it is treated as HTML instead.
"""
logging.info("Process XML %s" % url_object.url)
try:
data = json.dumps(xmltodict.parse(data))
return self.text_processor.process(data, url_object)
except ExpatError:
return super(XmlProcessor,self).process(data,url_object)
Processor.register_processor(XmlProcessor.item_type, XmlProcessor)
| os2webscanner/os2webscanner | scrapy-webscanner/scanners/processors/xml.py | Python | mpl-2.0 | 2,214 |
"""
Usage:
import_localities < Localities.csv
"""
from django.contrib.gis.geos import GEOSGeometry
from django.utils.text import slugify
from ..import_from_csv import ImportFromCSVCommand
from ...utils import parse_nptg_datetime
from ...models import Locality
class Command(ImportFromCSVCommand):
"""
Imports localities from the NPTG
"""
def handle_rows(self, rows):
existing_localities = Locality.objects.defer('search_vector', 'latlong').in_bulk()
slugs = {
locality.slug: locality for locality in existing_localities.values()
}
to_update = []
to_create = []
for row in rows:
modified_at = parse_nptg_datetime(row["ModificationDateTime"])
locality_code = row['NptgLocalityCode']
if locality_code in existing_localities:
locality = existing_localities[locality_code]
if modified_at and modified_at == locality.modified_at:
continue
else:
locality = Locality()
created_at = parse_nptg_datetime(row["CreationDateTime"])
locality.modified_at = modified_at
locality.created_at = created_at
locality.name = row['LocalityName'].replace('\'', '\u2019')
locality.short_name = row['ShortName']
if locality.name == locality.short_name:
locality.short_name = ''
locality.qualifier_name = row['QualifierName']
locality.admin_area_id = row['AdministrativeAreaCode']
locality.latlong = GEOSGeometry(f"SRID=27700;POINT({row['Easting']} {row['Northing']})")
if row['NptgDistrictCode'] == '310': # bogus code seemingly used for localities with no district
locality.district_id = None
else:
locality.district_id = row['NptgDistrictCode']
if locality.id:
to_update.append(locality)
else:
locality.id = locality_code
slug = slugify(locality.get_qualified_name())
locality.slug = slug
i = 0
while locality.slug in slugs:
i += 1
locality.slug = f"{slug}-{i}"
slugs[locality.slug] = locality
to_create.append(locality)
Locality.objects.bulk_update(to_update, fields=[
'name', 'qualifier_name', 'short_name', 'admin_area', 'latlong', 'modified_at', 'created_at', 'district'
], batch_size=100)
Locality.objects.bulk_create(to_create)
| jclgoodwin/bustimes.org.uk | busstops/management/commands/import_localities.py | Python | mpl-2.0 | 2,629 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sqlalchemy as sa
from relengapi.blueprints.tokenauth import types
from relengapi.lib import db
from relengapi.lib.permissions import p
class Token(db.declarative_base('relengapi')):
__tablename__ = 'auth_tokens'
def __init__(self, permissions=None, **kwargs):
if permissions is not None:
kwargs['_permissions'] = ','.join((str(a) for a in permissions))
super(Token, self).__init__(**kwargs)
id = sa.Column(sa.Integer, primary_key=True)
typ = sa.Column(sa.String(4), nullable=False)
description = sa.Column(sa.Text, nullable=False)
user = sa.Column(sa.Text, nullable=True)
disabled = sa.Column(sa.Boolean, nullable=False)
_permissions = sa.Column(sa.Text, nullable=False)
def to_jsontoken(self):
tok = types.JsonToken(id=self.id, typ=self.typ, description=self.description,
permissions=[str(a) for a in self.permissions],
disabled=self.disabled)
if self.user:
tok.user = self.user
return tok
@property
def permissions(self):
token_permissions = [p.get(permissionstr)
for permissionstr in self._permissions.split(',')]
# silently ignore any nonexistent permissions; this allows us to remove unused
# permissions without causing tokens permitting those permissions to fail
# completely
return [a for a in token_permissions if a]
| Callek/build-relengapi | relengapi/blueprints/tokenauth/tables.py | Python | mpl-2.0 | 1,677 |
"""
Django settings for cadasta project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.utils.translation import ugettext_lazy as _
from .languages import FORM_LANGS # noqa
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@=fy$)xx+6yjo*us@&+m6$14@l-s6#atg(msm=9%)9@%b7l%h('
ALLOWED_HOSTS = ['*']
AUTH_USER_MODEL = 'accounts.User'
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.gis',
'django.contrib.humanize',
'corsheaders',
'core',
'geography',
'accounts',
'organization',
'spatial',
'questionnaires',
'resources',
'buckets',
'party',
'xforms',
'search',
'tasks',
'django_filters',
'crispy_forms',
'parsley',
'widget_tweaks',
'django_countries',
'leaflet',
'rest_framework',
'rest_framework_gis',
'rest_framework.authtoken',
'rest_framework_docs',
'djoser',
'tutelary',
'allauth',
'allauth.account',
'allauth.socialaccount',
'sass_processor',
'simple_history',
'jsonattrs',
'compressor',
'django_otp',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'audit_log.middleware.UserLoggingMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
'accounts.middleware.UserLanguageMiddleware',
'django_otp.middleware.OTPMiddleware',
)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework_tmp_scoped_token.TokenAuth',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_VERSIONING_CLASS':
'rest_framework.versioning.NamespaceVersioning',
'DEFAULT_VERSION': 'v1',
'EXCEPTION_HANDLER': 'core.views.api.exception_handler',
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 100,
'HTML_SELECT_CUTOFF': 100,
}
SITE_NAME = 'Cadasta'
BASE_TEMPLATE_DIR = os.path.join(os.path.dirname(BASE_DIR), 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_TEMPLATE_DIR,
os.path.join(BASE_TEMPLATE_DIR, 'allauth')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
],
},
},
]
AUTHENTICATION_BACKENDS = [
'core.backends.Auth',
'django.contrib.auth.backends.ModelBackend',
'accounts.backends.AuthenticationBackend',
'accounts.backends.PhoneAuthenticationBackend'
]
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
DJOSER = {
'SITE_NAME': SITE_NAME,
'SET_PASSWORD_RETYPE': True,
'PASSWORD_RESET_CONFIRM_RETYPE': True,
'PASSWORD_RESET_CONFIRM_URL':
'account/password/reset/confirm/{uid}/{token}',
'ACTIVATION_URL': 'account/activate/{uid}/{token}',
# 'SEND_ACTIVATION_EMAIL': True,
'SERIALIZERS': {
'set_password_retype': 'accounts.serializers.ChangePasswordSerializer'
}
}
CORS_ORIGIN_ALLOW_ALL = False
LOGIN_REDIRECT_URL = '/dashboard/'
LOGIN_URL = '/account/login/'
LOGOUT_URL = '/account/logout/'
WSGI_APPLICATION = 'config.wsgi.application'
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = LOGIN_URL
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2
ACCOUNT_FORMS = {
'signup': 'accounts.forms.RegisterForm',
'profile': 'accounts.forms.ProfileForm',
}
ACCOUNT_ADAPTER = 'accounts.adapter.DefaultAccountAdapter'
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_LOGOUT_REDIRECT_URL = LOGIN_URL
ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 86400
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': ('django.contrib.auth.'
'password_validation.UserAttributeSimilarityValidator'),
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 10,
}
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
{
'NAME':
'accounts.validators.CharacterTypePasswordValidator'
},
{
'NAME':
'accounts.validators.EmailSimilarityValidator'
},
]
OSM_ATTRIBUTION = _(
"Base map data © <a href=\"http://openstreetmap.org\">"
"OpenStreetMap</a> contributors under "
"<a href=\"http://opendatacommons.org/licenses/odbl/\">ODbL</a>"
)
DIGITALGLOBE_ATTRIBUTION = _("Imagery © DigitalGlobe")
DIGITALGLOBE_TILESET_URL_FORMAT = (
'https://{{s}}.tiles.mapbox.com/v4/digitalglobe.{}'
'/{{z}}/{{x}}/{{y}}.png?access_token='
'pk.eyJ1IjoiZGlnaXRhbGdsb2JlIiwiYSI6ImNpaHhtenBmZjAzYW1'
'1a2tvY2p3MnpjcGcifQ.vF1gH0mGgK31yeHC1k1Tqw'
)
LEAFLET_CONFIG = {
'TILES': [
(
_("OpenStreetMap"),
'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
{'attribution': OSM_ATTRIBUTION,
'maxZoom': 19}
),
(
_("+Vivid imagery"),
DIGITALGLOBE_TILESET_URL_FORMAT.format('n6ngnadl'),
{'attribution': DIGITALGLOBE_ATTRIBUTION,
'maxZoom': 22}
),
(
_("Recent imagery"),
DIGITALGLOBE_TILESET_URL_FORMAT.format('nal0g75k'),
{'attribution': DIGITALGLOBE_ATTRIBUTION,
'maxZoom': 22}
),
(
_("+Vivid imagery with OpenStreetMap"),
DIGITALGLOBE_TILESET_URL_FORMAT.format('n6nhclo2'),
{'attribution': (OSM_ATTRIBUTION, DIGITALGLOBE_ATTRIBUTION),
'maxZoom': 22}
),
(
_("Recent imagery with OpenStreetMap"),
DIGITALGLOBE_TILESET_URL_FORMAT.format('nal0mpda'),
{'attribution': (OSM_ATTRIBUTION, DIGITALGLOBE_ATTRIBUTION),
'maxZoom': 22}
),
],
'RESET_VIEW': False,
'PLUGINS': {
'draw': {
'js': '/static/leaflet/draw/leaflet.draw.js'
},
'groupedlayercontrol': {
'js': '/static/js/leaflet.groupedlayercontrol.min.js',
'css': '/static/css/leaflet.groupedlayercontrol.min.css'
}
}
}
# Invalid names for Cadasta organizations, projects, and usernames
CADASTA_INVALID_ENTITY_NAMES = ['add', 'new']
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
LOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]
LANGUAGES = [
# ('ar', _('Arabic')), (hiding until RTL support fixed)
('en', _('English')),
('fr', _('French')),
# ('de', _('German')), (hiding until translation coverage >= 75%)
('es', _('Spanish')),
('id', _('Indonesian')),
('it', _('Italian')),
('pt', _('Portuguese'))
# ('sw', _('Swahili')), (hiding until translation coverage >= 75%)
]
MEASUREMENT_DEFAULT = 'metric'
MEASUREMENTS = [
('metric', _('Metric')),
('imperial', _('Imperial')),
]
DEFAULT_AVATAR = '/static/img/avatar_sm.jpg'
ACCEPTED_AVATAR_TYPES = ['image/png', 'image/jpeg']
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
MEDIA_ROOT = '/opt/cadasta/media'
MEDIA_URL = '/media/'
STATIC_ROOT = '/opt/cadasta/static'
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'sass_processor.finders.CssFinder',
'compressor.finders.CompressorFinder',
)
# Required for bootstrap-sass
# https://github.com/jrief/django-sass-processor
SASS_PRECISION = 8
SASS_PROCESSOR_ROOT = os.path.join(STATIC_ROOT, 'cache')
SASS_PROCESSOR_INCLUDE_DIRS = (
'/opt/cadasta/node_modules',
)
SASS_OUTPUT_STYLE = 'compressed'
# django-compressor
# https://django-compressor.readthedocs.io/en/latest/
# change to false for debug
COMPRESS_ENABLED = True
COMPRESS_CSS_FILTERS = (
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter',
)
COMPRESS_URL = STATIC_URL
COMPRESS_ROOT = STATIC_ROOT
COMPRESS_STORAGE = 'compressor.storage.GzipCompressorFileStorage'
COMPRESS_OUTPUT_DIR = 'cache'
JSONATTRS_SCHEMA_SELECTORS = {
'spatial.spatialunit': (
'project.organization.pk',
'project.pk', 'project.current_questionnaire'
),
'spatial.spatialrelationship': (
'project.organization.pk', 'project.pk',
'project.current_questionnaire'
),
'party.party': (
'project.organization.pk', 'project.pk',
'project.current_questionnaire',
'type'
),
'party.partyrelationship': (
'project.organization.pk', 'project.pk',
'project.current_questionnaire'
),
'party.tenurerelationship': (
'project.organization.pk', 'project.pk',
'project.current_questionnaire'
)
}
ATTRIBUTE_GROUPS = {
'location_attributes': {
'app_label': 'spatial',
'model': 'spatialunit',
'label': 'Location'
},
'party_attributes': {
'app_label': 'party',
'model': 'party',
'label': 'Party'
},
'location_relationship_attributes': {
'app_label': 'spatial',
'model': 'spatialrelationship',
'label': 'Spatial relationship'
},
'party_relationship_attributes': {
'app_label': 'party',
'model': 'partyrelationship',
'label': 'Party relationship'
},
'tenure_relationship_attributes': {
'app_label': 'party',
'model': 'tenurerelationship',
'label': 'Tenure Relationship'
}
}
ICON_URL = ('https://s3-us-west-2.amazonaws.com/cadasta-resources'
'/icons/{}.png')
ICON_LOOKUPS = {
'application/pdf': 'pdf',
'audio/1d-interleaved-parityfec': 'audio',
'audio/32kadpcm': 'audio',
'audio/3gpp': 'audio',
'audio/3gpp2': 'audio',
'audio/ac3': 'audio',
'audio/aac': 'audio',
'audio/aacp': 'audio',
'audio/amr': 'audio',
'audio/amr-wb': 'audio',
'audio/amr-wb+': 'audio',
'audio/aptx': 'audio',
'audio/asc': 'audio',
'audio/ATRAC-ADVANCED-LOSSESS': 'audio',
'audio/ATRAC-X': 'audio',
'audio/ATRAC3': 'audio',
'audio/basic': 'audio',
'audio/BV16': 'audio',
'audio/BV32': 'audio',
'audio/clearmode': 'audio',
'audio/CN': 'audio',
'audio/DAT12': 'audio',
'audio/dls': 'dls',
'audio/dsr-es201108': 'audio',
'audio/dsr-es202050': 'audio',
'audio/dsr-es202211': 'audio',
'audio/dsr-es202212': 'audio',
'audio/DV': 'audio',
'audio/DV14': 'audio',
'audio/eac3': 'audio',
'audio/encaprtp': 'audio',
'audio/EVRC': 'audio',
'audio/EVRC-QCP': 'audio',
'audio/EVRC0': 'audio',
'audio/EVRC1': 'audio',
'audio/EVRCB': 'audio',
'audio/EVRCB0': 'audio',
'audio/EVRCB1': 'audio',
'audio/EVRCNW': 'audio',
'audio/EVRCNW0': 'audio',
'audio/EVRCNW1': 'audio',
'audio/EVRCWB': 'audio',
'audio/EVRCWB0': 'audio',
'audio/EVRCWB1': 'audio',
'audio/EVS': 'audio',
'audio/example': 'audio',
'audio/fwdred': 'audio',
'audio/G711-0': 'audio',
'audio/G719': 'audio',
'audio/G7221': 'audio',
'audio/G722': 'audio',
'audio/G723': 'audio',
'audio/G726-16': 'audio',
'audio/G726-24': 'audio',
'audio/G726-32': 'audio',
'audio/G726-40': 'audio',
'audio/G728': 'audio',
'audio/G729': 'audio',
'audio/G7291': 'audio',
'audio/G729D': 'audio',
'audio/G729E': 'audio',
'audio/GSM': 'audio',
'audio/GSM-EFR': 'audio',
'audio/GSM-HR-08': 'audio',
'audio/iLBC': 'audio',
'audio/ip-mr_v2.5': 'audio',
'audio/L8': 'audio',
'audio/L16': 'audio',
'audio/L20': 'audio',
'audio/L24': 'audio',
'audio/LPC': 'audio',
'audio/mobile-xmf': 'audio',
'audio/MPA': 'audio',
'audio/MP4A-LATM': 'audio',
'audio/mpa-robust': 'audio',
'audio/m4a': 'audio',
'audio/midi': 'audio',
'audio/mpeg1': 'audio',
'audio/MPA2': 'audio',
'audio/mpa-robust3': 'audio',
'audio/mpeg3': 'mp3',
'audio/mpeg': 'mp3',
'audio/mp3': 'mp3',
'audio/mp4': 'mp4',
'audio/mpeg4-generic': 'mp4',
'audio/ogg': 'audio',
'audio/opus': 'audio',
'audio/parityfec': 'audio',
'audio/PCMA': 'audio',
'audio/PCMA-WB': 'audio',
'audio/PCMU': 'audio',
'audio/PCMU-WB': 'audio',
'audio/QCELP': 'audio',
'audio/raptorfec': 'audio',
'audio/RED': 'audio',
'audio/rtp-enc-aescm128': 'audio',
'audio/rtploopback': 'audio',
'audio/rtp-midi': 'audio',
'audio/rtx': 'audio',
'audio/SMV': 'audio',
'audio/SMV0': 'audio',
'audio/SMV-QCP': 'audio',
'audio/sp-midi': 'audio',
'audio/speex': 'audio',
'audio/t140c': 'audio',
'audio/t38': 'audio',
'audio/telephone-event': 'audio',
'audio/tone': 'audio',
'audio/UEMCLIP': 'audio',
'audio/ulpfec': 'audio',
'audio/VDVI': 'audio',
'audio/VMR-WB': 'audio',
'audio/vorbis': 'audio',
'audio/vorbis-config': 'audio',
'audio/wav': 'audio',
'audio/wave': 'audio',
'audio/x-flac': 'audio',
'audio/x-mpeg-3': 'mp3',
'audio/x-midi': 'audio',
'audio/x-wav': 'audio',
'video/mpeg': 'mp3',
'video/x-mpeg': 'mp3',
'video/mp4': 'mp4',
'application/msword': 'doc',
'application/vnd.openxmlformats-officedocument.'
'wordprocessingml.document': 'docx',
'application/msexcel': 'xls',
'application/vnd.ms-excel': 'xls',
'application/vnd.openxmlformats-'
'officedocument.spreadsheetml.sheet': 'xlsx',
'text/xml': 'xml',
'application/xml': 'xml',
'text/csv': 'csv',
'text/plain': 'csv',
'image/jpeg': 'jpg',
'image/png': 'png',
'image/gif': 'gif',
'image/tif': 'tiff',
'image/tiff': 'tiff',
'image/bmp': 'image',
'image/x-windows-bmp': 'image',
'application/gpx+xml': 'gpx',
'application/rtf': 'doc',
'application/x-rtf': 'doc',
'application/postscript': 'doc',
'application/x-troff-msvideo': 'video',
'video/avi': 'avi',
'video/msvideo': 'wmv',
'video/x-msvideo': 'wmv',
'video/x-ms-wmv': 'wmv',
'video/quicktime': 'video',
'application/ogg': 'audio',
'image/svg+xml': 'svg',
'audio/x-ms-wma': 'audio',
'application/vnd.oasis.opendocument.spreadsheet': 'ods',
'application/vnd.oasis.opendocument.text': 'odt',
'application/vnd.oasis.opendocument.presentation': 'odd',
'application/vnd.ms-powerpoint': 'ppt',
'application/vnd.openxmlformats-officedocument.presentationml.'
'presentation': 'pptx',
'application/x-iwork-keynote-sffkey': 'key',
'video/x-m4v': 'mp4',
'video/x-matroska': 'video',
}
MIME_LOOKUPS = {
'gpx': 'application/gpx+xml'
}
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
IMPORTERS = {
'csv': 'organization.importers.csv.CSVImporter',
'xls': 'organization.importers.xls.XLSImporter'
}
ES_SCHEME = 'http'
ES_HOST = 'localhost'
ES_PORT = '9200'
ES_MAX_RESULTS = 10000
TOTP_TOKEN_VALIDITY = 3600
TOTP_DIGITS = 6
SMS_GATEWAY = 'accounts.gateways.FakeGateway'
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')
TWILIO_PHONE = '+123'
# Async Tooling
CELERY_BROKER_TRANSPORT = 'sqs' if os.environ.get('SQS') else 'memory'
CELERY_QUEUE_PREFIX = os.environ.get('QUEUE_PREFIX', 'dev')
| Cadasta/cadasta-platform | cadasta/config/settings/default.py | Python | agpl-3.0 | 17,780 |
def main():
PEOPLE = insert_people()
sum_salary_all(PEOPLE)
list_people_by_city(PEOPLE)
def insert_people():
PEOPLE = []
while True:
NAMES = {}
NAMES["name"] = name = raw_input("Inserisci nome ")
NAMES["city"] = city = raw_input("Inseriscci citta ")
NAMES["salary"] = salary = int(raw_input("Inseriscci salario "))
PEOPLE.append(NAMES)
# print ("Name {name}, City {city}, Salary {salary} Annual {annual}".format(**NAMES))
while True:
a = raw_input("Vuoi continuare [Y/n]? ").upper()
if a in ["Y", "N"]:
break
if a == "N":
break
return PEOPLE
def sum_salary_all(list_people):
for p in list_people:
sum_salary_single(p)
def sum_salary_single(list_people):
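    # Annual pay is 13 monthly salaries (assuming the Italian "tredicesima" bonus month).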
list_people['annual'] = list_people['salary'] * 13
def list_people_by_city(list_people):
    # list.sort() sorts in place and returns None, so sort a copy keyed by city instead
    for p in sorted(list_people, key=lambda person: person["city"]):
        print("Name {name}, City {city}, Salary {salary}, Annual {annual}".format(**p))
if __name__ == '__main__':
main()
| feroda/lessons-python4beginners | students/2016-09-04/federicofioriti/Epeople.py | Python | agpl-3.0 | 987 |
class Breadcrumb:
def __init__(self, text, url):
self.text = text
self.url = url | alienlike/hypertextual | hypertextual/models/breadcrumb.py | Python | agpl-3.0 | 100 |
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
self.login()
def login(self):
# do a login here
# self.client.post("/login", {"username":"ellen_key", "password":"education"})
pass
@task(2)
def index(self):
self.client.get("/")
@task(1)
def project1(self):
self.client.get("/app/category/featured/")
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait=5000
max_wait=9000 | PyBossa/pybossa-locust | mainandprojects.py | Python | agpl-3.0 | 600 |
import nose
import ckanext.dcatapit.harvesters.utils as utils
eq_ = nose.tools.eq_
ok_ = nose.tools.ok_
csw_harvester_config = {
'dataset_themes': 'OP_DATPRO',
'dataset_places': 'ITA_BZO',
'dataset_languages': '{ITA,DEU}',
'frequency': 'UNKNOWN',
'agents': {
'publisher': {
'code': 'p_bz',
'role': 'publisher',
'code_regex': {
'regex': '\\(([^)]+)\\:([^)]+)\\)',
'groups': [2]
},
'name_regex': {
'regex': '([^(]*)(\\(IPa[^)]*\\))(.+)',
'groups': [1, 3]
}
},
'owner': {
'code': 'p_bz',
'role': 'owner',
'code_regex': {
'regex': '\\(([^)]+)\\:([^)]+)\\)',
'groups': [2]
},
'name_regex': {
'regex': '([^(]*)(\\(IPa[^)]*\\))(.+)',
'groups': [1, 3]
}
},
'author': {
'code': 'p_bz',
'role': 'author',
'code_regex': {
'regex': '\\(([^)]+)\\:([^)]+)\\)',
'groups': [2]
},
'name_regex': {
'regex': '([^(]*)(\\(IPa[^)]*\\))(.+)',
'groups': [1, 3]
}
}
},
'controlled_vocabularies': {
'dcatapit_skos_theme_id': 'theme.data-theme-skos',
'dcatapit_skos_places_id': 'theme.places-skos'
}
}
responsiblePartys = [
{
'organisation-name': 'Provincia Autonoma di Bolzano (IPa: p_bz) - Ripartizione 28 - Natura, paesaggio e sviluppo del territorio',
'role': 'publisher'
}, {
'organisation-name': 'Comune di Bolzano (IPa: c_a952) - Ufficio Sistema Informativo Territoriale',
'role': 'author'
}, {
'organisation-name': 'Comune di Bolzano (IPa: c_a952) - Ufficio Sistema Informativo Territoriale',
'role': 'owner'
}
]
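# A hedged reading of the fixtures above: the name_regex appears to drop the
# "(IPa: ...)" portion of the organisation name, while the code_regex captures
# the code after the colon, so "... (IPa: p_bz) ..." should yield the code 'p_bz'.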
def test_get_responsible_party():
name, code = utils.get_responsible_party(responsiblePartys, csw_harvester_config.get('agents').get('publisher'))
eq_(name, 'Provincia Autonoma di Bolzano - Ripartizione 28 - Natura, paesaggio e sviluppo del territorio')
eq_(code, 'p_bz')
name, code = utils.get_responsible_party(responsiblePartys, csw_harvester_config.get('agents').get('owner'))
eq_(name, 'Comune di Bolzano - Ufficio Sistema Informativo Territoriale')
eq_(code, 'c_a952')
name, code = utils.get_responsible_party(responsiblePartys, csw_harvester_config.get('agents').get('author'))
eq_(name, 'Comune di Bolzano - Ufficio Sistema Informativo Territoriale')
eq_(code, 'c_a952')
| geosolutions-it/ckanext-dcatapit | ckanext/dcatapit/tests/test_harvest_utils.py | Python | agpl-3.0 | 2,692 |
# vim:fileencoding=utf8:et:ts=4:sts=4:sw=4:ft=python
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import (login as _login, logout as _logout,
authenticate)
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.forms.models import model_to_dict
from django.http import (HttpResponse, HttpResponseForbidden,
HttpResponseBadRequest)
from django.views.decorators.cache import cache_page
from django.shortcuts import redirect, render
from django.utils.html import format_html
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django_otp.decorators import otp_required
from openid.extensions.ax import FetchRequest, FetchResponse
from openid.extensions.sreg import SRegRequest, SRegResponse
from openid.server.server import (Server, ProtocolError, EncodingError,
CheckIDRequest, ENCODE_URL,
ENCODE_KVFORM, ENCODE_HTML_FORM)
from passlib.hash import ldap_md5_crypt
from urlparse import urljoin
from okupy import OkupyError
from okupy.accounts.forms import (LoginForm, OpenIDLoginForm, SSLCertLoginForm,
OTPForm, SignupForm, SiteAuthForm,
StrongAuthForm, ProfileSettingsForm,
ContactSettingsForm, EmailSettingsForm,
GentooAccountSettingsForm,
PasswordSettingsForm)
from okupy.accounts.models import LDAPUser, OpenID_Attributes, Queue
from okupy.accounts.openid_store import DjangoDBOpenIDStore
from okupy.common.ldap_helpers import (get_bound_ldapuser,
set_secondary_password,
remove_secondary_password)
from okupy.common.decorators import strong_auth_required, anonymous_required
from okupy.common.log import log_extra_data
from okupy.crypto.ciphers import sessionrefcipher
from okupy.crypto.models import RevokedToken
from okupy.otp import init_otp
from okupy.otp.sotp.models import SOTPDevice
from okupy.otp.totp.models import TOTPDevice
# the following two are for exceptions
import openid.yadis.discover
import openid.fetchers
import django_otp
import hashlib
import io
import ldap
import logging
import qrcode
logger = logging.getLogger('okupy')
logger_mail = logging.getLogger('mail_okupy')
@cache_page(60 * 20)
def lists(request, acc_list):
devlist = LDAPUser.objects.all()
if acc_list == 'devlist':
devlist = devlist.filter(is_developer=True)
elif acc_list == 'former-devlist':
devlist = devlist.filter(is_retired=True)
elif acc_list == 'foundation-members':
devlist = devlist.filter(is_foundation=True)
return render(request, '%s.html' % acc_list, {'devlist': devlist})
@otp_required
def index(request):
ldb_user = LDAPUser.objects.filter(username=request.user.username)
return render(request, 'index.html', {
'ldb_user': ldb_user,
})
def login(request):
""" The login page """
user = None
oreq = request.session.get('openid_request', None)
# this can be POST or GET, and can be null or empty
next = request.REQUEST.get('next') or reverse(index)
is_otp = False
login_form = None
strong_auth_req = 'strong_auth_requested' in request.session
if oreq:
login_form_class = OpenIDLoginForm
elif ('strong_auth_requested' in request.session
and request.user.is_authenticated()):
login_form_class = StrongAuthForm
else:
login_form_class = LoginForm
try:
if request.method != 'POST':
pass
elif 'cancel' in request.POST:
# note: this wipes request.session
_logout(request)
if oreq is not None:
oresp = oreq.answer(False)
return render_openid_response(request, oresp)
elif 'otp_token' in request.POST:
# if user's not authenticated, go back to square one
if not request.user.is_authenticated():
raise OkupyError('OTP verification timed out')
is_otp = True
otp_form = OTPForm(request.POST)
if otp_form.is_valid():
token = otp_form.cleaned_data['otp_token']
else:
raise OkupyError('OTP verification failed')
# prevent replay attacks and race conditions
if not RevokedToken.add(token, request.user):
raise OkupyError('OTP verification failed')
dev = django_otp.match_token(request.user, token)
if not dev:
raise OkupyError('OTP verification failed')
django_otp.login(request, dev)
else:
login_form = login_form_class(request.POST)
if login_form.is_valid():
if login_form_class != StrongAuthForm:
username = login_form.cleaned_data['username']
else:
username = request.user.username
password = login_form.cleaned_data['password']
else:
raise OkupyError('Login failed')
"""
            Perform authentication; if it retrieves a user object then
            it was successful. If it retrieves None, the login failed.
"""
try:
user = authenticate(
request=request,
username=username,
password=password)
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError(
"Can't contact the LDAP server or the database")
if not user:
raise OkupyError('Login failed')
if oreq:
request.session['auto_logout'] = (
login_form.cleaned_data['auto_logout'])
except OkupyError as error:
messages.error(request, str(error))
if user and user.is_active:
_login(request, user)
# prepare devices, and see if OTP is enabled
init_otp(request)
set_secondary_password(request=request, password=password)
if request.user.is_authenticated():
if (strong_auth_req
and not 'secondary_password' in request.session):
if request.method != 'POST':
messages.info(request, 'You need to type in your password'
+ ' again to perform this action')
else:
if request.user.is_verified():
return redirect(next)
login_form = OTPForm()
is_otp = True
if login_form is None:
login_form = login_form_class()
if is_otp or strong_auth_req:
ssl_auth_form = None
ssl_auth_uri = None
ssh_auth_command = None
else:
encrypted_id = sessionrefcipher.encrypt(request.session)
# TODO: it fails when:
# 1. site is accessed via IP (auth.127.0.0.1),
# 2. HTTP used on non-standard port (https://...:8000).
ssl_auth_form = SSLCertLoginForm({
'session': encrypted_id,
'next': request.build_absolute_uri(next),
'login_uri': request.build_absolute_uri(request.get_full_path()),
})
ssl_auth_host = 'auth.' + request.get_host()
ssl_auth_path = reverse(ssl_auth)
ssl_auth_uri = urljoin('https://' + ssl_auth_host, ssl_auth_path)
if settings.SSH_BIND[1] == 22:
ssh_port_opt = ''
else:
ssh_port_opt = '-p %d ' % settings.SSH_BIND[1]
ssh_auth_command = 'ssh %sauth+%s@%s' % (
ssh_port_opt,
encrypted_id,
request.get_host().split(':')[0])
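        # Illustrative result (hypothetical host and port): something like
        # "ssh -p 2222 auth+<encrypted-session>@example.org", or without the
        # "-p" option when SSH_BIND[1] is the default port 22.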
return render(request, 'login.html', {
'login_form': login_form,
'openid_request': oreq,
'next': next,
'ssl_auth_uri': ssl_auth_uri,
'ssl_auth_form': ssl_auth_form,
'ssh_auth_command': ssh_auth_command,
'is_otp': is_otp,
})
@csrf_exempt
@require_POST
def ssl_auth(request):
""" SSL certificate authentication. """
ssl_auth_form = SSLCertLoginForm(request.POST)
if not ssl_auth_form.is_valid():
return HttpResponseBadRequest('400 Bad Request')
session = ssl_auth_form.cleaned_data['session']
next_uri = ssl_auth_form.cleaned_data['login_uri']
user = authenticate(request=request)
if user and user.is_active:
_login(request, user)
init_otp(request)
if request.user.is_verified(): # OTP disabled
next_uri = ssl_auth_form.cleaned_data['next']
else:
messages.error(request, 'Certificate authentication failed')
    # So, Django will always start a new session for us. We need to copy
    # the data to the original session and preferably flush the new one.
# always logout automatically from SSL-based auth
# it's easy enough to log back in anyway
if 'openid_request' in session:
session['auto_logout'] = True
session.save()
request.session.flush()
return redirect(next_uri)
def logout(request):
""" The logout page """
try:
remove_secondary_password(request)
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
finally:
_logout(request)
return redirect(login)
@anonymous_required
def signup(request):
""" The signup page """
signup_form = None
if request.method == "POST":
signup_form = SignupForm(request.POST)
if signup_form.is_valid():
try:
try:
LDAPUser.objects.get(
username=signup_form.cleaned_data['username'])
except LDAPUser.DoesNotExist:
pass
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError("Can't contact LDAP server")
else:
raise OkupyError('Username already exists')
try:
LDAPUser.objects.get(
email__contains=signup_form.cleaned_data['email'])
except LDAPUser.DoesNotExist:
pass
else:
raise OkupyError('Email already exists')
queued_user = Queue(
username=signup_form.cleaned_data['username'],
first_name=signup_form.cleaned_data['first_name'],
last_name=signup_form.cleaned_data['last_name'],
email=signup_form.cleaned_data['email'],
password=signup_form.cleaned_data['password_origin'],
)
try:
queued_user.save()
except IntegrityError:
raise OkupyError('Account is already pending activation')
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError("Can't contact the database")
send_mail(
'%sAccount Activation' % settings.EMAIL_SUBJECT_PREFIX,
'To confirm your email address, please click the \
following link:\n%s' % queued_user.encrypted_id,
'%s' % settings.SERVER_EMAIL,
[signup_form.cleaned_data['email']]
)
messages.info(
request, "You will shortly receive an activation mail")
return redirect(login)
except OkupyError as error:
messages.error(request, str(error))
else:
signup_form = SignupForm()
return render(request, 'signup.html', {
'signup_form': signup_form,
})
@anonymous_required
def activate(request, token):
"""
The page that users get to activate their accounts
It is in the form /activate/$TOKEN
"""
try:
try:
queued = Queue.objects.get(encrypted_id=token)
except (Queue.DoesNotExist, OverflowError, TypeError, ValueError):
raise OkupyError('Invalid URL')
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError("Can't contact the database")
# get max uidNumber
try:
uidnumber = LDAPUser.objects.latest('uid').uid + 1
except LDAPUser.DoesNotExist:
uidnumber = 1
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError("Can't contact LDAP server")
# add account to ldap
new_user = LDAPUser(
object_class=settings.AUTH_LDAP_USER_OBJECTCLASS,
last_name=queued.last_name,
full_name='%s %s' % (queued.first_name, queued.last_name),
password=[ldap_md5_crypt.encrypt(queued.password)],
first_name=queued.first_name,
email=[queued.email],
username=queued.username,
uid=uidnumber,
gid=100,
gecos='%s %s' % (queued.first_name, queued.last_name),
home_directory='/home/%s' % queued.username,
ACL=['user.group'],
)
new_user.save()
# remove queued account from DB
queued.delete()
messages.success(
request, "Your account has been activated successfully")
except OkupyError as error:
messages.error(request, str(error))
return redirect(login)
# Settings
@strong_auth_required
@otp_required
def profile_settings(request):
""" Primary account settings, """
with get_bound_ldapuser(request) as user_info:
profile_settings = None
if request.method == "POST":
profile_settings = ProfileSettingsForm(request.POST)
if profile_settings.is_valid():
try:
#birthday = profile_settings.cleaned_data['birthday']
first_name = profile_settings.cleaned_data['first_name']
last_name = profile_settings.cleaned_data['last_name']
if user_info.first_name != first_name:
user_info.first_name = first_name
if user_info.last_name != last_name:
user_info.last_name = last_name
user_info.full_name = '%s %s' % (first_name, last_name)
user_info.gecos = '%s %s' % (first_name, last_name)
"""
if user_info.birthday != birthday:
user_info.birthday = birthday
"""
try:
user_info.save()
except IntegrityError:
pass
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError("Can't contact LDAP server")
else:
profile_settings = ProfileSettingsForm()
return render(request, 'settings-profile.html', {
'profile_settings': profile_settings,
'user_info': user_info,
})
@strong_auth_required
@otp_required
def password_settings(request):
""" Password settings """
with get_bound_ldapuser(request) as user_info:
password_settings = None
if request.method == "POST":
password_settings = PasswordSettingsForm(request.POST)
if password_settings.is_valid():
try:
new_password = password_settings.cleaned_data[
'new_password']
new_password_verify = password_settings.cleaned_data[
'new_password_verify']
old_password = password_settings.cleaned_data[
'old_password']
if old_password and (new_password == new_password_verify):
for hash in list(user_info.password):
print hash
try:
if ldap_md5_crypt.verify(old_password, hash):
user_info.password.append(
ldap_md5_crypt.encrypt(
new_password_verify))
user_info.password.remove(hash)
break
except ValueError:
# ignore unknown hashes
pass
try:
user_info.save()
except IntegrityError:
pass
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError("Can't contact LDAP server")
else:
password_settings = PasswordSettingsForm()
return render(request, 'settings-password.html', {
'password_settings': password_settings,
'user_info': user_info,
})
@strong_auth_required
@otp_required
def email_settings(request):
""" Email Settings """
with get_bound_ldapuser(request) as user_info:
email_settings = None
if request.method == "POST":
email_settings = EmailSettingsForm(request.POST)
if email_settings.is_valid():
try:
email = email_settings.cleaned_data['email']
gravatar_mail = email_settings.cleaned_data['gravatar']
if request.POST.get('delete'):
user_info.email.remove(email)
else:
user_info.email.append(email)
if gravatar_mail:
gravatar_hash = hashlib.md5(gravatar_mail).hexdigest()
user_info.gravatar = gravatar_hash
try:
user_info.save()
except IntegrityError:
pass
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError("Can't contact LDAP server")
else:
email_settings = EmailSettingsForm()
return render(request, 'settings-email.html', {
'email_settings': email_settings,
'user_info': user_info,
})
@strong_auth_required
@otp_required
def contact_settings(request):
""" Contact details """
with get_bound_ldapuser(request) as user_info:
contact_settings = None
if request.method == "POST":
contact_settings = ContactSettingsForm(request.POST)
if contact_settings.is_valid():
try:
gpg_fingerprint = contact_settings.cleaned_data[
'gpg_fingerprint']
im = contact_settings.cleaned_data['im']
latitude = contact_settings.cleaned_data['latitude']
location = contact_settings.cleaned_data['location']
longitude = contact_settings.cleaned_data['longitude']
phone = contact_settings.cleaned_data['phone']
website = contact_settings.cleaned_data['website']
if location and user_info.location != location:
user_info.location = location
if user_info.phone != phone:
user_info.phone = phone
if request.POST.get('delete_web'):
user_info.website.remove(website)
elif website and (not website in user_info.website):
user_info.website.append(website)
if request.POST.get('delete_im'):
user_info.im.remove(im)
elif im and (not im in user_info.im):
user_info.im.append(im)
if user_info.longitude != longitude:
user_info.longitude = longitude
if user_info.latitude != latitude:
user_info.latitude = latitude
if request.POST.get('delete_gpg'):
user_info.gpg_fingerprint.remove(gpg_fingerprint)
elif gpg_fingerprint and \
(not gpg_fingerprint in user_info.gpg_fingerprint):
user_info.gpg_fingerprint.append(gpg_fingerprint)
try:
user_info.save()
except IntegrityError:
pass
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError("Can't contact LDAP server")
else:
contact_settings = ContactSettingsForm()
return render(request, 'settings-contact.html', {
'contact_settings': contact_settings,
'user_info': user_info,
})
@strong_auth_required
@otp_required
def gentoo_dev_settings(request):
""" Gentoo related information """
with get_bound_ldapuser(request) as user_info:
gentoo_account_settings = None
if request.method == "POST":
gentoo_account_settings = GentooAccountSettingsForm(request.POST)
if gentoo_account_settings.is_valid():
try:
devbug = gentoo_account_settings.cleaned_data[
'developer_bug']
gentoo_join_date = gentoo_account_settings.cleaned_data[
'gentoo_join_date']
gentoo_mentor = gentoo_account_settings.cleaned_data[
'mentor']
gentoo_retire_date = gentoo_account_settings.cleaned_data[
'gentoo_retire_date']
gentoo_mentor = gentoo_account_settings.cleaned_data[
'mentor']
planet_feed = gentoo_account_settings.cleaned_data[
'planet_feed']
universe_feed = gentoo_account_settings.cleaned_data[
'universe_feed']
if request.POST.get('delete_devbug'):
user_info.devbug.remove(devbug)
elif devbug and (not devbug in user_info.developer_bug):
user_info.developer_bug.append(devbug)
if request.POST.get('delete_gjd'):
user_info.gentoo_join_date.remove(gentoo_join_date)
elif gentoo_join_date and (not gentoo_join_date in user_info.gentoo_join_date):
user_info.gentoo_join_date.append(gentoo_join_date)
if request.POST.get('delete_mentor'):
user_info.mentor.remove(gentoo_mentor)
elif gentoo_mentor and \
(not gentoo_mentor in user_info.mentor):
user_info.mentor.append(gentoo_mentor)
if user_info.gentoo_retire_date:
if request.POST.get('delete_grd'):
user_info.gentoo_retire_date.remove(gentoo_retire_date)
elif gentoo_retire_date and (not gentoo_retire_date in user_info.gentoo_retire_date):
user_info.gentoo_retire_date.append(gentoo_retire_date)
if user_info.planet_feed != planet_feed:
user_info.planet_feed = planet_feed
if user_info.universe_feed != universe_feed:
user_info.universe_feed = universe_feed
try:
user_info.save()
except IntegrityError:
pass
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as error:
logger.critical(error, extra=log_extra_data(request))
logger_mail.exception(error)
raise OkupyError("Can't contact LDAP server")
else:
gentoo_account_settings = GentooAccountSettingsForm()
return render(request, 'settings-gentoo.html', {
'gentoo_account_settings': gentoo_account_settings,
'user_info': user_info,
})
@strong_auth_required
@otp_required
def otp_setup(request):
dev = TOTPDevice.objects.get(user=request.user)
secret = None
conf_form = None
skeys = None
if request.method == 'POST':
if 'disable' in request.POST:
with get_bound_ldapuser(request) as user:
dev.disable(user)
elif 'confirm' in request.POST and 'otp_secret' in request.session:
secret = request.session['otp_secret']
conf_form = OTPForm(request.POST)
try:
if not conf_form.is_valid():
raise OkupyError()
token = conf_form.cleaned_data['otp_token']
# prevent reusing the same token to login
if not RevokedToken.add(token, request.user):
raise OkupyError()
if not dev.verify_token(token, secret):
raise OkupyError()
except OkupyError:
messages.error(request, 'Token verification failed.')
conf_form = OTPForm()
else:
with get_bound_ldapuser(request) as user:
dev.enable(user, secret)
secret = None
conf_form = None
sdev = SOTPDevice.objects.get(user=request.user)
skeys = sdev.gen_keys(user)
messages.info(request, 'The new secret has been set.')
elif 'enable' in request.POST:
secret = dev.gen_secret()
request.session['otp_secret'] = secret
conf_form = OTPForm()
elif 'recovery' in request.POST:
sdev = SOTPDevice.objects.get(user=request.user)
with get_bound_ldapuser(request) as user:
skeys = sdev.gen_keys(user)
messages.info(request, 'Your old recovery keys have been revoked.')
elif 'cancel' in request.POST:
            messages.info(request, 'Secret change aborted. Previous settings '
                          'are in effect.')
if secret:
# into groups of four characters
secret = ' '.join([secret[i:i + 4]
for i in range(0, len(secret), 4)])
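        # e.g. 'ABCDEFGHIJKLMNOP' is displayed as 'ABCD EFGH IJKL MNOP'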
if skeys:
# xxx xx xxx
def group_key(s):
return ' '.join([s[0:3], s[3:5], s[5:8]])
skeys = list([group_key(k) for k in skeys])
return render(request, 'otp-setup.html', {
'otp_enabled': dev.is_enabled(),
'secret': secret,
'conf_form': conf_form,
'skeys': skeys,
})
def otp_qrcode(request):
dev = TOTPDevice()
secret = request.session.get('otp_secret')
if not secret:
return HttpResponseForbidden()
qr = qrcode.make(dev.get_uri(secret), box_size=5)
f = io.BytesIO()
qr.save(f, 'PNG')
return HttpResponse(f.getvalue(), content_type='image/png')
# OpenID-specific
def endpoint_url(request):
return request.build_absolute_uri(reverse(openid_endpoint))
def get_openid_server(request):
store = DjangoDBOpenIDStore()
return Server(store, endpoint_url(request))
def render_openid_response(request, oresp, srv=None):
if srv is None:
srv = get_openid_server(request)
try:
eresp = srv.encodeResponse(oresp)
except EncodingError as e:
# TODO: do we want some different heading for it?
return render(request, 'openid_endpoint.html', {
'error': str(e),
}, status=500)
dresp = HttpResponse(eresp.body, status=eresp.code)
for h, v in eresp.headers.items():
dresp[h] = v
return dresp
@csrf_exempt
def openid_endpoint(request):
if request.method == 'POST':
req = request.POST
else:
req = request.GET
srv = get_openid_server(request)
try:
oreq = srv.decodeRequest(req)
except ProtocolError as e:
if e.whichEncoding() == ENCODE_URL:
return redirect(e.encodeToURL())
elif e.whichEncoding() == ENCODE_HTML_FORM:
return HttpResponse(e.toHTML())
elif e.whichEncoding() == ENCODE_KVFORM:
return HttpResponse(e.encodeToKVForm(), status=400)
else:
return render(request, 'openid_endpoint.html', {
'error': str(e)
}, status=400)
if oreq is None:
return render(request, 'openid_endpoint.html')
if isinstance(oreq, CheckIDRequest):
# immediate requests not supported yet, so immediately
# reject them.
if oreq.immediate:
oresp = oreq.answer(False)
else:
request.session['openid_request'] = oreq
return redirect(openid_auth_site)
else:
oresp = srv.handleRequest(oreq)
return render_openid_response(request, oresp, srv)
def user_page(request, username):
return render(request, 'user-page.html', {
'endpoint_uri': endpoint_url(request),
})
openid_ax_attribute_mapping = {
# http://web.archive.org/web/20110714025426/http://www.axschema.org/types/
'http://axschema.org/namePerson/friendly': 'nickname',
'http://axschema.org/contact/email': 'email',
'http://axschema.org/namePerson': 'fullname',
'http://axschema.org/birthDate': 'dob',
'http://axschema.org/person/gender': 'gender',
'http://axschema.org/contact/postalCode/home': 'postcode',
'http://axschema.org/contact/country/home': 'country',
'http://axschema.org/pref/language': 'language',
'http://axschema.org/pref/timezone': 'timezone',
# TODO: provide further attributes
}
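# Illustrative note (derived from the mapping above and openid_auth_site
# below): a relying party requesting the AX attribute
# 'http://axschema.org/contact/email' gets it folded into the SReg field
# 'email', which is answered from ldap_user.email if the user approves.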
@otp_required
def openid_auth_site(request):
try:
oreq = request.session['openid_request']
except KeyError:
return render(request, 'openid-auth-site.html', {
'error': 'No OpenID request associated. The request may have \
expired.',
}, status=400)
sreg = SRegRequest.fromOpenIDRequest(oreq)
ax = FetchRequest.fromOpenIDRequest(oreq)
sreg_fields = set(sreg.allRequestedFields())
if ax:
for uri in ax.requested_attributes:
k = openid_ax_attribute_mapping.get(uri)
if k:
sreg_fields.add(k)
ldap_user = LDAPUser.objects.get(username=request.user.username)
if sreg_fields:
sreg_data = {
'nickname': ldap_user.username,
'email': ldap_user.email,
'fullname': ldap_user.full_name,
'dob': ldap_user.birthday,
}
for k in list(sreg_data):
if not sreg_data[k]:
del sreg_data[k]
else:
sreg_data = {}
sreg_fields = sreg_data.keys()
# Read preferences from the db.
try:
saved_pref = OpenID_Attributes.objects.get(
uid=ldap_user.uid,
trust_root=oreq.trust_root,
)
except OpenID_Attributes.DoesNotExist:
saved_pref = None
auto_auth = False
else:
auto_auth = saved_pref.always_auth
if auto_auth or request.POST:
if auto_auth:
# TODO: can we do this nicer?
form_inp = model_to_dict(saved_pref)
else:
form_inp = request.POST
form = SiteAuthForm(form_inp, instance=saved_pref)
# can it be invalid somehow?
assert(form.is_valid())
attrs = form.save(commit=False)
# nullify fields that were not requested
for fn in form.cleaned_data:
if fn in ('always_auth',):
pass
elif hasattr(attrs, fn) and fn not in sreg_fields:
setattr(attrs, fn, None)
if auto_auth or 'accept' in request.POST:
# prepare sreg response
for fn, send in form.cleaned_data.items():
if fn not in sreg_data:
pass
elif not send:
del sreg_data[fn]
elif isinstance(sreg_data[fn], list):
form_key = 'which_%s' % fn
val = form.cleaned_data[form_key]
if val not in sreg_data[fn]:
raise NotImplementedError(
'Changing choices not implemented yet')
sreg_data[fn] = val
if not auto_auth:
setattr(attrs, form_key, val)
if not auto_auth:
# save prefs in the db
# (if auto_auth, then nothing changed)
attrs.uid = ldap_user.uid
attrs.trust_root = oreq.trust_root
attrs.save()
oresp = oreq.answer(True, identity=request.build_absolute_uri(
reverse(user_page, args=(request.user.username,))))
sreg_resp = SRegResponse.extractResponse(sreg, sreg_data)
oresp.addExtension(sreg_resp)
if ax:
ax_resp = FetchResponse(ax)
for uri in ax.requested_attributes:
k = openid_ax_attribute_mapping.get(uri)
if k and k in sreg_data:
ax_resp.addValue(uri, sreg_data[k])
oresp.addExtension(ax_resp)
elif 'reject' in request.POST:
oresp = oreq.answer(False)
else:
return render(request, 'openid-auth-site.html', {
'error': 'Invalid request submitted.',
}, status=400)
if request.session.get('auto_logout', False):
# _logout clears request.session
_logout(request)
else:
del request.session['openid_request']
return render_openid_response(request, oresp)
form = SiteAuthForm(instance=saved_pref)
sreg_form = {}
# Fill in lists for choices
for f in sreg_fields:
if f not in sreg_data:
pass
elif isinstance(sreg_data[f], list):
form.fields['which_%s' % f].widget.choices = [
(x, x) for x in sreg_data[f]
]
sreg_form[f] = form['which_%s' % f]
else:
sreg_form[f] = format_html("<input type='text'"
+ " readonly='readonly'"
+ " value='{0}' />",
sreg_data[f])
try:
# TODO: cache it
if oreq.returnToVerified():
tr_valid = 'Return-To valid and trusted'
else:
tr_valid = 'Return-To untrusted'
except openid.yadis.discover.DiscoveryFailure:
tr_valid = 'Unable to verify trust (Yadis unsupported)'
except openid.fetchers.HTTPFetchingError:
tr_valid = 'Unable to verify trust (HTTP error)'
return render(request, 'openid-auth-site.html', {
'openid_request': oreq,
'return_to_valid': tr_valid,
'form': form,
'sreg': sreg_fields,
'sreg_form': sreg_form,
'policy_url': sreg.policy_url,
})
| gentoo/identity.gentoo.org | okupy/accounts/views.py | Python | agpl-3.0 | 36,590 |
# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# This program is available from Source Forge: at GeneNetwork Project
# (sourceforge.net/projects/genenetwork/).
#
# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
# at [email protected] and [email protected]
#
# This module is used by GeneNetwork project (www.genenetwork.org)
from __future__ import absolute_import, print_function, division
import os
import math
import string
import collections
import codecs
import json
import gzip
import cPickle as pickle
import itertools
from operator import itemgetter
from redis import Redis
Redis = Redis()
from flask import Flask, g
import reaper
from base import webqtlConfig
from base import species
from dbFunction import webqtlDatabaseFunction
from utility import webqtlUtil
from utility.benchmark import Bench
from utility import chunks
from utility.tools import locate, locate_ignore_error
from maintenance import get_group_samplelists
from MySQLdb import escape_string as escape
from pprint import pformat as pf
# Used by create_database to instantiate objects
# Each subclass will add to this
DS_NAME_MAP = {}
def create_dataset(dataset_name, dataset_type = None):
if not dataset_type:
dataset_type = Dataset_Getter(dataset_name)
print("dataset_type is:", dataset_type)
dataset_ob = DS_NAME_MAP[dataset_type]
dataset_class = globals()[dataset_ob]
return dataset_class(dataset_name)
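# Illustrative sketch of the dispatch above (the dataset name is a made-up
# placeholder, not necessarily present in the menu file):
#
#     dataset = create_dataset("SomeProbeSetFreezeName")
#     # Dataset_Getter maps the name to "ProbeSet", DS_NAME_MAP['ProbeSet'] is
#     # 'MrnaAssayDataSet', so `dataset` ends up a MrnaAssayDataSet instance.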
class Dataset_Types(object):
def __init__(self):
self.datasets = {}
file_name = "wqflask/static/new/javascript/dataset_menu_structure.json"
with open(file_name, 'r') as fh:
data = json.load(fh)
print("*" * 70)
for species in data['datasets']:
for group in data['datasets'][species]:
for dataset_type in data['datasets'][species][group]:
for dataset in data['datasets'][species][group][dataset_type]:
short_dataset_name = dataset[1]
if dataset_type == "Phenotypes":
new_type = "Publish"
elif dataset_type == "Genotypes":
new_type = "Geno"
else:
new_type = "ProbeSet"
self.datasets[short_dataset_name] = new_type
def __call__(self, name):
return self.datasets[name]
# Do the intensive work at startup one time only
Dataset_Getter = Dataset_Types()
def create_datasets_list():
key = "all_datasets"
result = Redis.get(key)
if result:
print("Cache hit!!!")
datasets = pickle.loads(result)
else:
datasets = list()
with Bench("Creating DataSets object"):
type_dict = {'Publish': 'PublishFreeze',
'ProbeSet': 'ProbeSetFreeze',
'Geno': 'GenoFreeze'}
for dataset_type in type_dict:
query = "SELECT Name FROM {}".format(type_dict[dataset_type])
for result in g.db.execute(query).fetchall():
                    #The query at the beginning of this function isn't strictly
                    #necessary here, but we would rather just reuse it
#print("type: {}\tname: {}".format(dataset_type, result.Name))
dataset = create_dataset(result.Name, dataset_type)
datasets.append(dataset)
Redis.set(key, pickle.dumps(datasets, pickle.HIGHEST_PROTOCOL))
Redis.expire(key, 60*60)
return datasets
def create_in_clause(items):
"""Create an in clause for mysql"""
in_clause = ', '.join("'{}'".format(x) for x in mescape(*items))
in_clause = '( {} )'.format(in_clause)
return in_clause
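# Illustrative example of create_in_clause (values are assumptions; the exact
# output also depends on MySQLdb's escape_string):
#
#     >>> create_in_clause(['BXD1', 'BXD2'])
#     "( 'BXD1', 'BXD2' )"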
def mescape(*items):
"""Multiple escape"""
escaped = [escape(str(item)) for item in items]
#print("escaped is:", escaped)
return escaped
class Markers(object):
"""Todo: Build in cacheing so it saves us reading the same file more than once"""
def __init__(self, name):
json_data_fh = open(locate(name + '.json','genotype/json'))
try:
markers = json.load(json_data_fh)
except:
markers = []
for marker in markers:
if (marker['chr'] != "X") and (marker['chr'] != "Y"):
marker['chr'] = int(marker['chr'])
# print("Mb:", marker['Mb'])
marker['Mb'] = float(marker['Mb'])
self.markers = markers
#print("self.markers:", self.markers)
def add_pvalues(self, p_values):
print("length of self.markers:", len(self.markers))
print("length of p_values:", len(p_values))
if type(p_values) is list:
# THIS IS only needed for the case when we are limiting the number of p-values calculated
#if len(self.markers) > len(p_values):
# self.markers = self.markers[:len(p_values)]
for marker, p_value in itertools.izip(self.markers, p_values):
if not p_value:
continue
marker['p_value'] = float(p_value)
if math.isnan(marker['p_value']) or marker['p_value'] <= 0:
marker['lod_score'] = 0
marker['lrs_value'] = 0
else:
marker['lod_score'] = -math.log10(marker['p_value'])
#Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
elif type(p_values) is dict:
filtered_markers = []
for marker in self.markers:
#print("marker[name]", marker['name'])
#print("p_values:", p_values)
if marker['name'] in p_values:
#print("marker {} IS in p_values".format(i))
marker['p_value'] = p_values[marker['name']]
if math.isnan(marker['p_value']) or (marker['p_value'] <= 0):
marker['lod_score'] = 0
marker['lrs_value'] = 0
else:
marker['lod_score'] = -math.log10(marker['p_value'])
#Using -log(p) for the LRS; need to ask Rob how he wants to get LRS from p-values
marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
filtered_markers.append(marker)
#else:
#print("marker {} NOT in p_values".format(i))
#self.markers.remove(marker)
#del self.markers[i]
self.markers = filtered_markers
class HumanMarkers(Markers):
def __init__(self, name, specified_markers = []):
marker_data_fh = open(locate('genotype') + '/' + name + '.bim')
self.markers = []
for line in marker_data_fh:
splat = line.strip().split()
#print("splat:", splat)
if len(specified_markers) > 0:
if splat[1] in specified_markers:
marker = {}
marker['chr'] = int(splat[0])
marker['name'] = splat[1]
marker['Mb'] = float(splat[3]) / 1000000
else:
continue
else:
marker = {}
marker['chr'] = int(splat[0])
marker['name'] = splat[1]
marker['Mb'] = float(splat[3]) / 1000000
self.markers.append(marker)
#print("markers is: ", pf(self.markers))
def add_pvalues(self, p_values):
super(HumanMarkers, self).add_pvalues(p_values)
class DatasetGroup(object):
"""
Each group has multiple datasets; each species has multiple groups.
For example, Mouse has multiple groups (BXD, BXA, etc), and each group
has multiple datasets associated with it.
"""
def __init__(self, dataset):
"""This sets self.group and self.group_id"""
print("DATASET NAME2:", dataset.name)
self.name, self.id = g.db.execute(dataset.query_for_group).fetchone()
if self.name == 'BXD300':
self.name = "BXD"
self.f1list = None
self.parlist = None
self.get_f1_parent_strains()
#print("parents/f1s: {}:{}".format(self.parlist, self.f1list))
self.species = webqtlDatabaseFunction.retrieve_species(self.name)
self.incparentsf1 = False
self.allsamples = None
self._datasets = None
def get_specified_markers(self, markers = []):
self.markers = HumanMarkers(self.name, markers)
def get_markers(self):
#print("self.species is:", self.species)
if self.species == "human":
marker_class = HumanMarkers
else:
marker_class = Markers
self.markers = marker_class(self.name)
def datasets(self):
key = "group_dataset_menu:v2:" + self.name
print("key is2:", key)
dataset_menu = []
print("[tape4] webqtlConfig.PUBLICTHRESH:", webqtlConfig.PUBLICTHRESH)
print("[tape4] type webqtlConfig.PUBLICTHRESH:", type(webqtlConfig.PUBLICTHRESH))
results = g.db.execute('''
(SELECT '#PublishFreeze',PublishFreeze.FullName,PublishFreeze.Name
FROM PublishFreeze,InbredSet
WHERE PublishFreeze.InbredSetId = InbredSet.Id
and InbredSet.Name = %s
and PublishFreeze.public > %s)
UNION
(SELECT '#GenoFreeze',GenoFreeze.FullName,GenoFreeze.Name
FROM GenoFreeze, InbredSet
WHERE GenoFreeze.InbredSetId = InbredSet.Id
and InbredSet.Name = %s
and GenoFreeze.public > %s)
UNION
(SELECT Tissue.Name, ProbeSetFreeze.FullName,ProbeSetFreeze.Name
FROM ProbeSetFreeze, ProbeFreeze, InbredSet, Tissue
WHERE ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id
and ProbeFreeze.TissueId = Tissue.Id
and ProbeFreeze.InbredSetId = InbredSet.Id
and InbredSet.Name like %s
and ProbeSetFreeze.public > %s
ORDER BY Tissue.Name, ProbeSetFreeze.CreateTime desc, ProbeSetFreeze.AvgId)
''', (self.name, webqtlConfig.PUBLICTHRESH,
self.name, webqtlConfig.PUBLICTHRESH,
"%" + self.name + "%", webqtlConfig.PUBLICTHRESH))
the_results = results.fetchall()
#for tissue_name, dataset in itertools.groupby(the_results, itemgetter(0)):
for dataset_item in the_results:
tissue_name = dataset_item[0]
dataset = dataset_item[1]
dataset_short = dataset_item[2]
if tissue_name in ['#PublishFreeze', '#GenoFreeze']:
dataset_menu.append(dict(tissue=None, datasets=[(dataset, dataset_short)]))
else:
dataset_sub_menu = [item[1:] for item in dataset]
tissue_already_exists = False
tissue_position = None
for i, tissue_dict in enumerate(dataset_menu):
if tissue_dict['tissue'] == tissue_name:
tissue_already_exists = True
tissue_position = i
break
if tissue_already_exists:
print("dataset_menu:", dataset_menu[i]['datasets'])
dataset_menu[i]['datasets'].append((dataset, dataset_short))
else:
dataset_menu.append(dict(tissue=tissue_name,
datasets=[(dataset, dataset_short)]))
Redis.set(key, pickle.dumps(dataset_menu, pickle.HIGHEST_PROTOCOL))
Redis.expire(key, 60*5)
self._datasets = dataset_menu
return self._datasets
def get_f1_parent_strains(self):
try:
# NL, 07/27/2010. ParInfo has been moved from webqtlForm.py to webqtlUtil.py;
f1, f12, maternal, paternal = webqtlUtil.ParInfo[self.name]
except KeyError:
f1 = f12 = maternal = paternal = None
if f1 and f12:
self.f1list = [f1, f12]
if maternal and paternal:
self.parlist = [maternal, paternal]
def get_samplelist(self):
key = "samplelist:v2:" + self.name
print("key is:", key)
with Bench("Loading cache"):
result = Redis.get(key)
if result:
print("Sample List Cache hit!!!")
print("Before unjsonifying {}: {}".format(type(result), result))
self.samplelist = json.loads(result)
print(" type: ", type(self.samplelist))
print(" self.samplelist: ", self.samplelist)
else:
print("Cache not hit for", self.name)
genotype_fn = locate_ignore_error(self.name+".geno",'genotype')
mapping_fn = locate_ignore_error(self.name+".fam",'mapping')
if mapping_fn:
self.samplelist = get_group_samplelists.get_samplelist("plink", mapping_fn)
elif genotype_fn:
self.samplelist = get_group_samplelists.get_samplelist("geno", genotype_fn)
else:
self.samplelist = None
print("Sample list: ",self.samplelist)
Redis.set(key, json.dumps(self.samplelist))
Redis.expire(key, 60*5)
def all_samples_ordered(self):
result = []
lists = (self.parlist, self.f1list, self.samplelist)
[result.extend(l) for l in lists if l]
return result
def read_genotype_file(self):
'''Read genotype from .geno file instead of database'''
#genotype_1 is Dataset Object without parents and f1
#genotype_2 is Dataset Object with parents and f1 (not for intercross)
genotype_1 = reaper.Dataset()
# reaper barfs on unicode filenames, so here we ensure it's a string
full_filename = str(locate(self.name+'.geno','genotype'))
genotype_1.read(full_filename)
if genotype_1.type == "group" and self.parlist:
genotype_2 = genotype_1.add(Mat=self.parlist[0], Pat=self.parlist[1]) #, F1=_f1)
else:
genotype_2 = genotype_1
#determine default genotype object
if self.incparentsf1 and genotype_1.type != "intercross":
genotype = genotype_2
else:
self.incparentsf1 = 0
genotype = genotype_1
self.samplelist = list(genotype.prgy)
return genotype
class DataSet(object):
"""
    The DataSet class defines a dataset in WebQTL; it can be a microarray,
    published phenotype, genotype, or user-input (temp) dataset.
"""
def __init__(self, name):
assert name, "Need a name"
self.name = name
self.id = None
self.shortname = None
self.fullname = None
self.type = None
self.setup()
self.check_confidentiality()
self.retrieve_other_names()
self.group = DatasetGroup(self) # sets self.group and self.group_id and gets genotype
self.group.get_samplelist()
self.species = species.TheSpecies(self)
def get_desc(self):
"""Gets overridden later, at least for Temp...used by trait's get_given_name"""
return None
# Delete this eventually
@property
def riset():
Weve_Renamed_This_As_Group
def retrieve_other_names(self):
"""
If the data set name parameter is not found in the 'Name' field of the data set table,
check if it is actually the FullName or ShortName instead.
This is not meant to retrieve the data set info if no name at all is passed.
"""
try:
if self.type == "ProbeSet":
query_args = tuple(escape(x) for x in (
str(webqtlConfig.PUBLICTHRESH),
self.name,
self.name,
self.name))
self.id, self.name, self.fullname, self.shortname, self.tissue = g.db.execute("""
SELECT ProbeSetFreeze.Id, ProbeSetFreeze.Name, ProbeSetFreeze.FullName, ProbeSetFreeze.ShortName, Tissue.Name
FROM ProbeSetFreeze, ProbeFreeze, Tissue
WHERE ProbeSetFreeze.public > %s AND
ProbeSetFreeze.ProbeFreezeId = ProbeFreeze.Id AND
ProbeFreeze.TissueId = Tissue.Id AND
(ProbeSetFreeze.Name = '%s' OR ProbeSetFreeze.FullName = '%s' OR ProbeSetFreeze.ShortName = '%s')
""" % (query_args)).fetchone()
else:
query_args = tuple(escape(x) for x in (
(self.type + "Freeze"),
str(webqtlConfig.PUBLICTHRESH),
self.name,
self.name,
self.name))
self.tissue = "N/A"
self.id, self.name, self.fullname, self.shortname = g.db.execute("""
SELECT Id, Name, FullName, ShortName
FROM %s
WHERE public > %s AND
(Name = '%s' OR FullName = '%s' OR ShortName = '%s')
""" % (query_args)).fetchone()
except TypeError:
print("Dataset {} is not yet available in GeneNetwork.".format(self.name))
pass
def get_trait_data(self, sample_list=None):
if sample_list:
self.samplelist = sample_list
else:
self.samplelist = self.group.samplelist
if self.group.parlist != None and self.group.f1list != None:
if (self.group.parlist + self.group.f1list) in self.samplelist:
self.samplelist += self.group.parlist + self.group.f1list
query = """
SELECT Strain.Name, Strain.Id FROM Strain, Species
WHERE Strain.Name IN {}
and Strain.SpeciesId=Species.Id
and Species.name = '{}'
""".format(create_in_clause(self.samplelist), *mescape(self.group.species))
results = dict(g.db.execute(query).fetchall())
sample_ids = [results[item] for item in self.samplelist]
# MySQL limits the number of tables that can be used in a join to 61,
# so we break the sample ids into smaller chunks
# Postgres doesn't have that limit, so we can get rid of this after we transition
chunk_size = 50
number_chunks = int(math.ceil(len(sample_ids) / chunk_size))
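        # Worked example: 120 sample ids with chunk_size 50 gives
        # ceil(120 / 50) = 3 chunks (of 40 ids each), so no generated query
        # left-joins more than ~50 per-sample Data tables and MySQL's
        # 61-table join limit is respected.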
trait_sample_data = []
for sample_ids_step in chunks.divide_into_chunks(sample_ids, number_chunks):
if self.type == "Publish":
dataset_type = "Phenotype"
else:
dataset_type = self.type
temp = ['T%s.value' % item for item in sample_ids_step]
if self.type == "Publish":
query = "SELECT {}XRef.Id,".format(escape(self.type))
else:
query = "SELECT {}.Name,".format(escape(dataset_type))
data_start_pos = 1
query += string.join(temp, ', ')
query += ' FROM ({}, {}XRef, {}Freeze) '.format(*mescape(dataset_type,
self.type,
self.type))
for item in sample_ids_step:
query += """
left join {}Data as T{} on T{}.Id = {}XRef.DataId
and T{}.StrainId={}\n
""".format(*mescape(self.type, item, item, self.type, item, item))
if self.type == "Publish":
query += """
WHERE {}XRef.InbredSetId = {}Freeze.InbredSetId
and {}Freeze.Name = '{}'
and {}.Id = {}XRef.{}Id
order by {}.Id
""".format(*mescape(self.type, self.type, self.type, self.name,
dataset_type, self.type, dataset_type, dataset_type))
else:
query += """
WHERE {}XRef.{}FreezeId = {}Freeze.Id
and {}Freeze.Name = '{}'
and {}.Id = {}XRef.{}Id
order by {}.Id
""".format(*mescape(self.type, self.type, self.type, self.type,
self.name, dataset_type, self.type, self.type, dataset_type))
#print("trait data query: ", query)
results = g.db.execute(query).fetchall()
#print("query results:", results)
trait_sample_data.append(results)
trait_count = len(trait_sample_data[0])
self.trait_data = collections.defaultdict(list)
# put all of the separate data together into a dictionary where the keys are
# trait names and values are lists of sample values
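        # e.g. self.trait_data might end up as
        #     {'10001': [7.21, 7.45, ...], '10002': [6.02, 5.98, ...]}
        # (trait names and values shown here are illustrative only)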
for trait_counter in range(trait_count):
trait_name = trait_sample_data[0][trait_counter][0]
for chunk_counter in range(int(number_chunks)):
self.trait_data[trait_name] += (
trait_sample_data[chunk_counter][trait_counter][data_start_pos:])
class PhenotypeDataSet(DataSet):
DS_NAME_MAP['Publish'] = 'PhenotypeDataSet'
def setup(self):
print("IS A PHENOTYPEDATASET")
# Fields in the database table
self.search_fields = ['Phenotype.Post_publication_description',
'Phenotype.Pre_publication_description',
'Phenotype.Pre_publication_abbreviation',
'Phenotype.Post_publication_abbreviation',
'Phenotype.Lab_code',
'Publication.PubMed_ID',
'Publication.Abstract',
'Publication.Title',
'Publication.Authors',
'PublishXRef.Id']
# Figure out what display_fields is
self.display_fields = ['name',
'pubmed_id',
'pre_publication_description',
'post_publication_description',
'original_description',
'pre_publication_abbreviation',
'post_publication_abbreviation',
'lab_code',
'submitter', 'owner',
'authorized_users',
'authors', 'title',
'abstract', 'journal',
'volume', 'pages',
'month', 'year',
'sequence', 'units', 'comments']
# Fields displayed in the search results table header
self.header_fields = ['Index',
'Record',
'Description',
'Authors',
'Year',
'Max LRS',
'Max LRS Location',
'Additive Effect']
self.type = 'Publish'
self.query_for_group = '''
SELECT
InbredSet.Name, InbredSet.Id
FROM
InbredSet, PublishFreeze
WHERE
PublishFreeze.InbredSetId = InbredSet.Id AND
PublishFreeze.Name = "%s"
''' % escape(self.name)
def check_confidentiality(self):
# (Urgently?) Need to write this
pass
def get_trait_list(self):
query = """
select PublishXRef.Id
from PublishXRef, PublishFreeze
where PublishFreeze.InbredSetId=PublishXRef.InbredSetId
and PublishFreeze.Id = {}
""".format(escape(str(self.id)))
results = g.db.execute(query).fetchall()
trait_data = {}
for trait in results:
trait_data[trait[0]] = self.retrieve_sample_data(trait[0])
return trait_data
def get_trait_info(self, trait_list, species = ''):
for this_trait in trait_list:
if not this_trait.haveinfo:
this_trait.retrieve_info(get_qtl_info=True)
description = this_trait.post_publication_description
#If the dataset is confidential and the user has access to confidential
#phenotype traits, then display the pre-publication description instead
#of the post-publication description
if this_trait.confidential:
this_trait.description_display = ""
continue # for now
if not webqtlUtil.hasAccessToConfidentialPhenotypeTrait(
privilege=self.privilege,
userName=self.userName,
authorized_users=this_trait.authorized_users):
description = this_trait.pre_publication_description
if len(description) > 0:
this_trait.description_display = description.strip()
else:
this_trait.description_display = ""
if not this_trait.year.isdigit():
this_trait.pubmed_text = "N/A"
else:
this_trait.pubmed_text = this_trait.year
if this_trait.pubmed_id:
this_trait.pubmed_link = webqtlConfig.PUBMEDLINK_URL % this_trait.pubmed_id
#LRS and its location
this_trait.LRS_score_repr = "N/A"
this_trait.LRS_score_value = 0
this_trait.LRS_location_repr = "N/A"
this_trait.LRS_location_value = 1000000
if this_trait.lrs:
result = g.db.execute("""
select Geno.Chr, Geno.Mb from Geno, Species
where Species.Name = %s and
Geno.Name = %s and
Geno.SpeciesId = Species.Id
""", (species, this_trait.locus)).fetchone()
if result:
if result[0] and result[1]:
LRS_Chr = result[0]
LRS_Mb = result[1]
#XZ: LRS_location_value is used for sorting
try:
LRS_location_value = int(LRS_Chr)*1000 + float(LRS_Mb)
except:
if LRS_Chr.upper() == 'X':
LRS_location_value = 20*1000 + float(LRS_Mb)
else:
                                LRS_location_value = ord(str(LRS_Chr).upper()[0])*1000 + float(LRS_Mb)
this_trait.LRS_score_repr = LRS_score_repr = '%3.1f' % this_trait.lrs
this_trait.LRS_score_value = LRS_score_value = this_trait.lrs
this_trait.LRS_location_repr = LRS_location_repr = 'Chr%s: %.6f' % (LRS_Chr, float(LRS_Mb))
def retrieve_sample_data(self, trait):
query = """
SELECT
Strain.Name, PublishData.value, PublishSE.error, NStrain.count
FROM
(PublishData, Strain, PublishXRef, PublishFreeze)
left join PublishSE on
(PublishSE.DataId = PublishData.Id AND PublishSE.StrainId = PublishData.StrainId)
left join NStrain on
(NStrain.DataId = PublishData.Id AND
NStrain.StrainId = PublishData.StrainId)
WHERE
PublishXRef.InbredSetId = PublishFreeze.InbredSetId AND
PublishData.Id = PublishXRef.DataId AND PublishXRef.Id = %s AND
PublishFreeze.Id = %s AND PublishData.StrainId = Strain.Id
Order BY
Strain.Name
"""
results = g.db.execute(query, (trait, self.id)).fetchall()
return results
class GenotypeDataSet(DataSet):
DS_NAME_MAP['Geno'] = 'GenotypeDataSet'
def setup(self):
# Fields in the database table
self.search_fields = ['Name',
'Chr']
# Find out what display_fields is
self.display_fields = ['name',
'chr',
'mb',
'source2',
'sequence']
# Fields displayed in the search results table header
self.header_fields = ['Index',
'ID',
'Location']
# Todo: Obsolete or rename this field
self.type = 'Geno'
self.query_for_group = '''
SELECT
InbredSet.Name, InbredSet.Id
FROM
InbredSet, GenoFreeze
WHERE
GenoFreeze.InbredSetId = InbredSet.Id AND
GenoFreeze.Name = "%s"
''' % escape(self.name)
def check_confidentiality(self):
return geno_mrna_confidentiality(self)
def get_trait_list(self):
query = """
select Geno.Name
from Geno, GenoXRef
where GenoXRef.GenoId = Geno.Id
and GenoFreezeId = {}
""".format(escape(str(self.id)))
results = g.db.execute(query).fetchall()
trait_data = {}
for trait in results:
trait_data[trait[0]] = self.retrieve_sample_data(trait[0])
return trait_data
def get_trait_info(self, trait_list, species=None):
for this_trait in trait_list:
if not this_trait.haveinfo:
this_trait.retrieveInfo()
#XZ: trait_location_value is used for sorting
trait_location_repr = 'N/A'
trait_location_value = 1000000
if this_trait.chr and this_trait.mb:
try:
trait_location_value = int(this_trait.chr)*1000 + this_trait.mb
except:
if this_trait.chr.upper() == 'X':
trait_location_value = 20*1000 + this_trait.mb
else:
trait_location_value = ord(str(this_trait.chr).upper()[0])*1000 + this_trait.mb
this_trait.location_repr = 'Chr%s: %.6f' % (this_trait.chr, float(this_trait.mb) )
this_trait.location_value = trait_location_value
def retrieve_sample_data(self, trait):
query = """
SELECT
Strain.Name, GenoData.value, GenoSE.error, GenoData.Id
FROM
(GenoData, GenoFreeze, Strain, Geno, GenoXRef)
left join GenoSE on
(GenoSE.DataId = GenoData.Id AND GenoSE.StrainId = GenoData.StrainId)
WHERE
Geno.SpeciesId = %s AND Geno.Name = %s AND GenoXRef.GenoId = Geno.Id AND
GenoXRef.GenoFreezeId = GenoFreeze.Id AND
GenoFreeze.Name = %s AND
GenoXRef.DataId = GenoData.Id AND
GenoData.StrainId = Strain.Id
Order BY
Strain.Name
"""
results = g.db.execute(query,
(webqtlDatabaseFunction.retrieve_species_id(self.group.name),
trait, self.name)).fetchall()
return results
class MrnaAssayDataSet(DataSet):
'''
    An mRNA Assay is a quantitative assessment (assay) associated with an mRNA trait.
    This used to be called ProbeSet, but that term refers only to the Affymetrix
platform and is far too specific.
'''
DS_NAME_MAP['ProbeSet'] = 'MrnaAssayDataSet'
def setup(self):
# Fields in the database table
self.search_fields = ['Name',
'Description',
'Probe_Target_Description',
'Symbol',
'Alias',
'GenbankId',
'UniGeneId',
'RefSeq_TranscriptId']
# Find out what display_fields is
self.display_fields = ['name', 'symbol',
'description', 'probe_target_description',
'chr', 'mb',
'alias', 'geneid',
'genbankid', 'unigeneid',
'omim', 'refseq_transcriptid',
'blatseq', 'targetseq',
'chipid', 'comments',
'strand_probe', 'strand_gene',
'probe_set_target_region',
'probe_set_specificity',
'probe_set_blat_score',
'probe_set_blat_mb_start',
'probe_set_blat_mb_end',
'probe_set_strand',
'probe_set_note_by_rw',
'flag']
# Fields displayed in the search results table header
self.header_fields = ['Index',
'Record',
'Symbol',
'Description',
'Location',
'Mean',
'Max LRS',
'Max LRS Location',
'Additive Effect']
# Todo: Obsolete or rename this field
self.type = 'ProbeSet'
self.query_for_group = '''
SELECT
InbredSet.Name, InbredSet.Id
FROM
InbredSet, ProbeSetFreeze, ProbeFreeze
WHERE
ProbeFreeze.InbredSetId = InbredSet.Id AND
ProbeFreeze.Id = ProbeSetFreeze.ProbeFreezeId AND
ProbeSetFreeze.Name = "%s"
''' % escape(self.name)
def check_confidentiality(self):
return geno_mrna_confidentiality(self)
def get_trait_list_1(self):
query = """
select ProbeSet.Name
from ProbeSet, ProbeSetXRef
where ProbeSetXRef.ProbeSetId = ProbeSet.Id
and ProbeSetFreezeId = {}
""".format(escape(str(self.id)))
results = g.db.execute(query).fetchall()
trait_data = {}
for trait in results:
print("Retrieving sample_data for ", trait[0])
trait_data[trait[0]] = self.retrieve_sample_data(trait[0])
return trait_data
def get_trait_info(self, trait_list=None, species=''):
# Note: setting trait_list to [] is probably not a great idea.
if not trait_list:
trait_list = []
for this_trait in trait_list:
if not this_trait.haveinfo:
this_trait.retrieveInfo(QTL=1)
if not this_trait.symbol:
this_trait.symbol = "N/A"
#XZ, 12/08/2008: description
#XZ, 06/05/2009: Rob asked to add probe target description
description_string = unicode(str(this_trait.description).strip(codecs.BOM_UTF8), 'utf-8')
target_string = unicode(str(this_trait.probe_target_description).strip(codecs.BOM_UTF8), 'utf-8')
if len(description_string) > 1 and description_string != 'None':
description_display = description_string
else:
description_display = this_trait.symbol
if (len(description_display) > 1 and description_display != 'N/A' and
len(target_string) > 1 and target_string != 'None'):
description_display = description_display + '; ' + target_string.strip()
# Save it for the jinja2 template
this_trait.description_display = description_display
#XZ: trait_location_value is used for sorting
trait_location_repr = 'N/A'
trait_location_value = 1000000
if this_trait.chr and this_trait.mb:
#Checks if the chromosome number can be cast to an int (i.e. isn't "X" or "Y")
#This is so we can convert the location to a number used for sorting
trait_location_value = self.convert_location_to_value(this_trait.chr, this_trait.mb)
#try:
# trait_location_value = int(this_trait.chr)*1000 + this_trait.mb
#except ValueError:
# if this_trait.chr.upper() == 'X':
# trait_location_value = 20*1000 + this_trait.mb
# else:
# trait_location_value = (ord(str(this_trait.chr).upper()[0])*1000 +
# this_trait.mb)
#ZS: Put this in function currently called "convert_location_to_value"
this_trait.location_repr = 'Chr%s: %.6f' % (this_trait.chr,
float(this_trait.mb))
this_trait.location_value = trait_location_value
#Get mean expression value
query = (
"""select ProbeSetXRef.mean from ProbeSetXRef, ProbeSet
where ProbeSetXRef.ProbeSetFreezeId = %s and
ProbeSet.Id = ProbeSetXRef.ProbeSetId and
ProbeSet.Name = '%s'
""" % (escape(str(this_trait.dataset.id)),
escape(this_trait.name)))
#print("query is:", pf(query))
result = g.db.execute(query).fetchone()
mean = result[0] if result else 0
if mean:
this_trait.mean = "%2.3f" % mean
#LRS and its location
this_trait.LRS_score_repr = 'N/A'
this_trait.LRS_score_value = 0
this_trait.LRS_location_repr = 'N/A'
this_trait.LRS_location_value = 1000000
#Max LRS and its Locus location
if this_trait.lrs and this_trait.locus:
query = """
select Geno.Chr, Geno.Mb from Geno, Species
where Species.Name = '{}' and
Geno.Name = '{}' and
Geno.SpeciesId = Species.Id
""".format(species, this_trait.locus)
result = g.db.execute(query).fetchone()
if result:
lrs_chr, lrs_mb = result
#XZ: LRS_location_value is used for sorting
lrs_location_value = self.convert_location_to_value(lrs_chr, lrs_mb)
this_trait.LRS_score_repr = '%3.1f' % this_trait.lrs
this_trait.LRS_score_value = this_trait.lrs
this_trait.LRS_location_repr = 'Chr%s: %.6f' % (lrs_chr, float(lrs_mb))
def convert_location_to_value(self, chromosome, mb):
try:
location_value = int(chromosome)*1000 + float(mb)
except ValueError:
if chromosome.upper() == 'X':
location_value = 20*1000 + float(mb)
else:
location_value = (ord(str(chromosome).upper()[0])*1000 +
float(mb))
return location_value
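        # e.g. convert_location_to_value("2", "17.5") -> 2*1000 + 17.5 = 2017.5,
        # while chromosome "X" maps to 20*1000 + Mb so it sorts after the autosomes.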
def get_sequence(self):
query = """
SELECT
ProbeSet.BlatSeq
FROM
ProbeSet, ProbeSetFreeze, ProbeSetXRef
WHERE
ProbeSet.Id=ProbeSetXRef.ProbeSetId and
                        ProbeSetFreeze.Id = ProbeSetXRef.ProbeSetFreezeId and
                        ProbeSet.Name = '%s' and
                        ProbeSetFreeze.Name = '%s'
""" % (escape(self.name), escape(self.dataset.name))
results = g.db.execute(query).fetchone()
return results[0]
def retrieve_sample_data(self, trait):
query = """
SELECT
Strain.Name, ProbeSetData.value, ProbeSetSE.error, ProbeSetData.Id
FROM
(ProbeSetData, ProbeSetFreeze, Strain, ProbeSet, ProbeSetXRef)
left join ProbeSetSE on
(ProbeSetSE.DataId = ProbeSetData.Id AND ProbeSetSE.StrainId = ProbeSetData.StrainId)
WHERE
ProbeSet.Name = '%s' AND ProbeSetXRef.ProbeSetId = ProbeSet.Id AND
ProbeSetXRef.ProbeSetFreezeId = ProbeSetFreeze.Id AND
ProbeSetFreeze.Name = '%s' AND
ProbeSetXRef.DataId = ProbeSetData.Id AND
ProbeSetData.StrainId = Strain.Id
Order BY
Strain.Name
""" % (escape(trait), escape(self.name))
results = g.db.execute(query).fetchall()
#print("RETRIEVED RESULTS HERE:", results)
return results
def retrieve_genes(self, column_name):
query = """
select ProbeSet.Name, ProbeSet.%s
from ProbeSet,ProbeSetXRef
where ProbeSetXRef.ProbeSetFreezeId = %s and
ProbeSetXRef.ProbeSetId=ProbeSet.Id;
""" % (column_name, escape(str(self.id)))
results = g.db.execute(query).fetchall()
return dict(results)
class TempDataSet(DataSet):
'''Temporary user-generated data set'''
def setup(self):
self.search_fields = ['name',
'description']
self.display_fields = ['name',
'description']
self.header_fields = ['Name',
'Description']
self.type = 'Temp'
# Need to double check later how these are used
self.id = 1
self.fullname = 'Temporary Storage'
self.shortname = 'Temp'
@staticmethod
def handle_pca(desc):
if 'PCA' in desc:
# Todo: Modernize below lines
desc = desc[desc.rindex(':')+1:].strip()
else:
desc = desc[:desc.index('entered')].strip()
return desc
def get_desc(self):
        result = g.db.execute('SELECT description FROM Temp WHERE Name=%s', self.name)
        desc = result.fetchone()[0]
desc = self.handle_pca(desc)
return desc
def get_group(self):
self.cursor.execute("""
SELECT
InbredSet.Name, InbredSet.Id
FROM
InbredSet, Temp
WHERE
Temp.InbredSetId = InbredSet.Id AND
Temp.Name = "%s"
""", self.name)
self.group, self.group_id = self.cursor.fetchone()
#return self.group
def retrieve_sample_data(self, trait):
query = """
SELECT
Strain.Name, TempData.value, TempData.SE, TempData.NStrain, TempData.Id
FROM
TempData, Temp, Strain
WHERE
TempData.StrainId = Strain.Id AND
TempData.Id = Temp.DataId AND
Temp.name = '%s'
Order BY
Strain.Name
""" % escape(trait.name)
        results = g.db.execute(query).fetchall()
        return results
def geno_mrna_confidentiality(ob):
dataset_table = ob.type + "Freeze"
#print("dataset_table [%s]: %s" % (type(dataset_table), dataset_table))
query = '''SELECT Id, Name, FullName, confidentiality,
AuthorisedUsers FROM %s WHERE Name = %%s''' % (dataset_table)
result = g.db.execute(query, ob.name)
(dataset_id,
name,
full_name,
confidential,
authorized_users) = result.fetchall()[0]
if confidential:
return True
| genenetwork/genenetwork2_diet | wqflask/base/data_set.py | Python | agpl-3.0 | 45,353 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
class Category(models.Model):
_name = 'cl_todo.task.category'
name = fields.Char('Category', required=True, size=128, translate=True)
parent_id = fields.Many2one('cl_todo.task.category', 'Parent Category', select=True, ondelete='restrict')
code = fields.Char('Code', size=128, required=False)
description = fields.Char(string='Description', size=256)
notes = fields.Text(string='Notes')
complete_name = fields.Char(string='Full Category', compute='_name_get_fnc', store=False, readonly=True)
child_ids = fields.One2many('cl_todo.task.category', 'parent_id', 'Child Categories')
active = fields.Boolean('Active',
help="If unchecked, it will allow you to hide the category without removing it.",
default=1)
parent_left = fields.Integer('Left parent', select=True)
parent_right = fields.Integer('Right parent', select=True)
todo_ids = fields.Many2many(
'cl_todo.task',
'cl_todo_task_category_rel',
'category_id',
'todo_task_id',
'Todo Tasks'
)
_sql_constraints = [
('uniq_code', 'unique(code)', "Error! The Code must be unique!"),
]
_constraints = [(
models.Model._check_recursion,
'Error! You can not create recursive categories.',
['parent_id']
)]
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
@api.multi
def name_get(self):
"""Return the category's display name, including their direct parent by default.
:param dict context: the ``category_display`` key can be
used to select the short version of the
category (without the direct parent),
when set to ``'short'``. The default is
the long version."""
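        # Illustrative example (assumed data): a category "Work" whose parent
        # is "Projects" is rendered as "Projects / Work" by default, and just
        # as "Work" with context={'category_display': 'short'}.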
if self._context is None:
self._context = {}
if self._context.get('category_display') == 'short':
return super(Category, self).name_get()
if isinstance(self._ids, (int, long)):
self._ids = [self._ids]
reads = self.read(['name', 'parent_id'])
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1] + ' / ' + name
res.append((record['id'], name))
return res
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
if name:
name = name.split(' / ')[-1]
args = [('name', operator, name)] + args
categories = self.search(args, limit=limit)
return categories.name_get()
@api.one
def _name_get_fnc(self):
self.refresh_complete_name = 0
complete_name = self.name_get()
if complete_name:
self.complete_name = complete_name[0][1]
else:
self.complete_name = self.name
class TodoTask(models.Model):
_inherit = 'cl_todo.task'
category_ids = fields.Many2many(
'cl_todo.task.category',
'cl_todo_task_category_rel',
'todo_task_id',
'category_id',
'Categories'
)
| CLVsol/odoo_cl_addons | cl_todo/category/cl_todo_category.py | Python | agpl-3.0 | 4,706 |
# !/usr/bin/python
# -*- coding: cp1252 -*-
#
##################################################################################
#
# Copyright 2016-2017 Félix Brezo and Yaiza Rubio (i3visio, [email protected])
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
__author__ = "John Doe <[email protected]>"
__version__ = "1.0"
import argparse
import json
import re
import sys
import urllib2
import osrframework.utils.browser as browser
from osrframework.utils.platforms import Platform
class Bebee(Platform):
"""
A <Platform> object for Bebee.
"""
def __init__(self):
"""
Constructor...
"""
self.platformName = "Bebee"
self.tags = ["jobs"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "https://bebee.com/bee/" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
        # Regular expressions that a query must match to be considered valid
self.validQuery = {}
# The regular expression '.+' will match any query
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
# Strings that will imply that the query number is not appearing
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ['<link rel="canonical" href="https://.bebee.com/bees/search">']
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
self.fieldsRegExp["usufy"]["i3visio.fullname"] = {"start": '<title>', "end": '- beBee</title>'}
self.fieldsRegExp["usufy"]["i3visio.location"] = {"start": '<span itemprop="addressRegion">', "end": '</span>'}
self.fieldsRegExp["usufy"]["i3visio.alias.googleplus"] = {"start": '<div><a rel="nofollow" class="color_corp_three" href="https://plus.google.com/u/0/', "end": '"'}
self.fieldsRegExp["usufy"]["i3visio.alias.linkedin"] = {"start": '<div><a rel="nofollow" class="color_corp_three" href="http://br.linkedin.com/in/', "end": '"'}
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
        # This attribute will be filled in when the program runs.
self.foundFields = {}
| i3visio/osrframework | osrframework/wrappers/pending/bebee.py | Python | agpl-3.0 | 4,625 |
# Copyright 2017 ForgeFlow S.L.
# (http://www.forgeflow.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from datetime import datetime, timedelta
from odoo.exceptions import AccessError, ValidationError
from odoo.tests import common
class TestStockCycleCount(common.TransactionCase):
def setUp(self):
super(TestStockCycleCount, self).setUp()
self.res_users_model = self.env["res.users"]
self.cycle_count_model = self.env["stock.cycle.count"]
self.stock_cycle_count_rule_model = self.env["stock.cycle.count.rule"]
self.inventory_model = self.env["stock.inventory"]
self.stock_location_model = self.env["stock.location"]
self.stock_move_model = self.env["stock.move"]
self.stock_warehouse_model = self.env["stock.warehouse"]
self.product_model = self.env["product.product"]
self.quant_model = self.env["stock.quant"]
self.move_model = self.env["stock.move"]
self.company = self.env.ref("base.main_company")
self.partner = self.env.ref("base.res_partner_1")
self.g_stock_manager = self.env.ref("stock.group_stock_manager")
self.g_stock_user = self.env.ref("stock.group_stock_user")
# Create users:
self.manager = self._create_user(
"user_1", [self.g_stock_manager], self.company
).id
self.user = self._create_user("user_2", [self.g_stock_user], self.company).id
# Create warehouses:
self.big_wh = self.stock_warehouse_model.create(
{"name": "BIG", "code": "B", "cycle_count_planning_horizon": 30}
)
self.small_wh = self.stock_warehouse_model.create(
{"name": "SMALL", "code": "S"}
)
# Create rules:
self.rule_periodic = self._create_stock_cycle_count_rule_periodic(
self.manager, "rule_1", [2, 7]
)
self.rule_turnover = self._create_stock_cycle_count_rule_turnover(
self.manager, "rule_2", [100]
)
self.rule_accuracy = self._create_stock_cycle_count_rule_accuracy(
self.manager, "rule_3", [5], self.big_wh.view_location_id.ids
)
self.zero_rule = self._create_stock_cycle_count_rule_zero(
self.manager, "rule_4"
)
# Configure warehouses:
self.rule_ids = [
self.rule_periodic.id,
self.rule_turnover.id,
self.rule_accuracy.id,
self.zero_rule.id,
]
self.big_wh.write({"cycle_count_rule_ids": [(6, 0, self.rule_ids)]})
# Create a location:
self.count_loc = self.stock_location_model.create(
{"name": "Place", "usage": "production"}
)
self.stock_location_model._parent_store_compute()
# Create a cycle count:
self.cycle_count_1 = self.cycle_count_model.with_user(self.manager).create(
{
"name": "Test cycle count",
"cycle_count_rule_id": self.rule_periodic.id,
"location_id": self.count_loc.id,
}
)
# Create a product:
self.product1 = self.product_model.create(
{"name": "Test Product 1", "type": "product", "default_code": "PROD1"}
)
def _create_user(self, login, groups, company):
group_ids = [group.id for group in groups]
user = self.res_users_model.create(
{
"name": login,
"login": login,
"email": "[email protected]",
"company_id": company.id,
"company_ids": [(4, company.id)],
"groups_id": [(6, 0, group_ids)],
}
)
return user
def _create_stock_cycle_count_rule_periodic(self, uid, name, values):
rule = self.stock_cycle_count_rule_model.with_user(uid).create(
{
"name": name,
"rule_type": "periodic",
"periodic_qty_per_period": values[0],
"periodic_count_period": values[1],
}
)
return rule
def _create_stock_cycle_count_rule_turnover(self, uid, name, values):
rule = self.stock_cycle_count_rule_model.with_user(uid).create(
{
"name": name,
"rule_type": "turnover",
"turnover_inventory_value_threshold": values[0],
}
)
return rule
def _create_stock_cycle_count_rule_accuracy(self, uid, name, values, zone_ids):
rule = self.stock_cycle_count_rule_model.with_user(uid).create(
{
"name": name,
"rule_type": "accuracy",
"accuracy_threshold": values[0],
"apply_in": "location",
"location_ids": [(6, 0, zone_ids)],
}
)
return rule
def _create_stock_cycle_count_rule_zero(self, uid, name):
rule = self.stock_cycle_count_rule_model.with_user(uid).create(
{"name": name, "rule_type": "zero"}
)
return rule
def test_cycle_count_planner(self):
"""Tests creation of cycle counts."""
# Common rules:
wh = self.big_wh
locs = self.stock_location_model
for rule in self.big_wh.cycle_count_rule_ids:
locs += wh._search_cycle_count_locations(rule)
locs = locs.exists() # remove duplicated locations.
counts = self.cycle_count_model.search([("location_id", "in", locs.ids)])
self.assertFalse(counts, "Existing cycle counts before execute planner.")
date_pre_existing_cc = datetime.today() + timedelta(days=30)
loc = locs.filtered(lambda l: l.usage != "view")[0]
pre_existing_count = self.cycle_count_model.create(
{
"name": "To be cancelled when running cron job.",
"cycle_count_rule_id": self.rule_periodic.id,
"location_id": loc.id,
"date_deadline": date_pre_existing_cc,
}
)
self.assertEqual(
pre_existing_count.state, "draft", "Testing data not generated properly."
)
date = datetime.today() - timedelta(days=1)
self.inventory_model.create(
{
"name": "Pre-existing inventory",
"location_ids": [(4, loc.id)],
"date": date,
}
)
self.quant_model.create(
{
"product_id": self.product1.id,
"location_id": self.count_loc.id,
"quantity": 1.0,
}
)
move1 = self.stock_move_model.create(
{
"name": "Pre-existing move",
"product_id": self.product1.id,
"product_uom_qty": 1.0,
"product_uom": self.product1.uom_id.id,
"location_id": self.count_loc.id,
"location_dest_id": loc.id,
}
)
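        # Confirm, reserve and validate the move so the location has recorded
        # stock activity for the planner to evaluate.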
move1._action_confirm()
move1._action_assign()
move1.move_line_ids[0].qty_done = 1.0
move1._action_done()
wh.cron_cycle_count()
self.assertNotEqual(
pre_existing_count.date_deadline,
date_pre_existing_cc,
"Date of pre-existing cycle counts has not been " "updated.",
)
counts = self.cycle_count_model.search([("location_id", "in", locs.ids)])
self.assertTrue(counts, "Cycle counts not planned")
# Zero-confirmations:
count = self.cycle_count_model.search(
[
("location_id", "=", loc.id),
("cycle_count_rule_id", "=", self.zero_rule.id),
]
)
self.assertFalse(count, "Unexpected zero confirmation.")
move2 = self.move_model.create(
{
"name": "make the locations to run out of stock.",
"product_id": self.product1.id,
"product_uom_qty": 1.0,
"product_uom": self.product1.uom_id.id,
"location_id": loc.id,
"location_dest_id": self.count_loc.id,
}
)
move2._action_confirm()
move2._action_assign()
move2.move_line_ids[0].qty_done = 1.0
move2._action_done()
count = self.cycle_count_model.search(
[
("location_id", "=", loc.id),
("cycle_count_rule_id", "=", self.zero_rule.id),
]
)
self.assertTrue(count, "Zero confirmation not being created.")
def test_cycle_count_workflow(self):
"""Tests workflow."""
self.cycle_count_1.action_create_inventory_adjustment()
inventory = self.inventory_model.search(
[("cycle_count_id", "=", self.cycle_count_1.id)]
)
self.assertTrue(inventory, "Inventory not created.")
inventory.action_start()
inventory.action_validate()
self.assertEqual(
self.cycle_count_1.state, "done", "Cycle count not set as done."
)
self.cycle_count_1.do_cancel()
self.assertEqual(
self.cycle_count_1.state, "cancelled", "Cycle count not set as cancelled."
)
def test_view_methods(self):
"""Tests the methods used to handle views."""
self.cycle_count_1.action_create_inventory_adjustment()
self.cycle_count_1.sudo().action_view_inventory()
inv_count = self.cycle_count_1.inventory_adj_count
self.assertEqual(inv_count, 1, "View method failing.")
rules = [
self.rule_periodic,
self.rule_turnover,
self.rule_accuracy,
self.zero_rule,
]
for r in rules:
r._compute_rule_description()
self.assertTrue(r.rule_description, "No description provided")
self.rule_accuracy._onchange_locaton_ids()
self.assertEqual(
self.rule_accuracy.warehouse_ids.ids,
self.big_wh.ids,
"Rules defined for zones are not getting the right " "warehouse.",
)
def test_user_security(self):
"""Tests user rights."""
with self.assertRaises(AccessError):
self._create_stock_cycle_count_rule_periodic(self.user, "rule_1b", [2, 7])
with self.assertRaises(AccessError):
self.cycle_count_1.with_user(self.user).unlink()
def test_rule_periodic_constrains(self):
"""Tests the constrains for the periodic rules."""
        # constraint violated: periodic_qty_per_period < 1
with self.assertRaises(ValidationError):
self._create_stock_cycle_count_rule_periodic(self.manager, "rule_0", [0, 0])
        # constraint violated: periodic_count_period < 0
with self.assertRaises(ValidationError):
self._create_stock_cycle_count_rule_periodic(
self.manager, "rule_0", [1, -1]
)
def test_rule_zero_constrains(self):
"""Tests the constrains for the zero-confirmation rule: it might
only exist one zero confirmation rule per warehouse and have just
one warehouse assigned.
"""
zero2 = self._create_stock_cycle_count_rule_zero(self.manager, "zero_rule_2")
with self.assertRaises(ValidationError):
zero2.warehouse_ids = [(4, self.big_wh.id)]
with self.assertRaises(ValidationError):
self.zero_rule.warehouse_ids = [(4, self.small_wh.id)]
def test_auto_link_inventory_to_cycle_count_1(self):
"""Create an inventory that could fit a planned cycle count should
auto-link it to that cycle count."""
self.assertEqual(self.cycle_count_1.state, "draft")
inventory = self.inventory_model.create(
{
"name": "new inventory",
"location_ids": [(4, self.count_loc.id)],
"exclude_sublocation": True,
}
)
self.assertEqual(inventory.cycle_count_id, self.cycle_count_1)
self.assertEqual(self.cycle_count_1.state, "open")
def test_auto_link_inventory_to_cycle_count_2(self):
"""Test auto-link when exclude sublocation is no set."""
self.assertEqual(self.cycle_count_1.state, "draft")
inventory = self.inventory_model.create(
{"name": "new inventory", "location_ids": [(4, self.count_loc.id)]}
)
self.assertEqual(inventory.cycle_count_id, self.cycle_count_1)
self.assertEqual(self.cycle_count_1.state, "open")
| OCA/stock-logistics-warehouse | stock_cycle_count/tests/test_stock_cycle_count.py | Python | agpl-3.0 | 12,526 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Michael Sandoz <[email protected]>, Emanuel Cino
#
# The licence is in the file __openerp__.py
#
##############################################################################
import logging
from datetime import datetime
from datetime import timedelta
from openerp import http
from openerp.http import request
from openerp.addons.connector.queue.job import job
from openerp.addons.connector.session import ConnectorSession
from werkzeug.wrappers import Response
from werkzeug.datastructures import Headers
_logger = logging.getLogger(__name__)
class RestController(http.Controller):
@http.route('/web_children_hold', type='http', auth='public', methods=[
'GET'])
def handler_web_children_hold(self):
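        """Pick 5 children from the global childpool, hold them in a
        background job and return their basic info as an HTML snippet."""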
headers = request.httprequest.headers
self._validate_headers(headers)
        # load children via a search on the childpool
child_research = request.env['compassion.childpool.search'].sudo()
research = child_research.create({'take': 5})
research.rich_mix()
# create a hold for all children found
session = ConnectorSession.from_env(request.env)
hold_children_job.delay(session, research.id)
data = ""
# return principal children info
for child in research.global_child_ids:
if child.image_url:
data += '<img src="' + child.image_url + '"/> <br>'
data += child.name + ' ' + child.birthdate + '<br>'
headers = Headers()
response = Response(data, content_type='text/html', headers=headers)
return response
def _validate_headers(self, headers):
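        """Hook for request header validation; currently a no-op."""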
pass
##############################################################################
# CONNECTOR METHODS #
##############################################################################
@job(default_channel='root.global_pool')
def hold_children_job(session, research_id):
"""Job for holding requested children on the web."""
child_hold = session.env['child.hold.wizard'].with_context(
active_id=research_id).sudo()
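    # E-commerce holds are only kept for 15 minutes.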
expiration_date = datetime.now() + timedelta(minutes=15)
user_id = session.env['res.users'].\
search([('name', '=', 'Reber Rose-Marie')]).id
holds = child_hold.create({
'type': 'E-Commerce Hold',
'hold_expiration_date': expiration_date.strftime(
"%Y-%m-%dT%H:%M:%SZ"),
'primary_owner': user_id,
'secondary_owner': 'Carole Rochat',
'no_money_yield_rate': '1.1',
'yield_rate': '1.1',
'channel': 'Website',
})
holds.send()
| emgirardin/compassion-modules | child_compassion/controllers/web_children_hold.py | Python | agpl-3.0 | 2,889 |
# Copyright (C) 2015 Forest and Biomass Romania
# Copyright (C) 2020 Terrabit
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
from odoo.osv import expression
class ResCountryZone(models.Model):
_name = "res.country.zone"
_description = "Country Zones"
@api.model
def _name_search(
self, name, args=None, operator="ilike", limit=100, name_get_uid=None
):
args = args or []
if operator == "ilike" and not (name or "").strip():
domain = []
else:
domain = [
"|",
("name", operator, name),
("country_id.code", operator, name),
]
return self._search(
expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid
)
name = fields.Char("Name", required=True, index=True)
country_id = fields.Many2one("res.country", string="Country")
state_ids = fields.One2many("res.country.state", "zone_id", string="State")
siruta = fields.Char("Siruta")
class ResCountryState(models.Model):
_inherit = "res.country.state"
@api.model
def _name_search(
self, name, args=None, operator="ilike", limit=100, name_get_uid=None
):
args = args or []
if operator == "ilike" and not (name or "").strip():
domain = []
else:
domain = ["|", ("name", operator, name), ("zone_id.name", operator, name)]
return self._search(
expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid
)
@api.onchange("zone_id")
def _onchange_zone_id(self):
if self.zone_id:
self.country_id = self.zone_id.country_id.id
zone_id = fields.Many2one("res.country.zone", string="Zone")
commune_ids = fields.One2many(
"res.country.commune", "state_id", string="Cities/Communes"
)
city_ids = fields.One2many("res.city", "state_id", string="Cities")
siruta = fields.Char("Siruta")
class ResCountryCommune(models.Model):
_name = "res.country.commune"
_description = "Country Cities/Communes"
@api.model
def _name_search(
self, name, args=None, operator="ilike", limit=100, name_get_uid=None
):
args = args or []
if operator == "ilike" and not (name or "").strip():
domain = []
else:
domain = ["|", ("name", operator, name), ("state_id.code", operator, name)]
return self._search(
expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid
)
@api.onchange("state_id")
def _onchange_state_id(self):
if self.state_id:
self.zone_id = self.state_id.zone_id.id
self.country_id = self.state_id.country_id.id
@api.onchange("zone_id")
def _onchange_zone_id(self):
if self.zone_id:
self.state_id = False
self.country_id = self.zone_id.country_id.id
name = fields.Char("Name", required=True, index=True)
state_id = fields.Many2one("res.country.state", string="State")
zone_id = fields.Many2one("res.country.zone", string="Zone")
country_id = fields.Many2one("res.country", string="Country")
siruta = fields.Char("Siruta")
city_ids = fields.One2many("res.city", "state_id", string="Cities")
class ResCity(models.Model):
_inherit = "res.city"
@api.model
def _name_search(
self, name, args=None, operator="ilike", limit=100, name_get_uid=None
):
args = args or []
if operator == "ilike" and not (name or "").strip():
domain = []
else:
domain = [
"|",
("name", operator, name),
("commune_id.name", operator, name),
]
return self._search(
expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid
)
@api.onchange("commune_id")
def _onchange_commune_id(self):
if self.commune_id:
self.state_id = self.commune_id.state_id.id
self.zone_id = self.commune_id.zone_id.id
self.country_id = self.commune_id.country_id.id
@api.onchange("state_id")
def _onchange_state_id(self):
if self.state_id:
self.commune_id = False
self.zone_id = self.state_id.zone_id.id
self.country_id = self.state_id.country_id.id
@api.onchange("zone_id")
def _onchange_zone_id(self):
if self.zone_id:
self.commune_id = False
self.state_id = False
self.country_id = self.zone_id.country_id.id
commune_id = fields.Many2one("res.country.commune", string="City/Commune")
zone_id = fields.Many2one("res.country.zone", string="Zone")
municipality = fields.Char(related="commune_id.name")
| OCA/l10n-romania | l10n_ro_siruta/models/siruta.py | Python | agpl-3.0 | 4,887 |
# lachambre.be to json sausage machine
# Copyright (C) 2011 Laurent Peuch <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from lachambre.utils import dump_db
class Command(BaseCommand):
def handle(self, *args, **options):
dump_db()
| Psycojoker/dierentheater | scraper/management/commands/create_db_dumps.py | Python | agpl-3.0 | 932 |
#!/usr/bin/python
import sys
import re
import types
import random
import lxml
from lxml import etree
from copy import deepcopy
def usage():
print "Usage: %s countryfile regionname valuefile clubfile1 [clubfile2...]" % sys.argv[0]
print " countryfile: XML file of the country"
print " regionname: stadium region"
print " valuefile: values of clubs"
print " clubfile[s]: files with the data of the clubs"
print
print "Countryfile will be changed. Club output will be to standard output."
def main():
try:
countryfilename = sys.argv[1]
regionname = sys.argv[2]
valuefilename = sys.argv[3]
clubfilenames = sys.argv[4:]
except IndexError:
usage()
sys.exit(1)
try:
countryfile = open(countryfilename, 'r')
countryroot = etree.parse(countryfile)
countryfile.close()
except IOError:
print "could not open", countryfilename
sys.exit(1)
regions = countryroot.findall(".//region")
countrynode = countryroot.find(".//country")
countryname = countrynode.get("name")
ourregion = None
for element in regions:
if element.get("name") == regionname:
ourregion = element
break
if type(ourregion) == types.NoneType:
# Region not found; creating one
ourregion = etree.Element("region", name=regionname)
regions.append(ourregion)
stadiums = []
stadiums = ourregion.findall(".//stadium")
countrychanged = False
dooutput = True
valuetable = {}
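    # The value file holds tab-separated lines of "<club name>\t<value>".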
try:
valuefile = open(valuefilename, 'r')
except IOError:
print "could not open %s" % valuefilename
sys.exit(1)
for line in valuefile.readlines():
info = line.split('\t', 1)
valuetable[info[0].strip()] = int(info[1].strip())
valuefile.close()
clubsroot = etree.Element("Clubs")
for element in clubfilenames:
try:
clubfile = open(element, 'r')
clublines = clubfile.read()
clubfile.close()
except IOError:
print "could not open %s" % clubfilenames[0]
sys.exit(1)
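        # The club name is the first matching text line; the stadium name is
        # taken from the "Ground (ground history)" line, falling back to
        # "<club name> Stadium" when it is missing.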
clubname = re.compile(r'^ *([a-zA-Z 0-9-\'&]*)$', re.M).search(clublines, 1)
stadiumname = re.compile(r'^ *Ground \(ground history\) *([a-zA-Z 0-9-\'&]*?) *$', re.M).search(clublines, 1)
if type(stadiumname) != types.NoneType:
stadname = stadiumname.groups()[0]
else:
stadname = clubname.groups()[0] + " Stadium"
stadiumnode = etree.Element("stadium", name=stadname)
try:
thisvalue = valuetable[clubname.groups()[0]]
except:
if dooutput == True:
print "Could not find team %s in the values file" % clubname.groups()[0]
print "File that was being processed: %s" % element
print "No changes will be made."
dooutput = False
else:
print "%s - %s" % (clubname.groups()[0], element)
stadfound = False
for element in stadiums:
if element.get("name") == stadname:
stadfound = True
break
if stadfound == False:
countrystadiumnode = deepcopy(stadiumnode)
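            # Capacity grows with club value (value ** 2.1 / 25), rounded
            # down to the nearest hundred.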
stadiumcapacity = int(thisvalue**(2.1)/25)/100*100
capnode = etree.Element("capacity", value="%d" % stadiumcapacity)
countrystadiumnode.append(capnode)
ourregion.append(countrystadiumnode)
stadiums.append(countrystadiumnode)
countrychanged = True
clubnode = etree.Element("club", name=clubname.groups()[0])
kit1node = etree.Element("kit")
jerseynode = etree.Element("jersey")
jerseynode.set("type", "0")
shortsnode = etree.Element("shorts")
socksnode = etree.Element("socks")
colornode = etree.Element("color")
colornode.set("r", "255")
colornode.set("g", "255")
colornode.set("b", "255")
imagenode = etree.Element("image", value="")
jerseynode.append(deepcopy(colornode))
jerseynode.append(imagenode)
shortsnode.append(deepcopy(colornode))
socksnode.append(colornode)
kit1node.append(jerseynode)
kit1node.append(shortsnode)
kit1node.append(socksnode)
kitsnode = etree.Element("kits")
kitsnode.append(deepcopy(kit1node))
kitsnode.append(deepcopy(kit1node))
clubnode.append(kitsnode)
clcountrynode = etree.Element("country", name=countryname)
clregionnode = etree.Element("region", name=regionname)
clubnode.append(clcountrynode)
clubnode.append(clregionnode)
clubnode.append(stadiumnode)
clubsroot.append(clubnode)
if dooutput == True:
print (etree.tostring(clubsroot, pretty_print=True, encoding="UTF-8"))
if countrychanged:
parser = etree.XMLParser(remove_blank_text=True)
countrynew = etree.fromstring(etree.tostring(countryroot), parser)
countrystring = etree.tostring(countrynew, pretty_print=True, encoding="UTF-8")
countryfile = open(countryfilename, 'w')
countryfile.write(countrystring)
countryfile.close()
if __name__ == '__main__':
main()
| anttisalonen/freekick | src/tools/Python/stadium/stadium.py | Python | agpl-3.0 | 5,396 |
# -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from datetime import date
from akvo.rsr.tests.base import BaseTestCase
from akvo.rsr.tests.utils import ProjectFixtureBuilder
from akvo.rsr.usecases import change_project_parent as command
class ChangeProjectParentTestCase(BaseTestCase):
def test_change_parent_to_sibling(self):
# Given
root = ProjectFixtureBuilder()\
.with_title('Parent project')\
.with_disaggregations({'Foo': ['Bar']})\
.with_results([{
'title': 'Result #1',
'indicators': [{
'title': 'Indicator #1',
'periods': [{
'period_start': date(2020, 1, 1),
'period_end': date(2020, 12, 31),
}]
}]
}])\
.with_contributors([
{'title': 'Child project'},
{'title': 'New project'}
])\
.build()
child_project = root.get_contributor(title='Child project')
new_project = root.get_contributor(title='New project')
# When
command.change_parent(new_project.object, child_project.object)
# Then
self.assertIsNone(new_project.object.parents_all().filter(id=root.object.id).first())
self.assertIsNotNone(new_project.object.parents_all().filter(id=child_project.object.id).first())
self.assertEqual(
new_project.results.get(title='Result #1').parent_result,
child_project.results.get(title='Result #1')
)
self.assertEqual(
new_project.indicators.get(title='Indicator #1').parent_indicator,
child_project.indicators.get(title='Indicator #1')
)
self.assertEqual(
new_project.periods.get(period_start=date(2020, 1, 1)).parent_period,
child_project.periods.get(period_start=date(2020, 1, 1))
)
self.assertEqual(
new_project.object.dimension_names.get(name='Foo').parent_dimension_name,
child_project.object.dimension_names.get(name='Foo')
)
self.assertEqual(
new_project.get_disaggregation('Foo', 'Bar').parent_dimension_value,
child_project.get_disaggregation('Foo', 'Bar')
)
def test_change_parent_to_parent_sibling(self):
# Given
root = ProjectFixtureBuilder()\
.with_title('Parent project')\
.with_disaggregations({'Foo': ['Bar']})\
.with_results([{
'title': 'Result #1',
'indicators': [{
'title': 'Indicator #1',
'periods': [{
'period_start': date(2020, 1, 1),
'period_end': date(2020, 12, 31),
}]
}]
}])\
.with_contributors([
{'title': 'Child project', 'contributors': [{'title': 'Grand child project'}]},
{'title': 'New project'}
])\
.build()
child_project2 = root.get_contributor(title='New project')
grand_child = root.get_contributor(title='Grand child project')
# When
command.change_parent(grand_child.object, child_project2.object)
# Then
parents = grand_child.object.parents_all()
self.assertEqual(1, len(parents))
self.assertEqual(child_project2.object.id, parents.first().id)
self.assertEqual(
grand_child.results.get(title='Result #1').parent_result,
child_project2.results.get(title='Result #1')
)
self.assertEqual(
grand_child.indicators.get(title='Indicator #1').parent_indicator,
child_project2.indicators.get(title='Indicator #1')
)
self.assertEqual(
grand_child.periods.get(period_start=date(2020, 1, 1)).parent_period,
child_project2.periods.get(period_start=date(2020, 1, 1))
)
self.assertEqual(
grand_child.object.dimension_names.get(name='Foo').parent_dimension_name,
child_project2.object.dimension_names.get(name='Foo')
)
self.assertEqual(
grand_child.get_disaggregation('Foo', 'Bar').parent_dimension_value,
child_project2.get_disaggregation('Foo', 'Bar')
)
| akvo/akvo-rsr | akvo/rsr/tests/usecases/test_change_project_parent.py | Python | agpl-3.0 | 4,628 |
from __future__ import unicode_literals
import pytest
from tests.factories import UniprotFactory
@pytest.fixture
def uniprot_egfr_human():
return UniprotFactory(
uniprot_acc='P00533',
uniprot_id='EGFR_HUMAN',
description='Epidermal growth factor receptor EC=2.7.10.1'
)
| ecolell/pfamserver | tests/fixtures/uniprot.py | Python | agpl-3.0 | 304 |
# -*- coding: utf-8 -*-
# © 2016 Elico Corp (www.elico-corp.com).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Membership Management - POS Membership',
'version': '8.0.1.0.2',
'category': 'Generic Modules',
'depends': [
'account_membership_balance',
'point_of_sale',
'account_accountant',
'account_voucher',
],
'author': 'Elico Corp',
'license': 'AGPL-3',
'website': 'https://www.elico-corp.com',
'data': [
'views/pos_membership.xml',
'views/partner_view.xml'
],
'qweb': [
'static/src/xml/pos.xml'
],
'installable': True,
'application': False,
}
| Elico-Corp/odoo-addons | pos_membership/__openerp__.py | Python | agpl-3.0 | 694 |
"""
Unit tests for the Mixed Modulestore, with DDT for the various stores (Split, Draft, XML)
"""
import datetime
import itertools
import logging
import mimetypes
from collections import namedtuple
from contextlib import contextmanager
from shutil import rmtree
from tempfile import mkdtemp
from uuid import uuid4
import ddt
import pymongo
import pytest
import six
# Mixed modulestore depends on django, so we'll manually configure some django settings
# before importing the module
# TODO remove this import and the configuration -- xmodule should not depend on django!
from django.conf import settings
from mock import Mock, call, patch
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator, LibraryLocator
from pytz import UTC
from six.moves import range
from web_fragments.fragment import Fragment
from xblock.core import XBlockAside
from xblock.fields import Scope, ScopeIds, String
from xblock.runtime import DictKeyValueStore, KvsFieldData
from xblock.test.tools import TestRuntime
from openedx.core.lib.tests import attr
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.draft_and_published import DIRECT_ONLY_CATEGORIES, UnsupportedRevisionError
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.modulestore.exceptions import (
DuplicateCourseError,
ItemNotFoundError,
NoPathToItem,
ReferentialIntegrityError
)
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.modulestore.search import navigation_index, path_to_location
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from xmodule.modulestore.tests.factories import check_exact_number_of_calls, check_mongo_calls, mongo_uses_error_check
from xmodule.modulestore.tests.mongo_connection import MONGO_HOST, MONGO_PORT_NUM
from xmodule.modulestore.tests.test_asides import AsideTestType
from xmodule.modulestore.tests.utils import (
LocationMixin,
MongoContentstoreBuilder,
create_modulestore_instance,
mock_tab_from_json
)
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.tests import DATA_DIR, CourseComparisonTest
from xmodule.x_module import XModuleMixin
if not settings.configured:
settings.configure()
log = logging.getLogger(__name__)
class CommonMixedModuleStoreSetup(CourseComparisonTest):
"""
Quasi-superclass which tests Location based apps against both split and mongo dbs (Locator and
Location-based dbs)
"""
HOST = MONGO_HOST
PORT = MONGO_PORT_NUM
DB = 'test_mongo_%s' % uuid4().hex[:5]
COLLECTION = 'modulestore'
ASSET_COLLECTION = 'assetstore'
FS_ROOT = DATA_DIR
DEFAULT_CLASS = 'xmodule.raw_module.RawDescriptor'
RENDER_TEMPLATE = lambda t_n, d, ctx=None, nsp='main': ''
MONGO_COURSEID = 'MITx/999/2013_Spring'
modulestore_options = {
'default_class': DEFAULT_CLASS,
'fs_root': DATA_DIR,
'render_template': RENDER_TEMPLATE,
'xblock_mixins': (EditInfoMixin, InheritanceMixin, LocationMixin, XModuleMixin),
}
DOC_STORE_CONFIG = {
'host': HOST,
'port': PORT,
'db': DB,
'collection': COLLECTION,
'asset_collection': ASSET_COLLECTION,
}
OPTIONS = {
'stores': [
{
'NAME': ModuleStoreEnum.Type.mongo,
'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
{
'NAME': ModuleStoreEnum.Type.split,
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
],
'xblock_mixins': modulestore_options['xblock_mixins'],
}
def _compare_ignore_version(self, loc1, loc2, msg=None):
"""
AssertEqual replacement for CourseLocator
"""
if loc1.for_branch(None) != loc2.for_branch(None):
self.fail(self._formatMessage(msg, u"{} != {}".format(six.text_type(loc1), six.text_type(loc2))))
def setUp(self):
"""
Set up the database for testing
"""
super(CommonMixedModuleStoreSetup, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.exclude_field(None, 'wiki_slug')
self.exclude_field(None, 'xml_attributes')
self.exclude_field(None, 'parent')
self.ignore_asset_key('_id')
self.ignore_asset_key('uploadDate')
self.ignore_asset_key('content_son')
self.ignore_asset_key('thumbnail_location')
self.options = getattr(self, 'options', self.OPTIONS)
self.connection = pymongo.MongoClient(
host=self.HOST,
port=self.PORT,
tz_aware=True,
)
self.connection.drop_database(self.DB)
self.addCleanup(self.connection.drop_database, self.DB)
self.addCleanup(self.connection.close)
self.addTypeEqualityFunc(BlockUsageLocator, '_compare_ignore_version')
self.addTypeEqualityFunc(CourseLocator, '_compare_ignore_version')
# define attrs which get set in initdb to quell pylint
self.writable_chapter_location = self.store = self.fake_location = None
self.course_locations = {}
self.user_id = ModuleStoreEnum.UserID.test
def _create_course(self, course_key, asides=None):
"""
Create a course w/ one item in the persistence store using the given course & item location.
"""
# create course
with self.store.bulk_operations(course_key):
self.course = self.store.create_course(course_key.org, course_key.course, course_key.run, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
if isinstance(self.course.id, CourseLocator):
self.course_locations[self.MONGO_COURSEID] = self.course.location
else:
assert self.course.id == course_key
# create chapter
chapter = self.store.create_child(self.user_id, self.course.location, 'chapter',
block_id='Overview', asides=asides)
self.writable_chapter_location = chapter.location
def _create_block_hierarchy(self):
"""
Creates a hierarchy of blocks for testing
Each block's (version_agnostic) location is assigned as a field of the class and can be easily accessed
"""
BlockInfo = namedtuple('BlockInfo', 'field_name, category, display_name, sub_tree')
trees = [
BlockInfo(
'chapter_x', 'chapter', 'Chapter_x', [
BlockInfo(
'sequential_x1', 'sequential', 'Sequential_x1', [
BlockInfo(
'vertical_x1a', 'vertical', 'Vertical_x1a', [
BlockInfo('problem_x1a_1', 'problem', 'Problem_x1a_1', []),
BlockInfo('problem_x1a_2', 'problem', 'Problem_x1a_2', []),
BlockInfo('problem_x1a_3', 'problem', 'Problem_x1a_3', []),
BlockInfo('html_x1a_1', 'html', 'HTML_x1a_1', []),
]
),
BlockInfo(
'vertical_x1b', 'vertical', 'Vertical_x1b', []
)
]
),
BlockInfo(
'sequential_x2', 'sequential', 'Sequential_x2', []
)
]
),
BlockInfo(
'chapter_y', 'chapter', 'Chapter_y', [
BlockInfo(
'sequential_y1', 'sequential', 'Sequential_y1', [
BlockInfo(
'vertical_y1a', 'vertical', 'Vertical_y1a', [
BlockInfo('problem_y1a_1', 'problem', 'Problem_y1a_1', []),
BlockInfo('problem_y1a_2', 'problem', 'Problem_y1a_2', []),
BlockInfo('problem_y1a_3', 'problem', 'Problem_y1a_3', []),
]
)
]
)
]
)
]
def create_sub_tree(parent, block_info):
"""
recursive function that creates the given block and its descendants
"""
block = self.store.create_child(
self.user_id, parent.location,
block_info.category, block_id=block_info.display_name,
fields={'display_name': block_info.display_name},
)
for tree in block_info.sub_tree:
create_sub_tree(block, tree)
setattr(self, block_info.field_name, block.location)
with self.store.bulk_operations(self.course.id):
for tree in trees:
create_sub_tree(self.course, tree)
def _course_key_from_string(self, string):
"""
Get the course key for the given course string
"""
return self.course_locations[string].course_key
def _has_changes(self, location):
"""
Helper function that loads the item before calling has_changes
"""
return self.store.has_changes(self.store.get_item(location))
def _initialize_mixed(self, mappings=None, contentstore=None):
"""
initializes the mixed modulestore.
"""
mappings = mappings or {}
self.store = MixedModuleStore(
contentstore, create_modulestore_instance=create_modulestore_instance,
mappings=mappings,
**self.options
)
self.addCleanup(self.store.close_all_connections)
def initdb(self, default):
"""
Initialize the database and create one test course in it
"""
# set the default modulestore
store_configs = self.options['stores']
for index in range(len(store_configs)): # lint-amnesty, pylint: disable=consider-using-enumerate
if store_configs[index]['NAME'] == default:
if index > 0:
store_configs[index], store_configs[0] = store_configs[0], store_configs[index]
break
self._initialize_mixed()
test_course_key = CourseLocator.from_string(self.MONGO_COURSEID)
test_course_key = test_course_key.make_usage_key('course', test_course_key.run).course_key
self.fake_location = self.store.make_course_key(
test_course_key.org,
test_course_key.course,
test_course_key.run
).make_usage_key('vertical', 'fake')
self._create_course(test_course_key)
assert default == self.store.get_modulestore_type(self.course.id)
class AsideFoo(XBlockAside):
"""
Test xblock aside class
"""
FRAG_CONTENT = u"<p>Aside Foo rendered</p>"
field11 = String(default="aside1_default_value1", scope=Scope.content)
field12 = String(default="aside1_default_value2", scope=Scope.settings)
@XBlockAside.aside_for('student_view')
def student_view_aside(self, block, context): # pylint: disable=unused-argument
"""Add to the student view"""
return Fragment(self.FRAG_CONTENT)
class AsideBar(XBlockAside):
"""
Test xblock aside class
"""
FRAG_CONTENT = u"<p>Aside Bar rendered</p>"
field21 = String(default="aside2_default_value1", scope=Scope.content)
field22 = String(default="aside2_default_value2", scope=Scope.settings)
@XBlockAside.aside_for('student_view')
def student_view_aside(self, block, context): # pylint: disable=unused-argument
"""Add to the student view"""
return Fragment(self.FRAG_CONTENT)
@ddt.ddt
@attr('mongo')
class TestMixedModuleStore(CommonMixedModuleStoreSetup):
"""
Tests of the MixedModulestore interface methods.
"""
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_modulestore_type(self, default_ms):
"""
Make sure we get back the store type we expect for given mappings
"""
self.initdb(default_ms)
assert self.store.get_modulestore_type(self._course_key_from_string(self.MONGO_COURSEID)) == default_ms
# try an unknown mapping, it should be the 'default' store
assert self.store.get_modulestore_type(CourseKey.from_string('foo/bar/2012_Fall')) == default_ms
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_modulestore_cache(self, default_ms):
"""
Make sure we cache discovered course mappings
"""
self.initdb(default_ms)
# unset mappings
self.store.mappings = {}
course_key = self.course_locations[self.MONGO_COURSEID].course_key
with check_exact_number_of_calls(self.store.default_modulestore, 'has_course', 1):
assert self.store.default_modulestore == self.store._get_modulestore_for_courselike(course_key) # pylint: disable=protected-access, line-too-long
assert course_key in self.store.mappings
assert self.store.default_modulestore == self.store._get_modulestore_for_courselike(course_key) # pylint: disable=protected-access, line-too-long
@ddt.data(*itertools.product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(True, False)
))
@ddt.unpack
def test_duplicate_course_error(self, default_ms, reset_mixed_mappings):
"""
Make sure we get back the store type we expect for given mappings
"""
self._initialize_mixed(mappings={})
with self.store.default_store(default_ms):
self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
if reset_mixed_mappings:
self.store.mappings = {}
with pytest.raises(DuplicateCourseError):
self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_duplicate_course_error_with_different_case_ids(self, default_store):
"""
Verify that course can not be created with same course_id with different case.
"""
self._initialize_mixed(mappings={})
with self.store.default_store(default_store):
self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
with pytest.raises(DuplicateCourseError):
self.store.create_course('ORG_X', 'COURSE_Y', 'RUN_Z', self.user_id)
# Draft:
# problem: One lookup to locate an item that exists
# fake: one w/ wildcard version
# split has one lookup for the course and then one for the course items
@ddt.data((ModuleStoreEnum.Type.mongo, [1, 1], 0), (ModuleStoreEnum.Type.split, [2, 2], 0))
@ddt.unpack
def test_has_item(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
with check_mongo_calls(max_find.pop(0), max_send):
assert self.store.has_item(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
# try negative cases
with check_mongo_calls(max_find.pop(0), max_send):
assert not self.store.has_item(self.fake_location)
# verify that an error is raised when the revision is not valid
with pytest.raises(UnsupportedRevisionError):
self.store.has_item(self.fake_location, revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# draft queries:
# problem: find draft item, find all items pertinent to inheritance computation, find parent
# non-existent problem: find draft, find published
# split:
# problem: active_versions, structure
# non-existent problem: ditto
@ddt.data((ModuleStoreEnum.Type.mongo, [3, 2], 0), (ModuleStoreEnum.Type.split, [2, 2], 0))
@ddt.unpack
def test_get_item(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
with check_mongo_calls(max_find.pop(0), max_send):
assert self.store.get_item(self.problem_x1a_1) is not None # lint-amnesty, pylint: disable=no-member
# try negative cases
with check_mongo_calls(max_find.pop(0), max_send):
with pytest.raises(ItemNotFoundError):
self.store.get_item(self.fake_location)
# verify that an error is raised when the revision is not valid
with pytest.raises(UnsupportedRevisionError):
self.store.get_item(self.fake_location, revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Draft:
# wildcard query, 6! load pertinent items for inheritance calls, load parents, course root fetch (why)
# Split:
# active_versions (with regex), structure, and spurious active_versions refetch
@ddt.data((ModuleStoreEnum.Type.mongo, 14, 0), (ModuleStoreEnum.Type.split, 4, 0))
@ddt.unpack
def test_get_items(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
course_locn = self.course_locations[self.MONGO_COURSEID]
with check_mongo_calls(max_find, max_send):
modules = self.store.get_items(course_locn.course_key, qualifiers={'category': 'problem'})
assert len(modules) == 6
# verify that an error is raised when the revision is not valid
with pytest.raises(UnsupportedRevisionError):
self.store.get_items(
self.course_locations[self.MONGO_COURSEID].course_key,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_version_on_block(self, default_ms):
self.initdb(default_ms)
self._create_block_hierarchy()
course = self.store.get_course(self.course.id)
course_version = course.course_version
if default_ms == ModuleStoreEnum.Type.split:
assert course_version is not None
else:
assert course_version is None
blocks = self.store.get_items(self.course.id, qualifiers={'category': 'problem'})
blocks.append(self.store.get_item(self.problem_x1a_1)) # lint-amnesty, pylint: disable=no-member
assert len(blocks) == 7
for block in blocks:
assert block.course_version == course_version
# ensure that when the block is retrieved from the runtime cache,
# the course version is still present
cached_block = course.runtime.load_item(block.location)
assert cached_block.course_version == block.course_version
@ddt.data((ModuleStoreEnum.Type.split, 2, False), (ModuleStoreEnum.Type.mongo, 3, True))
@ddt.unpack
def test_get_items_include_orphans(self, default_ms, expected_items_in_tree, orphan_in_items):
"""
        Test that the `include_orphans` option returns only those items which are present in the course tree.
        It tests that orphans are not fetched when calling `get_items` with `include_orphans`.
Params:
expected_items_in_tree:
Number of items that will be returned after `get_items` would be called with `include_orphans`.
In split, it would not get orphan items.
                In mongo, it would still get orphan items because `include_orphans` has no impact on the mongo
                modulestore, which returns the same number of items as when called without the `include_orphans` kwarg.
orphan_in_items:
When `get_items` is called with `include_orphans` kwarg, then check if an orphan is returned or not.
                False when called in the split modulestore because, with `include_orphans`, get_items is now
                expected not to retrieve orphans.
                True when called in the mongo modulestore because `include_orphans` has no effect on mongo.
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
course_key = test_course.id
items = self.store.get_items(course_key)
# Check items found are either course or about type
assert set(['course', 'about']).issubset(set([item.location.block_type for item in items])) # pylint: disable=consider-using-set-comprehension, line-too-long
# Assert that about is a detached category found in get_items
assert [item.location.block_type for item in items if item.location.block_type == 'about'][0]\
in DETACHED_XBLOCK_TYPES
assert len(items) == 2
# Check that orphans are not found
orphans = self.store.get_orphans(course_key)
assert len(orphans) == 0
# Add an orphan to test course
orphan = course_key.make_usage_key('chapter', 'OrphanChapter')
self.store.create_item(self.user_id, orphan.course_key, orphan.block_type, block_id=orphan.block_id)
# Check that now an orphan is found
orphans = self.store.get_orphans(course_key)
assert orphan in orphans
assert len(orphans) == 1
# Check now `get_items` retrieves an extra item added above which is an orphan.
items = self.store.get_items(course_key)
assert orphan in [item.location for item in items]
assert len(items) == 3
# Check now `get_items` with `include_orphans` kwarg does not retrieves an orphan block.
items_in_tree = self.store.get_items(course_key, include_orphans=False)
# Check that course and about blocks are found in get_items
assert set(['course', 'about']).issubset({item.location.block_type for item in items_in_tree})
# Check orphan is found or not - this is based on mongo/split modulestore. It should be found in mongo.
assert (orphan in [item.location for item in items_in_tree]) == orphan_in_items
assert len(items_in_tree) == expected_items_in_tree
# draft: get draft, get ancestors up to course (2-6), compute inheritance
# sends: update problem and then each ancestor up to course (edit info)
# split: active_versions, definitions (calculator field), structures
# 2 sends to update index & structure (note, it would also be definition if a content field changed)
@ddt.data((ModuleStoreEnum.Type.mongo, 7, 5), (ModuleStoreEnum.Type.split, 3, 2))
@ddt.unpack
def test_update_item(self, default_ms, max_find, max_send):
"""
Update should succeed for r/w dbs
"""
self.initdb(default_ms)
self._create_block_hierarchy()
problem = self.store.get_item(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
# if following raised, then the test is really a noop, change it
assert problem.max_attempts != 2, 'Default changed making test meaningless'
problem.max_attempts = 2
with check_mongo_calls(max_find, max_send):
problem = self.store.update_item(problem, self.user_id)
assert problem.max_attempts == 2, "Update didn't persist"
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_direct_only(self, default_ms):
"""
Tests that has_changes() returns false when a new xblock in a direct only category is checked
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create dummy direct only xblocks
chapter = self.store.create_item(
self.user_id,
test_course.id,
'chapter',
block_id='vertical_container'
)
# Check that neither xblock has changes
assert not self.store.has_changes(test_course)
assert not self.store.has_changes(chapter)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes(self, default_ms):
"""
Tests that has_changes() only returns true when changes are present
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
assert self.store.has_changes(xblock)
# Publish and verify that there are no unpublished changes
newXBlock = self.store.publish(xblock.location, self.user_id)
assert not self.store.has_changes(newXBlock)
# Change the component, then check that there now are changes
component = self.store.get_item(xblock.location)
component.display_name = 'Changed Display Name'
component = self.store.update_item(component, self.user_id)
assert self.store.has_changes(component)
# Publish and verify again
component = self.store.publish(component.location, self.user_id)
assert not self.store.has_changes(component)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_unit_stuck_in_draft_mode(self, default_ms):
"""
        After revert_to_published(), has_changes() should return False if the draft has no changes.
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
assert self.store.has_changes(xblock)
# Publish and verify that there are no unpublished changes
component = self.store.publish(xblock.location, self.user_id)
assert not self.store.has_changes(component)
self.store.revert_to_published(component.location, self.user_id)
component = self.store.get_item(component.location)
assert not self.store.has_changes(component)
# Publish and verify again
component = self.store.publish(component.location, self.user_id)
assert not self.store.has_changes(component)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_unit_stuck_in_published_mode(self, default_ms):
"""
        After revert_to_published(), has_changes() should return True if the draft has changes.
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
assert self.store.has_changes(xblock)
# Publish and verify that there are no unpublished changes
component = self.store.publish(xblock.location, self.user_id)
assert not self.store.has_changes(component)
# Discard changes and verify that there are no changes
self.store.revert_to_published(component.location, self.user_id)
component = self.store.get_item(component.location)
assert not self.store.has_changes(component)
# Change the component, then check that there now are changes
component = self.store.get_item(component.location)
component.display_name = 'Changed Display Name'
self.store.update_item(component, self.user_id)
# Verify that changes are present
assert self.store.has_changes(component)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_unit_stuck_in_published_mode_after_delete(self, default_ms):
"""
Test that a unit does not get stuck in published mode
after discarding a component changes and deleting a component
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy vertical & html component to test against
vertical = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
component = self.store.create_child(
self.user_id,
vertical.location,
'html',
block_id='html_component'
)
# publish vertical changes
self.store.publish(vertical.location, self.user_id)
assert not self._has_changes(vertical.location)
# Change a component, then check that there now are changes
component = self.store.get_item(component.location)
component.display_name = 'Changed Display Name'
self.store.update_item(component, self.user_id)
assert self._has_changes(vertical.location)
# Discard changes and verify that there are no changes
self.store.revert_to_published(vertical.location, self.user_id)
assert not self._has_changes(vertical.location)
# Delete the component and verify that the unit has changes
self.store.delete_item(component.location, self.user_id)
vertical = self.store.get_item(vertical.location)
assert self._has_changes(vertical.location)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_publish_automatically_after_delete_unit(self, default_ms):
"""
Check that sequential publishes automatically after deleting a unit
"""
self.initdb(default_ms)
test_course = self.store.create_course('test_org', 'test_course', 'test_run', self.user_id)
# create sequential and vertical to test against
sequential = self.store.create_child(self.user_id, test_course.location, 'sequential', 'test_sequential')
vertical = self.store.create_child(self.user_id, sequential.location, 'vertical', 'test_vertical')
# publish sequential changes
self.store.publish(sequential.location, self.user_id)
assert not self._has_changes(sequential.location)
# delete vertical and check sequential has no changes
self.store.delete_item(vertical.location, self.user_id)
assert not self._has_changes(sequential.location)
def setup_has_changes(self, default_ms):
"""
Common set up for has_changes tests below.
Returns a dictionary of useful location maps for testing.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
locations = {
'grandparent': self.chapter_x, # lint-amnesty, pylint: disable=no-member
'parent_sibling': self.sequential_x2, # lint-amnesty, pylint: disable=no-member
'parent': self.sequential_x1, # lint-amnesty, pylint: disable=no-member
'child_sibling': self.vertical_x1b, # lint-amnesty, pylint: disable=no-member
'child': self.vertical_x1a, # lint-amnesty, pylint: disable=no-member
}
# Publish the vertical units
self.store.publish(locations['parent_sibling'], self.user_id)
self.store.publish(locations['parent'], self.user_id)
return locations
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_ancestors(self, default_ms):
"""
Tests that has_changes() returns true on ancestors when a child is changed
"""
locations = self.setup_has_changes(default_ms)
# Verify that there are no unpublished changes
for key in locations:
assert not self._has_changes(locations[key])
# Change the child
child = self.store.get_item(locations['child'])
child.display_name = 'Changed Display Name'
self.store.update_item(child, self.user_id)
# All ancestors should have changes, but not siblings
assert self._has_changes(locations['grandparent'])
assert self._has_changes(locations['parent'])
assert self._has_changes(locations['child'])
assert not self._has_changes(locations['parent_sibling'])
assert not self._has_changes(locations['child_sibling'])
# Publish the unit with changes
self.store.publish(locations['parent'], self.user_id)
# Verify that there are no unpublished changes
for key in locations:
assert not self._has_changes(locations[key])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_publish_ancestors(self, default_ms):
"""
Tests that has_changes() returns false after a child is published only if all children are unchanged
"""
locations = self.setup_has_changes(default_ms)
# Verify that there are no unpublished changes
for key in locations:
assert not self._has_changes(locations[key])
# Change both children
child = self.store.get_item(locations['child'])
child_sibling = self.store.get_item(locations['child_sibling'])
child.display_name = 'Changed Display Name'
child_sibling.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=self.user_id)
self.store.update_item(child_sibling, user_id=self.user_id)
# Verify that ancestors have changes
assert self._has_changes(locations['grandparent'])
assert self._has_changes(locations['parent'])
# Publish one child
self.store.publish(locations['child_sibling'], self.user_id)
# Verify that ancestors still have changes
assert self._has_changes(locations['grandparent'])
assert self._has_changes(locations['parent'])
# Publish the other child
self.store.publish(locations['child'], self.user_id)
# Verify that ancestors now have no changes
assert not self._has_changes(locations['grandparent'])
assert not self._has_changes(locations['parent'])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_add_remove_child(self, default_ms):
"""
Tests that has_changes() returns true for the parent when a child with changes is added
and false when that child is removed.
"""
locations = self.setup_has_changes(default_ms)
# Test that the ancestors don't have changes
assert not self._has_changes(locations['grandparent'])
assert not self._has_changes(locations['parent'])
# Create a new child and attach it to parent
self.store.create_child(
self.user_id,
locations['parent'],
'vertical',
block_id='new_child',
)
# Verify that the ancestors now have changes
assert self._has_changes(locations['grandparent'])
assert self._has_changes(locations['parent'])
# Remove the child from the parent
parent = self.store.get_item(locations['parent'])
parent.children = [locations['child'], locations['child_sibling']]
self.store.update_item(parent, user_id=self.user_id)
# Verify that ancestors now have no changes
assert not self._has_changes(locations['grandparent'])
assert not self._has_changes(locations['parent'])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_non_direct_only_children(self, default_ms):
"""
Tests that has_changes() returns true after editing the child of a vertical (both not direct only categories).
"""
self.initdb(default_ms)
parent = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='parent',
)
child = self.store.create_child(
self.user_id,
parent.location,
'html',
block_id='child',
)
self.store.publish(parent.location, self.user_id)
# Verify that there are no changes
assert not self._has_changes(parent.location)
assert not self._has_changes(child.location)
# Change the child
child.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=self.user_id)
# Verify that both parent and child have changes
assert self._has_changes(parent.location)
assert self._has_changes(child.location)
@ddt.data(*itertools.product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(ModuleStoreEnum.Branch.draft_preferred, ModuleStoreEnum.Branch.published_only)
))
@ddt.unpack
def test_has_changes_missing_child(self, default_ms, default_branch):
"""
Tests that has_changes() does not throw an exception when a child doesn't exist.
"""
self.initdb(default_ms)
with self.store.branch_setting(default_branch, self.course.id):
# Create the parent and point it to a fake child
parent = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='parent',
)
parent.children += [self.course.id.make_usage_key('vertical', 'does_not_exist')]
parent = self.store.update_item(parent, self.user_id)
# Check the parent for changes should return True and not throw an exception
assert self.store.has_changes(parent)
# Draft
# Find: find parents (definition.children query), get parent, get course (fill in run?),
# find parents of the parent (course), get inheritance items,
# get item (to delete subtree), get inheritance again.
# Sends: delete item, update parent
# Split
# Find: active_versions, 2 structures (published & draft), definition (unnecessary)
# Sends: updated draft and published structures and active_versions
@ddt.data((ModuleStoreEnum.Type.mongo, 7, 2), (ModuleStoreEnum.Type.split, 3, 3))
@ddt.unpack
def test_delete_item(self, default_ms, max_find, max_send):
"""
Delete should reject on r/o db and work on r/w one
"""
self.initdb(default_ms)
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.writable_chapter_location.course_key): # lint-amnesty, pylint: disable=line-too-long
with check_mongo_calls(max_find, max_send):
self.store.delete_item(self.writable_chapter_location, self.user_id)
# verify it's gone
with pytest.raises(ItemNotFoundError):
self.store.get_item(self.writable_chapter_location)
# verify it's gone from published too
with pytest.raises(ItemNotFoundError):
self.store.get_item(self.writable_chapter_location, revision=ModuleStoreEnum.RevisionOption.published_only)
# Draft:
# queries: find parent (definition.children), count versions of item, get parent, count grandparents,
# inheritance items, draft item, draft child, inheritance
# sends: delete draft vertical and update parent
# Split:
# queries: active_versions, draft and published structures, definition (unnecessary)
# sends: update published (why?), draft, and active_versions
@ddt.data((ModuleStoreEnum.Type.mongo, 9, 2), (ModuleStoreEnum.Type.split, 4, 3))
@ddt.unpack
def test_delete_private_vertical(self, default_ms, max_find, max_send):
"""
Because old mongo treated verticals as the first layer which could be draft, it has some interesting
behavioral properties which this deletion test gets at.
"""
self.initdb(default_ms)
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
# create and delete a private vertical with private children
private_vert = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, self.course_locations[self.MONGO_COURSEID],
'vertical', block_id='private'
)
private_leaf = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, private_vert.location, 'html', block_id='private_leaf'
)
# verify pre delete state (just to verify that the test is valid)
if hasattr(private_vert.location, 'version_guid'):
# change to the HEAD version
vert_loc = private_vert.location.for_version(private_leaf.location.version_guid)
else:
vert_loc = private_vert.location
assert self.store.has_item(vert_loc)
assert self.store.has_item(private_leaf.location)
course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key, 0)
assert vert_loc in course.children
# delete the vertical and ensure the course no longer points to it
with check_mongo_calls(max_find, max_send):
self.store.delete_item(vert_loc, self.user_id)
course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key, 0)
if hasattr(private_vert.location, 'version_guid'):
# change to the HEAD version
vert_loc = private_vert.location.for_version(course.location.version_guid)
leaf_loc = private_leaf.location.for_version(course.location.version_guid)
else:
vert_loc = private_vert.location
leaf_loc = private_leaf.location
assert not self.store.has_item(vert_loc)
assert not self.store.has_item(leaf_loc)
assert vert_loc not in course.children
# Draft:
# find: find parent (definition.children) 2x, find draft item, get inheritance items
# send: one delete query for specific item
# Split:
# find: active_version & structure (cached)
# send: update structure and active_versions
@ddt.data((ModuleStoreEnum.Type.mongo, 4, 1), (ModuleStoreEnum.Type.split, 2, 2))
@ddt.unpack
def test_delete_draft_vertical(self, default_ms, max_find, max_send):
"""
Test deleting a draft vertical which has a published version.
"""
self.initdb(default_ms)
# reproduce bug STUD-1965
# create and delete a private vertical with private children
private_vert = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, self.course_locations[self.MONGO_COURSEID], 'vertical', block_id='publish'
)
private_leaf = self.store.create_child(
self.user_id, private_vert.location, 'html', block_id='bug_leaf'
)
# verify that an error is raised when the revision is not valid
with pytest.raises(UnsupportedRevisionError):
self.store.delete_item(
private_leaf.location,
self.user_id,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
self.store.publish(private_vert.location, self.user_id)
private_leaf.display_name = 'change me'
private_leaf = self.store.update_item(private_leaf, self.user_id)
# test succeeds if delete succeeds w/o error
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
with check_mongo_calls(max_find, max_send):
self.store.delete_item(private_leaf.location, self.user_id)
# Draft:
# 1) find all courses (wildcard),
# 2) get each course 1 at a time (1 course),
# 3) wildcard split if it has any (1) but it doesn't
# Split:
# 1) wildcard split search,
# 2-4) active_versions, structure, definition (s/b lazy; so, unnecessary)
# 5) wildcard draft mongo which has none
@ddt.data((ModuleStoreEnum.Type.mongo, 3, 0), (ModuleStoreEnum.Type.split, 6, 0))
@ddt.unpack
def test_get_courses(self, default_ms, max_find, max_send):
self.initdb(default_ms)
# we should have one course across all stores
with check_mongo_calls(max_find, max_send):
courses = self.store.get_courses()
course_ids = [course.location for course in courses]
assert len(courses) == 1, 'Not one course: {}'.format(course_ids)
assert self.course_locations[self.MONGO_COURSEID] in course_ids
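# with branch markers stripped, the draft and published branches should report the same course ids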
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
draft_courses = self.store.get_courses(remove_branch=True)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
published_courses = self.store.get_courses(remove_branch=True)
assert [c.id for c in draft_courses] == [c.id for c in published_courses]
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_create_child_detached_tabs(self, default_ms):
"""
Test the 'create_child' method with a detached category ('static_tab')
to check that the new static tab is not a direct child of the course.
"""
self.initdb(default_ms)
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
assert len(mongo_course.children) == 1
# create a static tab of the course
self.store.create_child(
self.user_id,
self.course.location,
'static_tab'
)
# now check that the course has the same number of children
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
assert len(mongo_course.children) == 1
# draft is 2: find out which ms owns course, get item
# split: active_versions, structure, definition (to load course wiki string)
@ddt.data((ModuleStoreEnum.Type.mongo, 2, 0), (ModuleStoreEnum.Type.split, 3, 0))
@ddt.unpack
def test_get_course(self, default_ms, max_find, max_send):
"""
This test is here for the performance comparison, not functionality. It tests the performance
of getting an item whose scope.content fields are looked at.
"""
self.initdb(default_ms)
with check_mongo_calls(max_find, max_send):
course = self.store.get_item(self.course_locations[self.MONGO_COURSEID])
assert course.id == self.course_locations[self.MONGO_COURSEID].course_key
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_library(self, default_ms):
"""
Test that create_library and get_library work regardless of the default modulestore.
Other tests of MixedModulestore support are in test_libraries.py but this one must
be done here so we can test the configuration where Draft/old is the first modulestore.
"""
self.initdb(default_ms)
with self.store.default_store(ModuleStoreEnum.Type.split): # The CMS also wraps create_library like this
library = self.store.create_library("org", "lib", self.user_id, {"display_name": "Test Library"})
library_key = library.location.library_key
assert isinstance(library_key, LibraryLocator)
# Now load with get_library and make sure it works:
library = self.store.get_library(library_key)
assert library.location.library_key == library_key
# Clear the mappings so we can test get_library code path without mapping set:
self.store.mappings.clear()
library = self.store.get_library(library_key)
assert library.location.library_key == library_key
# notice this doesn't test getting a public item via draft_preferred which draft would have 2 hits (split
# still only 2)
# Draft: get_parent
# Split: active_versions, structure
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_get_parent_locations(self, default_ms, max_find, max_send):
"""
Test a simple get_parent_location for a direct-only category (i.e., always published)
"""
self.initdb(default_ms)
self._create_block_hierarchy()
with check_mongo_calls(max_find, max_send):
parent = self.store.get_parent_location(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
assert parent == self.vertical_x1a # lint-amnesty, pylint: disable=no-member
def verify_get_parent_locations_results(self, expected_results):
"""
Verifies that the results of calling get_parent_location match expected_results.
"""
for child_location, parent_location, revision in expected_results:
assert parent_location == self.store.get_parent_location(child_location, revision=revision)
def verify_item_parent(self, item_location, expected_parent_location, old_parent_location, is_reverted=False):
"""
Verifies that item is placed under expected parent.
Arguments:
item_location (BlockUsageLocator) : Locator of item.
expected_parent_location (BlockUsageLocator) : Expected parent block locator.
old_parent_location (BlockUsageLocator) : Old parent block locator.
is_reverted (Boolean) : A flag indicating that the item was reverted.
"""
with self.store.bulk_operations(self.course.id):
source_item = self.store.get_item(item_location)
old_parent = self.store.get_item(old_parent_location)
expected_parent = self.store.get_item(expected_parent_location)
assert expected_parent_location == source_item.get_parent().location
# If an item was reverted, its actual (published) parent is the one that is the current parent now,
# i.e. expected_parent_location; otherwise it is old_parent_location.
published_parent_location = expected_parent_location if is_reverted else old_parent_location
# Check parent locations wrt branches
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
assert expected_parent_location == self.store.get_item(item_location).get_parent().location
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
assert published_parent_location == self.store.get_item(item_location).get_parent().location
# Make location specific to published branch for verify_get_parent_locations_results call.
published_parent_location = published_parent_location.for_branch(ModuleStoreEnum.BranchName.published)
# Verify expected item parent locations
self.verify_get_parent_locations_results([
(item_location, expected_parent_location, None),
(item_location, expected_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(item_location, published_parent_location, ModuleStoreEnum.RevisionOption.published_only),
])
# Also verify item.parent has correct parent location set.
assert source_item.parent == expected_parent_location
assert source_item.parent == self.store.get_parent_location(item_location)
# Item should be present in new parent's children list but not in old parent's children list.
assert item_location in expected_parent.children
assert item_location not in old_parent.children
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_item_parent(self, store_type):
"""
Test that when we move an item from an old parent to a new parent, the item is present in the new parent.
"""
self.initdb(store_type)
self._create_block_hierarchy()
# Publish the course.
self.course = self.store.publish(self.course.location, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
# Move child problem_x1a_1 to vertical_y1a.
item_location = self.problem_x1a_1 # lint-amnesty, pylint: disable=no-member
new_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_x1a # lint-amnesty, pylint: disable=no-member
updated_item_location = self.store.update_item_parent(
item_location, new_parent_location, old_parent_location, self.user_id
)
assert updated_item_location == item_location
self.verify_item_parent(
item_location=item_location,
expected_parent_location=new_parent_location,
old_parent_location=old_parent_location
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_revert(self, store_type):
"""
Test that when we move an item to a new parent and then discard changes in the original parent, the item is placed
back in the original parent.
"""
self.initdb(store_type)
self._create_block_hierarchy()
# Publish the course
self.course = self.store.publish(self.course.location, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
# Move child problem_x1a_1 to vertical_y1a.
item_location = self.problem_x1a_1 # lint-amnesty, pylint: disable=no-member
new_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_x1a # lint-amnesty, pylint: disable=no-member
updated_item_location = self.store.update_item_parent(
item_location, new_parent_location, old_parent_location, self.user_id
)
assert updated_item_location == item_location
self.verify_item_parent(
item_location=item_location,
expected_parent_location=new_parent_location,
old_parent_location=old_parent_location
)
# Now discard changes in old_parent_location, i.e. the original parent.
self.store.revert_to_published(old_parent_location, self.user_id)
self.verify_item_parent(
item_location=item_location,
expected_parent_location=old_parent_location,
old_parent_location=new_parent_location,
is_reverted=True
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_delete_revert(self, store_type):
"""
Test that when we move an item, delete it, and then discard changes for the original parent, the item is
present back in the original parent.
"""
self.initdb(store_type)
self._create_block_hierarchy()
# Publish the course
self.course = self.store.publish(self.course.location, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
# Move child problem_x1a_1 to vertical_y1a.
item_location = self.problem_x1a_1 # lint-amnesty, pylint: disable=no-member
new_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_x1a # lint-amnesty, pylint: disable=no-member
updated_item_location = self.store.update_item_parent(
item_location, new_parent_location, old_parent_location, self.user_id
)
assert updated_item_location == item_location
self.verify_item_parent(
item_location=item_location,
expected_parent_location=new_parent_location,
old_parent_location=old_parent_location
)
# Now delete the item.
self.store.delete_item(item_location, self.user_id)
# Now discard changes in old_parent_location, i.e. the original parent.
self.store.revert_to_published(old_parent_location, self.user_id)
self.verify_item_parent(
item_location=item_location,
expected_parent_location=old_parent_location,
old_parent_location=new_parent_location,
is_reverted=True
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_revert_move(self, store_type):
"""
Test that when we move an item to a new parent and discard changes for the old parent, the item is placed back
in the old parent, and that subsequently moving the item from the old parent to the new parent places it under
the new parent.
"""
self.initdb(store_type)
self._create_block_hierarchy()
# Publish the course
self.course = self.store.publish(self.course.location, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
# Move child problem_x1a_1 to vertical_y1a.
item_location = self.problem_x1a_1 # lint-amnesty, pylint: disable=no-member
new_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_x1a # lint-amnesty, pylint: disable=no-member
updated_item_location = self.store.update_item_parent(
item_location, new_parent_location, old_parent_location, self.user_id
)
assert updated_item_location == item_location
self.verify_item_parent(
item_location=item_location,
expected_parent_location=new_parent_location,
old_parent_location=old_parent_location
)
# Now discard changes in old_parent_location, i.e. the original parent.
self.store.revert_to_published(old_parent_location, self.user_id)
self.verify_item_parent(
item_location=item_location,
expected_parent_location=old_parent_location,
old_parent_location=new_parent_location,
is_reverted=True
)
# Again try to move from x1 to y1
updated_item_location = self.store.update_item_parent(
item_location, new_parent_location, old_parent_location, self.user_id
)
assert updated_item_location == item_location
self.verify_item_parent(
item_location=item_location,
expected_parent_location=new_parent_location,
old_parent_location=old_parent_location
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_edited_revert(self, store_type):
"""
Test that when we move an edited item from the old parent to a new parent and then discard changes in the old
parent, the item is placed back under the original parent in its initial state.
"""
self.initdb(store_type)
self._create_block_hierarchy()
# Publish the course.
self.course = self.store.publish(self.course.location, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
# Move child problem_x1a_1 to vertical_y1a.
item_location = self.problem_x1a_1 # lint-amnesty, pylint: disable=no-member
new_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_x1a # lint-amnesty, pylint: disable=no-member
problem = self.store.get_item(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
orig_display_name = problem.display_name
# Change display name of problem and update just it.
problem.display_name = 'updated'
self.store.update_item(problem, self.user_id)
updated_problem = self.store.get_item(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
assert updated_problem.display_name == 'updated'
# Now, move from x1 to y1.
updated_item_location = self.store.update_item_parent(
item_location, new_parent_location, old_parent_location, self.user_id
)
assert updated_item_location == item_location
self.verify_item_parent(
item_location=item_location,
expected_parent_location=new_parent_location,
old_parent_location=old_parent_location
)
# Now discard changes in old_parent_location, i.e. the original parent.
self.store.revert_to_published(old_parent_location, self.user_id)
# Check that problem has the original name back.
reverted_problem = self.store.get_item(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
assert orig_display_name == reverted_problem.display_name
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_1_moved_1_unchanged(self, store_type):
"""
Test that when we move an item out of an old parent that has multiple items, only the moved item's parent
is changed while the other items are still present inside the old parent.
"""
self.initdb(store_type)
self._create_block_hierarchy()
# Create some children in vertical_x1a
problem_item2 = self.store.create_child(self.user_id, self.vertical_x1a, 'problem', 'Problem_Item2') # lint-amnesty, pylint: disable=no-member
# Publish the course.
self.course = self.store.publish(self.course.location, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
item_location = self.problem_x1a_1 # lint-amnesty, pylint: disable=no-member
new_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_x1a # lint-amnesty, pylint: disable=no-member
# Move problem_x1a_1 from x1 to y1.
updated_item_location = self.store.update_item_parent(
item_location, new_parent_location, old_parent_location, self.user_id
)
assert updated_item_location == item_location
self.verify_item_parent(
item_location=item_location,
expected_parent_location=new_parent_location,
old_parent_location=old_parent_location
)
# Check that problem_item2 is still present in vertical_x1a
problem_item2 = self.store.get_item(problem_item2.location)
assert problem_item2.parent == self.vertical_x1a # lint-amnesty, pylint: disable=no-member
assert problem_item2.location in problem_item2.get_parent().children
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_1_moved_1_edited(self, store_type):
"""
Test that, for an old parent with multiple items, when we edit one item and move another item from the old
parent to a new parent, discarding changes in the old parent discards the edits to the edited item and moves
the moved item back to its old location.
"""
self.initdb(store_type)
self._create_block_hierarchy()
# Create some children in vertical_x1a
problem_item2 = self.store.create_child(self.user_id, self.vertical_x1a, 'problem', 'Problem_Item2') # lint-amnesty, pylint: disable=no-member
orig_display_name = problem_item2.display_name
# Publish the course.
self.course = self.store.publish(self.course.location, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
# Edit problem_item2.
problem_item2.display_name = 'updated'
self.store.update_item(problem_item2, self.user_id)
updated_problem2 = self.store.get_item(problem_item2.location)
assert updated_problem2.display_name == 'updated'
item_location = self.problem_x1a_1 # lint-amnesty, pylint: disable=no-member
new_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_x1a # lint-amnesty, pylint: disable=no-member
# Move problem_x1a_1 from x1 to y1.
updated_item_location = self.store.update_item_parent(
item_location, new_parent_location, old_parent_location, self.user_id
)
assert updated_item_location == item_location
self.verify_item_parent(
item_location=item_location,
expected_parent_location=new_parent_location,
old_parent_location=old_parent_location
)
# Now discard changes in old_parent_location, i.e. the original parent.
self.store.revert_to_published(old_parent_location, self.user_id)
# Check that problem_item2 has the original name back.
reverted_problem2 = self.store.get_item(problem_item2.location)
assert orig_display_name == reverted_problem2.display_name
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_1_moved_1_deleted(self, store_type):
"""
Test that, for an old parent with multiple items, when we delete one item and move another item from the old
parent to a new parent, discarding changes in the old parent restores the deleted item and moves the moved
item back to its old location.
"""
self.initdb(store_type)
self._create_block_hierarchy()
# Create some children in vertical_x1a
problem_item2 = self.store.create_child(self.user_id, self.vertical_x1a, 'problem', 'Problem_Item2') # lint-amnesty, pylint: disable=no-member
orig_display_name = problem_item2.display_name # lint-amnesty, pylint: disable=unused-variable
# Publish the course.
self.course = self.store.publish(self.course.location, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
# Now delete other problem problem_item2.
self.store.delete_item(problem_item2.location, self.user_id)
# Move child problem_x1a_1 to vertical_y1a.
item_location = self.problem_x1a_1 # lint-amnesty, pylint: disable=no-member
new_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_x1a # lint-amnesty, pylint: disable=no-member
# Move problem_x1a_1 from x1 to y1.
updated_item_location = self.store.update_item_parent(
item_location, new_parent_location, old_parent_location, self.user_id
)
assert updated_item_location == item_location
self.verify_item_parent(
item_location=item_location,
expected_parent_location=new_parent_location,
old_parent_location=old_parent_location
)
# Now discard changes in old_parent_location, i.e. the original parent.
self.store.revert_to_published(old_parent_location, self.user_id)
# Check that problem_item2 is also back in vertical_x1a
problem_item2 = self.store.get_item(problem_item2.location)
assert problem_item2.parent == self.vertical_x1a # lint-amnesty, pylint: disable=no-member
assert problem_item2.location in problem_item2.get_parent().children
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_parent_locations_moved_child(self, default_ms):
self.initdb(default_ms)
self._create_block_hierarchy()
# publish the course
self.course = self.store.publish(self.course.location, self.user_id) # lint-amnesty, pylint: disable=attribute-defined-outside-init
with self.store.bulk_operations(self.course.id):
# make drafts of verticals
self.store.convert_to_draft(self.vertical_x1a, self.user_id) # lint-amnesty, pylint: disable=no-member
self.store.convert_to_draft(self.vertical_y1a, self.user_id) # lint-amnesty, pylint: disable=no-member
# move child problem_x1a_1 to vertical_y1a
child_to_move_location = self.problem_x1a_1 # lint-amnesty, pylint: disable=no-member
new_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_x1a # lint-amnesty, pylint: disable=no-member
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
old_parent = self.store.get_item(child_to_move_location).get_parent()
assert old_parent_location == old_parent.location
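# map the child key into the parent's course key so it matches the entry stored in old_parent.children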
child_to_move_contextualized = child_to_move_location.map_into_course(old_parent.location.course_key)
old_parent.children.remove(child_to_move_contextualized)
self.store.update_item(old_parent, self.user_id)
new_parent = self.store.get_item(new_parent_location)
new_parent.children.append(child_to_move_location)
self.store.update_item(new_parent, self.user_id)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
assert new_parent_location == self.store.get_item(child_to_move_location).get_parent().location
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
assert old_parent_location == self.store.get_item(child_to_move_location).get_parent().location
old_parent_published_location = old_parent_location.for_branch(ModuleStoreEnum.BranchName.published)
self.verify_get_parent_locations_results([
(child_to_move_location, new_parent_location, None),
(child_to_move_location, new_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_move_location, old_parent_published_location, ModuleStoreEnum.RevisionOption.published_only),
])
# publish the course again
self.store.publish(self.course.location, self.user_id)
new_parent_published_location = new_parent_location.for_branch(ModuleStoreEnum.BranchName.published)
self.verify_get_parent_locations_results([
(child_to_move_location, new_parent_location, None),
(child_to_move_location, new_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_move_location, new_parent_published_location, ModuleStoreEnum.RevisionOption.published_only),
])
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_get_parent_locations_deleted_child(self, default_ms):
self.initdb(default_ms)
self._create_block_hierarchy()
# publish the course
self.store.publish(self.course.location, self.user_id)
# make draft of vertical
self.store.convert_to_draft(self.vertical_y1a, self.user_id) # lint-amnesty, pylint: disable=no-member
# delete child problem_y1a_1
child_to_delete_location = self.problem_y1a_1 # lint-amnesty, pylint: disable=no-member
old_parent_location = self.vertical_y1a # lint-amnesty, pylint: disable=no-member
self.store.delete_item(child_to_delete_location, self.user_id)
self.verify_get_parent_locations_results([
(child_to_delete_location, old_parent_location, None),
# Note: The following could be an unexpected result, but we want to avoid an extra database call
(child_to_delete_location, old_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_delete_location, old_parent_location, ModuleStoreEnum.RevisionOption.published_only),
])
# publish the course again
self.store.publish(self.course.location, self.user_id)
self.verify_get_parent_locations_results([
(child_to_delete_location, None, None),
(child_to_delete_location, None, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_delete_location, None, ModuleStoreEnum.RevisionOption.published_only),
])
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_get_parent_location_draft(self, default_ms):
"""
Test that "get_parent_location" method returns first published parent
for a draft component, if it has many possible parents (including
draft parents).
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
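# reach into the underlying old-Mongo store so we can manipulate the raw collection directly below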
mongo_store = self.store._get_modulestore_for_courselike(course_id) # pylint: disable=protected-access
# add another parent (unit) "vertical_x1b" for problem "problem_x1a_1"
mongo_store.collection.update_one(
self.vertical_x1b.to_deprecated_son('_id.'), # lint-amnesty, pylint: disable=no-member
{'$push': {'definition.children': six.text_type(self.problem_x1a_1)}} # lint-amnesty, pylint: disable=no-member
)
# convert first parent (unit) "vertical_x1a" of problem "problem_x1a_1" to draft
self.store.convert_to_draft(self.vertical_x1a, self.user_id) # lint-amnesty, pylint: disable=no-member
item = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
assert self.store.has_published_version(item)
# now problem "problem_x1a_1" has 3 parents [vertical_x1a (draft),
# vertical_x1a (published), vertical_x1b (published)]
# check that "get_parent_location" method of draft branch returns first
# published parent "vertical_x1a" without raising "AssertionError" for
# problem location revision
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
assert parent == self.vertical_x1a # lint-amnesty, pylint: disable=no-member
# Draft:
# Problem path:
# 1. Get problem
# 2-6. get parent and rest of ancestors up to course
# 7-8. get sequential, compute inheritance
# 8-9. get vertical, compute inheritance
# 10-11. get other vertical_x1b (why?) and compute inheritance
# Split: active_versions & structure
@ddt.data((ModuleStoreEnum.Type.mongo, [12, 3], 0), (ModuleStoreEnum.Type.split, [3, 2], 0))
@ddt.unpack
def test_path_to_location(self, default_ms, num_finds, num_sends):
"""
Make sure that path_to_location works
"""
self.initdb(default_ms)
course_key = self.course_locations[self.MONGO_COURSEID].course_key
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self._create_block_hierarchy()
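# each expected result is (course_key, chapter, sequential, vertical, position, final usage key)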
should_work = (
(self.problem_x1a_2, # lint-amnesty, pylint: disable=no-member
(course_key, u"Chapter_x", u"Sequential_x1", u'Vertical_x1a', '1', self.problem_x1a_2)), # lint-amnesty, pylint: disable=no-member
(self.chapter_x, # lint-amnesty, pylint: disable=no-member
(course_key, "Chapter_x", None, None, None, self.chapter_x)), # lint-amnesty, pylint: disable=no-member
)
for location, expected in should_work:
# each iteration has different find count, pop this iter's find count
with check_mongo_calls(num_finds.pop(0), num_sends):
path = path_to_location(self.store, location)
assert path == expected
not_found = (
course_key.make_usage_key('video', 'WelcomeX'),
course_key.make_usage_key('course', 'NotHome'),
)
for location in not_found:
with pytest.raises(ItemNotFoundError):
path_to_location(self.store, location)
# Orphaned items should not be found.
orphan = course_key.make_usage_key('chapter', 'OrphanChapter')
self.store.create_item(
self.user_id,
orphan.course_key,
orphan.block_type,
block_id=orphan.block_id
)
with pytest.raises(NoPathToItem):
path_to_location(self.store, orphan)
def test_navigation_index(self):
"""
Make sure that navigation_index correctly parses the various position values that we might get from calls to
path_to_location
"""
assert 1 == navigation_index('1')
assert 10 == navigation_index('10')
assert navigation_index(None) is None
assert 1 == navigation_index('1_2')
assert 5 == navigation_index('5_2')
assert 7 == navigation_index('7_3_5_6_')
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_root_draft(self, default_ms):
"""
Test calling revert_to_published on draft vertical.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
vertical = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
vertical_children_num = len(vertical.children)
self.store.publish(self.course.location, self.user_id)
assert not self._has_changes(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
# delete leaf problem (will make parent vertical a draft)
self.store.delete_item(self.problem_x1a_1, self.user_id) # lint-amnesty, pylint: disable=no-member
assert self._has_changes(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
draft_parent = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
assert (vertical_children_num - 1) == len(draft_parent.children)
published_parent = self.store.get_item(
self.vertical_x1a, # lint-amnesty, pylint: disable=no-member
revision=ModuleStoreEnum.RevisionOption.published_only
)
assert vertical_children_num == len(published_parent.children)
self.store.revert_to_published(self.vertical_x1a, self.user_id) # lint-amnesty, pylint: disable=no-member
reverted_parent = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
assert vertical_children_num == len(published_parent.children)
self.assertBlocksEqualByFields(reverted_parent, published_parent)
assert not self._has_changes(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_root_published(self, default_ms):
"""
Test calling revert_to_published on a published vertical with a draft child.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
problem = self.store.get_item(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
orig_display_name = problem.display_name
# Change display name of problem and update just it (so parent remains published)
problem.display_name = "updated before calling revert"
self.store.update_item(problem, self.user_id)
self.store.revert_to_published(self.vertical_x1a, self.user_id) # lint-amnesty, pylint: disable=no-member
reverted_problem = self.store.get_item(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
assert orig_display_name == reverted_problem.display_name
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_no_draft(self, default_ms):
"""
Test that calling revert_to_published on a vertical with no draft content does nothing.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
orig_vertical = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
self.store.revert_to_published(self.vertical_x1a, self.user_id) # lint-amnesty, pylint: disable=no-member
reverted_vertical = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
self.assertBlocksEqualByFields(orig_vertical, reverted_vertical)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_no_published(self, default_ms):
"""
Test that calling revert_to_published on a vertical with no published version raises an error.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
with pytest.raises(InvalidVersionError):
self.store.revert_to_published(self.vertical_x1a, self.user_id) # lint-amnesty, pylint: disable=no-member
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_direct_only(self, default_ms):
"""
Test that calling revert_to_published on a direct-only item is a no-op.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
num_children = len(self.store.get_item(self.sequential_x1).children) # lint-amnesty, pylint: disable=no-member
self.store.revert_to_published(self.sequential_x1, self.user_id) # lint-amnesty, pylint: disable=no-member
reverted_parent = self.store.get_item(self.sequential_x1) # lint-amnesty, pylint: disable=no-member
# It does not discard the child vertical, even though that child is a draft (with no published version)
assert num_children == len(reverted_parent.children)
def test_reset_course_to_version(self):
"""
Test calling `DraftVersioningModuleStore.reset_course_to_version`.
"""
# Set up test course.
self.initdb(ModuleStoreEnum.Type.split) # Old Mongo does not support this operation.
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
# Get children of a vertical as a set.
We will use this set as a basis for content comparison in this test.
original_vertical = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
original_vertical_children = set(original_vertical.children)
# Find the version_guid of our course by diving into Split Mongo.
split = self._get_split_modulestore()
course_index = split.get_course_index(self.course.location.course_key)
original_version_guid = course_index["versions"]["published-branch"]
# Reset course to currently-published version.
# This should be a no-op.
self.store.reset_course_to_version(
self.course.location.course_key,
original_version_guid,
self.user_id,
)
noop_reset_vertical = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
assert set(noop_reset_vertical.children) == original_vertical_children
# Delete a problem from the vertical and publish.
# Vertical should have one less problem than before.
self.store.delete_item(self.problem_x1a_1, self.user_id) # lint-amnesty, pylint: disable=no-member
self.store.publish(self.course.location, self.user_id)
modified_vertical = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
assert set(modified_vertical.children) == (
original_vertical_children - {self.problem_x1a_1} # lint-amnesty, pylint: disable=no-member
)
# Add a couple more children to the vertical.
# and publish a couple more times.
# We want to make sure we can restore from something a few versions back.
self.store.create_child(
self.user_id,
self.vertical_x1a, # lint-amnesty, pylint: disable=no-member
'problem',
block_id='new_child1',
)
self.store.publish(self.course.location, self.user_id)
self.store.create_child(
self.user_id,
self.vertical_x1a, # lint-amnesty, pylint: disable=no-member
'problem',
block_id='new_child2',
)
self.store.publish(self.course.location, self.user_id)
# Add another child, but don't publish.
# We want to make sure that this works with a dirty draft branch.
self.store.create_child(
self.user_id,
self.vertical_x1a, # lint-amnesty, pylint: disable=no-member
'problem',
block_id='new_child3',
)
# Reset course to original version.
# The restored vertical should have the same children as it did originally.
self.store.reset_course_to_version(
self.course.location.course_key,
original_version_guid,
self.user_id,
)
restored_vertical = self.store.get_item(self.vertical_x1a) # lint-amnesty, pylint: disable=no-member
assert set(restored_vertical.children) == original_vertical_children
def _get_split_modulestore(self):
"""
Grab the SplitMongo modulestore instance from within the Mixed modulestore.
Assumption: There is a SplitMongo modulestore within the Mixed modulestore.
This assumption is hacky, but it seems OK because we're removing the
Old (non-Split) Mongo modulestores soon.
Returns: SplitMongoModuleStore
"""
for store in self.store.modulestores:
if isinstance(store, SplitMongoModuleStore):
return store
assert False, "SplitMongoModuleStore was not found in MixedModuleStore"
# Draft: get all items which can be or should have parents
# Split: active_versions, structure
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_get_orphans(self, default_ms, max_find, max_send):
"""
Test finding orphans.
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
# orphans
orphan_locations = [
course_id.make_usage_key('chapter', 'OrphanChapter'),
course_id.make_usage_key('vertical', 'OrphanVertical'),
course_id.make_usage_key('problem', 'OrphanProblem'),
course_id.make_usage_key('html', 'OrphanHTML'),
]
# detached items (not considered as orphans)
detached_locations = [
course_id.make_usage_key('static_tab', 'StaticTab'),
course_id.make_usage_key('course_info', 'updates'),
]
for location in orphan_locations + detached_locations:
self.store.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id
)
with check_mongo_calls(max_find, max_send):
found_orphans = self.store.get_orphans(self.course_locations[self.MONGO_COURSEID].course_key)
six.assertCountEqual(self, found_orphans, orphan_locations)
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_get_non_orphan_parents(self, default_ms):
"""
Test finding non-orphan parents from many possible parents.
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
# test that problem "problem_x1a_1" has only one published parent
mongo_store = self.store._get_modulestore_for_courselike(course_id) # pylint: disable=protected-access
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
assert parent == self.vertical_x1a # lint-amnesty, pylint: disable=no-member
# add some published orphans
orphan_sequential = course_id.make_usage_key('sequential', 'OrphanSequential')
orphan_vertical = course_id.make_usage_key('vertical', 'OrphanVertical')
orphan_locations = [orphan_sequential, orphan_vertical]
for location in orphan_locations:
self.store.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id
)
self.store.publish(location, self.user_id)
found_orphans = mongo_store.get_orphans(course_id)
assert set(found_orphans) == set(orphan_locations)
assert len(set(found_orphans)) == 2
# add the orphan vertical and sequential as additional parents of problem "problem_x1a_1"
mongo_store.collection.update_one(
orphan_sequential.to_deprecated_son('_id.'),
{'$push': {'definition.children': six.text_type(self.problem_x1a_1)}} # lint-amnesty, pylint: disable=no-member
)
mongo_store.collection.update_one(
orphan_vertical.to_deprecated_son('_id.'),
{'$push': {'definition.children': six.text_type(self.problem_x1a_1)}} # lint-amnesty, pylint: disable=no-member
)
# test that "get_parent_location" method of published branch still returns the correct non-orphan parent for
# problem "problem_x1a_1" since the two other parents are orphans
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
assert parent == self.vertical_x1a # lint-amnesty, pylint: disable=no-member
# now add valid published vertical as another parent of problem
mongo_store.collection.update_one(self.sequential_x1.to_deprecated_son('_id.'), {'$push': {'definition.children': six.text_type(self.problem_x1a_1)}}) # lint-amnesty, pylint: disable=no-member, line-too-long
# now check that "get_parent_location" method of published branch raises "ReferentialIntegrityError" for
# problem "problem_x1a_1" since it has now 2 valid published parents
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
assert self.store.has_item(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
with pytest.raises(ReferentialIntegrityError):
self.store.get_parent_location(self.problem_x1a_1) # lint-amnesty, pylint: disable=no-member
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_create_item_from_parent_location(self, default_ms):
"""
Test a code path missed by the above: passing an old-style location as parent but no
new location for the child
"""
self.initdb(default_ms)
self.store.create_child(
self.user_id,
self.course_locations[self.MONGO_COURSEID],
'problem',
block_id='orphan'
)
orphans = self.store.get_orphans(self.course_locations[self.MONGO_COURSEID].course_key)
assert len(orphans) == 0, 'unexpected orphans: {}'.format(orphans)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_create_item_populates_edited_info(self, default_ms):
self.initdb(default_ms)
block = self.store.create_item(
self.user_id,
self.course.location.course_key,
'problem'
)
assert self.user_id == block.edited_by
assert datetime.datetime.now(UTC) > block.edited_on
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_create_item_populates_subtree_edited_info(self, default_ms):
self.initdb(default_ms)
block = self.store.create_item(
self.user_id,
self.course.location.course_key,
'problem'
)
assert self.user_id == block.subtree_edited_by
assert datetime.datetime.now(UTC) > block.subtree_edited_on
# Draft: wildcard search of draft and split
# Split: wildcard search of draft and split
@ddt.data((ModuleStoreEnum.Type.mongo, 2, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_get_courses_for_wiki(self, default_ms, max_find, max_send):
"""
Test the get_courses_for_wiki method
"""
self.initdb(default_ms)
# Test Mongo wiki
with check_mongo_calls(max_find, max_send):
wiki_courses = self.store.get_courses_for_wiki('999')
assert len(wiki_courses) == 1
assert self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None) in wiki_courses
assert len(self.store.get_courses_for_wiki('edX.simple.2012_Fall')) == 0
assert len(self.store.get_courses_for_wiki('no_such_wiki')) == 0
# Draft:
# Find: find vertical, find children
# Sends:
# 1. delete all of the published nodes in subtree
# 2. insert vertical as published (deleted in step 1) w/ the deleted problems as children
# 3-6. insert the 3 problems and 1 html as published
# Split: active_versions, 2 structures (pre & post published?)
# Sends:
# - insert structure
# - write index entry
@ddt.data((ModuleStoreEnum.Type.mongo, 2, 6), (ModuleStoreEnum.Type.split, 3, 2))
@ddt.unpack
def test_unpublish(self, default_ms, max_find, max_send):
"""
Test calling unpublish
"""
self.initdb(default_ms)
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
self._create_block_hierarchy()
# publish
self.store.publish(self.course.location, self.user_id)
published_xblock = self.store.get_item(
self.vertical_x1a, # lint-amnesty, pylint: disable=no-member
revision=ModuleStoreEnum.RevisionOption.published_only
)
assert published_xblock is not None
# unpublish
with check_mongo_calls(max_find, max_send):
self.store.unpublish(self.vertical_x1a, self.user_id) # lint-amnesty, pylint: disable=no-member
with pytest.raises(ItemNotFoundError):
self.store.get_item(
self.vertical_x1a, # lint-amnesty, pylint: disable=no-member
revision=ModuleStoreEnum.RevisionOption.published_only
)
# make sure draft version still exists
draft_xblock = self.store.get_item(
self.vertical_x1a, # lint-amnesty, pylint: disable=no-member
revision=ModuleStoreEnum.RevisionOption.draft_only
)
assert draft_xblock is not None
# Draft: specific query for revision None
# Split: active_versions, structure
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_has_published_version(self, default_ms, max_find, max_send):
"""
Test the has_published_version method
"""
self.initdb(default_ms)
self._create_block_hierarchy()
# start off as Private
item = self.store.create_child(self.user_id, self.writable_chapter_location, 'problem', 'test_compute_publish_state') # lint-amnesty, pylint: disable=line-too-long
item_location = item.location
with check_mongo_calls(max_find, max_send):
assert not self.store.has_published_version(item)
# Private -> Public
self.store.publish(item_location, self.user_id)
item = self.store.get_item(item_location)
assert self.store.has_published_version(item)
# Public -> Private
self.store.unpublish(item_location, self.user_id)
item = self.store.get_item(item_location)
assert not self.store.has_published_version(item)
# Private -> Public
self.store.publish(item_location, self.user_id)
item = self.store.get_item(item_location)
assert self.store.has_published_version(item)
# Public -> Draft with NO changes
self.store.convert_to_draft(item_location, self.user_id)
item = self.store.get_item(item_location)
assert self.store.has_published_version(item)
# Draft WITH changes
item.display_name = 'new name'
item = self.store.update_item(item, self.user_id)
assert self.store.has_changes(item)
assert self.store.has_published_version(item)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_edit_info_ancestors(self, default_ms):
"""
Tests that edited_on, edited_by, subtree_edited_on, and subtree_edited_by are set correctly during update
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
def check_node(location_key, after, before, edited_by, subtree_after, subtree_before, subtree_by):
"""
Checks that the node given by location_key matches the given edit_info constraints.
"""
node = self.store.get_item(location_key)
if after:
assert after < node.edited_on
assert node.edited_on < before
assert node.edited_by == edited_by
if subtree_after:
assert subtree_after < node.subtree_edited_on
assert node.subtree_edited_on < subtree_before
assert node.subtree_edited_by == subtree_by
with self.store.bulk_operations(test_course.id):
# Create a dummy vertical & html to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
block_id='test_vertical'
)
child = self.store.create_child(
self.user_id,
component.location,
'html',
block_id='test_html'
)
sibling = self.store.create_child(
self.user_id,
component.location,
'html',
block_id='test_html_no_change'
)
after_create = datetime.datetime.now(UTC)
# Verify that all nodes were last edited in the past by create_user
for block in [component, child, sibling]:
check_node(block.location, None, after_create, self.user_id, None, after_create, self.user_id)
# Change the component, then check that there now are changes
component.display_name = 'Changed Display Name'
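# use a different user id for the edit so the change in attribution is observable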
editing_user = self.user_id - 2
with self.store.bulk_operations(test_course.id): # TNL-764 bulk ops disabled ancestor updates
component = self.store.update_item(component, editing_user)
after_edit = datetime.datetime.now(UTC)
check_node(component.location, after_create, after_edit, editing_user, after_create, after_edit, editing_user)
# but child didn't change
check_node(child.location, None, after_create, self.user_id, None, after_create, self.user_id)
# Change the child
child = self.store.get_item(child.location)
child.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=editing_user)
after_edit = datetime.datetime.now(UTC)
# Verify that child was last edited between after_create and after_edit by edit_user
check_node(child.location, after_create, after_edit, editing_user, after_create, after_edit, editing_user)
# Verify that ancestors edit info is unchanged, but their subtree edit info matches child
check_node(test_course.location, None, after_create, self.user_id, after_create, after_edit, editing_user)
# Verify that others have unchanged edit info
check_node(sibling.location, None, after_create, self.user_id, None, after_create, self.user_id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_edit_info(self, default_ms):
"""
Tests that edited_on and edited_by are set correctly during an update
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
)
# Store the current edit time and verify that user created the component
assert component.edited_by == self.user_id
old_edited_on = component.edited_on
edit_user = self.user_id - 2
# Change the component
component.display_name = 'Changed'
self.store.update_item(component, edit_user)
updated_component = self.store.get_item(component.location)
# Verify the ordering of edit times and that edit_user made the edit
assert old_edited_on < updated_component.edited_on
assert updated_component.edited_by == edit_user
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_published_info(self, default_ms):
"""
Tests that published_on and published_by are set correctly
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
publish_user = 456
# Create a dummy component to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
)
# Store the current time, then publish
old_time = datetime.datetime.now(UTC)
self.store.publish(component.location, publish_user)
updated_component = self.store.get_item(component.location)
# Verify the time order and that publish_user caused publication
assert old_time <= updated_component.published_on
assert updated_component.published_by == publish_user
# Verify that changing the item doesn't unset the published info
updated_component.display_name = 'changed'
self.store.update_item(updated_component, self.user_id)
updated_component = self.store.get_item(updated_component.location)
assert old_time <= updated_component.published_on
assert updated_component.published_by == publish_user
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_auto_publish(self, default_ms):
"""
Test that the correct things have been published automatically
Assumptions:
* we auto-publish courses, chapters, sequentials
* we don't auto-publish problems
"""
self.initdb(default_ms)
# test create_course to make sure we are autopublishing
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
assert self.store.has_published_version(test_course)
test_course_key = test_course.id
# test create_item of direct-only category to make sure we are autopublishing
chapter = self.store.create_child(self.user_id, test_course.location, 'chapter', 'Overview')
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
assert chapter.location in self.store.get_item(test_course.location).children
assert self.store.has_published_version(chapter)
chapter_location = chapter.location
# test create_child of direct-only category to make sure we are autopublishing
sequential = self.store.create_child(self.user_id, chapter_location, 'sequential', 'Sequence')
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
assert sequential.location in self.store.get_item(chapter_location).children
assert self.store.has_published_version(sequential)
# test update_item of direct-only category to make sure we are autopublishing
sequential.display_name = 'sequential1'
sequential = self.store.update_item(sequential, self.user_id)
assert self.store.has_published_version(sequential)
# test delete_item of direct-only category to make sure we are autopublishing
self.store.delete_item(sequential.location, self.user_id, revision=ModuleStoreEnum.RevisionOption.all)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
assert sequential.location not in self.store.get_item(chapter_location).children
chapter = self.store.get_item(chapter.location.for_branch(None))
assert self.store.has_published_version(chapter)
# test create_child of NOT direct-only category to make sure we aren't autopublishing
problem_child = self.store.create_child(self.user_id, chapter_location, 'problem', 'Problem_Child')
assert not self.store.has_published_version(problem_child)
# test create_item of NOT direct-only category to make sure we aren't autopublishing
problem_item = self.store.create_item(self.user_id, test_course_key, 'problem', 'Problem_Item')
assert not self.store.has_published_version(problem_item)
# test update_item of NOT direct-only category to make sure we aren't autopublishing
problem_item.display_name = 'Problem_Item1'
problem_item = self.store.update_item(problem_item, self.user_id)
assert not self.store.has_published_version(problem_item)
# test delete_item of NOT direct-only category to make sure we aren't autopublishing
self.store.delete_item(problem_child.location, self.user_id)
chapter = self.store.get_item(chapter.location.for_branch(None))
assert self.store.has_published_version(chapter)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_courses_for_wiki_shared(self, default_ms):
"""
Test two courses sharing the same wiki
"""
self.initdb(default_ms)
# verify initial state - initially, we should have a wiki for the Mongo course
wiki_courses = self.store.get_courses_for_wiki('999')
assert self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None) in wiki_courses
# set Mongo course to share the wiki with simple course
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
mongo_course.wiki_slug = 'simple'
self.store.update_item(mongo_course, self.user_id)
# now mongo_course should not be retrievable with old wiki_slug
wiki_courses = self.store.get_courses_for_wiki('999')
assert len(wiki_courses) == 0
# but there should be one course with wiki_slug 'simple'
wiki_courses = self.store.get_courses_for_wiki('simple')
assert len(wiki_courses) == 1
assert self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None) in wiki_courses
# configure mongo course to use unique wiki_slug.
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
mongo_course.wiki_slug = 'MITx.999.2013_Spring'
self.store.update_item(mongo_course, self.user_id)
# it should be retrievable with its new wiki_slug
wiki_courses = self.store.get_courses_for_wiki('MITx.999.2013_Spring')
assert len(wiki_courses) == 1
assert self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None) in wiki_courses
# and NOT retrievable with its old wiki_slug
wiki_courses = self.store.get_courses_for_wiki('simple')
assert len(wiki_courses) == 0
assert self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None) not in wiki_courses
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_branch_setting(self, default_ms):
"""
Test the branch_setting context manager
"""
self.initdb(default_ms)
self._create_block_hierarchy()
problem_location = self.problem_x1a_1.for_branch(None) # lint-amnesty, pylint: disable=no-member
problem_original_name = 'Problem_x1a_1'
course_key = problem_location.course_key
problem_new_name = 'New Problem Name'
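        # Helper assertions used as the test walks the problem through draft-only,
        # published, edited-draft, and re-published states on both branches.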
def assertNumProblems(display_name, expected_number):
"""
Asserts the number of problems with the given display name is the given expected number.
"""
assert len(self.store.get_items(course_key.for_branch(None), settings={'display_name': display_name})) ==\
expected_number
def assertProblemNameEquals(expected_display_name):
"""
Asserts the display_name of the xblock at problem_location matches the given expected value.
"""
# check the display_name of the problem
problem = self.store.get_item(problem_location)
assert problem.display_name == expected_display_name
# there should be only 1 problem with the expected_display_name
assertNumProblems(expected_display_name, 1)
# verify Draft problem
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
assert self.store.has_item(problem_location)
assertProblemNameEquals(problem_original_name)
# verify Published problem doesn't exist
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assert not self.store.has_item(problem_location)
with pytest.raises(ItemNotFoundError):
self.store.get_item(problem_location)
# PUBLISH the problem
self.store.publish(self.vertical_x1a, self.user_id) # lint-amnesty, pylint: disable=no-member
self.store.publish(problem_location, self.user_id)
# verify Published problem
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assert self.store.has_item(problem_location)
assertProblemNameEquals(problem_original_name)
# verify Draft-preferred
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
assertProblemNameEquals(problem_original_name)
# EDIT name
problem = self.store.get_item(problem_location)
problem.display_name = problem_new_name
self.store.update_item(problem, self.user_id)
# verify Draft problem has new name
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
assertProblemNameEquals(problem_new_name)
# verify Published problem still has old name
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assertProblemNameEquals(problem_original_name)
# there should be no published problems with the new name
assertNumProblems(problem_new_name, 0)
# PUBLISH the problem
self.store.publish(problem_location, self.user_id)
# verify Published problem has new name
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assertProblemNameEquals(problem_new_name)
# there should be no published problems with the old name
assertNumProblems(problem_original_name, 0)
# verify branch setting is published-only in manager
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
assert self.store.get_branch_setting() == ModuleStoreEnum.Branch.published_only
# verify branch setting is draft-preferred in manager
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
assert self.store.get_branch_setting() == ModuleStoreEnum.Branch.draft_preferred
def verify_default_store(self, store_type):
"""
Verifies the default_store property
"""
assert self.store.default_modulestore.get_modulestore_type() == store_type
# verify internal helper method
store = self.store._get_modulestore_for_courselike() # pylint: disable=protected-access
assert store.get_modulestore_type() == store_type
# verify store used for creating a course
course = self.store.create_course("org", "course{}".format(uuid4().hex[:5]), "run", self.user_id)
assert course.system.modulestore.get_modulestore_type() == store_type
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_default_store(self, default_ms):
"""
Test the default store context manager
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
with self.store.default_store(default_ms):
self.verify_default_store(default_ms)
def test_default_store_nested(self):
"""
Test the default store context manager, nested within one another
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
with self.store.default_store(ModuleStoreEnum.Type.mongo):
self.verify_default_store(ModuleStoreEnum.Type.mongo)
with self.store.default_store(ModuleStoreEnum.Type.split):
self.verify_default_store(ModuleStoreEnum.Type.split)
self.verify_default_store(ModuleStoreEnum.Type.mongo)
def test_default_store_fake(self):
"""
Test the default store context manager, asking for a fake store
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
fake_store = "fake"
with self.assertRaisesRegex(Exception, "Cannot find store of type {}".format(fake_store)):
with self.store.default_store(fake_store):
pass # pragma: no cover
def save_asset(self, asset_key):
"""
Load and save the given file. (taken from test_contentstore)
"""
with open("{}/static/{}".format(DATA_DIR, asset_key.block_id), "rb") as f:
content = StaticContent(
asset_key, "Funky Pix", mimetypes.guess_type(asset_key.block_id)[0], f.read(),
)
self.store.contentstore.save(content)
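    # Each pair below is (source modulestore, destination modulestore) for the clone test.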
@ddt.data(
[ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.mongo],
[ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split],
[ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.split]
)
@ddt.unpack
def test_clone_course(self, source_modulestore, destination_modulestore):
"""
Test clone course
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={})
with self.store.default_store(source_modulestore):
source_course_key = self.store.make_course_key("org.source", "course.source", "run.source")
self._create_course(source_course_key)
self.save_asset(source_course_key.make_asset_key('asset', 'picture1.jpg'))
with self.store.default_store(destination_modulestore):
dest_course_id = self.store.make_course_key("org.other", "course.other", "run.other")
self.store.clone_course(source_course_key, dest_course_id, self.user_id)
# pylint: disable=protected-access
source_store = self.store._get_modulestore_by_type(source_modulestore)
dest_store = self.store._get_modulestore_by_type(destination_modulestore)
self.assertCoursesEqual(source_store, source_course_key, dest_store, dest_course_id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_operations_signal_firing(self, default):
""" Signals should be fired right before bulk_operations() exits. """
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
course_key = course.id
def _clear_bulk_ops_record(course_key): # pylint: disable=unused-argument
"""
Check if the signal has been fired.
The course_published signal fires before the _clear_bulk_ops_record.
"""
signal_handler.send.assert_called_with('course_published', course_key=course.id)
with patch.object(
self.store.thread_cache.default_store, '_clear_bulk_ops_record', wraps=_clear_bulk_ops_record
) as mock_clear_bulk_ops_record:
with self.store.bulk_operations(course_key):
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
self.store.create_item(self.user_id, course_key, block_type)
signal_handler.send.assert_not_called()
assert mock_clear_bulk_ops_record.call_count == 1
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_direct_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
assert self.store.thread_cache.default_store.signal_handler is not None
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
# Test non-draftable block types. The block should be published with every change.
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
log.debug('Testing with block type %s', block_type)
signal_handler.reset_mock()
block = self.store.create_item(self.user_id, course_key, block_type)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
block.display_name = block_type
self.store.update_item(block, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
self.store.publish(block.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_rerun_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
assert self.store.thread_cache.default_store.signal_handler is not None
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
# Test course re-runs
signal_handler.reset_mock()
dest_course_id = self.store.make_course_key("org.other", "course.other", "run.other")
self.store.clone_course(course_key, dest_course_id, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=dest_course_id)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_import_firing(self, default, _from_json):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
assert self.store.thread_cache.default_store.signal_handler is not None
signal_handler.send.assert_not_called()
# Test course imports
# Note: The signal is fired once when the course is created and
# a second time after the actual data import.
import_course_from_xml(
self.store, self.user_id, DATA_DIR, ['toy'], load_error_modules=False,
static_content_store=contentstore,
create_if_not_present=True,
)
signal_handler.send.assert_has_calls([
call('pre_publish', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
call('course_published', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
call('pre_publish', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
call('course_published', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_publish_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
assert self.store.thread_cache.default_store.signal_handler is not None
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
# Test a draftable block type, which needs to be explicitly published, and nest it within the
# normal structure - this is important because some implementors change the parent when adding a
# non-published child; if parent is in DIRECT_ONLY_CATEGORIES then this should not fire the event
signal_handler.reset_mock()
section = self.store.create_item(self.user_id, course.id, 'chapter')
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
subsection = self.store.create_child(self.user_id, section.location, 'sequential')
signal_handler.send.assert_called_with('course_published', course_key=course.id)
# 'units' and 'blocks' are draftable types
signal_handler.reset_mock()
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
signal_handler.send.assert_not_called()
block = self.store.create_child(self.user_id, unit.location, 'problem')
signal_handler.send.assert_not_called()
self.store.update_item(block, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.reset_mock()
self.store.publish(unit.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
self.store.unpublish(unit.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
self.store.delete_item(unit.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_course_publish_signal_direct_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
assert self.store.thread_cache.default_store.signal_handler is not None
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
                # Test non-draftable block types. No signals should be received until the bulk operation completes.
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
log.debug('Testing with block type %s', block_type)
block = self.store.create_item(self.user_id, course_key, block_type)
signal_handler.send.assert_not_called()
block.display_name = block_type
self.store.update_item(block, self.user_id)
signal_handler.send.assert_not_called()
self.store.publish(block.location, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_course_publish_signal_publish_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
assert self.store.thread_cache.default_store.signal_handler is not None
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
# Test a draftable block type, which needs to be explicitly published, and nest it within the
# normal structure - this is important because some implementors change the parent when adding a
# non-published child; if parent is in DIRECT_ONLY_CATEGORIES then this should not fire the event
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
section = self.store.create_item(self.user_id, course_key, 'chapter')
signal_handler.send.assert_not_called()
subsection = self.store.create_child(self.user_id, section.location, 'sequential')
signal_handler.send.assert_not_called()
# 'units' and 'blocks' are draftable types
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
signal_handler.send.assert_not_called()
block = self.store.create_child(self.user_id, unit.location, 'problem')
signal_handler.send.assert_not_called()
self.store.update_item(block, self.user_id)
signal_handler.send.assert_not_called()
self.store.publish(unit.location, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_called_with('course_published', course_key=course.id)
# Test editing draftable block type without publish
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
signal_handler.send.assert_not_called()
block = self.store.create_child(self.user_id, unit.location, 'problem')
signal_handler.send.assert_not_called()
self.store.publish(unit.location, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
signal_handler.send.assert_not_called()
unit.display_name = "Change this unit"
self.store.update_item(unit, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_not_called()
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_deleted_signal(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
assert self.store.thread_cache.default_store.signal_handler is not None
signal_handler.send.assert_not_called()
# Create a course
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
course_key = course.id
# Delete the course
course = self.store.delete_course(course_key, self.user_id)
# Verify that the signal was emitted
signal_handler.send.assert_called_with('course_deleted', course_key=course_key)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_delete_published_item_orphans(self, default_store):
"""
        Tests that deleting a published item doesn't create any orphans in the course
"""
self.initdb(default_store)
course_locator = self.course.id
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
problem = self.store.create_child(
self.user_id, vertical.location, 'problem', block_id='problem'
)
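        # Publish the chapter subtree so the whole hierarchy starts out without draft changes.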
self.store.publish(chapter.location, self.user_id)
# Verify that there are no changes
assert not self._has_changes(chapter.location)
assert not self._has_changes(sequential.location)
assert not self._has_changes(vertical.location)
assert not self._has_changes(problem.location)
# No orphans in course
course_orphans = self.store.get_orphans(course_locator)
assert len(course_orphans) == 0
self.store.delete_item(vertical.location, self.user_id)
# No orphans in course after delete, except
# in old mongo, which still creates orphans
course_orphans = self.store.get_orphans(course_locator)
if default_store == ModuleStoreEnum.Type.mongo:
assert len(course_orphans) == 1
else:
assert len(course_orphans) == 0
course_locator_publish = course_locator.for_branch(ModuleStoreEnum.BranchName.published)
        # No published orphans after delete, except
# in old mongo, which still creates orphans
course_publish_orphans = self.store.get_orphans(course_locator_publish)
if default_store == ModuleStoreEnum.Type.mongo:
assert len(course_publish_orphans) == 1
else:
assert len(course_publish_orphans) == 0
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_delete_draft_item_orphans(self, default_store):
"""
        Tests that deleting a draft item creates no orphans in the course
"""
self.initdb(default_store)
course_locator = self.course.id
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
problem = self.store.create_child(
self.user_id, vertical.location, 'problem', block_id='problem'
)
self.store.publish(chapter.location, self.user_id)
# Verify that there are no changes
assert not self._has_changes(chapter.location)
assert not self._has_changes(sequential.location)
assert not self._has_changes(vertical.location)
assert not self._has_changes(problem.location)
# No orphans in course
course_orphans = self.store.get_orphans(course_locator)
assert len(course_orphans) == 0
problem.display_name = 'changed'
problem = self.store.update_item(problem, self.user_id)
assert self._has_changes(vertical.location)
assert self._has_changes(problem.location)
self.store.delete_item(vertical.location, self.user_id)
# No orphans in course after delete, except
# in old mongo, which still creates them
course_orphans = self.store.get_orphans(course_locator)
if default_store == ModuleStoreEnum.Type.mongo:
assert len(course_orphans) == 1
else:
assert len(course_orphans) == 0
course_locator_publish = course_locator.for_branch(ModuleStoreEnum.BranchName.published)
# No published orphans after delete, except
# in old mongo, which still creates them
course_publish_orphans = self.store.get_orphans(course_locator_publish)
if default_store == ModuleStoreEnum.Type.mongo:
assert len(course_publish_orphans) == 1
else:
assert len(course_publish_orphans) == 0
@ddt.ddt
@attr('mongo')
class TestPublishOverExportImport(CommonMixedModuleStoreSetup):
"""
Tests which publish (or don't publish) items - and then export/import the course,
checking the state of the imported items.
"""
def setUp(self):
"""
Set up the database for testing
"""
super(TestPublishOverExportImport, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.user_id = ModuleStoreEnum.UserID.test
self.export_dir = mkdtemp()
self.addCleanup(rmtree, self.export_dir, ignore_errors=True)
def _export_import_course_round_trip(self, modulestore, contentstore, source_course_key, export_dir):
"""
Export the course from a modulestore and then re-import the course.
"""
top_level_export_dir = 'exported_source_course'
export_course_to_xml(
modulestore,
contentstore,
source_course_key,
export_dir,
top_level_export_dir,
)
import_course_from_xml(
modulestore,
'test_user',
export_dir,
source_dirs=[top_level_export_dir],
static_content_store=contentstore,
target_id=source_course_key,
create_if_not_present=True,
raise_on_failure=True,
)
@contextmanager
def _build_store(self, default_ms):
"""
Perform the modulestore-building and course creation steps for a mixed modulestore test.
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={})
with self.store.default_store(default_ms):
source_course_key = self.store.make_course_key("org.source", "course.source", "run.source")
self._create_course(source_course_key)
yield contentstore, source_course_key
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_draft_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that an unpublished unit keeps its draft changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and don't publish it.
draft_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
assert self._has_changes(draft_xblock.location)
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Verify that the imported block still is a draft, i.e. has changes.
assert self._has_changes(draft_xblock.location)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_published_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a published unit remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and publish it.
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
self.store.publish(published_xblock.location, self.user_id)
# Retrieve the published block and make sure it's published.
assert not self._has_changes(published_xblock.location)
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that it still is published, i.e. has no changes.
assert not self._has_changes(published_xblock.location)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_changed_published_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a published unit with an unpublished draft remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and publish it.
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
self.store.publish(published_xblock.location, self.user_id)
# Retrieve the published block and make sure it's published.
assert not self._has_changes(published_xblock.location)
updated_display_name = 'Changed Display Name'
component = self.store.get_item(published_xblock.location)
component.display_name = updated_display_name
component = self.store.update_item(component, self.user_id)
assert self.store.has_changes(component)
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
assert self._has_changes(published_xblock.location)
# Verify that the changes in the draft vertical still exist.
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, source_course_key):
component = self.store.get_item(published_xblock.location)
assert component.display_name == updated_display_name
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_seq_with_unpublished_vertical_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a published chapter and sequential with an unpublished child vertical still report changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical - don't publish it!
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Retrieve the published block and make sure it's published.
            # Chapter is published - but the changes in the vertical below mean it "has_changes".
assert self._has_changes(chapter.location)
            # Sequential is published - but the changes in the vertical below mean it "has_changes".
assert self._has_changes(sequential.location)
# Vertical is unpublished - so it "has_changes".
assert self._has_changes(vertical.location)
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
assert self._has_changes(chapter.location)
assert self._has_changes(sequential.location)
assert self._has_changes(vertical.location)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_vertical_with_draft_and_published_unit_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a published vertical and its unit keep unpublished draft changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Vertical has changes until it is actually published.
assert self._has_changes(vertical.location)
self.store.publish(vertical.location, self.user_id)
assert not self._has_changes(vertical.location)
# create unit
unit = self.store.create_child(
self.user_id, vertical.location, 'html', block_id='html_unit'
)
# Vertical has a new child -and- unit is unpublished. So both have changes.
assert self._has_changes(vertical.location)
assert self._has_changes(unit.location)
# Publishing the vertical also publishes its unit child.
self.store.publish(vertical.location, self.user_id)
assert not self._has_changes(vertical.location)
assert not self._has_changes(unit.location)
# Publishing the unit separately has no effect on whether it has changes - it's already published.
self.store.publish(unit.location, self.user_id)
assert not self._has_changes(vertical.location)
assert not self._has_changes(unit.location)
# Retrieve the published block and make sure it's published.
self.store.publish(chapter.location, self.user_id)
assert not self._has_changes(chapter.location)
assert not self._has_changes(sequential.location)
assert not self._has_changes(vertical.location)
assert not self._has_changes(unit.location)
# Now make changes to the unit - but don't publish them.
component = self.store.get_item(unit.location)
updated_display_name = 'Changed Display Name'
component.display_name = updated_display_name
component = self.store.update_item(component, self.user_id)
assert self._has_changes(component.location)
# Export the course - then import the course export.
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
assert self._has_changes(chapter.location)
assert self._has_changes(sequential.location)
assert self._has_changes(vertical.location)
assert self._has_changes(unit.location)
# Verify that the changes in the draft unit still exist.
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, source_course_key):
component = self.store.get_item(unit.location)
assert component.display_name == updated_display_name
# Verify that the draft changes don't exist in the published unit - it still uses the default name.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, source_course_key):
component = self.store.get_item(unit.location)
assert component.display_name == 'Text'
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_vertical_with_published_unit_remains_published_before_export_and_after_import(self, default_ms):
"""
        Tests that a published unit remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Vertical has changes until it is actually published.
assert self._has_changes(vertical.location)
self.store.publish(vertical.location, self.user_id)
assert not self._has_changes(vertical.location)
# create unit
unit = self.store.create_child(
self.user_id, vertical.location, 'html', block_id='html_unit'
)
# Now make changes to the unit.
updated_display_name = 'Changed Display Name'
unit.display_name = updated_display_name
unit = self.store.update_item(unit, self.user_id)
assert self._has_changes(unit.location)
# Publishing the vertical also publishes its unit child.
self.store.publish(vertical.location, self.user_id)
assert not self._has_changes(vertical.location)
assert not self._has_changes(unit.location)
# Export the course - then import the course export.
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
            # Get the published xblock from the imported course.
            # Verify that the imported blocks are still published, i.e. have no changes.
assert not self._has_changes(chapter.location)
assert not self._has_changes(sequential.location)
assert not self._has_changes(vertical.location)
assert not self._has_changes(unit.location)
# Verify that the published changes exist in the published unit.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, source_course_key):
component = self.store.get_item(unit.location)
assert component.display_name == updated_display_name
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
@XBlockAside.register_temp_plugin(AsideTestType, 'test_aside')
@patch('xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.applicable_aside_types',
lambda self, block: ['test_aside'])
def test_aside_crud(self, default_store):
"""
Check that asides could be imported from XML and the modulestores handle asides crud
"""
if default_store == ModuleStoreEnum.Type.mongo:
pytest.skip("asides not supported in old mongo")
with MongoContentstoreBuilder().build() as contentstore:
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default_store):
dest_course_key = self.store.make_course_key('edX', "aside_test", "2012_Fall")
courses = import_course_from_xml(
self.store, self.user_id, DATA_DIR, ['aside'],
load_error_modules=False,
static_content_store=contentstore,
target_id=dest_course_key,
create_if_not_present=True,
)
# check that the imported blocks have the right asides and values
def check_block(block):
"""
Check whether block has the expected aside w/ its fields and then recurse to the block's children
"""
asides = block.runtime.get_asides(block)
assert len(asides) == 1, 'Found {} asides but expected only test_aside'.format(asides)
assert isinstance(asides[0], AsideTestType)
category = block.scope_ids.block_type
assert asides[0].data_field == '{} aside data'.format(category)
assert asides[0].content == '{} Aside'.format(category.capitalize())
for child in block.get_children():
check_block(child)
check_block(courses[0])
# create a new block and ensure its aside magically appears with the right fields
new_chapter = self.store.create_child(self.user_id, courses[0].location, 'chapter', 'new_chapter')
asides = new_chapter.runtime.get_asides(new_chapter)
assert len(asides) == 1, 'Found {} asides but expected only test_aside'.format(asides)
chapter_aside = asides[0]
assert isinstance(chapter_aside, AsideTestType)
assert not chapter_aside.fields['data_field'].is_set_on(chapter_aside), \
f"data_field says it's assigned to {chapter_aside.data_field}"
assert not chapter_aside.fields['content'].is_set_on(chapter_aside), \
f"content says it's assigned to {chapter_aside.content}"
# now update the values
chapter_aside.data_field = 'new value'
self.store.update_item(new_chapter, self.user_id, asides=[chapter_aside])
new_chapter = self.store.get_item(new_chapter.location)
chapter_aside = new_chapter.runtime.get_asides(new_chapter)[0]
assert 'new value' == chapter_aside.data_field
# update the values the second time
chapter_aside.data_field = 'another one value'
self.store.update_item(new_chapter, self.user_id, asides=[chapter_aside])
new_chapter2 = self.store.get_item(new_chapter.location)
chapter_aside2 = new_chapter2.runtime.get_asides(new_chapter2)[0]
assert 'another one value' == chapter_aside2.data_field
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
@XBlockAside.register_temp_plugin(AsideTestType, 'test_aside')
@patch('xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.applicable_aside_types',
lambda self, block: ['test_aside'])
def test_export_course_with_asides(self, default_store):
if default_store == ModuleStoreEnum.Type.mongo:
pytest.skip("asides not supported in old mongo")
with MongoContentstoreBuilder().build() as contentstore:
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default_store):
dest_course_key = self.store.make_course_key('edX', "aside_test", "2012_Fall")
dest_course_key2 = self.store.make_course_key('edX', "aside_test_2", "2012_Fall_2")
courses = import_course_from_xml(
self.store,
self.user_id,
DATA_DIR,
['aside'],
load_error_modules=False,
static_content_store=contentstore,
target_id=dest_course_key,
create_if_not_present=True,
)
def update_block_aside(block):
"""
                    Update the aside fields on the block and then recurse to the block's children
"""
asides = block.runtime.get_asides(block)
asides[0].data_field = ''.join(['Exported data_field ', asides[0].data_field])
asides[0].content = ''.join(['Exported content ', asides[0].content])
self.store.update_item(block, self.user_id, asides=[asides[0]])
for child in block.get_children():
update_block_aside(child)
update_block_aside(courses[0])
# export course to xml
top_level_export_dir = 'exported_source_course_with_asides'
export_course_to_xml(
self.store,
contentstore,
dest_course_key,
self.export_dir,
top_level_export_dir,
)
# and restore the new one from the exported xml
courses2 = import_course_from_xml(
self.store,
self.user_id,
self.export_dir,
source_dirs=[top_level_export_dir],
static_content_store=contentstore,
target_id=dest_course_key2,
create_if_not_present=True,
raise_on_failure=True,
)
assert 1 == len(courses2)
# check that the imported blocks have the right asides and values
def check_block(block):
"""
Check whether block has the expected aside w/ its fields and then recurse to the block's children
"""
asides = block.runtime.get_asides(block)
assert len(asides) == 1, 'Found {} asides but expected only test_aside'.format(asides)
assert isinstance(asides[0], AsideTestType)
category = block.scope_ids.block_type
assert asides[0].data_field == 'Exported data_field {} aside data'.format(category)
assert asides[0].content == 'Exported content {} Aside'.format(category.capitalize())
for child in block.get_children():
check_block(child)
check_block(courses2[0])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
@XBlockAside.register_temp_plugin(AsideTestType, 'test_aside')
@patch('xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.applicable_aside_types',
lambda self, block: ['test_aside'])
def test_export_course_after_creating_new_items_with_asides(self, default_store): # pylint: disable=too-many-statements
if default_store == ModuleStoreEnum.Type.mongo:
pytest.skip("asides not supported in old mongo")
with MongoContentstoreBuilder().build() as contentstore:
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default_store):
dest_course_key = self.store.make_course_key('edX', "aside_test", "2012_Fall")
dest_course_key2 = self.store.make_course_key('edX', "aside_test_2", "2012_Fall_2")
courses = import_course_from_xml(
self.store,
self.user_id,
DATA_DIR,
['aside'],
load_error_modules=False,
static_content_store=contentstore,
target_id=dest_course_key,
create_if_not_present=True,
)
# create new chapter and modify aside for it
new_chapter_display_name = 'New Chapter'
new_chapter = self.store.create_child(self.user_id, courses[0].location, 'chapter', 'new_chapter')
new_chapter.display_name = new_chapter_display_name
asides = new_chapter.runtime.get_asides(new_chapter)
assert len(asides) == 1, 'Found {} asides but expected only test_aside'.format(asides)
chapter_aside = asides[0]
assert isinstance(chapter_aside, AsideTestType)
chapter_aside.data_field = 'new value'
self.store.update_item(new_chapter, self.user_id, asides=[chapter_aside])
# create new problem and modify aside for it
sequence = courses[0].get_children()[0].get_children()[0]
new_problem_display_name = 'New Problem'
new_problem = self.store.create_child(self.user_id, sequence.location, 'problem', 'new_problem')
new_problem.display_name = new_problem_display_name
asides = new_problem.runtime.get_asides(new_problem)
assert len(asides) == 1, 'Found {} asides but expected only test_aside'.format(asides)
problem_aside = asides[0]
assert isinstance(problem_aside, AsideTestType)
problem_aside.data_field = 'new problem value'
problem_aside.content = 'new content value'
self.store.update_item(new_problem, self.user_id, asides=[problem_aside])
# export course to xml
top_level_export_dir = 'exported_source_course_with_asides'
export_course_to_xml(
self.store,
contentstore,
dest_course_key,
self.export_dir,
top_level_export_dir,
)
# and restore the new one from the exported xml
courses2 = import_course_from_xml(
self.store,
self.user_id,
self.export_dir,
source_dirs=[top_level_export_dir],
static_content_store=contentstore,
target_id=dest_course_key2,
create_if_not_present=True,
raise_on_failure=True,
)
assert 1 == len(courses2)
# check that aside for the new chapter was exported/imported properly
chapters = courses2[0].get_children()
assert 2 == len(chapters)
assert new_chapter_display_name in [item.display_name for item in chapters]
found = False
for child in chapters:
if new_chapter.display_name == child.display_name:
found = True
asides = child.runtime.get_asides(child)
assert len(asides) == 1
child_aside = asides[0]
assert isinstance(child_aside, AsideTestType)
assert child_aside.data_field == 'new value'
break
assert found, 'new_chapter not found'
# check that aside for the new problem was exported/imported properly
sequence_children = courses2[0].get_children()[0].get_children()[0].get_children()
assert 2 == len(sequence_children)
assert new_problem_display_name in [item.display_name for item in sequence_children]
found = False
for child in sequence_children:
if new_problem.display_name == child.display_name:
found = True
asides = child.runtime.get_asides(child)
assert len(asides) == 1
child_aside = asides[0]
assert isinstance(child_aside, AsideTestType)
assert child_aside.data_field == 'new problem value'
assert child_aside.content == 'new content value'
break
                assert found, 'new_problem not found'
@ddt.ddt
@attr('mongo')
class TestAsidesWithMixedModuleStore(CommonMixedModuleStoreSetup):
"""
Tests of the MixedModulestore interface methods with XBlock asides.
"""
def setUp(self):
"""
Setup environment for testing
"""
super(TestAsidesWithMixedModuleStore, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
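        # Build an in-memory runtime (dict-backed field data) so aside instances can be
        # constructed directly in these tests without going through a modulestore.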
key_store = DictKeyValueStore()
field_data = KvsFieldData(key_store)
self.runtime = TestRuntime(services={'field-data': field_data})
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
@XBlockAside.register_temp_plugin(AsideFoo, 'test_aside1')
@XBlockAside.register_temp_plugin(AsideBar, 'test_aside2')
@patch('xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.applicable_aside_types',
lambda self, block: ['test_aside1', 'test_aside2'])
def test_get_and_update_asides(self, default_store):
"""
        Tests that connected asides can be stored, retrieved and updated along with the connected course items
"""
if default_store == ModuleStoreEnum.Type.mongo:
pytest.skip("asides not supported in old mongo")
self.initdb(default_store)
block_type1 = 'test_aside1'
def_id = self.runtime.id_generator.create_definition(block_type1)
usage_id = self.runtime.id_generator.create_usage(def_id)
# the first aside item
aside1 = AsideFoo(scope_ids=ScopeIds('user', block_type1, def_id, usage_id), runtime=self.runtime)
aside1.field11 = 'new_value11'
aside1.field12 = 'new_value12'
block_type2 = 'test_aside2'
def_id = self.runtime.id_generator.create_definition(block_type1)
usage_id = self.runtime.id_generator.create_usage(def_id)
# the second aside item
aside2 = AsideBar(scope_ids=ScopeIds('user', block_type2, def_id, usage_id), runtime=self.runtime)
aside2.field21 = 'new_value21'
# create new item with two asides
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical',
asides=[aside1, aside2]
)
def _check_asides(asides, field11, field12, field21, field22):
""" Helper function to check asides """
assert len(asides) == 2
assert {type(asides[0]), type(asides[1])} == {AsideFoo, AsideBar}
assert asides[0].field11 == field11
assert asides[0].field12 == field12
assert asides[1].field21 == field21
assert asides[1].field22 == field22
# get saved item and check asides
component = self.store.get_item(published_xblock.location)
asides = component.runtime.get_asides(component)
_check_asides(asides, 'new_value11', 'new_value12', 'new_value21', 'aside2_default_value2')
asides[0].field11 = 'other_value11'
# update the first aside item and check that it was stored correctly
self.store.update_item(component, self.user_id, asides=[asides[0]])
cached_asides = component.runtime.get_asides(component)
_check_asides(cached_asides, 'other_value11', 'new_value12', 'new_value21', 'aside2_default_value2')
new_component = self.store.get_item(published_xblock.location)
new_asides = new_component.runtime.get_asides(new_component)
_check_asides(new_asides, 'other_value11', 'new_value12', 'new_value21', 'aside2_default_value2')
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
@XBlockAside.register_temp_plugin(AsideFoo, 'test_aside1')
@patch('xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.applicable_aside_types',
lambda self, block: ['test_aside1'])
def test_clone_course_with_asides(self, default_store):
"""
Tests that connected asides will be cloned together with the parent courses
"""
if default_store == ModuleStoreEnum.Type.mongo:
pytest.skip("asides not supported in old mongo")
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={})
with self.store.default_store(default_store):
block_type1 = 'test_aside1'
def_id = self.runtime.id_generator.create_definition(block_type1)
usage_id = self.runtime.id_generator.create_usage(def_id)
aside1 = AsideFoo(scope_ids=ScopeIds('user', block_type1, def_id, usage_id), runtime=self.runtime)
aside1.field11 = 'test1'
source_course_key = self.store.make_course_key("org.source", "course.source", "run.source")
self._create_course(source_course_key, asides=[aside1])
dest_course_id = self.store.make_course_key("org.other", "course.other", "run.other")
self.store.clone_course(source_course_key, dest_course_id, self.user_id)
source_store = self.store._get_modulestore_by_type(default_store) # pylint: disable=protected-access
self.assertCoursesEqual(source_store, source_course_key, source_store, dest_course_id)
# after clone get connected aside and check that it was cloned correctly
actual_items = source_store.get_items(dest_course_id,
revision=ModuleStoreEnum.RevisionOption.published_only)
chapter_is_found = False
for block in actual_items:
if block.scope_ids.block_type == 'chapter':
asides = block.runtime.get_asides(block)
assert len(asides) == 1
assert asides[0].field11 == 'test1'
assert asides[0].field12 == 'aside1_default_value2'
chapter_is_found = True
break
assert chapter_is_found
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
@XBlockAside.register_temp_plugin(AsideFoo, 'test_aside1')
@patch('xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.applicable_aside_types',
lambda self, block: ['test_aside1'])
def test_delete_item_with_asides(self, default_store):
"""
Tests that connected asides will be removed together with the connected items
"""
if default_store == ModuleStoreEnum.Type.mongo:
pytest.skip("asides not supported in old mongo")
self.initdb(default_store)
block_type1 = 'test_aside1'
def_id = self.runtime.id_generator.create_definition(block_type1)
usage_id = self.runtime.id_generator.create_usage(def_id)
aside1 = AsideFoo(scope_ids=ScopeIds('user', block_type1, def_id, usage_id), runtime=self.runtime)
aside1.field11 = 'new_value11'
aside1.field12 = 'new_value12'
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical',
asides=[aside1]
)
asides = published_xblock.runtime.get_asides(published_xblock)
assert asides[0].field11 == 'new_value11'
assert asides[0].field12 == 'new_value12'
# remove item
self.store.delete_item(published_xblock.location, self.user_id)
# create item again
published_xblock2 = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
# check that aside has default values
asides2 = published_xblock2.runtime.get_asides(published_xblock2)
assert asides2[0].field11 == 'aside1_default_value1'
assert asides2[0].field12 == 'aside1_default_value2'
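    # Each tuple below is (store type, max mongo "find" calls, max mongo "send" calls)
    # expected by the check_mongo_calls assertion in the test.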
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@XBlockAside.register_temp_plugin(AsideFoo, 'test_aside1')
@patch('xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.applicable_aside_types',
lambda self, block: ['test_aside1'])
@ddt.unpack
def test_published_and_unpublish_item_with_asides(self, default_store, max_find, max_send):
"""
        Tests that publish/unpublish doesn't affect connected stored asides
"""
if default_store == ModuleStoreEnum.Type.mongo:
pytest.skip("asides not supported in old mongo")
self.initdb(default_store)
block_type1 = 'test_aside1'
def_id = self.runtime.id_generator.create_definition(block_type1)
usage_id = self.runtime.id_generator.create_usage(def_id)
aside1 = AsideFoo(scope_ids=ScopeIds('user', block_type1, def_id, usage_id), runtime=self.runtime)
aside1.field11 = 'new_value11'
aside1.field12 = 'new_value12'
def _check_asides(item):
""" Helper function to check asides """
asides = item.runtime.get_asides(item)
assert asides[0].field11 == 'new_value11'
assert asides[0].field12 == 'new_value12'
# start off as Private
item = self.store.create_child(self.user_id, self.writable_chapter_location, 'problem',
'test_compute_publish_state', asides=[aside1])
item_location = item.location
with check_mongo_calls(max_find, max_send):
assert not self.store.has_published_version(item)
_check_asides(item)
# Private -> Public
published_block = self.store.publish(item_location, self.user_id)
_check_asides(published_block)
item = self.store.get_item(item_location)
assert self.store.has_published_version(item)
_check_asides(item)
# Public -> Private
unpublished_block = self.store.unpublish(item_location, self.user_id)
_check_asides(unpublished_block)
item = self.store.get_item(item_location)
assert not self.store.has_published_version(item)
_check_asides(item)
| stvstnfrd/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_mixed_modulestore.py | Python | agpl-3.0 | 178,445 |
# -*- coding: utf-8 -*-
# © 2017 Houssine BAKKALI - Coop IT Easy
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import fields, models
class ProductTemplate(models.Model):
_inherit = "product.template"
default_code = fields.Char(related='product_variant_ids.default_code', string='Internal Reference', store=True) | houssine78/addons | product_internal_ref/models/product.py | Python | agpl-3.0 | 368 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# AppTalk documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 25 00:32:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'AppTalk'
copyright = '2017, Thomas Lee'
author = 'Thomas Lee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AppTalkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AppTalk.tex', 'AppTalk Documentation',
'Thomas Lee', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'apptalk', 'AppTalk Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AppTalk', 'AppTalk Documentation',
author, 'AppTalk', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
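# A hypothetical extension of this mapping (kept commented out, not used by
# this build) would list additional projects alongside the Python docs, e.g.:
#
# intersphinx_mapping = {
#     'https://docs.python.org/': None,
#     'http://www.sphinx-doc.org/en/stable/': None,
# }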
# Project logo and icon
html_logo = '_static/apptalk_logo_200x160.png'
html_favicon = '_static/favicon.ico'
| apptalk/apptalk | docs/conf.py | Python | agpl-3.0 | 5,545 |
# -*- coding: utf-8 -*-
import os
import pytest
from skylines.lib import files
from skylines.lib.types import is_unicode
from skylines.model import User, IGCFile
from tests.data import users, igcs
def test_user_delete_deletes_user(db_session):
john = users.john()
db_session.add(john)
db_session.commit()
john_id = john.id
assert john_id is not None
assert db_session.query(User).get(john_id) is not None
john.delete()
db_session.commit()
assert db_session.query(User).get(john_id) is None
@pytest.mark.usefixtures("files_folder")
def test_user_delete_deletes_owned_igc_files(db_session):
with open(igcs.simple_path, "rb") as f:
filename = files.add_file("simple.igc", f)
assert filename is not None
assert os.path.isfile(files.filename_to_path(filename))
john = users.john()
igc = igcs.simple(owner=john, filename=filename)
db_session.add(igc)
db_session.commit()
assert db_session.query(IGCFile).count() == 1
assert db_session.query(IGCFile).get(igc.id).owner_id == john.id
john.delete()
db_session.commit()
assert db_session.query(IGCFile).count() == 0
assert not os.path.isfile(files.filename_to_path(filename))
def test_repr_is_str(db_session):
john = users.john(last_name=u"Müller")
db_session.add(john)
db_session.commit()
assert isinstance(repr(john), str)
assert repr(john) == "<User: [email protected], display=John Müller>"
def test_hash_password():
hash = User._hash_password(u"secret123", salt=b"abcdef")
assert (
hash
== "bef57ec7f53a6d40beb640a780a639c83bc29ac8a9816f1fc6c5c6dcd93c4721272b82aa344691fb4037f20617b1d19212042e7e6cb39f4ba0dad95d8137104a"
)
assert is_unicode(hash)
| skylines-project/skylines | tests/model/test_user.py | Python | agpl-3.0 | 1,765 |
from __future__ import absolute_import, unicode_literals
import time
from datetime import timedelta
from djcelery_transactions import task
from django.utils import timezone
from redis_cache import get_redis_connection
from .models import CreditAlert, Invitation, Org, TopUpCredits
@task(track_started=True, name='send_invitation_email_task')
def send_invitation_email_task(invitation_id):
invitation = Invitation.objects.get(pk=invitation_id)
invitation.send_email()
@task(track_started=True, name='send_alert_email_task')
def send_alert_email_task(alert_id):
alert = CreditAlert.objects.get(pk=alert_id)
alert.send_email()
@task(track_started=True, name='check_credits_task')
def check_credits_task():
CreditAlert.check_org_credits()
@task(track_started=True, name='calculate_credit_caches')
def calculate_credit_caches():
"""
Repopulates the active topup and total credits for each organization
that received messages in the past week.
"""
# get all orgs that have sent a message in the past week
last_week = timezone.now() - timedelta(days=7)
# for every org that has sent a message in the past week
for org in Org.objects.filter(msgs__created_on__gte=last_week).distinct('pk'):
start = time.time()
org._calculate_credit_caches()
print " -- recalculated credits for %s in %0.2f seconds" % (org.name, time.time() - start)
@task(track_started=True, name="squash_topupcredits")
def squash_topupcredits():
r = get_redis_connection()
key = 'squash_topupcredits'
if not r.get(key):
with r.lock(key, timeout=900):
TopUpCredits.squash_credits()
| ewheeler/rapidpro | temba/orgs/tasks.py | Python | agpl-3.0 | 1,662 |
from . import test_remote_printer
from . import test_printer
| OCA/report-print-send | remote_report_to_printer/tests/__init__.py | Python | agpl-3.0 | 61 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) cgstudiomap <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Frontend Shop',
'version': 'beta',
'author': 'cgstudiomap',
'maintainer': 'cgstudiomap',
'license': 'AGPL-3',
'category': 'Web',
'summary': 'Shop Modules',
'depends': [
'website',
'website_menu_by_user_status',
],
'data': [
'templates/template_shop.xml',
'data/website_menus.xml',
],
'installable': True,
}
| cgstudiomap/cgstudiomap | main/local_modules/frontend_shop/__openerp__.py | Python | agpl-3.0 | 1,388 |
from flask_wtf import FlaskForm
from saylua.utils.form import sl_validators
from saylua.utils.form.fields import SlField, SlTextAreaField
class ForumThreadForm(FlaskForm):
title = SlField('Thread Title', [
sl_validators.Required(),
sl_validators.NotBlank(),
sl_validators.Min(3)])
body = SlTextAreaField('Thread Body', [
sl_validators.Required(),
sl_validators.NotBlank(),
sl_validators.Min(2)])
class ForumPostForm(FlaskForm):
body = SlTextAreaField('Post Content', [
sl_validators.Required(),
sl_validators.NotBlank(),
sl_validators.Min(2)])
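# Illustrative usage sketch (not part of the original module), assuming a
# Flask request context and a hypothetical create_thread() helper; FlaskForm's
# validate_on_submit() runs the validators declared above:
#
#     form = ForumThreadForm()
#     if form.validate_on_submit():
#         create_thread(title=form.title.data, body=form.body.data)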
| LikeMyBread/Saylua | saylua/modules/forums/forms/main.py | Python | agpl-3.0 | 632 |
# This file is part of FNP-Redakcja, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from django import template
register = template.Library()
@register.filter
def username(user):
return ("%s %s" % (user.first_name, user.last_name)).lstrip() or user.username
| fnp/redakcja | src/documents/templatetags/common_tags.py | Python | agpl-3.0 | 336 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-23 21:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('members', '0005_trainingrecordtype'),
]
operations = [
migrations.AlterField(
model_name='volunteer',
name='portrait',
field=models.ImageField(blank=True, max_length=256, null=True, upload_to='volunteers'),
),
]
| BenMotz/cubetoolkit | toolkit/members/migrations/0006_auto_20180223_2138.py | Python | agpl-3.0 | 506 |
import os
import shutil
from hashlib import md5
from io import BytesIO
from PIL import Image
from photonix.photos.models import LibraryPath
from photonix.photos.utils.db import record_photo
from photonix.photos.utils.fs import (determine_destination,
find_new_file_name, mkdir_p)
from photonix.photos.utils.metadata import get_datetime
SYNOLOGY_THUMBNAILS_DIR_NAME = '/@eaDir'
class FileHashCache(object):
'''
Used with determine_same_file() function. Can keep hold of the previously
opened orig and dest file contents. Can keep hold of all file-based and
image-based hashes per file.
'''
file_hash_cache = {}
file_data = {'orig': (None, None), 'dest': (None, None)}
def reset(self):
self.file_hash_cache = {}
def get_file_hash(self, fn, hash_type):
if fn in self.file_hash_cache and hash_type in self.file_hash_cache[fn]:
return self.file_hash_cache[fn][hash_type]
return None
def set_file_hash(self, fn, hash_type, hash_val):
if fn not in self.file_hash_cache:
self.file_hash_cache[fn] = {}
self.file_hash_cache[fn][hash_type] = hash_val
def get_file(self, fn, file_type):
if self.file_data[file_type][0] != fn:
self.file_data[file_type] = (fn, open(fn, 'rb').read())
return self.file_data[file_type][1]
def determine_same_file(origpath, destpath, fhc=None):
'''
First check if hashes of the two files match. If they don't match, they
could still be the same image if metadata has changed so open the pixel
data using PIL and compare hashes of that.
'''
if not fhc:
fhc = FileHashCache()
if len(fhc.file_hash_cache) > 1000:
fhc.reset()
orig_hash = fhc.get_file_hash(origpath, 'file')
if not orig_hash:
orig_hash = md5(fhc.get_file(origpath, 'orig')).hexdigest()
fhc.set_file_hash(origpath, 'file', orig_hash)
dest_hash = fhc.get_file_hash(destpath, 'file')
if not dest_hash:
dest_hash = md5(fhc.get_file(destpath, 'dest')).hexdigest()
fhc.set_file_hash(destpath, 'file', dest_hash)
if orig_hash == dest_hash:
return True
# Try matching on image data (ignoring EXIF)
if os.path.splitext(origpath)[1][1:].lower() in ['jpg', 'jpeg', 'png', ]:
orig_hash = fhc.get_file_hash(origpath, 'image')
if not orig_hash:
            orig_hash = md5(Image.open(BytesIO(fhc.get_file(origpath, 'orig'))).tobytes()).hexdigest()
fhc.set_file_hash(origpath, 'image', orig_hash)
dest_hash = fhc.get_file_hash(destpath, 'image')
if not dest_hash:
            dest_hash = md5(Image.open(BytesIO(fhc.get_file(destpath, 'dest'))).tobytes()).hexdigest()
fhc.set_file_hash(destpath, 'image', dest_hash)
if orig_hash == dest_hash:
return True
# TODO: Convert raw photos into temp jpgs to do proper comparison
return False
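def _example_duplicate_scan(candidates, destpath):
    '''
    Illustrative sketch added for clarity (not part of the original module):
    drives the two-stage comparison above with a single shared FileHashCache,
    so repeated checks against the same destination file reuse both the cached
    file contents and the previously computed hashes.
    '''
    fhc = FileHashCache()
    return [path for path in candidates if determine_same_file(path, destpath, fhc)]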
def blacklisted_type(file):
if file[-4:].lower() == '.mov' or file[-4:].lower() == '.mp4' or file[-4:].lower() == '.mkv':
return True
if file == '.DS_Store':
return True
return False
def import_photos_from_dir(orig, move=False):
imported = 0
were_duplicates = 0
were_bad = 0
for r, d, f in os.walk(orig):
if SYNOLOGY_THUMBNAILS_DIR_NAME in r:
continue
for fn in sorted(f):
filepath = os.path.join(r, fn)
dest = determine_destination(filepath)
if blacklisted_type(fn):
# Blacklisted type
were_bad += 1
elif not dest:
# No filters match this file type
pass
else:
t = get_datetime(filepath)
if t:
destpath = '%02d/%02d/%02d' % (t.year, t.month, t.day)
destpath = os.path.join(dest, destpath)
mkdir_p(destpath)
destpath = os.path.join(destpath, fn)
if filepath == destpath:
# File is already in the right place so be very careful not to do anything like delete it
pass
elif not os.path.exists(destpath):
if move:
shutil.move(filepath, destpath)
else:
shutil.copyfile(filepath, destpath)
record_photo(destpath)
imported += 1
print('IMPORTED {} -> {}'.format(filepath, destpath))
else:
print('PATH EXISTS {} -> {}'.format(filepath, destpath))
                        same = determine_same_file(filepath, destpath)
                        if same:
                            print('PHOTO IS THE SAME')
if move:
os.remove(filepath)
were_duplicates += 1
print('DELETED FROM SOURCE')
else:
print('NEED TO IMPORT UNDER DIFFERENT NAME')
exit(1)
destpath = find_new_file_name(destpath)
shutil.move(filepath, destpath)
record_photo(destpath)
imported += 1
# print 'IMPORTED {} -> {}'.format(filepath, destpath)
else:
print('ERROR READING DATE: {}'.format(filepath))
were_bad += 1
if imported or were_duplicates:
print('\n{} PHOTOS IMPORTED\n{} WERE DUPLICATES\n{} WERE BAD'.format(imported, were_duplicates, were_bad))
def import_photos_in_place(library_path):
orig = library_path.path
imported = 0
were_bad = 0
for r, d, f in os.walk(orig):
if SYNOLOGY_THUMBNAILS_DIR_NAME in r:
continue
for fn in sorted(f):
filepath = os.path.join(r, fn)
if blacklisted_type(fn):
# Blacklisted type
were_bad += 1
else:
modified = record_photo(filepath, library_path.library)
if modified:
imported += 1
print('IMPORTED {}'.format(filepath))
if imported:
print('\n{} PHOTOS IMPORTED\n{} WERE BAD'.format(imported, were_bad))
def rescan_photo_libraries(paths=[]):
library_paths = LibraryPath.objects.filter(type='St', backend_type='Lo')
if paths:
library_paths = library_paths.filter(path__in=paths)
for library_path in library_paths:
print(f'Searching path for changes {library_path.path}')
library_path.rescan()
| damianmoore/photo-manager | photonix/photos/utils/organise.py | Python | agpl-3.0 | 6,846 |
from django.apps import apps
from django.dispatch import receiver
from django.db.models.signals import post_migrate
@receiver(post_migrate, sender=apps.get_app_config('autodidact'))
def create_homepage(sender, **kwargs):
'''Receiver function that populates the database with a homepage in case it doesn't exist'''
from .models import Page
if not Page.objects.exists():
Page(content='***Hello, world!***').save()
| JaapJoris/autodidact | autodidact/management.py | Python | agpl-3.0 | 434 |
import requests
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
@csrf_exempt
@require_http_methods(["POST"])
def post_service_request(request):
payload = request.POST.copy()
outgoing = payload.dict()
if outgoing.get("internal_feedback", False):
if "internal_feedback" in outgoing:
del outgoing["internal_feedback"]
api_key = settings.OPEN311["INTERNAL_FEEDBACK_API_KEY"]
else:
api_key = settings.OPEN311["API_KEY"]
outgoing["api_key"] = api_key
url = settings.OPEN311["URL_BASE"]
session = requests.Session()
# Modify parameters for request in case of City of Turku
if "smbackend_turku" in settings.INSTALLED_APPS:
outgoing.pop("service_request_type")
outgoing.pop("can_be_published")
outgoing["address_string"] = "null"
outgoing["service_code"] = settings.OPEN311["SERVICE_CODE"]
r = session.post(url, data=outgoing)
if r.status_code != 200:
return HttpResponseBadRequest()
return HttpResponse(r.content, content_type="application/json")
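# Illustrative sketch (not part of the original module) of the OPEN311 settings
# this view relies on; the values below are placeholders:
#
#     OPEN311 = {
#         "URL_BASE": "https://example.com/open311/v1/requests.json",
#         "API_KEY": "public-feedback-key",
#         "INTERNAL_FEEDBACK_API_KEY": "internal-feedback-key",
#         "SERVICE_CODE": "180",  # only used for the City of Turku branch
#     }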
| City-of-Helsinki/smbackend | services/views.py | Python | agpl-3.0 | 1,229 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2022 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`ESHM20Craton`
"""
import numpy as np
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib.imt import PGA, SA
from openquake.hazardlib import const
from openquake.hazardlib.gsim.nga_east import (
get_tau_at_quantile, get_phi_ss_at_quantile, TAU_EXECUTION, TAU_SETUP,
PHI_SETUP, get_phi_ss, NGAEastGMPE, _get_f760, get_nonlinear_stddev,
get_linear_stddev, _get_fv, get_fnl)
from openquake.hazardlib.gsim.usgs_ceus_2019 import get_stewart_2019_phis2s
CONSTANTS = {"Mref": 4.5, "Rref": 1., "Mh": 6.2, "h": 5.0}
def get_distance_term(C, mag, rrup):
"""
Returns the distance attenuation factor
"""
rval = np.sqrt(rrup ** 2. + CONSTANTS["h"] ** 2.)
rref_val = np.sqrt(CONSTANTS["Rref"] ** 2. +
CONSTANTS["h"] ** 2.)
f_r = (C["c1"] + C["c2"] * (mag - CONSTANTS["Mref"])) *\
np.log(rval / rref_val) + (C["c3"] * (rval - rref_val) / 100.)
return f_r
def get_hard_rock_mean(C, ctx):
"""
Returns the mean and standard deviations for the reference very hard
rock condition (Vs30 = 3000 m/s)
"""
return get_magnitude_scaling(C, ctx.mag) + get_distance_term(
C, ctx.mag, ctx.rrup)
def get_magnitude_scaling(C, mag):
"""
Returns the magnitude scaling term
"""
d_m = mag - CONSTANTS["Mh"]
return np.where(mag <= CONSTANTS["Mh"],
C["e1"] + C["b1"] * d_m + C["b2"] * d_m ** 2.0,
C["e1"] + C["b3"] * d_m)
def get_site_amplification(site_epsilon, imt, pga_r, ctx):
"""
Returns the sum of the linear (Stewart et al., 2019) and non-linear
(Hashash et al., 2019) amplification terms
"""
# Get the coefficients for the IMT
C_LIN = NGAEastGMPE.COEFFS_LINEAR[imt]
C_F760 = NGAEastGMPE.COEFFS_F760[imt]
C_NL = NGAEastGMPE.COEFFS_NONLINEAR[imt]
if str(imt).startswith("PGA"):
period = 0.01
elif str(imt).startswith("PGV"):
period = 0.5
else:
period = imt.period
# Get f760
f760 = _get_f760(C_F760, ctx.vs30,
NGAEastGMPE.CONSTANTS)
# Get the linear amplification factor
f_lin = _get_fv(C_LIN, ctx, f760,
NGAEastGMPE.CONSTANTS)
# Get the nonlinear amplification from Hashash et al., (2017)
f_nl, f_rk = get_fnl(C_NL, pga_r, ctx.vs30, period)
# Mean amplification
ampl = f_lin + f_nl
# If an epistemic uncertainty is required then retrieve the epistemic
# sigma of both models and multiply by the input epsilon
if site_epsilon:
# In the case of the linear model sigma_f760 and sigma_fv are
# assumed independent and the resulting sigma_flin is the root
# sum of squares (SRSS)
f760_stddev = _get_f760(C_F760, ctx.vs30,
NGAEastGMPE.CONSTANTS,
is_stddev=True)
f_lin_stddev = np.sqrt(
f760_stddev ** 2. + get_linear_stddev(
C_LIN, ctx.vs30, NGAEastGMPE.CONSTANTS) ** 2)
# Likewise, the epistemic uncertainty on the linear and nonlinear
# model are assumed independent and the SRSS is taken
f_nl_stddev = get_nonlinear_stddev(
C_NL, ctx.vs30) * f_rk
site_epistemic = np.sqrt(f_lin_stddev ** 2. + f_nl_stddev ** 2.)
ampl += (site_epsilon * site_epistemic)
return ampl
def get_stddevs(ergodic, tau_model, TAU, PHI_SS, imt, ctx):
"""
Returns the standard deviations for either the ergodic or
non-ergodic models
"""
phi = get_phi_ss(imt, ctx.mag, PHI_SS)
if ergodic:
phi_s2s = get_stewart_2019_phis2s(imt, ctx.vs30)
phi = np.sqrt(phi ** 2. + phi_s2s ** 2.)
tau = TAU_EXECUTION[tau_model](imt, ctx.mag, TAU)
sigma = np.sqrt(tau ** 2. + phi ** 2.)
return [sigma, tau, phi]
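# Numeric illustration (added for clarity, not part of the original module):
# in the non-ergodic case the total standard deviation reduces to
# sigma = sqrt(tau ** 2 + phi_ss ** 2); for, say, tau = 0.35 and phi_ss = 0.55
# this gives sqrt(0.1225 + 0.3025) ≈ 0.65 (ln units). The ergodic case first
# folds the site-to-site term phi_s2s into phi and then applies the same
# combination.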
class ESHM20Craton(GMPE):
"""
Implements a scalable backbone GMPE for application to stable cratonic
regions (primarily intended for cratonic Europe). The median ground motion
is determined by fitting a parametric model to an extensive set of ground
motion scenarios from the suite of NGA East ground motion models for 800
m/s site class. The form of the parametric model is based on that of
:class:`openquake.hazardlib.gsim.kotha_2019.KothaEtAl2019`, and the
scaling in terms of the number of standard deviations of the epistemic
uncertainty (sigma).
The aleatory uncertainty model is that of Al Atik (2015), which is common
to all NGA East ground motion models and configurable by the user.
:param float epsilon:
Number of standard deviations above or below the median to be applied
to the epistemic uncertainty sigma
:param str tau_model:
Choice of model for the inter-event standard deviation (tau), selecting
from "global" {default}, "cena" or "cena_constant"
:param str phi_model:
Choice of model for the single-station intra-event standard deviation
(phi_ss), selecting from "global" {default}, "cena" or "cena_constant"
:param TAU:
Inter-event standard deviation model
:param PHI_SS:
Single-station standard deviation model
:param PHI_S2SS:
Station term for ergodic standard deviation model
:param bool ergodic:
True if an ergodic model is selected, False otherwise
:param float tau_quantile:
Epistemic uncertainty quantile for the inter-event standard
deviation models. Float in the range 0 to 1, or None (mean value
used)
:param float phi_ss_quantile:
Epistemic uncertainty quantile for the intra-event standard
deviation models. Float in the range 0 to 1, or None (mean value
used)
:param float phi_s2ss_quantile:
Epistemic uncertainty quantile for the site-to-site standard
deviation models. Float in the range 0 to 1, or None (mean value
used)
:param float site_epsilon:
Number of standard deviations above or below median for the uncertainty
in the site amplification model
"""
experimental = True
    #: Supported tectonic region type is stable continental crust
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.STABLE_CONTINENTAL
#: The GMPE is defined only for PGA and SA
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, SA}
#: Supported intensity measure component is the geometric mean of two
#: horizontal components
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RotD50
#: Supported standard deviation types are inter-event, intra-event
#: and total
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {
const.StdDev.TOTAL, const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT}
    #: Median calibrated for 3000 m/s reference rock; vs30 is required for the site amplification term
REQUIRES_SITES_PARAMETERS = {'vs30'}
#: Requires only magnitude
REQUIRES_RUPTURE_PARAMETERS = {'mag'}
#: Required distance measure is Rrup
REQUIRES_DISTANCES = {'rrup'}
#: Defined for a reference velocity of 3000 m/s
DEFINED_FOR_REFERENCE_VELOCITY = 3000.0
def __init__(self, **kwargs):
"""
Instantiates the class with additional terms controlling both the
epistemic uncertainty in the median and the preferred aleatory
uncertainty model ('global', 'cena_constant', 'cena'), and the quantile
of the epistemic uncertainty model (float in the range 0 to 1, or None)
"""
super().__init__(**kwargs)
self.epsilon = kwargs.get("epsilon", 0.0)
self.tau_model = kwargs.get("tau_model", "global")
self.phi_model = kwargs.get("phi_model", "global")
self.ergodic = kwargs.get("ergodic", True)
self.tau_quantile = kwargs.get("tau_quantile", None)
self.phi_ss_quantile = kwargs.get("phi_ss_quantile", None)
self.site_epsilon = kwargs.get("site_epsilon", 0.0)
self.PHI_S2SS = None
# define the standard deviation model from the NGA East aleatory
# uncertainty model according to the calibrations specified by the user
# setup tau
self.TAU = get_tau_at_quantile(TAU_SETUP[self.tau_model]["MEAN"],
TAU_SETUP[self.tau_model]["STD"],
self.tau_quantile)
# setup phi
self.PHI_SS = get_phi_ss_at_quantile(PHI_SETUP[self.phi_model],
self.phi_ss_quantile)
def compute(self, ctx: np.recarray, imts, mean, sig, tau, phi):
"""
Returns the mean and standard deviations
"""
C_ROCK = self.COEFFS[PGA()]
pga_r = get_hard_rock_mean(C_ROCK, ctx)
for m, imt in enumerate(imts):
C = self.COEFFS[imt]
# Get the desired spectral acceleration on rock
if imt.string != "PGA":
# Calculate the ground motion at required spectral period for
# the reference rock
mean[m] = get_hard_rock_mean(C, ctx)
else:
# Avoid re-calculating PGA if that was already done!
mean[m] = np.copy(pga_r)
mean[m] += get_site_amplification(
self.site_epsilon, imt, np.exp(pga_r), ctx)
# Get standard deviation model
sig[m], tau[m], phi[m] = get_stddevs(
self.ergodic, self.tau_model, self.TAU, self.PHI_SS,
imt, ctx)
if self.epsilon:
# If requested, apply epistemic uncertainty
mean[m] += (self.epsilon * C["sigma_mu"])
COEFFS = CoeffsTable(sa_damping=5, table="""\
imt e1 b1 b2 b3 c1 c2 c3 sigma_mu
pga 0.129433711217154 0.516399476752765 -0.1203218740054820 0.209372712495698 -1.49820100429001 0.220432033342701 -0.2193114966960720 0.467518017234970
0.010 0.441910295918064 0.507166125004641 -0.1018797167490890 0.184282079939229 -1.56753763950638 0.222961320838036 -0.2173850863710700 0.424145087820724
0.020 0.979123809125496 0.464490220614734 -0.1137734938103270 0.167233525048116 -1.62825571194736 0.226150925046427 -0.2441521749125150 0.453414267627762
0.025 1.043340609418350 0.469670674909745 -0.1134508651616400 0.174065913292435 -1.60908830139611 0.224104272434454 -0.2576680445215000 0.456276006752802
0.030 1.046568495683850 0.476295173341630 -0.1145295451766630 0.188789464775533 -1.57834523952911 0.220697857317202 -0.2700129055991920 0.442617576906802
0.040 1.007663453495640 0.493809587666455 -0.1150108357853370 0.208535847120219 -1.52232244977795 0.215223039177726 -0.2874767187616130 0.432692547164462
0.050 0.951568976547282 0.507030793387879 -0.1169997424043950 0.227662857289356 -1.47612267464663 0.210020976504110 -0.2982691158785990 0.436894676747672
0.075 0.766898926868941 0.537817749890152 -0.1257930384357200 0.255897568366613 -1.39013641948231 0.198935495001160 -0.3062526875169160 0.445048551267241
0.100 0.566921463821433 0.563265477669262 -0.1390887741365440 0.285966324295526 -1.32905052927637 0.189118846081288 -0.2963709612002850 0.445057073756783
0.150 0.316925422496063 0.627617718350029 -0.1689678154012890 0.338414772067430 -1.25211993705245 0.167801937655424 -0.2665003749714420 0.408938323358624
0.200 0.116888680130253 0.691136578143751 -0.1911386191534560 0.377390002770526 -1.20586644897371 0.154400113563626 -0.2365399916865360 0.396717600597790
0.250 -0.043842379857700 0.744829702492645 -0.2085160327338160 0.406488784261977 -1.18352051545358 0.146981292282198 -0.2083030844596630 0.385803497323193
0.300 -0.198476724421674 0.799805296458131 -0.2231548236155840 0.433865912912985 -1.16557023447139 0.140633373085703 -0.1797968441826460 0.386776049771811
0.400 -0.441747369972888 0.897281226627442 -0.2422049150995460 0.483912433515021 -1.15156734492077 0.133979350791855 -0.1362509955087160 0.395064995993542
0.500 -0.637444825872443 0.992673274984355 -0.2539089461326410 0.526938715295978 -1.14419843291335 0.129943753235505 -0.1121349311669610 0.416676943629526
0.750 -1.032362404718110 1.237960033431780 -0.2483534410193260 0.613138137400433 -1.12728314803895 0.121478497518643 -0.0735664802614733 0.424883714950325
1.000 -1.372802902796470 1.445803895497810 -0.2291157391507420 0.691619273496051 -1.10947364377839 0.116810841150476 -0.0583506072267647 0.435248946431388
1.500 -1.888467249398300 1.730211169117530 -0.1937203497378370 0.805618949392974 -1.10238976578388 0.114304314269286 -0.0390002103787838 0.494395041361088
2.000 -2.334523112985840 1.920451196131200 -0.1617462515371870 0.908051097334214 -1.09476613327876 0.113858927938807 -0.0296892844443899 0.529656872094865
3.000 -3.034920080151080 2.146848246139110 -0.1148224554001390 1.085140635646810 -1.09084212215003 0.115716684506372 -0.0198059757373382 0.550851605706151
4.000 -3.576616283968620 2.262687822224390 -0.0885264828734587 1.227765676724790 -1.09028991715414 0.117770415095847 -0.0135787505915478 0.547911773655132
5.000 -4.022628827670580 2.318743563950980 -0.0777038034207444 1.346637420710540 -1.09024942151365 0.118983393877196 -0.0083301911092432 0.536941450716745
7.500 -4.876430881706430 2.373219226144200 -0.0645988540118558 1.529692859278580 -1.10750011821578 0.131643152520841 -0.0000488890402107 0.531853282981450
10.00 -5.489149076214530 2.381480607871230 -0.0633541563175792 1.620019767639500 -1.12740443208222 0.141291747206530 0.0059559626930461 0.560198970449326
""")
| gem/oq-engine | openquake/hazardlib/gsim/eshm20_craton.py | Python | agpl-3.0 | 14,799 |
# Miro - an RSS based video player application
# Copyright 2009 - Participatory Culture Foundation
#
# This file is part of vidscraper.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import re
import urllib
from lxml import builder
from lxml import etree
from lxml.html import builder as E
from lxml.html import tostring
import oauth2
import simplejson
from vidscraper.decorators import provide_shortmem, parse_url, returns_unicode
from vidscraper import util
from vidscraper.errors import Error
from django.conf import settings
class WistiaError(Error):
pass
WISTIA_OEMBED_API_URL = 'http://fast.wistia.com/oembed?embedType=seo&url='
#'http://fast.wistia.com/oembed?url=http://home.wistia.com/medias/'
EMaker = builder.ElementMaker()
EMBED = EMaker.embed
EMBED_WIDTH = 425
EMBED_HEIGHT = 344
def get_shortmem(url):
shortmem = {}
video_id = WISTIA_REGEX.match(url).groupdict()['video_id']
    apiurl = '%s%s' % (WISTIA_OEMBED_API_URL, urllib.quote(url))
finalexcept = None
backoff = util.random_exponential_backoff(2)
for i in range(3):
try:
            response = urllib.urlopen(apiurl)
api_raw_data = response.read()
api_data = simplejson.loads(api_raw_data)
except Exception as e:
finalexcept = e
continue
else:
shortmem['oembed'] = api_data
break
backoff.next()
if 'oembed' in shortmem:
return shortmem
errmsg = u'Wistia API error : '
if finalexcept is not None:
"""if isinstance(finalexcept, urllib.HTTPError):
errmsg += finalexcept.code + " - " + HTTPResponseMessages[ finalexcept.code ][0]
elif isinstance(finalexcept, urllib.URLError):
errmsg += "Could not connect - " + finalexcept.reason
else:"""
errmsg += str(finalexcept)
else:
errmsg += u' Unrecognized error. Sorry about that, chief.'
return None
def parse_api(scraper_func, shortmem=None):
def new_scraper_func(url, shortmem={}, *args, **kwargs):
if not shortmem:
shortmem = get_shortmem(url)
return scraper_func(url, shortmem=shortmem, *args, **kwargs)
return new_scraper_func
@parse_api
@returns_unicode
def scrape_title(url, shortmem={}):
try:
return shortmem['oembed']['title'] or u''
except KeyError:
return u''
@parse_api
@returns_unicode
def scrape_description(url, shortmem={}):
try:
description = shortmem['oembed']['title'] # No desc provided in oembed. Use title.
except KeyError:
description = ''
return util.clean_description_html(description)
@parse_api
@returns_unicode
def get_embed(url, shortmem={}, width=EMBED_WIDTH, height=EMBED_HEIGHT):
return shortmem['oembed']['html']
@parse_api
@returns_unicode
def get_thumbnail_url(url, shortmem={}):
return shortmem['oembed']['thumbnail_url']
@parse_api
@returns_unicode
def get_user(url, shortmem={}):
return shortmem['oembed']['provider_name']
@parse_api
@returns_unicode
def get_user_url(url, shortmem={}):
return shortmem['oembed']['provider_url']
@parse_api
@returns_unicode
def get_duration(url, shortmem={}):
return shortmem['oembed']['duration']
WISTIA_REGEX = re.compile(r'https?://(.+)?(wistia\.com|wi\.st|wistia\.net)/(medias|embed/iframe)/(?P<video_id>\w+)')
SUITE = {
'regex': WISTIA_REGEX,
'funcs': {
'title': scrape_title,
'description': scrape_description,
'embed': get_embed,
'thumbnail_url': get_thumbnail_url,
'user': get_user,
'user_url': get_user_url,
'duration': get_duration
},
'order': ['title', 'description', 'file_url', 'embed']}
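# Illustrative sketch (not part of the original module): the SUITE regex above
# matches media URLs such as 'http://home.wistia.com/medias/abc123' (the id is
# a placeholder), and a scraper driving this suite would call the mapped
# functions with such a URL, e.g.
#
#     SUITE['funcs']['title']('http://home.wistia.com/medias/abc123')
#
# each function fetching (or reusing) the oEmbed data via get_shortmem().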
| ujdhesa/unisubs | libs/vidscraper/sites/wistia.py | Python | agpl-3.0 | 4,955 |
import openerp.addons.website.tests.test_ui as test_ui
def load_tests(loader, base, _):
base.addTest(test_ui.WebsiteUiSuite(test_ui.full_path(__file__,'website_sale-add_product-test.js'),
{'redirect': '/page/website.homepage'}))
base.addTest(test_ui.WebsiteUiSuite(test_ui.full_path(__file__,'website_sale-sale_process-test.js'),
{'redirect': '/page/website.homepage'}))
base.addTest(test_ui.WebsiteUiSuite(test_ui.full_path(__file__,'website_sale-sale_process-test.js'),
{'redirect': '/page/website.homepage', 'user': 'demo', 'password': 'demo'}))
# Test has been commented in SAAS-3 ONLY, it must be activated in trunk.
# Log for test JS has been improved in trunk, so we stop to loss time in saas-3 and debug it directly in trunk.
# Tech Saas & AL agreement
# base.addTest(test_ui.WebsiteUiSuite(test_ui.full_path(__file__,'website_sale-sale_process-test.js'), {'path': '/', 'user': None}))
return base | browseinfo/odoo_saas3_nicolas | addons/website_sale/tests/test_ui.py | Python | agpl-3.0 | 963 |
from __future__ import print_function
from __future__ import unicode_literals
from django.contrib import admin
from .models import Conference
from .models import Paper
from .models import Author
from .models import Attachment
from .actions import paper_actions
class AttachInline(admin.TabularInline):
model = Attachment
class ConferenceAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('name', 'place', 'date')
search_fields = ('name', 'place')
date_hierarchy = 'date'
class PaperAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
list_display = ('title', 'conference', 'status', 'pauthors',
'hasattach')
list_filter = ('status', 'conference')
search_fields = ('title', 'conference__name', 'conference__place',
'authors__first_name',
'authors__last_name', 'authors__email')
filter_horizontal = ('authors', )
inlines = [AttachInline, ]
actions = paper_actions
def pauthors(self, obj):
return ', '.join(i.get_full_name() for i in obj.authors.all())
pauthors.short_description = 'Authors'
def hasattach(self, obj):
return obj.attachs.exists()
hasattach.short_description = 'Attach?'
hasattach.boolean = True
class AuthorAdmin(admin.ModelAdmin):
list_display = ('email', 'first_name', 'last_name')
search_fields = ('email', 'first_name', 'last_name')
class AttachmentAdmin(admin.ModelAdmin):
list_display = ('attach', 'paper', 'uploaded')
admin.site.register(Conference, ConferenceAdmin)
admin.site.register(Paper, PaperAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Attachment, AttachmentAdmin)
| wadobo/papersplease | papersplease/papers/admin.py | Python | agpl-3.0 | 1,742 |
# Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import anki.lang
import aqt
from aqt import AnkiQt
from aqt.profiles import RecordingDriver, VideoDriver
from aqt.qt import *
from aqt.utils import (
TR,
HelpPage,
disable_help_button,
openHelp,
showInfo,
showWarning,
tr,
)
def video_driver_name_for_platform(driver: VideoDriver) -> str:
if driver == VideoDriver.ANGLE:
return tr(TR.PREFERENCES_VIDEO_DRIVER_ANGLE)
elif driver == VideoDriver.Software:
if isMac:
return tr(TR.PREFERENCES_VIDEO_DRIVER_SOFTWARE_MAC)
else:
return tr(TR.PREFERENCES_VIDEO_DRIVER_SOFTWARE_OTHER)
else:
if isMac:
return tr(TR.PREFERENCES_VIDEO_DRIVER_OPENGL_MAC)
else:
return tr(TR.PREFERENCES_VIDEO_DRIVER_OPENGL_OTHER)
class Preferences(QDialog):
def __init__(self, mw: AnkiQt) -> None:
QDialog.__init__(self, mw, Qt.Window)
self.mw = mw
self.prof = self.mw.pm.profile
self.form = aqt.forms.preferences.Ui_Preferences()
self.form.setupUi(self)
disable_help_button(self)
self.form.buttonBox.button(QDialogButtonBox.Help).setAutoDefault(False)
self.form.buttonBox.button(QDialogButtonBox.Close).setAutoDefault(False)
qconnect(
self.form.buttonBox.helpRequested, lambda: openHelp(HelpPage.PREFERENCES)
)
self.silentlyClose = True
self.prefs = self.mw.col.get_preferences()
self.setupLang()
self.setupCollection()
self.setupNetwork()
self.setupBackup()
self.setupOptions()
self.show()
def accept(self) -> None:
# avoid exception if main window is already closed
if not self.mw.col:
return
self.updateCollection()
self.updateNetwork()
self.updateBackup()
self.updateOptions()
self.mw.pm.save()
self.mw.reset()
self.done(0)
aqt.dialogs.markClosed("Preferences")
def reject(self) -> None:
self.accept()
# Language
######################################################################
def setupLang(self) -> None:
f = self.form
f.lang.addItems([x[0] for x in anki.lang.langs])
f.lang.setCurrentIndex(self.langIdx())
qconnect(f.lang.currentIndexChanged, self.onLangIdxChanged)
def langIdx(self) -> int:
codes = [x[1] for x in anki.lang.langs]
lang = anki.lang.currentLang
if lang in anki.lang.compatMap:
lang = anki.lang.compatMap[lang]
else:
lang = lang.replace("-", "_")
try:
return codes.index(lang)
except:
return codes.index("en_US")
def onLangIdxChanged(self, idx: int) -> None:
code = anki.lang.langs[idx][1]
self.mw.pm.setLang(code)
showInfo(
tr(TR.PREFERENCES_PLEASE_RESTART_ANKI_TO_COMPLETE_LANGUAGE), parent=self
)
# Collection options
######################################################################
def setupCollection(self) -> None:
import anki.consts as c
f = self.form
qc = self.mw.col.conf
self.setup_video_driver()
f.newSpread.addItems(list(c.newCardSchedulingLabels(self.mw.col).values()))
f.useCurrent.setCurrentIndex(int(not qc.get("addToCur", True)))
s = self.prefs.sched
f.lrnCutoff.setValue(int(s.learn_ahead_secs / 60.0))
f.timeLimit.setValue(int(s.time_limit_secs / 60.0))
f.showEstimates.setChecked(s.show_intervals_on_buttons)
f.showProgress.setChecked(s.show_remaining_due_counts)
f.newSpread.setCurrentIndex(s.new_review_mix)
f.dayLearnFirst.setChecked(s.day_learn_first)
f.dayOffset.setValue(s.rollover)
if s.scheduler_version < 2:
f.dayLearnFirst.setVisible(False)
f.legacy_timezone.setVisible(False)
else:
f.legacy_timezone.setChecked(not s.new_timezone)
def setup_video_driver(self) -> None:
self.video_drivers = VideoDriver.all_for_platform()
names = [
tr(TR.PREFERENCES_VIDEO_DRIVER, driver=video_driver_name_for_platform(d))
for d in self.video_drivers
]
self.form.video_driver.addItems(names)
self.form.video_driver.setCurrentIndex(
self.video_drivers.index(self.mw.pm.video_driver())
)
def update_video_driver(self) -> None:
new_driver = self.video_drivers[self.form.video_driver.currentIndex()]
if new_driver != self.mw.pm.video_driver():
self.mw.pm.set_video_driver(new_driver)
showInfo(tr(TR.PREFERENCES_CHANGES_WILL_TAKE_EFFECT_WHEN_YOU))
def updateCollection(self) -> None:
f = self.form
d = self.mw.col
self.update_video_driver()
qc = d.conf
qc["addToCur"] = not f.useCurrent.currentIndex()
s = self.prefs.sched
s.show_remaining_due_counts = f.showProgress.isChecked()
s.show_intervals_on_buttons = f.showEstimates.isChecked()
s.new_review_mix = f.newSpread.currentIndex()
s.time_limit_secs = f.timeLimit.value() * 60
s.learn_ahead_secs = f.lrnCutoff.value() * 60
s.day_learn_first = f.dayLearnFirst.isChecked()
s.rollover = f.dayOffset.value()
s.new_timezone = not f.legacy_timezone.isChecked()
self.mw.col.set_preferences(self.prefs)
d.setMod()
# Network
######################################################################
def setupNetwork(self) -> None:
self.form.media_log.setText(tr(TR.SYNC_MEDIA_LOG_BUTTON))
qconnect(self.form.media_log.clicked, self.on_media_log)
self.form.syncOnProgramOpen.setChecked(self.prof["autoSync"])
self.form.syncMedia.setChecked(self.prof["syncMedia"])
self.form.autoSyncMedia.setChecked(self.mw.pm.auto_sync_media_minutes() != 0)
if not self.prof["syncKey"]:
self._hideAuth()
else:
self.form.syncUser.setText(self.prof.get("syncUser", ""))
qconnect(self.form.syncDeauth.clicked, self.onSyncDeauth)
self.form.syncDeauth.setText(tr(TR.SYNC_LOG_OUT_BUTTON))
def on_media_log(self) -> None:
self.mw.media_syncer.show_sync_log()
def _hideAuth(self) -> None:
self.form.syncDeauth.setVisible(False)
self.form.syncUser.setText("")
self.form.syncLabel.setText(
tr(TR.PREFERENCES_SYNCHRONIZATIONNOT_CURRENTLY_ENABLED_CLICK_THE_SYNC)
)
def onSyncDeauth(self) -> None:
if self.mw.media_syncer.is_syncing():
showWarning("Can't log out while sync in progress.")
return
self.prof["syncKey"] = None
self.mw.col.media.force_resync()
self._hideAuth()
def updateNetwork(self) -> None:
self.prof["autoSync"] = self.form.syncOnProgramOpen.isChecked()
self.prof["syncMedia"] = self.form.syncMedia.isChecked()
self.mw.pm.set_auto_sync_media_minutes(
self.form.autoSyncMedia.isChecked() and 15 or 0
)
if self.form.fullSync.isChecked():
self.mw.col.modSchema(check=False)
self.mw.col.setMod()
# Backup
######################################################################
def setupBackup(self) -> None:
self.form.numBackups.setValue(self.prof["numBackups"])
def updateBackup(self) -> None:
self.prof["numBackups"] = self.form.numBackups.value()
# Basic & Advanced Options
######################################################################
def setupOptions(self) -> None:
self.form.pastePNG.setChecked(self.prof.get("pastePNG", False))
self.form.uiScale.setValue(int(self.mw.pm.uiScale() * 100))
self.form.pasteInvert.setChecked(self.prof.get("pasteInvert", False))
self.form.showPlayButtons.setChecked(self.prof.get("showPlayButtons", True))
self.form.nightMode.setChecked(self.mw.pm.night_mode())
self.form.interrupt_audio.setChecked(self.mw.pm.interrupt_audio())
self._recording_drivers = [
RecordingDriver.QtAudioInput,
RecordingDriver.PyAudio,
]
# The plan is to phase out PyAudio soon, so will hold off on
# making this string translatable for now.
self.form.recording_driver.addItems(
[
f"Voice recording driver: {driver.value}"
for driver in self._recording_drivers
]
)
self.form.recording_driver.setCurrentIndex(
self._recording_drivers.index(self.mw.pm.recording_driver())
)
def updateOptions(self) -> None:
restart_required = False
self.prof["pastePNG"] = self.form.pastePNG.isChecked()
self.prof["pasteInvert"] = self.form.pasteInvert.isChecked()
newScale = self.form.uiScale.value() / 100
if newScale != self.mw.pm.uiScale():
self.mw.pm.setUiScale(newScale)
restart_required = True
self.prof["showPlayButtons"] = self.form.showPlayButtons.isChecked()
if self.mw.pm.night_mode() != self.form.nightMode.isChecked():
self.mw.pm.set_night_mode(not self.mw.pm.night_mode())
restart_required = True
self.mw.pm.set_interrupt_audio(self.form.interrupt_audio.isChecked())
new_audio_driver = self._recording_drivers[
self.form.recording_driver.currentIndex()
]
if self.mw.pm.recording_driver() != new_audio_driver:
self.mw.pm.set_recording_driver(new_audio_driver)
if new_audio_driver == RecordingDriver.PyAudio:
showInfo(
"""\
The PyAudio driver will likely be removed in a future update. If you find it works better \
for you than the default driver, please let us know on the Anki forums."""
)
if restart_required:
showInfo(tr(TR.PREFERENCES_CHANGES_WILL_TAKE_EFFECT_WHEN_YOU))
| simgunz/anki | qt/aqt/preferences.py | Python | agpl-3.0 | 10,181 |
# -*- coding: utf-8 -*-
# © 2015 Compassion CH (Nicolas Tran)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, models
class AccountPaymentOrder(models.Model):
_inherit = 'account.payment.order'
@api.multi
def open2generated(self):
"""
Replace action to propose upload SEPA file to FDS.
:return: window action
"""
action = super(AccountPaymentOrder, self).open2generated()
if self.payment_method_id.code == 'sepa_credit_transfer':
upload_obj = self.env['payment.order.upload.sepa.wizard']
attachment_id = action['res_id']
upload_wizard = upload_obj.create({
'attachment_id': attachment_id,
'payment_order_id': self.id,
})
del action['view_id']
action.update({
'res_model': upload_obj._name,
'res_id': upload_wizard.id,
'flags': {'initial_mode': 'edit'},
'attachment_id': attachment_id
})
return action
| CompassionCH/l10n-switzerland | l10n_ch_fds_upload_sepa/models/account_payment_order.py | Python | agpl-3.0 | 1,096 |
""" Encode any known changes to the database here
to help the matching process
"""
renamed_modules = {
'base_calendar': 'calendar',
'mrp_jit': 'procurement_jit',
'project_mrp': 'sale_service',
# OCA/account-invoicing
'invoice_validation_wkfl': 'account_invoice_validation_workflow',
'account_invoice_zero': 'account_invoice_zero_autopay',
# OCA/server-tools
'audittrail': 'auditlog',
# OCA/bank-statement-import
'account_banking': 'account_bank_statement_import',
'account_banking_camt': 'account_bank_statement_import_camt',
'account_banking_mt940':
'account_bank_statement_import_mt940_base',
'account_banking_nl_ing_mt940':
'account_bank_statement_import_mt940_nl_ing',
'account_banking_nl_rabo_mt940':
'account_bank_statement_import_mt940_nl_rabo',
# OCA/product-attribute
'product_images': 'product_multi_image',
}
renamed_models = {
}
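# Illustrative entry format (not from the original file): model renames follow
# the same old-name -> new-name convention as the module map above, e.g.
#
# renamed_models = {
#     'some.old.model': 'some.new.model',
# }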
| grap/OpenUpgrade | openerp/addons/openupgrade_records/lib/apriori.py | Python | agpl-3.0 | 931 |
from django.core.management.base import BaseCommand
from django.utils import timezone
from mainsite.models import AccessTokenProxy, AccessTokenScope
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Splitting all scopes on tokens')
chunk_size = 5000
page = 0
self.stdout.write('Deleting AccessTokenScopes')
AccessTokenScope.objects.all().delete()
self.stdout.write('Bulk creating AccessTokenScope')
while True:
tokens = AccessTokenProxy.objects.filter(expires__gt=timezone.now())[page:page+chunk_size]
for t in tokens:
scopes = []
for s in t.scope.split():
scopes.append(AccessTokenScope(scope=s, token=t))
AccessTokenScope.objects.bulk_create(scopes)
if len(tokens) < chunk_size: break
page += chunk_size
self.stdout.write('All done.')
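        # Illustrative example (added for clarity, not part of the original
        # command): a token whose scope string is 'rw:profile r:backpack'
        # (hypothetical values) yields two AccessTokenScope rows, one per
        # whitespace-separated entry, since the loop above relies on str.split().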
| concentricsky/badgr-server | apps/mainsite/management/commands/generate_token_scopes.py | Python | agpl-3.0 | 970 |
# -*- coding: utf-8 -*-
"""
Integration tests for submitting problem responses and getting grades.
"""
import json
import os
from textwrap import dedent
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from mock import patch
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import (
OptionResponseXMLFactory, CustomResponseXMLFactory, SchematicResponseXMLFactory,
CodeResponseXMLFactory,
)
from courseware import grades
from courseware.models import StudentModule, StudentModuleHistory
from courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from student.tests.factories import UserFactory
from student.models import anonymous_id_for_user
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
from openedx.core.djangoapps.credit.api import (
set_credit_requirements, get_credit_requirement_status
)
from openedx.core.djangoapps.credit.models import CreditCourse, CreditProvider
from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory
from openedx.core.djangoapps.grading_policy.utils import MaxScoresCache
class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check that a course gets graded properly.
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
def setUp(self):
super(TestSubmittingProblems, self).setUp(create_user=False)
# Create course
self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG)
assert self.course, "Couldn't load course %r" % self.COURSE_NAME
# create a test student
self.student = '[email protected]'
self.password = 'foo'
self.create_account('u1', self.student, self.password)
self.activate_user(self.student)
self.enroll(self.course)
self.student_user = User.objects.get(email=self.student)
self.factory = RequestFactory()
def refresh_course(self):
"""
Re-fetch the course from the database so that the object being dealt with has everything added to it.
"""
self.course = self.store.get_course(self.course.id)
def problem_location(self, problem_url_name):
"""
Returns the url of the problem given the problem's name
"""
return self.course.id.make_usage_key('problem', problem_url_name)
def modx_url(self, problem_location, dispatch):
"""
Return the url needed for the desired action.
problem_location: location of the problem on which we want some action
        dispatch: the action string that gets passed to the view as a kwarg
        example: 'problem_check' for having responses processed
"""
return reverse(
'xblock_handler',
kwargs={
'course_id': self.course.id.to_deprecated_string(),
'usage_id': quote_slashes(problem_location.to_deprecated_string()),
'handler': 'xmodule_handler',
'suffix': dispatch,
}
)
def submit_question_answer(self, problem_url_name, responses):
"""
Submit answers to a question.
Responses is a dict mapping problem ids to answers:
{'2_1': 'Correct', '2_2': 'Incorrect'}
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_check')
answer_key_prefix = 'input_{}_'.format(problem_location.html_id())
# format the response dictionary to be sent in the post request by adding the above prefix to each key
response_dict = {(answer_key_prefix + k): v for k, v in responses.items()}
resp = self.client.post(modx_url, response_dict)
return resp
def look_at_question(self, problem_url_name):
"""
Create state for a problem, but don't answer it
"""
location = self.problem_location(problem_url_name)
modx_url = self.modx_url(location, "problem_get")
resp = self.client.get(modx_url)
return resp
def reset_question_answer(self, problem_url_name):
"""
Reset specified problem for current user.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_reset')
resp = self.client.post(modx_url)
return resp
def show_question_answer(self, problem_url_name):
"""
Shows the answer to the current student.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_show')
resp = self.client.post(modx_url)
return resp
def add_dropdown_to_section(self, section_location, name, num_inputs=2):
"""
Create and return a dropdown problem.
section_location: location object of section in which to create the problem
(problems must live in a section to be graded properly)
name: string name of the problem
num_input: the number of input fields to create in the problem
"""
prob_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=num_inputs,
weight=num_inputs,
options=['Correct', 'Incorrect', u'ⓤⓝⓘⓒⓞⓓⓔ'],
correct_option='Correct'
)
problem = ItemFactory.create(
parent_location=section_location,
category='problem',
data=prob_xml,
metadata={'rerandomize': 'always'},
display_name=name
)
# re-fetch the course from the database so the object is up to date
self.refresh_course()
return problem
def add_graded_section_to_course(self, name, section_format='Homework', late=False, reset=False, showanswer=False):
"""
Creates a graded homework section within a chapter and returns the section.
"""
# if we don't already have a chapter create a new one
if not(hasattr(self, 'chapter')):
self.chapter = ItemFactory.create(
parent_location=self.course.location,
category='chapter'
)
if late:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
metadata={'graded': True, 'format': section_format, 'due': '2013-05-20T23:30'}
)
elif reset:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
rerandomize='always',
metadata={
'graded': True,
'format': section_format,
}
)
elif showanswer:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
showanswer='never',
metadata={
'graded': True,
'format': section_format,
}
)
else:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
metadata={'graded': True, 'format': section_format}
)
# now that we've added the problem and section to the course
# we fetch the course from the database so the object we are
# dealing with has these additions
self.refresh_course()
return section
def add_grading_policy(self, grading_policy):
"""
Add a grading policy to the course.
"""
self.course.grading_policy = grading_policy
self.update_course(self.course, self.student_user.id)
self.refresh_course()
def get_grade_summary(self):
"""
calls grades.grade for current user and course.
the keywords for the returned object are
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
"""
fake_request = self.factory.get(
reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
)
fake_request.user = self.student_user
return grades.grade(self.student_user, fake_request, self.course)
def get_progress_summary(self):
"""
Return progress summary structure for current user and course.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
"""
fake_request = self.factory.get(
reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
)
progress_summary = grades.progress_summary(
self.student_user, fake_request, self.course
)
return progress_summary
def check_grade_percent(self, percent):
"""
Assert that percent grade is as expected.
"""
grade_summary = self.get_grade_summary()
self.assertEqual(grade_summary['percent'], percent)
def earned_hw_scores(self):
"""
Global scores, each Score is a Problem Set.
Returns list of scores: [<points on hw_1>, <points on hw_2>, ..., <points on hw_n>]
"""
return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]
def score_for_hw(self, hw_url_name):
"""
Returns list of scores for a given url.
Returns list of scores for the given homework:
[<points on problem_1>, <points on problem_2>, ..., <points on problem_n>]
"""
# list of grade summaries for each section
sections_list = []
for chapter in self.get_progress_summary():
sections_list.extend(chapter['sections'])
# get the first section that matches the url (there should only be one)
hw_section = next(section for section in sections_list if section.get('url_name') == hw_url_name)
return [s.earned for s in hw_section['scores']]
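# Illustrative sketch (not part of the original test module): inside a concrete
# test case the helpers above are typically combined in this order (the course
# structure comes from TestCourseGrader.basic_setup() below; the expected
# percentage is indicative and depends on the grading policy and rounding):
#
#     self.basic_setup()
#     self.submit_question_answer('p1', {'2_1': 'Correct'})
#     self.submit_question_answer('p2', {'2_1': 'Incorrect'})
#     self.check_grade_percent(0.33)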
@attr('shard_1')
class TestCourseGrader(TestSubmittingProblems):
"""
Suite of tests for the course grader.
"""
def basic_setup(self, late=False, reset=False, showanswer=False):
"""
Set up a simple course for testing basic grading functionality.
"""
grading_policy = {
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}],
"GRADE_CUTOFFS": {
'A': .9,
'B': .33
}
}
self.add_grading_policy(grading_policy)
# set up a simple course with four problems
self.homework = self.add_graded_section_to_course('homework', late=late, reset=reset, showanswer=showanswer)
self.add_dropdown_to_section(self.homework.location, 'p1', 1)
self.add_dropdown_to_section(self.homework.location, 'p2', 1)
self.add_dropdown_to_section(self.homework.location, 'p3', 1)
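        # With a single Homework bucket weighted 1.0, each of the three one-point problems
        # contributes 1/3 of the grade: one correct answer gives ~0.33 (exactly the B cutoff
        # above), two give ~0.67, and all three give 1.0 (an A), as the tests below assert.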
self.refresh_course()
def weighted_setup(self):
"""
Set up a simple course for testing weighted grading functionality.
"""
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 0.25
}, {
"type": "Final",
"name": "Final Section",
"short_label": "Final",
"weight": 0.75
}
]
}
self.add_grading_policy(grading_policy)
# set up a structure of 1 homework and 1 final
self.homework = self.add_graded_section_to_course('homework')
self.problem = self.add_dropdown_to_section(self.homework.location, 'H1P1')
self.final = self.add_graded_section_to_course('Final Section', 'Final')
self.final_question = self.add_dropdown_to_section(self.final.location, 'FinalQuestion')
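        # Expected weighting for the tests below: the two-point homework problem counts for
        # 25% of the grade and the two-point final question for 75%, so a fully correct
        # homework alone yields 0.25 and a fully correct final alone yields 0.75.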
def dropping_setup(self):
"""
Set up a simple course for testing the dropping grading functionality.
"""
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 3,
"drop_count": 1,
"short_label": "HW",
"weight": 1
}
]
}
self.add_grading_policy(grading_policy)
# Set up a course structure that just consists of 3 homeworks.
# Since the grading policy drops 1 entire homework, each problem is worth 25%
# names for the problem in the homeworks
self.hw1_names = ['h1p1', 'h1p2']
self.hw2_names = ['h2p1', 'h2p2']
self.hw3_names = ['h3p1', 'h3p2']
self.homework1 = self.add_graded_section_to_course('homework1')
self.add_dropdown_to_section(self.homework1.location, self.hw1_names[0], 1)
self.add_dropdown_to_section(self.homework1.location, self.hw1_names[1], 1)
self.homework2 = self.add_graded_section_to_course('homework2')
self.add_dropdown_to_section(self.homework2.location, self.hw2_names[0], 1)
self.add_dropdown_to_section(self.homework2.location, self.hw2_names[1], 1)
self.homework3 = self.add_graded_section_to_course('homework3')
self.add_dropdown_to_section(self.homework3.location, self.hw3_names[0], 1)
self.add_dropdown_to_section(self.homework3.location, self.hw3_names[1], 1)
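        # Expected behaviour for the tests below: the lowest-scoring of the three homeworks
        # is dropped and the remaining two are averaged, which is why each counted one-point
        # problem ends up being worth 25% of the grade.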
def test_submission_late(self):
"""Test problem for due date in the past"""
self.basic_setup(late=True)
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_submission_reset(self):
"""Test problem ProcessingErrors due to resets"""
self.basic_setup(reset=True)
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
# submit a second time to draw NotFoundError
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_submission_show_answer(self):
"""Test problem for ProcessingErrors due to showing answer"""
self.basic_setup(showanswer=True)
resp = self.show_question_answer('p1')
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_show_answer_doesnt_write_to_csm(self):
self.basic_setup()
self.submit_question_answer('p1', {'2_1': u'Correct'})
# Now fetch the state entry for that problem.
student_module = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
# count how many state history entries there are
baseline = StudentModuleHistory.objects.filter(
student_module=student_module
)
baseline_count = baseline.count()
self.assertEqual(baseline_count, 3)
# now click "show answer"
self.show_question_answer('p1')
# check that we don't have more state history entries
csmh = StudentModuleHistory.objects.filter(
student_module=student_module
)
current_count = csmh.count()
self.assertEqual(current_count, 3)
def test_grade_with_max_score_cache(self):
"""
Tests that the max score cache is populated after a grading run
and that the results of grading runs before and after the cache
warms are the same.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.look_at_question('p2')
self.assertTrue(
StudentModule.objects.filter(
module_state_key=self.problem_location('p2')
).exists()
)
location_to_cache = unicode(self.problem_location('p2'))
max_scores_cache = MaxScoresCache.create_for_course(self.course)
# problem isn't in the cache
max_scores_cache.fetch_from_remote([location_to_cache])
self.assertIsNone(max_scores_cache.get(location_to_cache))
self.check_grade_percent(0.33)
# problem is in the cache
max_scores_cache.fetch_from_remote([location_to_cache])
self.assertIsNotNone(max_scores_cache.get(location_to_cache))
self.check_grade_percent(0.33)
def test_none_grade(self):
"""
Check grade is 0 to begin with.
"""
self.basic_setup()
self.check_grade_percent(0)
self.assertEqual(self.get_grade_summary()['grade'], None)
def test_b_grade_exact(self):
"""
Check that at exactly the cutoff, the grade is B.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.check_grade_percent(0.33)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_MAX_SCORE_CACHE": False})
def test_grade_no_max_score_cache(self):
"""
Tests grading when the max score cache is disabled
"""
self.test_b_grade_exact()
def test_b_grade_above(self):
"""
Check grade between cutoffs.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
def test_a_grade(self):
"""
Check that 100 percent completion gets an A
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Correct'})
self.check_grade_percent(1.0)
self.assertEqual(self.get_grade_summary()['grade'], 'A')
def test_wrong_answers(self):
"""
Check that answering incorrectly is graded properly.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
def test_submissions_api_overrides_scores(self):
"""
        Check that scores coming from the submissions API override the scores read from StudentModule.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
# But now we mock out a get_scores call, and watch as it overrides the
# score read from StudentModule and our student gets an A instead.
with patch('submissions.api.get_scores') as mock_get_scores:
mock_get_scores.return_value = {
self.problem_location('p3').to_deprecated_string(): (1, 1)
}
self.check_grade_percent(1.0)
self.assertEqual(self.get_grade_summary()['grade'], 'A')
def test_submissions_api_anonymous_student_id(self):
"""
Check that the submissions API is sent an anonymous student ID.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
with patch('submissions.api.get_scores') as mock_get_scores:
mock_get_scores.return_value = {
self.problem_location('p3').to_deprecated_string(): (1, 1)
}
self.get_grade_summary()
# Verify that the submissions API was sent an anonymized student ID
mock_get_scores.assert_called_with(
self.course.id.to_deprecated_string(),
anonymous_id_for_user(self.student_user, self.course.id)
)
def test_weighted_homework(self):
"""
Test that the homework section has proper weight.
"""
self.weighted_setup()
# Get both parts correct
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.25)
self.assertEqual(self.earned_hw_scores(), [2.0]) # Order matters
self.assertEqual(self.score_for_hw('homework'), [2.0])
def test_weighted_exam(self):
"""
Test that the exam section has the proper weight.
"""
self.weighted_setup()
self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.75)
def test_weighted_total(self):
"""
Test that the weighted total adds to 100.
"""
self.weighted_setup()
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(1.0)
def dropping_homework_stage1(self):
"""
Get half the first homework correct and all of the second
"""
self.submit_question_answer(self.hw1_names[0], {'2_1': 'Correct'})
self.submit_question_answer(self.hw1_names[1], {'2_1': 'Incorrect'})
for name in self.hw2_names:
self.submit_question_answer(name, {'2_1': 'Correct'})
def test_dropping_grades_normally(self):
"""
Test that the dropping policy does not change things before it should.
"""
self.dropping_setup()
self.dropping_homework_stage1()
self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 0]) # Order matters
self.check_grade_percent(0.75)
def test_dropping_nochange(self):
"""
Tests that grade does not change when making the global homework grade minimum not unique.
"""
self.dropping_setup()
self.dropping_homework_stage1()
self.submit_question_answer(self.hw3_names[0], {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
self.assertEqual(self.score_for_hw('homework3'), [1.0, 0.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 1.0]) # Order matters
self.check_grade_percent(0.75)
def test_dropping_all_correct(self):
"""
Test that the lowest is dropped for a perfect score.
"""
self.dropping_setup()
self.dropping_homework_stage1()
for name in self.hw3_names:
self.submit_question_answer(name, {'2_1': 'Correct'})
self.check_grade_percent(1.0)
self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 2.0]) # Order matters
self.assertEqual(self.score_for_hw('homework3'), [1.0, 1.0])
def test_min_grade_credit_requirements_status(self):
"""
Test for credit course. If user passes minimum grade requirement then
status will be updated as satisfied in requirement status table.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
# Enable the course for credit
credit_course = CreditCourse.objects.create(
course_key=self.course.id,
enabled=True,
)
# Configure a credit provider for the course
CreditProvider.objects.create(
provider_id="ASU",
enable_integration=True,
provider_url="https://credit.example.com/request",
)
requirements = [{
"namespace": "grade",
"name": "grade",
"display_name": "Grade",
"criteria": {"min_grade": 0.52},
}]
# Add a single credit requirement (final grade)
set_credit_requirements(self.course.id, requirements)
self.get_grade_summary()
req_status = get_credit_requirement_status(self.course.id, self.student_user.username, 'grade', 'grade')
self.assertEqual(req_status[0]["status"], 'satisfied')
@attr('shard_1')
class ProblemWithUploadedFilesTest(TestSubmittingProblems):
"""Tests of problems with uploaded files."""
def setUp(self):
super(ProblemWithUploadedFilesTest, self).setUp()
self.section = self.add_graded_section_to_course('section')
def problem_setup(self, name, files):
"""
Create a CodeResponse problem with files to upload.
"""
xmldata = CodeResponseXMLFactory().build_xml(
allowed_files=files, required_files=files,
)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
display_name=name,
data=xmldata
)
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def test_three_files(self):
# Open the test files, and arrange to close them later.
filenames = "prog1.py prog2.py prog3.py"
fileobjs = [
open(os.path.join(settings.COMMON_TEST_DATA_ROOT, "capa", filename))
for filename in filenames.split()
]
for fileobj in fileobjs:
self.addCleanup(fileobj.close)
self.problem_setup("the_problem", filenames)
with patch('courseware.module_render.XQUEUE_INTERFACE.session') as mock_session:
resp = self.submit_question_answer("the_problem", {'2_1': fileobjs})
self.assertEqual(resp.status_code, 200)
json_resp = json.loads(resp.content)
self.assertEqual(json_resp['success'], "incorrect")
# See how post got called.
name, args, kwargs = mock_session.mock_calls[0]
self.assertEqual(name, "post")
self.assertEqual(len(args), 1)
self.assertTrue(args[0].endswith("/submit/"))
self.assertItemsEqual(kwargs.keys(), ["files", "data"])
self.assertItemsEqual(kwargs['files'].keys(), filenames.split())
@attr('shard_1')
class TestPythonGradedResponse(TestSubmittingProblems):
"""
Check that we can submit a schematic and custom response, and it answers properly.
"""
SCHEMATIC_SCRIPT = dedent("""
# for a schematic response, submission[i] is the json representation
# of the diagram and analysis results for the i-th schematic tag
def get_tran(json,signal):
for element in json:
if element[0] == 'transient':
return element[1].get(signal,[])
return []
def get_value(at,output):
for (t,v) in output:
if at == t: return v
return None
output = get_tran(submission[0],'Z')
okay = True
# output should be 1, 1, 1, 1, 1, 0, 0, 0
if get_value(0.0000004, output) < 2.7: okay = False;
if get_value(0.0000009, output) < 2.7: okay = False;
if get_value(0.0000014, output) < 2.7: okay = False;
if get_value(0.0000019, output) < 2.7: okay = False;
if get_value(0.0000024, output) < 2.7: okay = False;
if get_value(0.0000029, output) > 0.25: okay = False;
if get_value(0.0000034, output) > 0.25: okay = False;
if get_value(0.0000039, output) > 0.25: okay = False;
correct = ['correct' if okay else 'incorrect']""").strip()
SCHEMATIC_CORRECT = json.dumps(
[['transient', {'Z': [
[0.0000004, 2.8],
[0.0000009, 2.8],
[0.0000014, 2.8],
[0.0000019, 2.8],
[0.0000024, 2.8],
[0.0000029, 0.2],
[0.0000034, 0.2],
[0.0000039, 0.2]
]}]]
)
SCHEMATIC_INCORRECT = json.dumps(
[['transient', {'Z': [
[0.0000004, 2.8],
[0.0000009, 0.0], # wrong.
[0.0000014, 2.8],
[0.0000019, 2.8],
[0.0000024, 2.8],
[0.0000029, 0.2],
[0.0000034, 0.2],
[0.0000039, 0.2]
]}]]
)
CUSTOM_RESPONSE_SCRIPT = dedent("""
def test_csv(expect, ans):
# Take out all spaces in expected answer
expect = [i.strip(' ') for i in str(expect).split(',')]
# Take out all spaces in student solution
ans = [i.strip(' ') for i in str(ans).split(',')]
def strip_q(x):
# Strip quotes around strings if students have entered them
stripped_ans = []
for item in x:
if item[0] == "'" and item[-1]=="'":
item = item.strip("'")
elif item[0] == '"' and item[-1] == '"':
item = item.strip('"')
stripped_ans.append(item)
return stripped_ans
return strip_q(expect) == strip_q(ans)""").strip()
CUSTOM_RESPONSE_CORRECT = "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"
CUSTOM_RESPONSE_INCORRECT = "Reading my code I see. I hope you like it :)"
COMPUTED_ANSWER_SCRIPT = dedent("""
if submission[0] == "a shout in the street":
correct = ['correct']
else:
correct = ['incorrect']""").strip()
COMPUTED_ANSWER_CORRECT = "a shout in the street"
COMPUTED_ANSWER_INCORRECT = "because we never let them in"
def setUp(self):
super(TestPythonGradedResponse, self).setUp()
self.section = self.add_graded_section_to_course('section')
self.correct_responses = {}
self.incorrect_responses = {}
def schematic_setup(self, name):
"""
set up an example Circuit_Schematic_Builder problem
"""
script = self.SCHEMATIC_SCRIPT
xmldata = SchematicResponseXMLFactory().build_xml(answer=script)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
boilerplate='circuitschematic.yaml',
display_name=name,
data=xmldata
)
# define the correct and incorrect responses to this problem
self.correct_responses[name] = self.SCHEMATIC_CORRECT
self.incorrect_responses[name] = self.SCHEMATIC_INCORRECT
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def custom_response_setup(self, name):
"""
set up an example custom response problem using a check function
"""
test_csv = self.CUSTOM_RESPONSE_SCRIPT
expect = self.CUSTOM_RESPONSE_CORRECT
cfn_problem_xml = CustomResponseXMLFactory().build_xml(script=test_csv, cfn='test_csv', expect=expect)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
boilerplate='customgrader.yaml',
data=cfn_problem_xml,
display_name=name
)
# define the correct and incorrect responses to this problem
self.correct_responses[name] = expect
self.incorrect_responses[name] = self.CUSTOM_RESPONSE_INCORRECT
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def computed_answer_setup(self, name):
"""
        set up an example problem using an answer script
"""
script = self.COMPUTED_ANSWER_SCRIPT
computed_xml = CustomResponseXMLFactory().build_xml(answer=script)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
boilerplate='customgrader.yaml',
data=computed_xml,
display_name=name
)
# define the correct and incorrect responses to this problem
self.correct_responses[name] = self.COMPUTED_ANSWER_CORRECT
self.incorrect_responses[name] = self.COMPUTED_ANSWER_INCORRECT
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def _check_correct(self, name):
"""
        check that the problem named "name" is graded as correct when given its correct response
"""
resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'correct')
def _check_incorrect(self, name):
"""
        check that the problem named "name" is graded as incorrect when given its incorrect response
"""
resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'incorrect')
def _check_ireset(self, name):
"""
Check that the problem can be reset
"""
# first, get the question wrong
resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
# reset the question
self.reset_question_answer(name)
# then get it right
resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'correct')
def test_schematic_correct(self):
name = "schematic_problem"
self.schematic_setup(name)
self._check_correct(name)
def test_schematic_incorrect(self):
name = "schematic_problem"
self.schematic_setup(name)
self._check_incorrect(name)
def test_schematic_reset(self):
name = "schematic_problem"
self.schematic_setup(name)
self._check_ireset(name)
def test_check_function_correct(self):
name = 'cfn_problem'
self.custom_response_setup(name)
self._check_correct(name)
def test_check_function_incorrect(self):
name = 'cfn_problem'
self.custom_response_setup(name)
self._check_incorrect(name)
def test_check_function_reset(self):
name = 'cfn_problem'
self.custom_response_setup(name)
self._check_ireset(name)
def test_computed_correct(self):
name = 'computed_answer'
self.computed_answer_setup(name)
self._check_correct(name)
def test_computed_incorrect(self):
name = 'computed_answer'
self.computed_answer_setup(name)
self._check_incorrect(name)
def test_computed_reset(self):
name = 'computed_answer'
self.computed_answer_setup(name)
self._check_ireset(name)
@attr('shard_1')
class TestAnswerDistributions(TestSubmittingProblems):
"""Check that we can pull answer distributions for problems."""
def setUp(self):
"""Set up a simple course with four problems."""
super(TestAnswerDistributions, self).setUp()
self.homework = self.add_graded_section_to_course('homework')
self.p1_html_id = self.add_dropdown_to_section(self.homework.location, 'p1', 1).location.html_id()
self.p2_html_id = self.add_dropdown_to_section(self.homework.location, 'p2', 1).location.html_id()
self.p3_html_id = self.add_dropdown_to_section(self.homework.location, 'p3', 1).location.html_id()
self.refresh_course()
def test_empty(self):
# Just make sure we can process this without errors.
empty_distribution = grades.answer_distributions(self.course.id)
self.assertFalse(empty_distribution) # should be empty
def test_one_student(self):
# Basic test to make sure we have simple behavior right for a student
# Throw in a non-ASCII answer
self.submit_question_answer('p1', {'2_1': u'ⓤⓝⓘⓒⓞⓓⓔ'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
distributions = grades.answer_distributions(self.course.id)
self.assertEqual(
distributions,
{
('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
u'ⓤⓝⓘⓒⓞⓓⓔ': 1
},
('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
'Correct': 1
}
}
)
def test_multiple_students(self):
# Our test class is based around making requests for a particular user,
# so we're going to cheat by creating another user and copying and
# modifying StudentModule entries to make them from other users. It's
# a little hacky, but it seemed the simpler way to do this.
self.submit_question_answer('p1', {'2_1': u'Correct'})
self.submit_question_answer('p2', {'2_1': u'Incorrect'})
self.submit_question_answer('p3', {'2_1': u'Correct'})
# Make the above submissions owned by user2
user2 = UserFactory.create()
problems = StudentModule.objects.filter(
course_id=self.course.id,
student=self.student_user
)
for problem in problems:
problem.student_id = user2.id
problem.save()
# Now make more submissions by our original user
self.submit_question_answer('p1', {'2_1': u'Correct'})
self.submit_question_answer('p2', {'2_1': u'Correct'})
self.assertEqual(
grades.answer_distributions(self.course.id),
{
('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
'Correct': 2
},
('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
'Correct': 1,
'Incorrect': 1
},
('p3', 'p3', '{}_2_1'.format(self.p3_html_id)): {
'Correct': 1
}
}
)
def test_other_data_types(self):
# We'll submit one problem, and then muck with the student_answers
# dict inside its state to try different data types (str, int, float,
# none)
self.submit_question_answer('p1', {'2_1': u'Correct'})
# Now fetch the state entry for that problem.
student_module = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
for val in ('Correct', True, False, 0, 0.0, 1, 1.0, None):
state = json.loads(student_module.state)
state["student_answers"]['{}_2_1'.format(self.p1_html_id)] = val
student_module.state = json.dumps(state)
student_module.save()
self.assertEqual(
grades.answer_distributions(self.course.id),
{
('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
str(val): 1
},
}
)
def test_missing_content(self):
# If there's a StudentModule entry for content that no longer exists,
# we just quietly ignore it (because we can't display a meaningful url
# or name for it).
self.submit_question_answer('p1', {'2_1': 'Incorrect'})
# Now fetch the state entry for that problem and alter it so it points
# to a non-existent problem.
student_module = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
student_module.module_state_key = student_module.module_state_key.replace(
name=student_module.module_state_key.name + "_fake"
)
student_module.save()
# It should be empty (ignored)
empty_distribution = grades.answer_distributions(self.course.id)
self.assertFalse(empty_distribution) # should be empty
def test_broken_state(self):
# Missing or broken state for a problem should be skipped without
# causing the whole answer_distribution call to explode.
# Submit p1
self.submit_question_answer('p1', {'2_1': u'Correct'})
# Now fetch the StudentModule entry for p1 so we can corrupt its state
prb1 = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
# Submit p2
self.submit_question_answer('p2', {'2_1': u'Incorrect'})
for new_p1_state in ('{"student_answers": {}}', "invalid json!", None):
prb1.state = new_p1_state
prb1.save()
# p1 won't show up, but p2 should still work
self.assertEqual(
grades.answer_distributions(self.course.id),
{
('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
'Incorrect': 1
},
}
)
@attr('shard_1')
class TestConditionalContent(TestSubmittingProblems):
"""
Check that conditional content works correctly with grading.
"""
def setUp(self):
"""
Set up a simple course with a grading policy, a UserPartition, and 2 sections, both graded as "homework".
One section is pre-populated with a problem (with 2 inputs), visible to all students.
The second section is empty. Test cases should add conditional content to it.
"""
super(TestConditionalContent, self).setUp()
self.user_partition_group_0 = 0
self.user_partition_group_1 = 1
self.partition = UserPartition(
0,
'first_partition',
'First Partition',
[
Group(self.user_partition_group_0, 'alpha'),
Group(self.user_partition_group_1, 'beta')
]
)
self.course = CourseFactory.create(
display_name=self.COURSE_NAME,
number=self.COURSE_SLUG,
user_partitions=[self.partition]
)
grading_policy = {
"GRADER": [{
"type": "Homework",
"min_count": 2,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}]
}
self.add_grading_policy(grading_policy)
self.homework_all = self.add_graded_section_to_course('homework1')
self.p1_all_html_id = self.add_dropdown_to_section(self.homework_all.location, 'H1P1', 2).location.html_id()
self.homework_conditional = self.add_graded_section_to_course('homework2')
def split_setup(self, user_partition_group):
"""
Setup for tests using split_test module. Creates a split_test instance as a child of self.homework_conditional
with 2 verticals in it, and assigns self.student_user to the specified user_partition_group.
The verticals are returned.
"""
vertical_0_url = self.course.id.make_usage_key("vertical", "split_test_vertical_0")
vertical_1_url = self.course.id.make_usage_key("vertical", "split_test_vertical_1")
group_id_to_child = {}
for index, url in enumerate([vertical_0_url, vertical_1_url]):
group_id_to_child[str(index)] = url
split_test = ItemFactory.create(
parent_location=self.homework_conditional.location,
category="split_test",
display_name="Split test",
user_partition_id='0',
group_id_to_child=group_id_to_child,
)
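        # group_id_to_child maps each partition group id (as a string) to the usage key of
        # the vertical that group will see: group 0 gets vertical_0, group 1 gets vertical_1.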
vertical_0 = ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 0 vertical",
location=vertical_0_url,
)
vertical_1 = ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 1 vertical",
location=vertical_1_url,
)
# Now add the student to the specified group.
UserCourseTagFactory(
user=self.student_user,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member
value=str(user_partition_group)
)
return vertical_0, vertical_1
def split_different_problems_setup(self, user_partition_group):
"""
Setup for the case where the split test instance contains problems for each group
(so both groups do have graded content, though it is different).
Group 0 has 2 problems, worth 1 and 3 points respectively.
Group 1 has 1 problem, worth 1 point.
This method also assigns self.student_user to the specified user_partition_group and
then submits answers for the problems in section 1, which are visible to all students.
The submitted answers give the student 1 point out of a possible 2 points in the section.
"""
vertical_0, vertical_1 = self.split_setup(user_partition_group)
# Group 0 will have 2 problems in the section, worth a total of 4 points.
self.add_dropdown_to_section(vertical_0.location, 'H2P1_GROUP0', 1).location.html_id()
self.add_dropdown_to_section(vertical_0.location, 'H2P2_GROUP0', 3).location.html_id()
# Group 1 will have 1 problem in the section, worth a total of 1 point.
self.add_dropdown_to_section(vertical_1.location, 'H2P1_GROUP1', 1).location.html_id()
# Submit answers for problem in Section 1, which is visible to all students.
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Incorrect'})
def test_split_different_problems_group_0(self):
"""
Tests that users who see different problems in a split_test module instance are graded correctly.
This is the test case for a user in user partition group 0.
"""
self.split_different_problems_setup(self.user_partition_group_0)
self.submit_question_answer('H2P1_GROUP0', {'2_1': 'Correct'})
self.submit_question_answer('H2P2_GROUP0', {'2_1': 'Correct', '2_2': 'Incorrect', '2_3': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 2.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 3.0])
# Grade percent is .63. Here is the calculation
homework_1_score = 1.0 / 2
homework_2_score = (1.0 + 2.0) / 4
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def test_split_different_problems_group_1(self):
"""
Tests that users who see different problems in a split_test module instance are graded correctly.
This is the test case for a user in user partition group 1.
"""
self.split_different_problems_setup(self.user_partition_group_1)
self.submit_question_answer('H2P1_GROUP1', {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
# Grade percent is .75. Here is the calculation
homework_1_score = 1.0 / 2
homework_2_score = 1.0 / 1
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def split_one_group_no_problems_setup(self, user_partition_group):
"""
        Setup for the case where the split test instance contains problems only for one group.
Group 0 has no problems.
Group 1 has 1 problem, worth 1 point.
This method also assigns self.student_user to the specified user_partition_group and
then submits answers for the problems in section 1, which are visible to all students.
The submitted answers give the student 2 points out of a possible 2 points in the section.
"""
[_, vertical_1] = self.split_setup(user_partition_group)
# Group 1 will have 1 problem in the section, worth a total of 1 point.
self.add_dropdown_to_section(vertical_1.location, 'H2P1_GROUP1', 1).location.html_id()
self.submit_question_answer('H1P1', {'2_1': 'Correct'})
def test_split_one_group_no_problems_group_0(self):
"""
Tests what happens when a given group has no problems in it (students receive 0 for that section).
"""
self.split_one_group_no_problems_setup(self.user_partition_group_0)
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [])
self.assertEqual(self.earned_hw_scores(), [1.0, 0.0])
# Grade percent is .25. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = 0.0
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def test_split_one_group_no_problems_group_1(self):
"""
Verifies students in the group that DOES have a problem receive a score for their problem.
"""
self.split_one_group_no_problems_setup(self.user_partition_group_1)
self.submit_question_answer('H2P1_GROUP1', {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
# Grade percent is .75. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = 1.0 / 1
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
| martynovp/edx-platform | lms/djangoapps/courseware/tests/test_submitting_problems.py | Python | agpl-3.0 | 51,827 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import time
from datetime import datetime
from operator import itemgetter
from lxml import etree
from openerp import netsvc
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp import tools
class account_move_line(osv.osv):
_name = "account.move.line"
_description = "Journal Items"
def _query_get(self, cr, uid, obj='l', context=None):
fiscalyear_obj = self.pool.get('account.fiscalyear')
fiscalperiod_obj = self.pool.get('account.period')
account_obj = self.pool.get('account.account')
fiscalyear_ids = []
if context is None:
context = {}
initial_bal = context.get('initial_bal', False)
company_clause = " "
if context.get('company_id', False):
company_clause = " AND " +obj+".company_id = %s" % context.get('company_id', False)
if not context.get('fiscalyear', False):
if context.get('all_fiscalyear', False):
#this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
else:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
#for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
fiscalyear_ids = [context['fiscalyear']]
fiscalyear_clause = (','.join([str(x) for x in fiscalyear_ids])) or '0'
state = context.get('state', False)
where_move_state = ''
where_move_lines_by_date = ''
if context.get('date_from', False) and context.get('date_to', False):
if initial_bal:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < '" +context['date_from']+"')"
else:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '" +context['date_from']+"' AND date <= '"+context['date_to']+"')"
if state:
if state.lower() not in ['all']:
where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = '"+state+"')"
if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False):
if initial_bal:
period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
else:
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
if context.get('periods', False):
if initial_bal:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
if period_ids and period_ids[0]:
first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND date_start <= '%s' AND id NOT IN (%s)) %s %s" % (fiscalyear_clause, first_period.date_start, ids, where_move_state, where_move_lines_by_date)
else:
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, ids, where_move_state, where_move_lines_by_date)
else:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
if initial_bal and not context.get('periods', False) and not where_move_lines_by_date:
#we didn't pass any filter in the context, and the initial balance can't be computed using only the fiscalyear otherwise entries will be summed twice
#so we have to invalidate this query
raise osv.except_osv(_('Warning!'),_("You have not supplied enough arguments to compute the initial balance, please select a period and a journal in the context."))
if context.get('journal_ids', False):
query += ' AND '+obj+'.journal_id IN (%s)' % ','.join(map(str, context['journal_ids']))
if context.get('chart_account_id', False):
child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
query += ' AND '+obj+'.account_id IN (%s)' % ','.join(map(str, child_ids))
query += company_clause
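        # Illustrative result (assuming, say, fiscal year id 3 and journal id 5 in the
        # context): the returned fragment looks roughly like
        #   "l.state <> 'draft' AND l.period_id IN (SELECT id FROM account_period
        #    WHERE fiscalyear_id IN (3)) AND l.journal_id IN (5)"
        # and is meant to be appended to the WHERE clause of the calling report query.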
return query
def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
"""
This function returns the residual amount on a receivable or payable account.move.line.
By default, it returns an amount in the currency of this journal entry (maybe different
of the company currency), but if you pass 'residual_in_company_currency' = True in the
context then the returned amount will be in company currency.
"""
res = {}
if context is None:
context = {}
cur_obj = self.pool.get('res.currency')
for move_line in self.browse(cr, uid, ids, context=context):
res[move_line.id] = {
'amount_residual': 0.0,
'amount_residual_currency': 0.0,
}
if move_line.reconcile_id:
continue
if not move_line.account_id.type in ('payable', 'receivable'):
                #this function does not support move lines that are not related to payable or receivable accounts
continue
if move_line.currency_id:
move_line_total = move_line.amount_currency
sign = move_line.amount_currency < 0 and -1 or 1
else:
move_line_total = move_line.debit - move_line.credit
sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
line_total_in_company_currency = move_line.debit - move_line.credit
context_unreconciled = context.copy()
if move_line.reconcile_partial_id:
for payment_line in move_line.reconcile_partial_id.line_partial_ids:
if payment_line.id == move_line.id:
continue
if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
move_line_total += payment_line.amount_currency
else:
if move_line.currency_id:
context_unreconciled.update({'date': payment_line.date})
amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
move_line_total += amount_in_foreign_currency
else:
move_line_total += (payment_line.debit - payment_line.credit)
line_total_in_company_currency += (payment_line.debit - payment_line.credit)
result = move_line_total
res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
return res
def default_get(self, cr, uid, fields, context=None):
data = self._default_get(cr, uid, fields, context=context)
for f in data.keys():
if f not in fields:
del data[f]
return data
def _prepare_analytic_line(self, cr, uid, obj_line, context=None):
"""
        Prepare the values passed to create() on account.analytic.line when validating a journal item that has
an analytic account. This method is intended to be extended in other modules.
:param obj_line: browse record of the account.move.line that triggered the analytic line creation
"""
return {'name': obj_line.name,
'date': obj_line.date,
'account_id': obj_line.analytic_account_id.id,
'unit_amount': obj_line.quantity,
'product_id': obj_line.product_id and obj_line.product_id.id or False,
'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),
'general_account_id': obj_line.account_id.id,
'journal_id': obj_line.journal_id.analytic_journal_id.id,
'ref': obj_line.ref,
'move_id': obj_line.id,
'user_id': uid,
}
def create_analytic_lines(self, cr, uid, ids, context=None):
acc_ana_line_obj = self.pool.get('account.analytic.line')
for obj_line in self.browse(cr, uid, ids, context=context):
if obj_line.analytic_account_id:
if not obj_line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal !'),_("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
vals_line = self._prepare_analytic_line(cr, uid, obj_line, context=context)
acc_ana_line_obj.create(cr, uid, vals_line)
return True
def _default_get_move_form_hook(self, cursor, user, data):
'''Called in the end of default_get method for manual entry in account_move form'''
if data.has_key('analytic_account_id'):
del(data['analytic_account_id'])
if data.has_key('account_tax_id'):
del(data['account_tax_id'])
return data
def convert_to_period(self, cr, uid, context=None):
if context is None:
context = {}
period_obj = self.pool.get('account.period')
#check if the period_id changed in the context from client side
if context.get('period_id', False):
period_id = context.get('period_id')
if type(period_id) == str:
ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
context.update({
'period_id': ids and ids[0] or False
})
return context
def _default_get(self, cr, uid, fields, context=None):
#default_get should only do the following:
# -propose the next amount in debit/credit in order to balance the move
# -propose the next account from the journal (default debit/credit account) accordingly
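        # e.g. if the lines encoded so far total 150.0 of debit and 100.0 of credit, the next
        # line is proposed with credit = 50.0 and the journal's default credit account
        # (possibly remapped through the partner's fiscal position).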
if context is None:
context = {}
account_obj = self.pool.get('account.account')
period_obj = self.pool.get('account.period')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
tax_obj = self.pool.get('account.tax')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
if not context.get('journal_id', False):
context['journal_id'] = context.get('search_default_journal_id', False)
if not context.get('period_id', False):
context['period_id'] = context.get('search_default_period_id', False)
context = self.convert_to_period(cr, uid, context)
# Compute simple values
data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
if context.get('journal_id'):
total = 0.0
#in account.move form view, it is not possible to compute total debit and credit using
#a browse record. So we must use the context to pass the whole one2many field and compute the total
if context.get('line_id'):
for move_line_dict in move_obj.resolve_2many_commands(cr, uid, 'line_id', context.get('line_id'), context=context):
data['name'] = data.get('name') or move_line_dict.get('name')
data['partner_id'] = data.get('partner_id') or move_line_dict.get('partner_id')
total += move_line_dict.get('debit', 0.0) - move_line_dict.get('credit', 0.0)
elif context.get('period_id'):
#find the date and the ID of the last unbalanced account.move encoded by the current user in that journal and period
move_id = False
cr.execute('''SELECT move_id, date FROM account_move_line
WHERE journal_id = %s AND period_id = %s AND create_uid = %s AND state = %s
ORDER BY id DESC limit 1''', (context['journal_id'], context['period_id'], uid, 'draft'))
res = cr.fetchone()
move_id = res and res[0] or False
data['date'] = res and res[1] or period_obj.browse(cr, uid, context['period_id'], context=context).date_start
data['move_id'] = move_id
if move_id:
#if there exist some unbalanced accounting entries that match the journal and the period,
#we propose to continue the same move by copying the ref, the name, the partner...
move = move_obj.browse(cr, uid, move_id, context=context)
data.setdefault('name', move.line_id[-1].name)
for l in move.line_id:
data['partner_id'] = data.get('partner_id') or l.partner_id.id
data['ref'] = data.get('ref') or l.ref
total += (l.debit or 0.0) - (l.credit or 0.0)
#compute the total of current move
data['debit'] = total < 0 and -total or 0.0
data['credit'] = total > 0 and total or 0.0
#pick the good account on the journal accordingly if the next proposed line will be a debit or a credit
journal_data = journal_obj.browse(cr, uid, context['journal_id'], context=context)
account = total > 0 and journal_data.default_credit_account_id or journal_data.default_debit_account_id
#map the account using the fiscal position of the partner, if needed
part = data.get('partner_id') and partner_obj.browse(cr, uid, data['partner_id'], context=context) or False
if account and data.get('partner_id'):
account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
account = account_obj.browse(cr, uid, account, context=context)
data['account_id'] = account and account.id or False
#compute the amount in secondary currency of the account, if needed
if account and account.currency_id:
data['currency_id'] = account.currency_id.id
#set the context for the multi currency change
compute_ctx = context.copy()
compute_ctx.update({
#the following 2 parameters are used to choose the currency rate, in case where the account
#doesn't work with an outgoing currency rate method 'at date' but 'average'
'res.currency.compute.account': account,
'res.currency.compute.account_invert': True,
})
if data.get('date'):
compute_ctx.update({'date': data['date']})
data['amount_currency'] = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], -total, context=compute_ctx)
data = self._default_get_move_form_hook(cr, uid, data)
return data
def on_create_write(self, cr, uid, id, context=None):
if not id:
return []
ml = self.browse(cr, uid, id, context=context)
return map(lambda x: x.id, ml.move_id.line_id)
def _balance(self, cr, uid, ids, name, arg, context=None):
if context is None:
context = {}
c = context.copy()
        c['initial_bal'] = True
sql = """SELECT l2.id, SUM(l1.debit-l1.credit)
FROM account_move_line l1, account_move_line l2
WHERE l2.account_id = l1.account_id
AND l1.id <= l2.id
AND l2.id IN %s AND """ + \
self._query_get(cr, uid, obj='l1', context=c) + \
" GROUP BY l2.id"
cr.execute(sql, [tuple(ids)])
return dict(cr.fetchall())
def _invoice(self, cursor, user, ids, name, arg, context=None):
invoice_obj = self.pool.get('account.invoice')
res = {}
for line_id in ids:
res[line_id] = False
cursor.execute('SELECT l.id, i.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' \
'AND l.id IN %s',
(tuple(ids),))
invoice_ids = []
for line_id, invoice_id in cursor.fetchall():
res[line_id] = invoice_id
invoice_ids.append(invoice_id)
invoice_names = {False: ''}
for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
invoice_names[invoice_id] = name
for line_id in res.keys():
invoice_id = res[line_id]
res[line_id] = (invoice_id, invoice_names[invoice_id])
return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
if context is None:
context = {}
if not args:
return []
where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
GROUP BY id, debit, credit having '+where)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _invoice_search(self, cursor, user, obj, name, args, context=None):
if not args:
return []
invoice_obj = self.pool.get('account.invoice')
i = 0
while i < len(args):
fargs = args[i][0].split('.', 1)
if len(fargs) > 1:
args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
[(fargs[1], args[i][1], args[i][2])]))
i += 1
continue
if isinstance(args[i][2], basestring):
res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
args[i][1])
args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
i += 1
qu1, qu2 = [], []
for x in args:
if x[1] != 'in':
if (x[2] is False) and (x[1] == '='):
qu1.append('(i.id IS NULL)')
elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
qu1.append('(i.id IS NOT NULL)')
else:
qu1.append('(i.id %s %s)' % (x[1], '%s'))
qu2.append(x[2])
elif x[1] == 'in':
if len(x[2]) > 0:
qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
qu2 += x[2]
else:
qu1.append(' (False)')
if qu1:
qu1 = ' AND' + ' AND'.join(qu1)
else:
qu1 = ''
cursor.execute('SELECT l.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' + qu1, qu2)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
    def _get_reconcile(self, cr, uid, ids, name, unknow_none, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.reconcile_id:
res[line.id] = str(line.reconcile_id.name)
elif line.reconcile_partial_id:
res[line.id] = str(line.reconcile_partial_id.name)
return res
_columns = {
'name': fields.char('Name', size=64, required=True),
        'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, e.g.: number of products sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Journal Entry', ondelete="cascade", help="The move of this entry line.", select=2, required=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Internal Note'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', size=64, store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2),
'reconcile': fields.function(_get_reconcile, type='char', string='Reconcile Ref'),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
'amount_residual_currency': fields.function(_amount_residual, string='Residual Amount in Currency', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (maybe different of the company currency)."),
'amount_residual': fields.function(_amount_residual, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'journal_id': fields.related('move_id', 'journal_id', string='Journal', type='many2one', relation='account.journal', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['journal_id'], 20)
}),
'period_id': fields.related('move_id', 'period_id', string='Period', type='many2one', relation='account.period', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['period_id'], 20)
}),
'blocked': fields.boolean('No Follow-up', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Balanced')], 'Status', readonly=True),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
        'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount. If the tax account is a base tax code, "\
            "this field will contain the base amount (without tax)."),
'invoice': fields.function(_invoice, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax'),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company',
string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
if context is None:
            context = {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if context.get('journal_id') and context.get('period_id'):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
def _get_period(self, cr, uid, context=None):
"""
Return default account period value
"""
context = context or {}
if context.get('period_id', False):
return context['period_id']
account_period_obj = self.pool.get('account.period')
ids = account_period_obj.find(cr, uid, context=context)
period_id = False
if ids:
period_id = ids[0]
return period_id
def _get_journal(self, cr, uid, context=None):
"""
Return journal based on the journal type
"""
context = context or {}
if context.get('journal_id', False):
return context['journal_id']
journal_id = False
journal_pool = self.pool.get('account.journal')
if context.get('journal_type', False):
jids = journal_pool.search(cr, uid, [('type','=', context.get('journal_type'))])
if not jids:
raise osv.except_osv(_('Configuration Error!'), _('Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration/Journals/Journals.') % context.get('journal_type'))
journal_id = jids[0]
return journal_id
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': fields.date.context_today,
'state': 'draft',
'currency_id': _get_currency,
'journal_id': _get_journal,
'credit': 0.0,
'debit': 0.0,
'amount_currency': 0.0,
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': _get_period,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
def _auto_init(self, cr, context=None):
super(account_move_line, self)._auto_init(cr, context=context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index ON account_move_line (journal_id, period_id)')
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'view':
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
raise osv.except_osv(_('Error!'), _('You cannot create journal items on a closed account %s %s.') % (l.account_id.code, l.account_id.name))
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.journal_id.allow_date:
if not time.strptime(l.date[:10],'%Y-%m-%d') >= time.strptime(l.period_id.date_start, '%Y-%m-%d') or not time.strptime(l.date[:10], '%Y-%m-%d') <= time.strptime(l.period_id.date_stop, '%Y-%m-%d'):
return False
return True
def _check_currency(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.account_id.currency_id:
if not l.currency_id or not l.currency_id.id == l.account_id.currency_id.id:
return False
return True
def _check_currency_and_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if (l.amount_currency and not l.currency_id):
return False
return True
def _check_currency_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.amount_currency:
if (l.amount_currency > 0.0 and l.credit > 0.0) or (l.amount_currency < 0.0 and l.debit > 0.0):
return False
return True
def _check_currency_company(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.currency_id.id == l.company_id.currency_id.id:
return False
return True
_constraints = [
(_check_no_view, 'You cannot create journal items on an account of type view.', ['account_id']),
(_check_no_closed, 'You cannot create journal items on closed account.', ['account_id']),
(_check_company_id, 'Account and Period must belong to the same company.', ['company_id']),
(_check_date, 'The date of your Journal Entry is not in the defined period! You should change the date or remove this constraint from the journal.', ['date']),
(_check_currency, 'The selected account of your Journal Entry forces to provide a secondary currency. You should remove the secondary currency on the account or select a multi-currency view on the journal.', ['currency_id']),
(_check_currency_and_amount, "You cannot create journal items with a secondary currency without recording both 'currency' and 'amount currency' field.", ['currency_id','amount_currency']),
        (_check_currency_amount, 'The amount expressed in the secondary currency must be positive when the journal item is a debit and negative when the journal item is a credit.', ['amount_currency']),
        (_check_currency_company, "You cannot provide a secondary currency if it is the same as the company one.", ['currency_id']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
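    # Recompute debit/credit in the company currency when the currency, amount or account changes on the line.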
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
def onchange_account_id(self, cr, uid, ids, account_id, context=None):
res = {'value': {}}
if account_id:
res['value']['account_tax_id'] = [x.id for x in self.pool.get('account.account').browse(cr, uid, account_id, context=context).tax_ids]
return res
def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False):
partner_obj = self.pool.get('res.partner')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
val['date_maturity'] = False
if not partner_id:
return {'value':val}
if not date:
date = datetime.now().strftime('%Y-%m-%d')
jt = False
if journal:
jt = journal_obj.browse(cr, uid, journal).type
part = partner_obj.browse(cr, uid, partner_id)
payment_term_id = False
if jt and jt in ('purchase', 'purchase_refund') and part.property_supplier_payment_term:
payment_term_id = part.property_supplier_payment_term.id
elif jt and part.property_payment_term:
payment_term_id = part.property_payment_term.id
if payment_term_id:
res = payment_term_obj.compute(cr, uid, payment_term_id, 100, date)
if res:
val['date_maturity'] = res[0][0]
if not account_id:
id1 = part.property_account_payable.id
id2 = part.property_account_receivable.id
if jt:
if jt in ('sale', 'purchase_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif jt in ('purchase', 'sale_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
elif jt in ('general', 'bank', 'cash'):
if part.customer:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif part.supplier:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
if val.get('account_id', False):
d = self.onchange_account_id(cr, uid, ids, val['account_id'])
val.update(d['value'])
return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
#
    # type: the type of reconciliation (no logic behind this field, for info)
    #
    # writeoff: entry generated for the difference between the lines
#
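    # search() override: with 'next_partner_only' in the context, restrict results to the next partner that still has entries to reconcile.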
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context and context.get('next_partner_only', False):
if not context.get('partner_id', False):
partner = self.list_partners_to_reconcile(cr, uid, context=context)
if partner:
partner = partner[0]
else:
partner = context.get('partner_id', False)
if not partner:
return []
args.append(('partner_id', '=', partner[0]))
return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
def list_partners_to_reconcile(self, cr, uid, context=None):
cr.execute(
"""
SELECT partner_id
FROM (
SELECT l.partner_id, p.last_reconciliation_date, SUM(l.debit) AS debit, SUM(l.credit) AS credit
FROM account_move_line l
RIGHT JOIN account_account a ON (a.id = l.account_id)
RIGHT JOIN res_partner p ON (l.partner_id = p.id)
WHERE a.reconcile IS TRUE
AND l.reconcile_id IS NULL
AND (p.last_reconciliation_date IS NULL OR l.date > p.last_reconciliation_date)
AND l.state <> 'draft'
GROUP BY l.partner_id, p.last_reconciliation_date
) AS s
WHERE debit > 0 AND credit > 0
ORDER BY last_reconciliation_date""")
ids = cr.fetchall()
ids = len(ids) and [x[0] for x in ids] or []
return self.pool.get('res.partner').name_get(cr, uid, ids, context=context)
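    # Partially reconcile the given lines; when their total balance is zero, delegate to a full reconcile() instead.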
def reconcile_partial(self, cr, uid, ids, type='auto', context=None, writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False):
move_rec_obj = self.pool.get('account.move.reconcile')
merges = []
unmerge = []
total = 0.0
merges_rec = []
company_list = []
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
if company_list and not line.company_id.id in company_list:
                raise osv.except_osv(_('Warning!'), _('To reconcile the entries, the company must be the same for all entries.'))
company_list.append(line.company_id.id)
for line in self.browse(cr, uid, ids, context=context):
if line.account_id.currency_id:
currency_id = line.account_id.currency_id
else:
currency_id = line.company_id.currency_id
if line.reconcile_id:
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
if not line2.reconcile_id:
if line2.id not in merges:
merges.append(line2.id)
if line2.account_id.currency_id:
total += line2.amount_currency
else:
total += (line2.debit or 0.0) - (line2.credit or 0.0)
merges_rec.append(line.reconcile_partial_id.id)
else:
unmerge.append(line.id)
if line.account_id.currency_id:
total += line.amount_currency
else:
total += (line.debit or 0.0) - (line.credit or 0.0)
if self.pool.get('res.currency').is_zero(cr, uid, currency_id, total):
res = self.reconcile(cr, uid, merges+unmerge, context=context, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id)
return res
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
})
move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=context)
return True
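    # Fully reconcile the given lines, creating a write-off move when a non-zero balance remains and a write-off account is provided.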
def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
account_obj = self.pool.get('account.account')
move_obj = self.pool.get('account.move')
move_rec_obj = self.pool.get('account.move.reconcile')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
lines = self.browse(cr, uid, ids, context=context)
unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
credit = debit = 0.0
currency = 0.0
account_id = False
partner_id = False
if context is None:
context = {}
company_list = []
for line in self.browse(cr, uid, ids, context=context):
if company_list and not line.company_id.id in company_list:
                raise osv.except_osv(_('Warning!'), _('To reconcile the entries, the company must be the same for all entries.'))
company_list.append(line.company_id.id)
for line in unrec_lines:
if line.state <> 'valid':
raise osv.except_osv(_('Error!'),
_('Entry "%s" is not valid !') % line.name)
credit += line['credit']
debit += line['debit']
currency += line['amount_currency'] or 0.0
account_id = line['account_id']['id']
partner_id = (line['partner_id'] and line['partner_id']['id']) or False
writeoff = debit - credit
        # If date_p is in the context, use that date
if context.has_key('date_p') and context['date_p']:
            date = context['date_p']
else:
date = time.strftime('%Y-%m-%d')
cr.execute('SELECT account_id, reconcile_id '\
'FROM account_move_line '\
'WHERE id IN %s '\
'GROUP BY account_id,reconcile_id',
(tuple(ids), ))
r = cr.fetchall()
#TODO: move this check to a constraint in the account_move_reconcile object
if not unrec_lines:
raise osv.except_osv(_('Error!'), _('Entry is already reconciled.'))
account = account_obj.browse(cr, uid, account_id, context=context)
if r[0][1] != None:
raise osv.except_osv(_('Error!'), _('Some entries are already reconciled.'))
if context.get('fy_closing'):
# We don't want to generate any write-off when being called from the
# wizard used to close a fiscal year (and it doesn't give us any
# writeoff_acc_id).
pass
elif (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
(account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
if not writeoff_acc_id:
raise osv.except_osv(_('Warning!'), _('You have to provide an account for the write off/exchange difference entry.'))
if writeoff > 0:
debit = writeoff
credit = 0.0
self_credit = writeoff
self_debit = 0.0
else:
debit = 0.0
credit = -writeoff
self_credit = 0.0
self_debit = -writeoff
# If comment exist in context, take it
if 'comment' in context and context['comment']:
libelle = context['comment']
else:
libelle = _('Write-Off')
cur_obj = self.pool.get('res.currency')
cur_id = False
amount_currency_writeoff = 0.0
if context.get('company_currency_id',False) != context.get('currency_id',False):
cur_id = context.get('currency_id',False)
for line in unrec_lines:
if line.currency_id and line.currency_id.id == context.get('currency_id',False):
amount_currency_writeoff += line.amount_currency
else:
tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount
writeoff_lines = [
(0, 0, {
'name': libelle,
'debit': self_debit,
'credit': self_credit,
'account_id': account_id,
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
}),
(0, 0, {
'name': libelle,
'debit': debit,
'credit': credit,
'account_id': writeoff_acc_id,
'analytic_account_id': context.get('analytic_id', False),
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
})
]
writeoff_move_id = move_obj.create(cr, uid, {
'period_id': writeoff_period_id,
'journal_id': writeoff_journal_id,
'date':date,
'state': 'draft',
'line_id': writeoff_lines
})
writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
if account_id == writeoff_acc_id:
writeoff_line_ids = [writeoff_line_ids[1]]
ids += writeoff_line_ids
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_id': map(lambda x: (4, x, False), ids),
'line_partial_ids': map(lambda x: (3, x, False), ids)
})
wf_service = netsvc.LocalService("workflow")
# the id of the move.reconcile is written in the move.line (self) by the create method above
# because of the way the line_id are defined: (4, x, False)
for id in ids:
wf_service.trg_trigger(uid, 'account.move.line', id, cr)
if lines and lines[0]:
partner_id = lines[0].partner_id and lines[0].partner_id.id or False
if partner_id and not partner_obj.has_something_to_reconcile(cr, uid, partner_id, context=context):
partner_obj.mark_as_reconciled(cr, uid, [partner_id], context=context)
return r_id
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
context = self.convert_to_period(cr, user, context=context)
if context.get('account_id', False):
cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
return False
if context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
j = cr.fetchone()[0] or ''
cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
p = cr.fetchone()[0] or ''
if j or p:
return j + (p and (':' + p) or '')
return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
@param date: latest value from user input for field date
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
ctx = dict(context, account_period_prefer_normal=True)
pids = period_pool.find(cr, user, date, context=ctx)
if pids:
res.update({
'period_id':pids[0]
})
context.update({
'period_id':pids[0]
})
return {
'value':res,
'context':context,
}
def _check_moves(self, cr, uid, context=None):
# use the first move ever created for this journal and period
if context is None:
context = {}
cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
res = cr.fetchone()
if res:
if res[1] != 'draft':
raise osv.except_osv(_('User Error!'),
_('The account move (%s) for centralisation ' \
'has been confirmed.') % res[2])
return res
def _remove_move_reconcile(self, cr, uid, move_ids=None, opening_reconciliation=False, context=None):
        # Remove the move reconcile ids related to the given moves
obj_move_line = self.pool.get('account.move.line')
obj_move_rec = self.pool.get('account.move.reconcile')
unlink_ids = []
if not move_ids:
return True
recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
full_recs = filter(lambda x: x['reconcile_id'], recs)
rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
unlink_ids += rec_ids
unlink_ids += part_rec_ids
if unlink_ids:
if opening_reconciliation:
obj_move_rec.write(cr, uid, unlink_ids, {'opening_reconciliation': False})
obj_move_rec.unlink(cr, uid, unlink_ids)
return True
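    # unlink() override: drop the lines one by one with their journal/period in the context, then revalidate the parent moves.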
def unlink(self, cr, uid, ids, context=None, check=True):
if context is None:
context = {}
move_obj = self.pool.get('account.move')
self._update_check(cr, uid, ids, context)
result = False
move_ids = set()
for line in self.browse(cr, uid, ids, context=context):
move_ids.add(line.move_id.id)
context['journal_id'] = line.journal_id.id
context['period_id'] = line.period_id.id
result = super(account_move_line, self).unlink(cr, uid, [line.id], context=context)
move_ids = list(move_ids)
if check and move_ids:
move_obj.validate(cr, uid, move_ids, context=context)
return result
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context={}
move_obj = self.pool.get('account.move')
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
if isinstance(ids, (int, long)):
ids = [ids]
if vals.get('account_tax_id', False):
raise osv.except_osv(_('Unable to change tax!'), _('You cannot change the tax, you should remove and recreate lines.'))
if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if update_check:
if ('account_id' in vals) or ('journal_id' in vals) or ('period_id' in vals) or ('move_id' in vals) or ('debit' in vals) or ('credit' in vals) or ('date' in vals):
self._update_check(cr, uid, ids, context)
todo_date = None
if vals.get('date', False):
todo_date = vals['date']
del vals['date']
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
if ('journal_id' not in ctx):
if line.move_id:
ctx['journal_id'] = line.move_id.journal_id.id
else:
ctx['journal_id'] = line.journal_id.id
if ('period_id' not in ctx):
if line.move_id:
ctx['period_id'] = line.move_id.period_id.id
else:
ctx['period_id'] = line.period_id.id
#Check for centralisation
journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
if journal.centralisation:
self._check_moves(cr, uid, context=ctx)
result = super(account_move_line, self).write(cr, uid, ids, vals, context)
if check:
done = []
for line in self.browse(cr, uid, ids):
if line.move_id.id not in done:
done.append(line.move_id.id)
move_obj.validate(cr, uid, [line.move_id.id], context)
if todo_date:
move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
for (state,) in result:
if state == 'done':
                raise osv.except_osv(_('Error !'), _('You can not add/modify entries in a closed period %s of journal %s.') % (period.name, journal.name))
if not result:
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
def _update_check(self, cr, uid, ids, context=None):
done = {}
for line in self.browse(cr, uid, ids, context=context):
err_msg = _('Move name (id): %s (%s)') % (line.move_id.name, str(line.move_id.id))
if line.move_id.state <> 'draft' and (not line.journal_id.entry_posted):
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a confirmed entry. You can just change some non legal fields or you must unconfirm the journal entry first.\n%s.') % err_msg)
if line.reconcile_id:
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a reconciled entry. You can just change some non legal fields or you must unreconcile first.\n%s.') % err_msg)
t = (line.journal_id.id, line.period_id.id)
if t not in done:
self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
done[t] = True
return True
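    # create() override: pick or create the parent account.move, enforce journal controls, convert to the account's secondary currency, and generate analytic and tax lines.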
def create(self, cr, uid, vals, context=None, check=True):
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
move_obj = self.pool.get('account.move')
cur_obj = self.pool.get('res.currency')
journal_obj = self.pool.get('account.journal')
if context is None:
context = {}
if vals.get('move_id', False):
move = self.pool.get('account.move').browse(cr, uid, vals['move_id'], context=context)
if move.company_id:
vals['company_id'] = move.company_id.id
if move.date and not vals.get('date'):
vals['date'] = move.date
if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if 'journal_id' in vals and vals['journal_id']:
context['journal_id'] = vals['journal_id']
if 'period_id' in vals and vals['period_id']:
context['period_id'] = vals['period_id']
if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
m = move_obj.browse(cr, uid, vals['move_id'])
context['journal_id'] = m.journal_id.id
context['period_id'] = m.period_id.id
#we need to treat the case where a value is given in the context for period_id as a string
if 'period_id' in context and not isinstance(context.get('period_id', ''), (int, long)):
period_candidate_ids = self.pool.get('account.period').name_search(cr, uid, name=context.get('period_id',''))
if len(period_candidate_ids) != 1:
raise osv.except_osv(_('Error!'), _('No period found or more than one period found for the given date.'))
context['period_id'] = period_candidate_ids[0][0]
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
move_id = vals.get('move_id', False)
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
vals['journal_id'] = vals.get('journal_id') or context.get('journal_id')
vals['period_id'] = vals.get('period_id') or context.get('period_id')
vals['date'] = vals.get('date') or context.get('date')
if not move_id:
if journal.centralisation:
#Check for centralisation
res = self._check_moves(cr, uid, context)
if res:
vals['move_id'] = res[0]
if not vals.get('move_id', False):
if journal.sequence_id:
#name = self.pool.get('ir.sequence').next_by_id(cr, uid, journal.sequence_id.id)
v = {
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'period_id': context['period_id'],
'journal_id': context['journal_id']
}
if vals.get('ref', ''):
v.update({'ref': vals['ref']})
move_id = move_obj.create(cr, uid, v, context)
vals['move_id'] = move_id
else:
raise osv.except_osv(_('No piece number !'), _('Cannot create an automatic sequence for this piece.\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
ok = not (journal.type_control_ids or journal.account_control_ids)
if ('account_id' in vals):
account = account_obj.browse(cr, uid, vals['account_id'], context=context)
if journal.type_control_ids:
type = account.user_type
for t in journal.type_control_ids:
if type.code == t.code:
ok = True
break
if journal.account_control_ids and not ok:
for a in journal.account_control_ids:
if a.id == vals['account_id']:
ok = True
break
# Automatically convert in the account's secondary currency if there is one and
# the provided values were not already multi-currency
if account.currency_id and (vals.get('amount_currency', False) is False) and account.currency_id.id != account.company_id.currency_id.id:
vals['currency_id'] = account.currency_id.id
ctx = {}
if 'date' in vals:
ctx['date'] = vals['date']
vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
if not ok:
raise osv.except_osv(_('Bad Account!'), _('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
if vals.get('analytic_account_id',False):
if journal.analytic_journal_id:
vals['analytic_lines'] = [(0,0, {
'name': vals['name'],
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'account_id': vals.get('analytic_account_id', False),
'unit_amount': vals.get('quantity', 1.0),
'amount': vals.get('debit', 0.0) or vals.get('credit', 0.0),
'general_account_id': vals.get('account_id', False),
'journal_id': journal.analytic_journal_id.id,
'ref': vals.get('ref', False),
'user_id': uid
})]
result = super(account_move_line, self).create(cr, uid, vals, context=context)
# CREATE Taxes
if vals.get('account_tax_id', False):
tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
total = vals['debit'] - vals['credit']
if journal.type in ('purchase_refund', 'sale_refund'):
base_code = 'ref_base_code_id'
tax_code = 'ref_tax_code_id'
account_id = 'account_paid_id'
base_sign = 'ref_base_sign'
tax_sign = 'ref_tax_sign'
else:
base_code = 'base_code_id'
tax_code = 'tax_code_id'
account_id = 'account_collected_id'
base_sign = 'base_sign'
tax_sign = 'tax_sign'
tmp_cnt = 0
for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00, force_excluded=True).get('taxes'):
#create the base movement
if tmp_cnt == 0:
if tax[base_code]:
tmp_cnt += 1
self.write(cr, uid,[result], {
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total)
})
else:
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'account_tax_id': False,
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
'account_id': vals['account_id'],
'credit': 0.0,
'debit': 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
#create the Tax movement
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'account_tax_id': False,
'tax_code_id': tax[tax_code],
'tax_amount': tax[tax_sign] * abs(tax['amount']),
'account_id': tax[account_id] or vals['account_id'],
'credit': tax['amount']<0 and -tax['amount'] or 0.0,
'debit': tax['amount']>0 and tax['amount'] or 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
del vals['account_tax_id']
if check and ((not context.get('no_store_function')) or journal.entry_posted):
tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
if journal.entry_posted and tmp:
move_obj.button_validate(cr,uid, [vals['move_id']], context)
return result
def list_periods(self, cr, uid, context=None):
ids = self.pool.get('account.period').search(cr,uid,[])
return self.pool.get('account.period').name_get(cr, uid, ids, context=context)
def list_journals(self, cr, uid, context=None):
ng = dict(self.pool.get('account.journal').name_search(cr,uid,'',[]))
ids = ng.keys()
result = []
for journal in self.pool.get('account.journal').browse(cr, uid, ids, context=context):
result.append((journal.id,ng[journal.id],journal.type,
bool(journal.currency),bool(journal.analytic_journal_id)))
return result
account_move_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jss-emr/openerp-7-src | openerp/addons/account/account_move_line.py | Python | agpl-3.0 | 70,268 |
#-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import psutil
import random
import resource
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import werkzeug.serving
try:
import fcntl
except ImportError:
pass
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
import openerp.tools.config as config
from openerp.release import nt_service_name
from openerp.tools.misc import stripped_sys_argv, dumpstacks
import wsgi_server
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
def memory_info(process):
""" psutil < 2.0 does not have memory_info, >= 3.0 does not have
get_memory_info """
pmem = (getattr(process, 'memory_info', None) or process.get_memory_info)()
return (pmem.rss, pmem.vms)
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class BaseWSGIServerNoBind(werkzeug.serving.BaseWSGIServer):
""" werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
use this class, sets the socket and calls the process_request() manually
"""
def __init__(self, app):
werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)
def server_bind(self):
        # we don't bind because we use the listen socket of PreforkServer#socket
        # instead we close the socket
if self.socket:
self.socket.close()
def server_activate(self):
        # don't listen as we use PreforkServer#socket
pass
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(werkzeug.serving.ThreadedWSGIServer):
""" werkzeug Threaded WSGI Server patched to allow reusing a listen socket
    given by the environment; this is used by autoreload to keep the listen
socket open when a reload happens.
"""
def server_bind(self):
envfd = os.environ.get('LISTEN_FDS')
if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
self.reload_socket = True
self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
            # should we os.close(int(envfd))? It seems python duplicates the fd.
else:
self.reload_socket = False
super(ThreadedWSGIServerReloadable, self).server_bind()
def server_activate(self):
if not self.reload_socket:
super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
def __init__(self, server):
self.server = server
self.files = {}
self.modules = {}
import pyinotify
class EventHandler(pyinotify.ProcessEvent):
def __init__(self, autoreload):
self.autoreload = autoreload
def process_IN_CREATE(self, event):
_logger.debug('File created: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
def process_IN_MODIFY(self, event):
_logger.debug('File modified: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
self.wm = pyinotify.WatchManager()
self.handler = EventHandler(self)
self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE # IN_MOVED_FROM, IN_MOVED_TO ?
for path in openerp.tools.config.options["addons_path"].split(','):
_logger.info('Watching addons folder %s', path)
self.wm.add_watch(path, mask, rec=True)
def process_data(self, files):
xml_files = [i for i in files if i.endswith('.xml')]
addons_path = openerp.tools.config.options["addons_path"].split(',')
for i in xml_files:
for path in addons_path:
if i.startswith(path):
                        # find out which addons path the file belongs to
                        # and extract its module name
right = i[len(path) + 1:].split('/')
if len(right) < 2:
continue
module = right[0]
self.modules[module]=1
if self.modules:
_logger.info('autoreload: xml change detected, autoreload activated')
restart()
def process_python(self, files):
# process python changes
py_files = [i for i in files if i.endswith('.py')]
py_errors = []
# TODO keep python errors until they are ok
if py_files:
for i in py_files:
try:
source = open(i, 'rb').read() + '\n'
compile(source, i, 'exec')
except SyntaxError:
py_errors.append(i)
if py_errors:
_logger.info('autoreload: python code change detected, errors found')
for i in py_errors:
_logger.info('autoreload: SyntaxError %s',i)
else:
_logger.info('autoreload: python code updated, autoreload activated')
restart()
def check_thread(self):
# Check if some files have been touched in the addons path.
# If true, check if the touched file belongs to an installed module
# in any of the database used in the registry manager.
while 1:
while self.notifier.check_events(1000):
self.notifier.read_events()
self.notifier.process_events()
l = self.files.keys()
self.files.clear()
self.process_data(l)
self.process_python(l)
def run(self):
t = threading.Thread(target=self.check_thread)
t.setDaemon(True)
t.start()
_logger.info('AutoReload watcher running')
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
def __init__(self, app):
# TODO Change the xmlrpc_* options to http_*
self.app = app
# config
self.interface = config['xmlrpc_interface'] or '0.0.0.0'
self.port = config['xmlrpc_port']
# runtime
self.pid = os.getpid()
def close_socket(self, sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error, e:
            # On OSX, the socket is shut down on both sides if either side closes it,
# causing an error 57 'Socket is not connected' on shutdown
# of the other side (or something), see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
raise
sock.close()
class ThreadedServer(CommonServer):
def __init__(self, app):
super(ThreadedServer, self).__init__(app)
self.main_thread_id = threading.currentThread().ident
# Variable keeping track of the number of calls to the signal handler defined
# below. This variable is monitored by ``quit_on_signals()``.
self.quit_signals_received = 0
#self.socket = None
self.httpd = None
def signal_handler(self, sig, frame):
if sig in [signal.SIGINT,signal.SIGTERM]:
# shutdown on kill -INT or -TERM
self.quit_signals_received += 1
if self.quit_signals_received > 1:
# logging.shutdown was already called at this point.
sys.stderr.write("Forced shutdown.\n")
os._exit(0)
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
self.quit_signals_received += 1
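    # Each cron thread periodically polls every registry and acquires pending jobs until none are left.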
def cron_thread(self, number):
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = openerp.modules.registry.RegistryManager.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.items():
while True and registry.ready:
acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
if not acquired:
break
def cron_spawn(self):
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
# Force call to strptime just before starting the cron thread
# to prevent time.strptime AttributeError within the thread.
# See: http://bugs.python.org/issue7980
datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
for i in range(openerp.tools.config['max_cron_threads']):
def target():
self.cron_thread(i)
t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
t.setDaemon(True)
t.start()
_logger.debug("cron%d started!" % i)
def http_thread(self):
def app(e,s):
return self.app(e,s)
self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
self.httpd.serve_forever()
def http_spawn(self):
threading.Thread(target=self.http_thread).start()
_logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)
def start(self):
_logger.debug("Setting signal handlers")
if os.name == 'posix':
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
elif os.name == 'nt':
import win32api
            win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)
self.cron_spawn()
self.http_spawn()
def stop(self):
""" Shutdown the WSGI server. Wait for non deamon threads.
"""
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
self.httpd.shutdown()
self.close_socket(self.httpd.socket)
# Manually join() all threads before calling sys.exit() to allow a second signal
# to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
# threading.Thread.join() should not mask signals (at least in python 2.5).
me = threading.currentThread()
_logger.debug('current thread: %r', me)
for thread in threading.enumerate():
_logger.debug('process %r (%r)', thread, thread.isDaemon())
if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
while thread.isAlive():
_logger.debug('join and sleep')
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
_logger.debug('--')
openerp.modules.registry.RegistryManager.delete_all()
logging.shutdown()
def run(self):
""" Start the http server and the cron thread then wait for a signal.
The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
a second one if any will force an immediate exit.
"""
self.start()
# Wait for a first signal to be handled. (time.sleep will be interrupted
# by the signal handler.) The try/except is for the win32 case.
try:
while self.quit_signals_received == 0:
time.sleep(60)
except KeyboardInterrupt:
pass
self.stop()
def reload(self):
os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
def __init__(self, app):
super(GeventServer, self).__init__(app)
self.port = config['longpolling_port']
self.httpd = None
def watch_parent(self, beat=4):
import gevent
ppid = os.getppid()
while True:
if ppid != os.getppid():
pid = os.getpid()
_logger.info("LongPolling (%s) Parent changed", pid)
# suicide !!
os.kill(pid, signal.SIGTERM)
return
gevent.sleep(beat)
def start(self):
import gevent
from gevent.wsgi import WSGIServer
if os.name == 'posix':
signal.signal(signal.SIGQUIT, dumpstacks)
gevent.spawn(self.watch_parent)
self.httpd = WSGIServer((self.interface, self.port), self.app)
_logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
self.httpd.serve_forever()
def stop(self):
import gevent
self.httpd.stop()
gevent.shutdown()
def run(self):
self.start()
self.stop()
class PreforkServer(CommonServer):
""" Multiprocessing inspired by (g)unicorn.
PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
method between workers but we plan to replace it by a more intelligent
    dispatcher that will parse the first HTTP request line.
"""
def __init__(self, app):
# config
self.address = (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
self.population = config['workers']
self.timeout = config['limit_time_real']
self.limit_request = config['limit_request']
# working vars
self.beat = 4
self.app = app
self.pid = os.getpid()
self.socket = None
self.workers_http = {}
self.workers_cron = {}
self.workers = {}
self.generation = 0
self.queue = []
self.long_polling_pid = None
def pipe_new(self):
pipe = os.pipe()
for fd in pipe:
# non_blocking
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# close_on_exec
flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return pipe
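    # Write a single byte to the pipe to wake up the sleeping master process.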
def pipe_ping(self, pipe):
try:
os.write(pipe[1], '.')
except IOError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def signal_handler(self, sig, frame):
if len(self.queue) < 5 or sig == signal.SIGCHLD:
self.queue.append(sig)
self.pipe_ping(self.pipe)
else:
_logger.warn("Dropping signal: %s", sig)
def worker_spawn(self, klass, workers_registry):
self.generation += 1
worker = klass(self)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.workers[pid] = worker
workers_registry[pid] = worker
return worker
else:
worker.run()
sys.exit(0)
def long_polling_spawn(self):
nargs = stripped_sys_argv('--pidfile','--workers')
cmd = nargs[0]
cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
nargs[0] = cmd
popen = subprocess.Popen(nargs)
self.long_polling_pid = popen.pid
def worker_pop(self, pid):
if pid in self.workers:
_logger.debug("Worker (%s) unregistered",pid)
try:
self.workers_http.pop(pid,None)
self.workers_cron.pop(pid,None)
u = self.workers.pop(pid)
u.close()
except OSError:
return
def worker_kill(self, pid, sig):
try:
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
self.worker_pop(pid)
def process_signals(self):
while len(self.queue):
sig = self.queue.pop(0)
if sig in [signal.SIGINT,signal.SIGTERM]:
raise KeyboardInterrupt
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
raise KeyboardInterrupt
elif sig == signal.SIGQUIT:
# dump stacks on kill -3
self.dumpstacks()
elif sig == signal.SIGTTIN:
# increase number of workers
self.population += 1
elif sig == signal.SIGTTOU:
# decrease number of workers
self.population -= 1
def process_zombie(self):
# reap dead workers
while 1:
try:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if (status >> 8) == 3:
msg = "Critial worker error (%s)"
_logger.critical(msg, wpid)
raise Exception(msg % wpid)
self.worker_pop(wpid)
except OSError, e:
if e.errno == errno.ECHILD:
break
raise
def process_timeout(self):
now = time.time()
for (pid, worker) in self.workers.items():
if (worker.watchdog_timeout is not None) and \
(now - worker.watchdog_time >= worker.watchdog_timeout):
_logger.error("Worker (%s) timeout", pid)
self.worker_kill(pid, signal.SIGKILL)
def process_spawn(self):
while len(self.workers_http) < self.population:
self.worker_spawn(WorkerHTTP, self.workers_http)
while len(self.workers_cron) < config['max_cron_threads']:
self.worker_spawn(WorkerCron, self.workers_cron)
if not self.long_polling_pid:
self.long_polling_spawn()
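    # Wait on the workers' watchdog pipes and the wakeup pipe; any readable fd refreshes that worker's watchdog timestamp.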
def sleep(self):
try:
# map of fd -> worker
fds = dict([(w.watchdog_pipe[0],w) for k,w in self.workers.items()])
fd_in = fds.keys() + [self.pipe[0]]
# check for ping or internal wakeups
ready = select.select(fd_in, [], [], self.beat)
# update worker watchdogs
for fd in ready[0]:
if fd in fds:
fds[fd].watchdog_time = time.time()
try:
# empty pipe
while os.read(fd, 1):
pass
except OSError, e:
if e.errno not in [errno.EAGAIN]:
raise
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def start(self):
        # Empty the cursor pool: we don't want cursors to be shared among forked workers.
openerp.sql_db.close_all()
        # wakeup pipe: python doesn't throw EINTR when a syscall is interrupted
# by a signal simulating a pseudo SA_RESTART. We write to a pipe in the
# signal handler to overcome this behaviour
self.pipe = self.pipe_new()
# set signal handlers
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGTTIN, self.signal_handler)
signal.signal(signal.SIGTTOU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
# listen to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind(self.address)
self.socket.listen(8*self.population)
def stop(self, graceful=True):
if self.long_polling_pid is not None:
self.worker_kill(self.long_polling_pid, signal.SIGKILL) # FIXME make longpolling process handle SIGTERM correctly
self.long_polling_pid = None
if graceful:
_logger.info("Stopping gracefully")
limit = time.time() + self.timeout
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
while self.workers and time.time() < limit:
self.process_zombie()
time.sleep(0.1)
else:
_logger.info("Stopping forcefully")
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
self.socket.close()
def run(self):
self.start()
_logger.debug("Multiprocess starting")
while 1:
try:
#_logger.debug("Multiprocess beat (%s)",time.time())
self.process_signals()
self.process_zombie()
self.process_timeout()
self.process_spawn()
self.sleep()
except KeyboardInterrupt:
_logger.debug("Multiprocess clean stop")
self.stop()
break
except Exception,e:
_logger.exception(e)
self.stop(False)
sys.exit(-1)
class Worker(object):
""" Workers """
def __init__(self, multi):
self.multi = multi
self.watchdog_time = time.time()
self.watchdog_pipe = multi.pipe_new()
# Can be set to None if no watchdog is desired.
self.watchdog_timeout = multi.timeout
self.ppid = os.getpid()
self.pid = None
self.alive = True
# should we rename into lifetime ?
self.request_max = multi.limit_request
self.request_count = 0
def setproctitle(self, title=""):
setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))
def close(self):
os.close(self.watchdog_pipe[0])
os.close(self.watchdog_pipe[1])
def signal_handler(self, sig, frame):
self.alive = False
def sleep(self):
try:
ret = select.select([self.multi.socket], [], [], self.multi.beat)
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
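    # Enforce per-worker limits: parent change, request count, soft/hard memory and CPU time.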
def process_limit(self):
        # If our parent changed, commit suicide
if self.ppid != os.getppid():
_logger.info("Worker (%s) Parent changed", self.pid)
self.alive = False
# check for lifetime
if self.request_count >= self.request_max:
_logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
self.alive = False
# Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
rss, vms = memory_info(psutil.Process(os.getpid()))
if vms > config['limit_memory_soft']:
_logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
self.alive = False # Commit suicide after the request.
# VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))
# SIGXCPU (exceeded CPU time) signal handler will raise an exception.
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
def time_expired(n, stack):
            _logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
            # We don't commit suicide in that case
raise Exception('CPU time limit exceeded.')
signal.signal(signal.SIGXCPU, time_expired)
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
def process_work(self):
pass
def start(self):
self.pid = os.getpid()
self.setproctitle()
_logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
# Reseed the random number generator
random.seed()
        # Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
# reset blocking status
self.multi.socket.setblocking(0)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def stop(self):
pass
def run(self):
try:
self.start()
while self.alive:
self.process_limit()
self.multi.pipe_ping(self.watchdog_pipe)
self.sleep()
self.process_work()
_logger.info("Worker (%s) exiting. request_count: %s.", self.pid, self.request_count)
self.stop()
except Exception,e:
_logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
# should we use 3 to abort everything ?
sys.exit(1)
class WorkerHTTP(Worker):
""" HTTP Request workers """
def process_request(self, client, addr):
client.setblocking(1)
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(client, fcntl.F_SETFD, flags)
# do request using BaseWSGIServerNoBind monkey patched with socket
self.server.socket = client
# tolerate broken pipe when the http client closes the socket before
# receiving the full reply
try:
self.server.process_request(client,addr)
except IOError, e:
if e.errno != errno.EPIPE:
raise
self.request_count += 1
def process_work(self):
try:
client, addr = self.multi.socket.accept()
self.process_request(client, addr)
except socket.error, e:
if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
raise
def start(self):
Worker.start(self)
self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
""" Cron workers """
def __init__(self, multi):
super(WorkerCron, self).__init__(multi)
        # process_work() below processes a single database per call.
# The variable db_index is keeping track of the next database to
# process.
self.db_index = 0
def sleep(self):
# Really sleep once all the databases have been processed.
if self.db_index == 0:
interval = SLEEP_INTERVAL + self.pid % 10 # chorus effect
time.sleep(interval)
def _db_list(self):
if config['db_name']:
db_names = config['db_name'].split(',')
else:
db_names = openerp.service.db.exp_list(True)
return db_names
def process_work(self):
rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
_logger.debug("WorkerCron (%s) polling for jobs", self.pid)
db_names = self._db_list()
if len(db_names):
self.db_index = (self.db_index + 1) % len(db_names)
db_name = db_names[self.db_index]
self.setproctitle(db_name)
if rpc_request_flag:
start_time = time.time()
start_rss, start_vms = memory_info(psutil.Process(os.getpid()))
import openerp.addons.base as base
base.ir.ir_cron.ir_cron._acquire_job(db_name)
openerp.modules.registry.RegistryManager.delete(db_name)
            # don't keep cursors in multi-database mode
if len(db_names) > 1:
openerp.sql_db.close_db(db_name)
if rpc_request_flag:
end_time = time.time()
end_rss, end_vms = memory_info(psutil.Process(os.getpid()))
logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % (db_name, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
_logger.debug("WorkerCron (%s) %s", self.pid, logline)
self.request_count += 1
if self.request_count >= self.request_max and self.request_max < len(db_names):
_logger.error("There are more dabatases to process than allowed "
"by the `limit_request` configuration variable: %s more.",
len(db_names) - self.request_max)
else:
self.db_index = 0
def start(self):
os.nice(10) # mommy always told me to be nice with others...
Worker.start(self)
self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------
server = None
def load_server_wide_modules():
for m in openerp.conf.server_wide_modules:
try:
openerp.modules.module.load_openerp_module(m)
except Exception:
msg = ''
if m == 'web':
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
def _reexec(updated_modules=None):
"""reexecute openerp-server process with (nearly) the same arguments"""
if openerp.tools.osutil.is_running_as_nt_service():
subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
exe = os.path.basename(sys.executable)
args = stripped_sys_argv()
args += ["-u", ','.join(updated_modules)]
if not args or args[0] != exe:
args.insert(0, exe)
os.execv(sys.executable, args)
def start():
""" Start the openerp http server and cron processor.
"""
global server
load_server_wide_modules()
if config['workers']:
server = PreforkServer(openerp.service.wsgi_server.application)
elif openerp.evented:
server = GeventServer(openerp.service.wsgi_server.application)
else:
server = ThreadedServer(openerp.service.wsgi_server.application)
if config['auto_reload']:
autoreload = AutoReload(server)
autoreload.run()
server.run()
# like the legend of the phoenix, all ends with beginnings
if getattr(openerp, 'phoenix', False):
modules = []
if config['auto_reload']:
modules = autoreload.modules.keys()
_reexec(modules)
sys.exit(0)
def restart():
""" Restart the server
"""
if os.name == 'nt':
# run in a thread to let the current thread return response to the caller.
threading.Thread(target=_reexec).start()
else:
os.kill(server.pid, signal.SIGHUP)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| trabacus-softapps/openerp-8.0-cc | openerp/service/server.py | Python | agpl-3.0 | 32,015 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.lyrics import ICapLyrics, SongLyrics
from weboob.tools.backend import BaseBackend
from .browser import ParolesmaniaBrowser
from urllib import quote_plus
__all__ = ['ParolesmaniaBackend']
class ParolesmaniaBackend(BaseBackend, ICapLyrics):
NAME = 'parolesmania'
MAINTAINER = u'Julien Veyssier'
EMAIL = '[email protected]'
VERSION = '0.f'
DESCRIPTION = 'parolesmania lyrics website'
LICENSE = 'AGPLv3+'
BROWSER = ParolesmaniaBrowser
def create_default_browser(self):
return self.create_browser()
def get_lyrics(self, id):
return self.browser.get_lyrics(id)
def iter_lyrics(self, criteria, pattern):
return self.browser.iter_lyrics(criteria, quote_plus(pattern.encode('utf-8')))
def fill_songlyrics(self, songlyrics, fields):
if 'content' in fields:
sl = self.get_lyrics(songlyrics.id)
songlyrics.content = sl.content
return songlyrics
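    # weboob consults this OBJECTS mapping (through fillobj) to pick the
    # filler to call when a SongLyrics object is accessed with fields that
    # have not been fetched yet, e.g. 'content'.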
OBJECTS = {
SongLyrics: fill_songlyrics
}
| franek/weboob | modules/parolesmania/backend.py | Python | agpl-3.0 | 1,772 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2017 Jesús Espino <[email protected]>
# Copyright (C) 2014-2017 David Barragán <[email protected]>
# Copyright (C) 2014-2017 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
from django.conf import settings
try:
from settings import celery_local as celery_settings
except ImportError:
from settings import celery as celery_settings
app = Celery('taiga')
app.config_from_object(celery_settings)
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
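# Example usage (hypothetical, not part of this module): tasks can be defined
# against this app instance and enqueued from Django code, e.g.
#
#   from taiga.celery import app
#
#   @app.task
#   def ping():
#       return "pong"
#
#   ping.delay()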
| dayatz/taiga-back | taiga/celery.py | Python | agpl-3.0 | 1,321 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import numpy
from union_pooling.union_pooler import UnionPooler
REAL_DTYPE = numpy.float32
class UnionPoolerTest(unittest.TestCase):
def setUp(self):
self.unionPooler = UnionPooler(inputDimensions=5,
columnDimensions=5,
potentialRadius=16,
potentialPct=0.9,
globalInhibition=True,
localAreaDensity=-1.0,
numActiveColumnsPerInhArea=2.0,
stimulusThreshold=2,
synPermInactiveDec=0.01,
synPermActiveInc=0.03,
synPermConnected=0.3,
minPctOverlapDutyCycle=0.001,
minPctActiveDutyCycle=0.001,
dutyCyclePeriod=1000,
maxBoost=1.0,
seed=42,
spVerbosity=0,
wrapAround=True,
# union_pooler.py parameters
activeOverlapWeight=1.0,
predictedActiveOverlapWeight=10.0,
maxUnionActivity=0.20,
exciteFunctionType='Fixed',
decayFunctionType='NoDecay')
def testDecayPoolingActivationDefaultDecayRate(self):
self.unionPooler._poolingActivation = numpy.array([0, 1, 2, 3, 4],
dtype=REAL_DTYPE)
expected = numpy.array([0, 1, 2, 3, 4], dtype=REAL_DTYPE)
result = self.unionPooler._decayPoolingActivation()
print result
self.assertTrue(numpy.array_equal(expected, result))
def testAddToPoolingActivation(self):
activeCells = numpy.array([1, 3, 4])
overlaps = numpy.array([0.123, 0.0, 0.0, 0.456, 0.789])
expected = [0.0, 10.0, 0.0, 10.0, 10.0]
result = self.unionPooler._addToPoolingActivation(activeCells, overlaps)
self.assertTrue(numpy.allclose(expected, result))
def testAddToPoolingActivationExistingActivation(self):
self.unionPooler._poolingActivation = numpy.array([0, 1, 2, 3, 4],
dtype=REAL_DTYPE)
activeCells = numpy.array([1, 3, 4])
# [ 0, 1, 0, 1, 1]
overlaps = numpy.array([0.123, 0.0, 0.0, 0.456, 0.789])
expected = [0.0, 11.0, 2.0, 13, 14]
result = self.unionPooler._addToPoolingActivation(activeCells, overlaps)
self.assertTrue(numpy.allclose(expected, result))
def testGetMostActiveCellsUnionSizeZero(self):
self.unionPooler._poolingActivation = numpy.array([0, 1, 2, 3, 4],
dtype=REAL_DTYPE)
self.unionPooler._maxUnionCells = 0
result = self.unionPooler._getMostActiveCells()
self.assertEquals(len(result), 0)
def testGetMostActiveCellsRegular(self):
self.unionPooler._poolingActivation = numpy.array([0, 1, 2, 3, 4],
dtype=REAL_DTYPE)
result = self.unionPooler._getMostActiveCells()
self.assertEquals(len(result), 1)
self.assertEquals(result[0], 4)
def testGetMostActiveCellsIgnoreZeros(self):
self.unionPooler._poolingActivation = numpy.array([0, 0, 0, 3, 4],
dtype=REAL_DTYPE)
self.unionPooler._maxUnionCells = 3
result = self.unionPooler._getMostActiveCells()
self.assertEquals(len(result), 2)
self.assertEquals(result[0], 3)
self.assertEquals(result[1], 4)
if __name__ == "__main__":
unittest.main()
| chanceraine/nupic.research | tests/union_pooling/union_pooler_test.py | Python | agpl-3.0 | 4,882 |
# Aspect ratio
# create mesh
from SMESH_mechanic import *
# get faces with aspect ratio > 1.5
filter = smesh.GetFilter(SMESH.FACE, SMESH.FT_AspectRatio, SMESH.FT_MoreThan, 1.5)
ids = mesh.GetIdsFromFilter(filter)
print "Number of faces with aspect ratio > 1.5:", len(ids)
# copy the faces with aspect ratio > 1.5 to another mesh;
# this demonstrates that a filter can be used where usually a group or submesh is acceptable
filter.SetMesh( mesh.GetMesh() )
mesh2 = smesh.CopyMesh( filter, "AR > 1.5" )
print "Number of copied faces with aspect ratio > 1.5:", mesh2.NbFaces()
# create a Group of faces with Aspect Ratio < 1.5
group = mesh.MakeGroup("AR < 1.5", SMESH.FACE, SMESH.FT_AspectRatio, '<', 1.5)
print "Number of faces with aspect ratio < 1.5:", group.Size()
# combine several criteria to create a group of only triangular faces with aspect ratio < 1.5;
# note that the contents of a GroupOnFilter are dynamically updated as the mesh changes
crit = [ smesh.GetCriterion( SMESH.FACE, SMESH.FT_AspectRatio, '<', 1.5, BinaryOp=SMESH.FT_LogicalAND ),
smesh.GetCriterion( SMESH.FACE, SMESH.FT_ElemGeomType,'=', SMESH.Geom_TRIANGLE ) ]
filter = smesh.GetFilterFromCriteria( crit )
triaGroup = mesh.GroupOnFilter( SMESH.FACE, "Tria AR < 1.5", filter )
print "Number of triangles with aspect ratio < 1.5:", triaGroup.Size()
| FedoraScientific/salome-smesh | doc/salome/examples/filters_ex01.py | Python | lgpl-2.1 | 1,332 |
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Definition of Windows structures"
from __future__ import absolute_import
__revision__ = "$Revision: 560 $"
from .win32defines import LF_FACESIZE, NMTTDISPINFOW_V1_SIZE, HDITEMW_V1_SIZE
import sys
import ctypes
from ctypes import \
c_int, c_uint, c_long, c_ulong, c_void_p, c_wchar, c_char, \
c_ubyte, c_ushort, c_wchar_p, \
POINTER, sizeof, alignment, Union, c_ulonglong, c_longlong, c_size_t
def is_x64():
return sizeof(c_size_t) == 8
class Structure(ctypes.Structure):
"Override the Structure class from ctypes to add printing and comparison"
#----------------------------------------------------------------
def __str__(self):
"""Print out the fields of the ctypes Structure
fields in exceptList will not be printed"""
lines = []
for f in self._fields_:
name = f[0]
lines.append("%20s\t%s"% (name, getattr(self, name)))
return "\n".join(lines)
#----------------------------------------------------------------
def __eq__(self, other_struct):
"return true if the two structures have the same coordinates"
if isinstance(other_struct, ctypes.Structure):
try:
# pretend they are two structures - check that they both
# have the same value for all fields
are_equal = True
for field in self._fields_:
name = field[0]
if getattr(self, name) != getattr(other_struct, name):
are_equal = False
break
return are_equal
except AttributeError:
return False
if isinstance(other_struct, (list, tuple)):
# Now try to see if we have been passed in a list or tuple
try:
are_equal = True
for i, field in enumerate(self._fields_):
name = field[0]
if getattr(self, name) != other_struct[i]:
are_equal = False
break
return are_equal
except:
return False
return False
##====================================================================
#def PrintCtypesStruct(struct, exceptList = []):
# """Print out the fields of the ctypes Structure
#
# fields in exceptList will not be printed"""
# for f in struct._fields_:
# name = f[0]
# if name in exceptList:
# continue
# print("%20s "% name, getattr(struct, name))
# allow ctypes structures to be pickled
# set struct.__reduce__ = _reduce
# e.g. RECT.__reduce__ = _reduce
def _construct(typ, buf):
#print "construct", (typ, buf)
obj = typ.__new__(typ)
ctypes.memmove(ctypes.addressof(obj), buf, len(buf))
return obj
def _reduce(self):
return (_construct, (self.__class__, str(buffer(self))))
#LPTTTOOLINFOW = POINTER(tagTOOLINFOW)
#PTOOLINFOW = POINTER(tagTOOLINFOW)
BOOL = c_int
BYTE = c_ubyte
CHAR = c_char
DWORD = c_ulong
HANDLE = c_void_p
HBITMAP = c_long
LONG = c_long
LPVOID = c_void_p
PVOID = c_void_p
UINT = c_uint
WCHAR = c_wchar
WORD = c_ushort
COLORREF = DWORD
LPBYTE = POINTER(BYTE)
LPWSTR = c_size_t #POINTER(WCHAR)
DWORD_PTR = UINT_PTR = ULONG_PTR = c_size_t
if is_x64():
INT_PTR = LONG_PTR = c_longlong
else:
INT_PTR = LONG_PTR = c_long
HBITMAP = LONG_PTR #LONG
HINSTANCE = LONG_PTR #LONG
HMENU = LONG_PTR #LONG
HBRUSH = LONG_PTR #LONG
HTREEITEM = LONG_PTR #LONG
HWND = LONG_PTR #LONG
LPARAM = LONG_PTR
WPARAM = UINT_PTR
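# On 64-bit Python these pointer-sized aliases are 8 bytes wide, on 32-bit
# Python 4 bytes; the sizeof()/alignment() asserts below cover both layouts.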
class POINT(Structure):
_fields_ = [
# C:/PROGRA~1/MIAF9D~1/VC98/Include/windef.h 307
('x', LONG),
('y', LONG),
]
assert sizeof(POINT) == 8, sizeof(POINT)
assert alignment(POINT) == 4, alignment(POINT)
#====================================================================
class RECT(Structure):
"Wrap the RECT structure and add extra functionality"
_fields_ = [
# C:/PROGRA~1/MIAF9D~1/VC98/Include/windef.h 287
('left', LONG),
('top', LONG),
('right', LONG),
('bottom', LONG),
]
#----------------------------------------------------------------
def __init__(self, otherRect_or_left = 0, top = 0, right = 0, bottom = 0):
"""Provide a constructor for RECT structures
A RECT can be constructed by:
- Another RECT (each value will be copied)
- Values for left, top, right and bottom
e.g. my_rect = RECT(otherRect)
or my_rect = RECT(10, 20, 34, 100)
"""
if isinstance(otherRect_or_left, RECT):
self.left = otherRect_or_left.left
self.right = otherRect_or_left.right
self.top = otherRect_or_left.top
self.bottom = otherRect_or_left.bottom
else:
#if not isinstance(otherRect_or_left, (int, long)):
# print type(self), type(otherRect_or_left), otherRect_or_left
if sys.version[0] == '3':
self.left = otherRect_or_left
self.right = right
self.top = top
self.bottom = bottom
else:
self.left = long(otherRect_or_left)
self.right = long(right)
self.top = long(top)
self.bottom = long(bottom)
# #----------------------------------------------------------------
# def __eq__(self, otherRect):
# "return true if the two rectangles have the same coordinates"
#
# try:
# return \
# self.left == otherRect.left and \
# self.top == otherRect.top and \
# self.right == otherRect.right and \
# self.bottom == otherRect.bottom
# except AttributeError:
# return False
#----------------------------------------------------------------
def __str__(self):
"Return a string representation of the RECT"
return "(L%d, T%d, R%d, B%d)" % (
self.left, self.top, self.right, self.bottom)
#----------------------------------------------------------------
def __repr__(self):
"Return some representation of the RECT"
return "<RECT L%d, T%d, R%d, B%d>" % (
self.left, self.top, self.right, self.bottom)
#----------------------------------------------------------------
def __sub__(self, other):
"Return a new rectangle which is offset from the one passed in"
newRect = RECT()
newRect.left = self.left - other.left
newRect.right = self.right - other.left
newRect.top = self.top - other.top
newRect.bottom = self.bottom - other.top
return newRect
#----------------------------------------------------------------
def __add__(self, other):
"Allow two rects to be added using +"
newRect = RECT()
newRect.left = self.left + other.left
newRect.right = self.right + other.left
newRect.top = self.top + other.top
newRect.bottom = self.bottom + other.top
return newRect
#----------------------------------------------------------------
def width(self):
"Return the width of the rect"
return self.right - self.left
#----------------------------------------------------------------
def height(self):
"Return the height of the rect"
return self.bottom - self.top
#----------------------------------------------------------------
def mid_point(self):
"Return a POINT structure representing the mid point"
pt = POINT()
pt.x = int(self.left + self.width()/2)
pt.y = int(self.top + self.height()/2)
return pt
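    # Illustrative example: RECT(10, 20, 30, 60) has width() == 20,
    # height() == 40, and its mid_point() is the point (20, 40).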
#def __hash__(self):
# return hash (self.left, self.top, self.right, self.bottom)
RECT.__reduce__ = _reduce
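# With __reduce__ hooked up, RECT instances survive a pickle round trip, e.g.
# (illustrative): pickle.loads(pickle.dumps(RECT(1, 2, 3, 4))) compares equal
# to the original RECT thanks to the field-wise __eq__ above.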
assert sizeof(RECT) == 16, sizeof(RECT)
assert alignment(RECT) == 4, alignment(RECT)
class LVCOLUMNW(Structure):
_pack_ = 1
_fields_ = [
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 2982
('mask', UINT),
('fmt', c_int),
('cx', c_int),
('pszText', c_long), #LPWSTR),
('cchTextMax', c_int),
('iSubItem', c_int),
('iImage', c_int),
('iOrder', c_int),
]
class LVITEMW(Structure):
_pack_ = 1
_fields_ = [
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 2679
('mask', UINT),
('iItem', c_int),
('iSubItem', c_int),
('state', UINT),
('stateMask', UINT),
('pszText', c_long), #LPWSTR),
('cchTextMax', c_int),
('iImage', c_int),
('lParam', LPARAM),
('iIndent', c_int),
]
if is_x64():
assert sizeof(LVITEMW) == 44, sizeof(LVITEMW)
assert alignment(LVITEMW) == 1, alignment(LVITEMW)
else:
assert sizeof(LVITEMW) == 40, sizeof(LVITEMW)
assert alignment(LVITEMW) == 1, alignment(LVITEMW)
class TVITEMW(Structure):
#_pack_ = 1
_fields_ = [
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 3755
('mask', UINT),
('hItem', HTREEITEM),
('state', UINT),
('stateMask', UINT),
('pszText', LPWSTR), #, c_long),
('cchTextMax', c_int),
('iImage', c_int),
('iSelectedImage', c_int),
('cChildren', c_int),
('lParam', LPARAM),
]
if is_x64():
assert sizeof(TVITEMW) == 56, sizeof(TVITEMW)
assert alignment(TVITEMW) == 8, alignment(TVITEMW)
else:
assert sizeof(TVITEMW) == 40, sizeof(TVITEMW)
assert alignment(TVITEMW) == 4, alignment(TVITEMW)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 2225
class NMHDR(Structure):
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 2225
('hwndFrom', HWND),
('idFrom', UINT_PTR),
('code', UINT),
]
if is_x64():
assert sizeof(NMHDR) == 24, sizeof(NMHDR)
assert alignment(NMHDR) == 8, alignment(NMHDR)
else:
assert sizeof(NMHDR) == 12, sizeof(NMHDR)
assert alignment(NMHDR) == 4, alignment(NMHDR)
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4275
class NMTVDISPINFOW(Structure):
_pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4275
('hdr', NMHDR),
('item', TVITEMW),
]
#assert sizeof(NMTVDISPINFOW) == 52, sizeof(NMTVDISPINFOW)
assert alignment(NMTVDISPINFOW) == 1, alignment(NMTVDISPINFOW)
class LOGFONTW(Structure):
_fields_ = [
# C:/PROGRA~1/MIAF9D~1/VC98/Include/wingdi.h 1090
('lfHeight', LONG),
('lfWidth', LONG),
('lfEscapement', LONG),
('lfOrientation', LONG),
('lfWeight', LONG),
('lfItalic', BYTE),
('lfUnderline', BYTE),
('lfStrikeOut', BYTE),
('lfCharSet', BYTE),
('lfOutPrecision', BYTE),
('lfClipPrecision', BYTE),
('lfQuality', BYTE),
('lfPitchAndFamily', BYTE),
('lfFaceName', WCHAR * LF_FACESIZE),
]
#----------------------------------------------------------------
def __str__(self):
return "('%s' %d)" % (self.lfFaceName, self.lfHeight)
#----------------------------------------------------------------
def __repr__(self):
return "<LOGFONTW '%s' %d>" % (self.lfFaceName, self.lfHeight)
LOGFONTW.__reduce__ = _reduce
assert sizeof(LOGFONTW) == 92, sizeof(LOGFONTW)
assert alignment(LOGFONTW) == 4, alignment(LOGFONTW)
class TEXTMETRICW(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MIAF9D~1/VC98/Include/wingdi.h 878
('tmHeight', LONG),
('tmAscent', LONG),
('tmDescent', LONG),
('tmInternalLeading', LONG),
('tmExternalLeading', LONG),
('tmAveCharWidth', LONG),
('tmMaxCharWidth', LONG),
('tmWeight', LONG),
('tmOverhang', LONG),
('tmDigitizedAspectX', LONG),
('tmDigitizedAspectY', LONG),
('tmFirstChar', WCHAR),
('tmLastChar', WCHAR),
('tmDefaultChar', WCHAR),
('tmBreakChar', WCHAR),
('tmItalic', BYTE),
('tmUnderlined', BYTE),
('tmStruckOut', BYTE),
('tmPitchAndFamily', BYTE),
('tmCharSet', BYTE),
]
assert sizeof(TEXTMETRICW) == 58, sizeof(TEXTMETRICW)
assert alignment(TEXTMETRICW) == 2, alignment(TEXTMETRICW)
class NONCLIENTMETRICSW(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MIAF9D~1/VC98/Include/winuser.h 8767
('cbSize', UINT),
('iBorderWidth', c_int),
('iScrollWidth', c_int),
('iScrollHeight', c_int),
('iCaptionWidth', c_int),
('iCaptionHeight', c_int),
('lfCaptionFont', LOGFONTW),
('iSmCaptionWidth', c_int),
('iSmCaptionHeight', c_int),
('lfSmCaptionFont', LOGFONTW),
('iMenuWidth', c_int),
('iMenuHeight', c_int),
('lfMenuFont', LOGFONTW),
('lfStatusFont', LOGFONTW),
('lfMessageFont', LOGFONTW),
]
assert sizeof(NONCLIENTMETRICSW) == 500, sizeof(NONCLIENTMETRICSW)
assert alignment(NONCLIENTMETRICSW) == 2, alignment(NONCLIENTMETRICSW)
# C:/PROGRA~1/MIAF9D~1/VC98/Include/wingdi.h 1025
class LOGBRUSH(Structure):
_fields_ = [
# C:/PROGRA~1/MIAF9D~1/VC98/Include/wingdi.h 1025
('lbStyle', UINT),
('lbColor', COLORREF),
('lbHatch', LONG),
]
assert sizeof(LOGBRUSH) == 12, sizeof(LOGBRUSH)
assert alignment(LOGBRUSH) == 4, alignment(LOGBRUSH)
# C:/PROGRA~1/MIAF9D~1/VC98/Include/winuser.h 5147
class MENUITEMINFOW(Structure):
_fields_ = [
# C:/PROGRA~1/MIAF9D~1/VC98/Include/winuser.h 5147
('cbSize', UINT),
('fMask', UINT),
('fType', UINT),
('fState', UINT),
('wID', UINT),
('hSubMenu', HMENU),
('hbmpChecked', HBITMAP),
('hbmpUnchecked', HBITMAP),
('dwItemData', ULONG_PTR), #DWORD),
('dwTypeData', LPWSTR),
('cch', UINT),
('hbmpItem', HBITMAP),
]
if is_x64():
assert sizeof(MENUITEMINFOW) == 80, sizeof(MENUITEMINFOW)
assert alignment(MENUITEMINFOW) == 8, alignment(MENUITEMINFOW)
else:
assert sizeof(MENUITEMINFOW) == 48, sizeof(MENUITEMINFOW)
assert alignment(MENUITEMINFOW) == 4, alignment(MENUITEMINFOW)
class MENUBARINFO(Structure):
_fields_ = [
('cbSize', DWORD),
('rcBar', RECT), # rect of bar, popup, item
('hMenu', HMENU), # real menu handle of bar, popup
('hwndMenu', HWND), # hwnd of item submenu if one
('fBarFocused', BOOL, 1), # bar, popup has the focus
('fFocused', BOOL, 1), # item has the focus
]
class MSG(Structure):
_fields_ = [
# C:/PROGRA~1/MIAF9D~1/VC98/Include/winuser.h 1226
('hwnd', HWND),
('message', UINT),
('wParam', WPARAM),
('lParam', LPARAM),
('time', DWORD),
('pt', POINT),
]
if is_x64():
assert sizeof(MSG) == 48, sizeof(MSG)
assert alignment(MSG) == 8, alignment(MSG)
else:
assert sizeof(MSG) == 28, sizeof(MSG)
assert alignment(MSG) == 4, alignment(MSG)
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 1865
class TOOLINFOW(Structure):
_fields_ = [
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 1865
('cbSize', UINT),
('uFlags', UINT),
('hwnd', HWND),
('uId', UINT_PTR),
('rect', RECT),
('hinst', HINSTANCE),
('lpszText', LPWSTR), #c_long),
('lParam', LPARAM),
('lpReserved', LPVOID)
]
if is_x64():
assert sizeof(TOOLINFOW) == 72, sizeof(TOOLINFOW)
assert alignment(TOOLINFOW) == 8, alignment(TOOLINFOW)
else:
assert sizeof(TOOLINFOW) == 48, sizeof(TOOLINFOW)
assert alignment(TOOLINFOW) == 4, alignment(TOOLINFOW)
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 2068
class NMTTDISPINFOW(Structure):
_pack_ = 1
_fields_ = [
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 2068
('hdr', NMHDR),
('lpszText', LPWSTR),
('szText', WCHAR * 80),
('hinst', HINSTANCE),
('uFlags', UINT),
('lParam', LPARAM),
]
if is_x64():
    assert sizeof(NMTTDISPINFOW) == 212, sizeof(NMTTDISPINFOW)
else:
assert sizeof(NMTTDISPINFOW) == 188, sizeof(NMTTDISPINFOW)
assert alignment(NMTTDISPINFOW) == 1, alignment(NMTTDISPINFOW)
class HDITEMW(Structure):
_fields_ = [
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 617
('mask', UINT),
('cxy', c_int),
('pszText', LPWSTR), #c_long),
('hbm', HBITMAP),
('cchTextMax', c_int),
('fmt', c_int),
('lParam', LPARAM),
('iImage', c_int),
('iOrder', c_int),
('type', UINT),
('pvFilter', LPVOID),
('state', UINT)
]
if is_x64():
assert sizeof(HDITEMW) == 72, sizeof(HDITEMW)
assert alignment(HDITEMW) == 8, alignment(HDITEMW)
else:
assert sizeof(HDITEMW) == 48, sizeof(HDITEMW)
assert alignment(HDITEMW) == 4, alignment(HDITEMW)
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 4456
class COMBOBOXEXITEMW(Structure):
#_pack_ = 1
_fields_ = [
# C:/_tools/Python24/Lib/site-packages/ctypes/wrap/test/commctrl.h 4456
('mask', UINT),
('iItem', INT_PTR),
('pszText', LPWSTR), #c_long),
('cchTextMax', c_int),
('iImage', c_int),
('iSelectedImage', c_int),
('iOverlay', c_int),
('iIndent', c_int),
('lParam', LPARAM),
]
if is_x64():
assert sizeof(COMBOBOXEXITEMW) == 56, sizeof(COMBOBOXEXITEMW)
assert alignment(COMBOBOXEXITEMW) == 8, alignment(COMBOBOXEXITEMW)
else:
assert sizeof(COMBOBOXEXITEMW) == 36, sizeof(COMBOBOXEXITEMW)
assert alignment(COMBOBOXEXITEMW) == 4, alignment(COMBOBOXEXITEMW)
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4757
class TCITEMHEADERW(Structure):
#_pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4757
('mask', UINT),
('lpReserved1', UINT),
('lpReserved2', UINT),
('pszText', LPWSTR),
('cchTextMax', c_int),
('iImage', c_int),
]
if is_x64():
assert sizeof(TCITEMHEADERW) == 32, sizeof(TCITEMHEADERW)
assert alignment(TCITEMHEADERW) == 8, alignment(TCITEMHEADERW)
else:
assert sizeof(TCITEMHEADERW) == 24, sizeof(TCITEMHEADERW)
assert alignment(TCITEMHEADERW) == 4, alignment(TCITEMHEADERW)
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4804
class TCITEMW(Structure):
#if is_x64():
# _pack_ = 8
#else:
# _pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4804
('mask', UINT),
('dwState', DWORD),
('dwStateMask', DWORD),
('pszText', LPWSTR), #c_long), #LPWSTR),
('cchTextMax', c_int),
('iImage', c_int),
('lParam', LPARAM),
]
if is_x64():
assert sizeof(TCITEMW) == 40, sizeof(TCITEMW)
assert alignment(TCITEMW) == 8, alignment(TCITEMW)
else:
assert sizeof(TCITEMW) == 28, sizeof(TCITEMW)
assert alignment(TCITEMW) == 4, alignment(TCITEMW)
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 1308
class TBBUTTONINFOW(Structure):
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 1308
('cbSize', UINT),
('dwMask', DWORD),
('idCommand', c_int),
('iImage', c_int),
('fsState', BYTE),
('fsStyle', BYTE),
('cx', WORD),
('lParam', POINTER(DWORD)),
('pszText', LPWSTR),
('cchText', c_int),
]
if is_x64():
assert sizeof(TBBUTTONINFOW) == 48, sizeof(TBBUTTONINFOW)
assert alignment(TBBUTTONINFOW) == 8, alignment(TBBUTTONINFOW)
else:
assert sizeof(TBBUTTONINFOW) == 32, sizeof(TBBUTTONINFOW)
assert alignment(TBBUTTONINFOW) == 4, alignment(TBBUTTONINFOW)
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 953
class TBBUTTON(Structure):
#_pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 953
('iBitmap', c_int),
('idCommand', c_int),
('fsState', BYTE),
('fsStyle', BYTE),
('bReserved', BYTE * 2),
('dwData', DWORD_PTR),
('iString', INT_PTR),
]
if is_x64():
assert sizeof(TBBUTTON) == 32, sizeof(TBBUTTON)
assert alignment(TBBUTTON) == 8, alignment(TBBUTTON)
else:
assert sizeof(TBBUTTON) == 20, sizeof(TBBUTTON)
assert alignment(TBBUTTON) == 4, alignment(TBBUTTON)
class REBARBANDINFOW(Structure):
#_pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 1636
('cbSize', UINT),
('fMask', UINT),
('fStyle', UINT),
('clrFore', COLORREF),
('clrBack', COLORREF),
('lpText', LPWSTR),
('cch', UINT),
('iImage', c_int),
('hwndChild', HWND),
('cxMinChild', UINT),
('cyMinChild', UINT),
('cx', UINT),
('hbmBack', HBITMAP),
('wID', UINT),
('cyChild', UINT),
('cyMaxChild', UINT),
('cyIntegral', UINT),
('cxIdeal', UINT),
('lParam', LPARAM),
('cxHeader', UINT),
#('rcChevronLocation', RECT), # the rect is in client co-ord wrt hwndChild
#('uChevronState', UINT)
]
if is_x64():
assert sizeof(REBARBANDINFOW) == 112, sizeof(REBARBANDINFOW) #128
assert alignment(REBARBANDINFOW) == 8, alignment(REBARBANDINFOW)
else:
assert sizeof(REBARBANDINFOW) == 80, sizeof(REBARBANDINFOW) #100
assert alignment(REBARBANDINFOW) == 4, alignment(REBARBANDINFOW)
# C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 223
class SECURITY_ATTRIBUTES(Structure):
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 223
('nLength', DWORD),
('lpSecurityDescriptor', LPVOID),
('bInheritHandle', BOOL),
]
assert sizeof(SECURITY_ATTRIBUTES) == 12 or sizeof(SECURITY_ATTRIBUTES) == 24, sizeof(SECURITY_ATTRIBUTES)
assert alignment(SECURITY_ATTRIBUTES) == 4 or alignment(SECURITY_ATTRIBUTES) == 8, alignment(SECURITY_ATTRIBUTES)
# C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 3794
class STARTUPINFOW(Structure):
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 3794
('cb', DWORD),
('lpReserved', LPWSTR),
('lpDesktop', LPWSTR),
('lpTitle', LPWSTR),
('dwX', DWORD),
('dwY', DWORD),
('dwXSize', DWORD),
('dwYSize', DWORD),
('dwXCountChars', DWORD),
('dwYCountChars', DWORD),
('dwFillAttribute', DWORD),
('dwFlags', DWORD),
('wShowWindow', WORD),
('cbReserved2', WORD),
('lpReserved2', LPBYTE),
('hStdInput', HANDLE),
('hStdOutput', HANDLE),
('hStdError', HANDLE),
]
assert sizeof(STARTUPINFOW) == 68 or sizeof(STARTUPINFOW) == 104, sizeof(STARTUPINFOW)
assert alignment(STARTUPINFOW) == 4 or alignment(STARTUPINFOW) == 8, alignment(STARTUPINFOW)
# C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 229
class PROCESS_INFORMATION(Structure):
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winbase.h 229
('hProcess', HANDLE),
('hThread', HANDLE),
('dwProcessId', DWORD),
('dwThreadId', DWORD),
]
assert sizeof(PROCESS_INFORMATION) == 16 or sizeof(PROCESS_INFORMATION) == 24, sizeof(PROCESS_INFORMATION)
assert alignment(PROCESS_INFORMATION) == 4 or alignment(PROCESS_INFORMATION) == 8, alignment(PROCESS_INFORMATION)
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 3417
class NMLISTVIEW(Structure):
#_pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 3417
('hdr', NMHDR),
('iItem', c_int),
('iSubItem', c_int),
('uNewState', UINT),
('uOldState', UINT),
('uChanged', UINT),
('ptAction', POINT),
('lParam', LPARAM),
]
if is_x64():
assert sizeof(NMLISTVIEW) == 64, sizeof(NMLISTVIEW)
assert alignment(NMLISTVIEW) == 8, alignment(NMLISTVIEW)
else:
assert sizeof(NMLISTVIEW) == 44, sizeof(NMLISTVIEW)
assert alignment(NMLISTVIEW) == 4, alignment(NMLISTVIEW)
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 235
class NMMOUSE(Structure):
#_pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 235
('hdr', NMHDR),
('dwItemSpec', DWORD_PTR),
('dwItemData', DWORD_PTR),
('pt', POINT),
('dwHitInfo', LPARAM),
]
if is_x64():
assert sizeof(NMMOUSE) == 56, sizeof(NMMOUSE)
assert alignment(NMMOUSE) == 8, alignment(NMMOUSE)
else:
assert sizeof(NMMOUSE) == 32, sizeof(NMMOUSE)
assert alignment(NMMOUSE) == 4, alignment(NMMOUSE)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4283
class MOUSEINPUT(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4283
('dx', LONG),
('dy', LONG),
('mouseData', DWORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', DWORD),
]
assert sizeof(MOUSEINPUT) == 24, sizeof(MOUSEINPUT)
assert alignment(MOUSEINPUT) == 2, alignment(MOUSEINPUT)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4292
class KEYBDINPUT(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4292
('wVk', WORD),
('wScan', WORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', DWORD),
]
assert sizeof(KEYBDINPUT) == 16, sizeof(KEYBDINPUT)
assert alignment(KEYBDINPUT) == 2, alignment(KEYBDINPUT)
class HARDWAREINPUT(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4300
('uMsg', DWORD),
('wParamL', WORD),
('wParamH', WORD),
]
assert sizeof(HARDWAREINPUT) == 8, sizeof(HARDWAREINPUT)
assert alignment(HARDWAREINPUT) == 2, alignment(HARDWAREINPUT)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4314
class UNION_INPUT_STRUCTS(Union):
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4314
('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT),
]
assert sizeof(UNION_INPUT_STRUCTS) == 24, sizeof(UNION_INPUT_STRUCTS)
assert alignment(UNION_INPUT_STRUCTS) == 2, alignment(UNION_INPUT_STRUCTS)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4310
class INPUT(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4310
('type', DWORD),
# Unnamed field renamed to '_'
('_', UNION_INPUT_STRUCTS),
]
assert sizeof(INPUT) == 28, sizeof(INPUT)
assert alignment(INPUT) == 2, alignment(INPUT)
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 2415
class NMUPDOWN(Structure):
_pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 2415
('hdr', NMHDR),
('iPos', c_int),
('iDelta', c_int),
]
if is_x64():
assert sizeof(NMUPDOWN) == 32, sizeof(NMUPDOWN)
assert alignment(NMUPDOWN) == 1, alignment(NMUPDOWN)
else:
assert sizeof(NMUPDOWN) == 20, sizeof(NMUPDOWN)
assert alignment(NMUPDOWN) == 1, alignment(NMUPDOWN)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 9821
class GUITHREADINFO(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 9821
('cbSize', DWORD),
('flags', DWORD),
('hwndActive', HWND),
('hwndFocus', HWND),
('hwndCapture', HWND),
('hwndMenuOwner', HWND),
('hwndMoveSize', HWND),
('hwndCaret', HWND),
('rcCaret', RECT),
]
if is_x64():
assert sizeof(GUITHREADINFO) == 72, sizeof(GUITHREADINFO)
assert alignment(GUITHREADINFO) == 2, alignment(GUITHREADINFO)
else:
assert sizeof(GUITHREADINFO) == 48, sizeof(GUITHREADINFO)
assert alignment(GUITHREADINFO) == 2, alignment(GUITHREADINFO)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 5043
class MENUINFO(Structure):
#_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 5043
('cbSize', DWORD),
('fMask', DWORD),
('dwStyle', DWORD),
('cyMax', UINT),
('hbrBack', HBRUSH),
('dwContextHelpID', DWORD),
('dwMenuData', ULONG_PTR),
]
if is_x64():
assert sizeof(MENUINFO) == 40, sizeof(MENUINFO)
assert alignment(MENUINFO) == 8, alignment(MENUINFO)
else:
assert sizeof(MENUINFO) == 28, sizeof(MENUINFO)
assert alignment(MENUINFO) == 4, alignment(MENUINFO)
NMTTDISPINFOW_V1_SIZE = 184 # Variable c_uint
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 2066
class NMTTDISPINFOW(Structure):
#_pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 2066
('hdr', NMHDR),
('lpszText', LPWSTR),
('szText', WCHAR * 80),
('hinst', HINSTANCE),
('uFlags', UINT),
('lParam', LPARAM),
]
if is_x64():
assert sizeof(NMTTDISPINFOW) == 216, sizeof(NMTTDISPINFOW)
assert alignment(NMTTDISPINFOW) == 8, alignment(NMTTDISPINFOW)
else:
assert sizeof(NMTTDISPINFOW) == 188, sizeof(NMTTDISPINFOW)
assert alignment(NMTTDISPINFOW) == 4, alignment(NMTTDISPINFOW)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 2208
class WINDOWPLACEMENT(Structure):
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 2208
('length', UINT),
('flags', UINT),
('showCmd', UINT),
('ptMinPosition', POINT),
('ptMaxPosition', POINT),
('rcNormalPosition', RECT),
]
assert sizeof(WINDOWPLACEMENT) == 44, sizeof(WINDOWPLACEMENT)
assert alignment(WINDOWPLACEMENT) == 4, alignment(WINDOWPLACEMENT)
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4052
class TVHITTESTINFO(Structure):
#_pack_ = 1
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/commctrl.h 4052
('pt', POINT),
('flags', UINT),
('hItem', HTREEITEM),
]
if is_x64():
assert sizeof(TVHITTESTINFO) == 24, sizeof(TVHITTESTINFO)
assert alignment(TVHITTESTINFO) == 8, alignment(TVHITTESTINFO)
else:
assert sizeof(TVHITTESTINFO) == 16, sizeof(TVHITTESTINFO)
assert alignment(TVHITTESTINFO) == 4, alignment(TVHITTESTINFO)
class LOGFONTA(Structure):
    _fields_ = [
        # ANSI variant of LOGFONTW above (CHAR face name instead of WCHAR)
        ('lfHeight', LONG),
        ('lfWidth', LONG),
        ('lfEscapement', LONG),
        ('lfOrientation', LONG),
        ('lfWeight', LONG),
        ('lfItalic', BYTE),
        ('lfUnderline', BYTE),
        ('lfStrikeOut', BYTE),
        ('lfCharSet', BYTE),
        ('lfOutPrecision', BYTE),
        ('lfClipPrecision', BYTE),
        ('lfQuality', BYTE),
        ('lfPitchAndFamily', BYTE),
        ('lfFaceName', CHAR * LF_FACESIZE),
    ]
class GV_ITEM(Structure):
_pack_ = 1
_fields_ = [
('row', c_int),
('col', c_int),
('mask', UINT),
('state', UINT),
('nFormat', UINT)
]
#assert sizeof(LVITEMW) == 40, sizeof(LVITEMW)
#assert alignment(LVITEMW) == 1, alignment(LVITEMW)
| manojklm/pywinauto-x64 | pywinauto/win32structures.py | Python | lgpl-2.1 | 32,760 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
import sys
import os
from spack import *
class Namd(MakefilePackage):
"""NAMDis a parallel molecular dynamics code designed for
high-performance simulation of large biomolecular systems."""
homepage = "http://www.ks.uiuc.edu/Research/namd/"
url = "file://{0}/NAMD_2.12_Source.tar.gz".format(os.getcwd())
git = "https://charm.cs.illinois.edu/gerrit/namd.git"
manual_download = True
version("develop", branch="master")
version('2.14b2', sha256='cb4bd918d2d545bb618e4b4a20023a53916f0aa362d9e57f3de1562c36240b00')
version('2.14b1', sha256='9407e54f5271b3d3039a5a9d2eae63c7e108ce31b7481e2197c19e1125b43919')
version('2.13', '9e3323ed856e36e34d5c17a7b0341e38', preferred=True)
version('2.12', '2a1191909b1ab03bf0205971ad4d8ee9')
variant('fftw', default='3', values=('none', '2', '3', 'mkl'),
description='Enable the use of FFTW/FFTW3/MKL FFT')
variant('interface', default='none', values=('none', 'tcl', 'python'),
description='Enables TCL and/or python interface')
depends_on('[email protected]:', when="@2.14b1:")
depends_on('[email protected]', when="@2.13")
depends_on('[email protected]', when="@2.12")
depends_on('fftw@:2.99', when="fftw=2")
depends_on('fftw@3:', when="fftw=3")
depends_on('intel-mkl', when="fftw=mkl")
depends_on('tcl', when='interface=tcl')
depends_on('tcl', when='interface=python')
depends_on('python', when='interface=python')
def _copy_arch_file(self, lib):
config_filename = 'arch/{0}.{1}'.format(self.arch, lib)
copy('arch/Linux-x86_64.{0}'.format(lib),
config_filename)
if lib == 'tcl':
filter_file(r'-ltcl8\.5',
'-ltcl{0}'.format(self.spec['tcl'].version.up_to(2)),
config_filename)
def _append_option(self, opts, lib):
if lib != 'python':
self._copy_arch_file(lib)
spec = self.spec
opts.extend([
'--with-{0}'.format(lib),
'--{0}-prefix'.format(lib), spec[lib].prefix
])
@property
def arch(self):
plat = sys.platform
if plat.startswith("linux"):
plat = "linux"
march = platform.machine()
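        # e.g. 'linux-x86_64' on a typical 64-bit Linux host (illustrative)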
return '{0}-{1}'.format(plat, march)
@property
def build_directory(self):
return '{0}-spack'.format(self.arch)
def edit(self, spec, prefix):
m64 = '-m64 ' if not spec.satisfies('arch=aarch64:') else ''
with working_dir('arch'):
with open('{0}.arch'.format(self.build_directory), 'w') as fh:
                # these options are taken from the default provided
                # configuration files
# https://github.com/UIUC-PPL/charm/pull/2778
if self.spec.satisfies('^charmpp@:6.10.1'):
optims_opts = {
'gcc': m64 + '-O3 -fexpensive-optimizations \
-ffast-math -lpthread',
'intel': '-O2 -ip'}
else:
optims_opts = {
'gcc': m64 + '-O3 -fexpensive-optimizations \
-ffast-math',
'intel': '-O2 -ip'}
optim_opts = optims_opts[self.compiler.name] \
if self.compiler.name in optims_opts else ''
fh.write('\n'.join([
'NAMD_ARCH = {0}'.format(self.arch),
'CHARMARCH = {0}'.format(self.spec['charmpp'].charmarch),
'CXX = {0.cxx} {0.cxx11_flag}'.format(
self.compiler),
'CXXOPTS = {0}'.format(optim_opts),
'CC = {0}'.format(self.compiler.cc),
'COPTS = {0}'.format(optim_opts),
''
]))
self._copy_arch_file('base')
opts = ['--charm-base', spec['charmpp'].prefix]
fftw_version = spec.variants['fftw'].value
if fftw_version == 'none':
opts.append('--without-fftw')
elif fftw_version == 'mkl':
self._append_option(opts, 'mkl')
else:
_fftw = 'fftw{0}'.format('' if fftw_version == '2' else '3')
self._copy_arch_file(_fftw)
opts.extend(['--with-{0}'.format(_fftw),
'--fftw-prefix', spec['fftw'].prefix])
interface_type = spec.variants['interface'].value
if interface_type != 'none':
self._append_option(opts, 'tcl')
if interface_type == 'python':
self._append_option(opts, 'python')
else:
opts.extend([
'--without-tcl',
'--without-python'
])
config = Executable('./config')
config(self.build_directory, *opts)
def install(self, spec, prefix):
with working_dir(self.build_directory):
mkdirp(prefix.bin)
install('namd2', prefix.bin)
# I'm not sure this is a good idea or if an autoload of the charm
# module would not be better.
install('charmrun', prefix.bin)
| rspavel/spack | var/spack/repos/builtin/packages/namd/package.py | Python | lgpl-2.1 | 5,399 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
from spack.cmd.common import print_module_placeholder_help
description = "add package to environment using dotkit"
section = "environment"
level = "long"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help='spec of package to use with dotkit')
def use(parser, args):
print_module_placeholder_help()
| skosukhin/spack | lib/spack/spack/cmd/use.py | Python | lgpl-2.1 | 1,713 |
#
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2013 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import os
import sys
import glob
from copy import copy
from subprocess import Popen, PIPE
Import('env')
lib_env = env.Clone()
def call(cmd, silent=True):
    stdout, stderr = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate()
    if not stderr:
        return stdout.strip()
elif not silent:
print stderr
def ldconfig(*args,**kwargs):
call('ldconfig')
if env['LINKING'] == 'static':
lib_env.Append(CXXFLAGS="-fPIC")
mapnik_lib_link_flag = ''
# note: .data gets the actual list to allow a true copy
# and avoids unintended pollution of other environments
libmapnik_cxxflags = copy(lib_env['CXXFLAGS'].data)
libmapnik_defines = copy(lib_env['CPPDEFINES'])
ABI_VERSION = env['ABI_VERSION']
enabled_imaging_libraries = []
filesystem = 'boost_filesystem%s' % env['BOOST_APPEND']
regex = 'boost_regex%s' % env['BOOST_APPEND']
system = 'boost_system%s' % env['BOOST_APPEND']
# clear out and re-set libs for this env
# note: order matters on linux: see lorder | tsort
lib_env['LIBS'] = [filesystem,
regex]
if env['HAS_CAIRO']:
lib_env.Append(LIBS=env['CAIRO_ALL_LIBS'])
# maybe bz2
if len(env['EXTRA_FREETYPE_LIBS']):
lib_env['LIBS'].extend(copy(env['EXTRA_FREETYPE_LIBS']))
if '-DHAVE_PNG' in env['CPPDEFINES']:
lib_env['LIBS'].append('png')
enabled_imaging_libraries.append('png_reader.cpp')
if '-DMAPNIK_USE_PROJ4' in env['CPPDEFINES']:
lib_env['LIBS'].append('proj')
if '-DHAVE_TIFF' in env['CPPDEFINES']:
lib_env['LIBS'].append('tiff')
enabled_imaging_libraries.append('tiff_reader.cpp')
if '-DHAVE_WEBP' in env['CPPDEFINES']:
lib_env['LIBS'].append('webp')
enabled_imaging_libraries.append('webp_reader.cpp')
lib_env['LIBS'].append('xml2')
if '-DBOOST_REGEX_HAS_ICU' in env['CPPDEFINES']:
lib_env['LIBS'].append('icui18n')
lib_env['LIBS'].append(system)
lib_env['LIBS'].append('harfbuzz')
if '-DHAVE_JPEG' in env['CPPDEFINES']:
lib_env['LIBS'].append('jpeg')
enabled_imaging_libraries.append('jpeg_reader.cpp')
lib_env['LIBS'].append(env['ICU_LIB_NAME'])
lib_env['LIBS'].append('freetype')
if env['RUNTIME_LINK'] == 'static':
if 'icuuc' in env['ICU_LIB_NAME']:
lib_env['LIBS'].append('icudata')
if env['PLATFORM'] == 'Linux':
lib_env['LINKFLAGS'].append('-pthread')
if env['RUNTIME_LINK'] != 'static':
lib_env['LIBS'].insert(0, 'agg')
lib_env['LIBS'].append('z')
if env['PLATFORM'] == 'Darwin':
mapnik_libname = env.subst(env['MAPNIK_LIB_NAME'])
if env['FULL_LIB_PATH']:
lib_path = '%s/%s' % (env['MAPNIK_LIB_BASE'],mapnik_libname)
else:
lib_path = '@loader_path/'+mapnik_libname
mapnik_lib_link_flag += ' -Wl,-install_name,%s' % lib_path
_d = {'version':env['MAPNIK_VERSION_STRING'].replace('-pre','')}
mapnik_lib_link_flag += ' -current_version %(version)s -compatibility_version %(version)s' % _d
else: # unix, non-macos
mapnik_libname = env.subst(env['MAPNIK_LIB_NAME'])
if env['ENABLE_SONAME']:
mapnik_libname = env.subst(env['MAPNIK_LIB_NAME']) + (".%d.%d" % (int(ABI_VERSION[0]),int(ABI_VERSION[1])))
if env['PLATFORM'] == 'SunOS':
if env['CXX'].startswith('CC'):
mapnik_lib_link_flag += ' -R. -h %s' % mapnik_libname
else:
mapnik_lib_link_flag += ' -Wl,-h,%s' % mapnik_libname
else: # Linux and others
lib_env['LIBS'].append('dl')
mapnik_lib_link_flag += ' -Wl,-rpath-link,.'
if env['ENABLE_SONAME']:
mapnik_lib_link_flag += ' -Wl,-soname,%s' % mapnik_libname
if env['FULL_LIB_PATH']:
mapnik_lib_link_flag += ' -Wl,-rpath=%s' % env['MAPNIK_LIB_BASE']
else:
mapnik_lib_link_flag += ' -Wl,-z,origin -Wl,-rpath=\$$ORIGIN'
source = Split(
"""
expression_grammar.cpp
fs.cpp
request.cpp
well_known_srs.cpp
params.cpp
image_filter_types.cpp
miniz_png.cpp
color.cpp
conversions.cpp
image_compositing.cpp
image_scaling.cpp
box2d.cpp
datasource_cache.cpp
datasource_cache_static.cpp
debug.cpp
expression_node.cpp
expression_string.cpp
expression.cpp
transform_expression.cpp
feature_kv_iterator.cpp
feature_style_processor.cpp
feature_type_style.cpp
dasharray_parser.cpp
font_engine_freetype.cpp
font_set.cpp
function_call.cpp
gradient.cpp
graphics.cpp
parse_path.cpp
image_reader.cpp
image_util.cpp
layer.cpp
map.cpp
load_map.cpp
memory.cpp
palette.cpp
marker_helpers.cpp
transform_expression_grammar.cpp
plugin.cpp
rule.cpp
save_map.cpp
wkb.cpp
projection.cpp
proj_transform.cpp
scale_denominator.cpp
simplify.cpp
parse_transform.cpp
memory_datasource.cpp
symbolizer.cpp
symbolizer_keys.cpp
symbolizer_enumerations.cpp
unicode.cpp
raster_colorizer.cpp
mapped_memory_cache.cpp
marker_cache.cpp
svg/svg_parser.cpp
svg/svg_path_parser.cpp
svg/svg_points_parser.cpp
svg/svg_transform_parser.cpp
warp.cpp
css_color_grammar.cpp
text/font_library.cpp
text/vertex_cache.cpp
text/text_layout.cpp
text/text_line.cpp
text/itemizer.cpp
text/scrptrun.cpp
text/face.cpp
text/glyph_positions.cpp
text/placement_finder.cpp
text/properties_util.cpp
text/renderer.cpp
text/symbolizer_helpers.cpp
text/text_properties.cpp
text/font_feature_settings.cpp
text/formatting/base.cpp
text/formatting/list.cpp
text/formatting/text.cpp
text/formatting/format.cpp
text/formatting/layout.cpp
text/formatting/registry.cpp
text/placements/registry.cpp
text/placements/base.cpp
text/placements/dummy.cpp
text/placements/list.cpp
text/placements/simple.cpp
group/group_layout_manager.cpp
group/group_rule.cpp
group/group_symbolizer_helper.cpp
xml_tree.cpp
config_error.cpp
color_factory.cpp
renderer_common.cpp
renderer_common/render_pattern.cpp
renderer_common/process_group_symbolizer.cpp
"""
)
if env['PLUGIN_LINKING'] == 'static':
hit = False
for plugin in env['REQUESTED_PLUGINS']:
details = env['PLUGINS'][plugin]
if details['lib'] in env['LIBS'] or not details['lib']:
plugin_env = SConscript('../plugins/input/%s/build.py' % plugin)
if not plugin_env:
print("Notice: no 'plugin_env' variable found for plugin: '%s'" % plugin)
else:
hit = True
DEF = '-DMAPNIK_STATIC_PLUGIN_%s' % plugin.upper()
lib_env.Append(CPPDEFINES = DEF)
if DEF not in libmapnik_defines:
libmapnik_defines.append(DEF)
if plugin_env.has_key('SOURCES') and plugin_env['SOURCES']:
source += ['../plugins/input/%s/%s' % (plugin, src) for src in plugin_env['SOURCES']]
if plugin_env.has_key('CPPDEFINES') and plugin_env['CPPDEFINES']:
lib_env.AppendUnique(CPPDEFINES=plugin_env['CPPDEFINES'])
if plugin_env.has_key('CXXFLAGS') and plugin_env['CXXFLAGS']:
lib_env.AppendUnique(CXXFLAGS=plugin_env['CXXFLAGS'])
if plugin_env.has_key('LINKFLAGS') and plugin_env['LINKFLAGS']:
lib_env.AppendUnique(LINKFLAGS=plugin_env['LINKFLAGS'])
if plugin_env.has_key('CPPPATH') and plugin_env['CPPPATH']:
lib_env.AppendUnique(CPPPATH=copy(plugin_env['CPPPATH']))
if plugin_env.has_key('LIBS') and plugin_env['LIBS']:
lib_env.AppendUnique(LIBS=plugin_env['LIBS'])
else:
print("Notice: dependencies not met for plugin '%s', not building..." % plugin)
if hit:
lib_env.Append(CPPDEFINES = '-DMAPNIK_STATIC_PLUGINS')
libmapnik_defines.append('-DMAPNIK_STATIC_PLUGINS')
if env['HAS_CAIRO']:
lib_env.AppendUnique(LIBPATH=env['CAIRO_LIBPATHS'])
lib_env.Append(CPPDEFINES = '-DHAVE_CAIRO')
libmapnik_defines.append('-DHAVE_CAIRO')
lib_env.AppendUnique(CPPPATH=copy(env['CAIRO_CPPPATHS']))
source.append('cairo/cairo_context.cpp')
source.append('cairo/cairo_renderer.cpp')
source.append('cairo/cairo_render_vector.cpp')
source.append('cairo/process_markers_symbolizer.cpp')
source.append('cairo/process_text_symbolizer.cpp')
source.append('cairo/process_group_symbolizer.cpp')
source.append('cairo/process_line_symbolizer.cpp')
source.append('cairo/process_line_pattern_symbolizer.cpp')
source.append('cairo/process_polygon_symbolizer.cpp')
source.append('cairo/process_polygon_pattern_symbolizer.cpp')
source.append('cairo/process_debug_symbolizer.cpp')
source.append('cairo/process_point_symbolizer.cpp')
source.append('cairo/process_raster_symbolizer.cpp')
source.append('cairo/process_building_symbolizer.cpp')
for cpp in enabled_imaging_libraries:
source.append(cpp)
# agg backend
source += Split(
"""
agg/agg_renderer.cpp
agg/process_building_symbolizer.cpp
agg/process_line_symbolizer.cpp
agg/process_line_pattern_symbolizer.cpp
agg/process_text_symbolizer.cpp
agg/process_point_symbolizer.cpp
agg/process_polygon_symbolizer.cpp
agg/process_polygon_pattern_symbolizer.cpp
agg/process_raster_symbolizer.cpp
agg/process_shield_symbolizer.cpp
agg/process_markers_symbolizer.cpp
agg/process_group_symbolizer.cpp
agg/process_debug_symbolizer.cpp
"""
)
# clipper
source += Split(
"""
../deps/clipper/src/clipper.cpp
""")
if env['RUNTIME_LINK'] == "static":
source += glob.glob('../deps/agg/src/' + '*.cpp')
# grid backend
if env['GRID_RENDERER']:
source += Split(
"""
grid/grid.cpp
grid/grid_renderer.cpp
grid/process_building_symbolizer.cpp
grid/process_line_pattern_symbolizer.cpp
grid/process_line_symbolizer.cpp
grid/process_markers_symbolizer.cpp
grid/process_point_symbolizer.cpp
grid/process_polygon_pattern_symbolizer.cpp
grid/process_polygon_symbolizer.cpp
grid/process_raster_symbolizer.cpp
grid/process_shield_symbolizer.cpp
grid/process_text_symbolizer.cpp
grid/process_group_symbolizer.cpp
""")
lib_env.Append(CPPDEFINES = '-DGRID_RENDERER')
libmapnik_defines.append('-DGRID_RENDERER')
# https://github.com/mapnik/mapnik/issues/1438
if env['SVG_RENDERER']: # svg backend
source += Split(
"""
svg/output/svg_output_grammars.cpp
svg/output/svg_renderer.cpp
svg/output/svg_generator.cpp
svg/output/svg_output_attributes.cpp
svg/output/process_symbolizers.cpp
svg/output/process_building_symbolizer.cpp
svg/output/process_line_pattern_symbolizer.cpp
svg/output/process_line_symbolizer.cpp
svg/output/process_markers_symbolizer.cpp
svg/output/process_point_symbolizer.cpp
svg/output/process_polygon_pattern_symbolizer.cpp
svg/output/process_polygon_symbolizer.cpp
svg/output/process_raster_symbolizer.cpp
svg/output/process_shield_symbolizer.cpp
svg/output/process_text_symbolizer.cpp
svg/output/process_group_symbolizer.cpp
""")
lib_env.Append(CPPDEFINES = '-DSVG_RENDERER')
libmapnik_defines.append('-DSVG_RENDERER')
if env['XMLPARSER'] == 'libxml2' and env['HAS_LIBXML2']:
source += Split(
"""
libxml2_loader.cpp
""")
lib_env.Append(CPPDEFINES = '-DHAVE_LIBXML2')
libmapnik_defines.append('-DHAVE_LIBXML2')
else:
source += Split(
"""
rapidxml_loader.cpp
"""
)
# clone the env one more time to isolate mapnik_lib_link_flag
lib_env_final = lib_env.Clone()
lib_env_final.Prepend(LINKFLAGS=mapnik_lib_link_flag)
# cache library values for other builds to use
env['LIBMAPNIK_LIBS'] = copy(lib_env['LIBS'])
env['LIBMAPNIK_LINKFLAGS'] = copy(lib_env['LINKFLAGS'])
env['LIBMAPNIK_CXXFLAGS'] = libmapnik_cxxflags
env['LIBMAPNIK_DEFINES'] = libmapnik_defines
mapnik = None
if env['PLATFORM'] == 'Darwin' or not env['ENABLE_SONAME']:
target_path = env['MAPNIK_LIB_BASE_DEST']
if 'uninstall' not in COMMAND_LINE_TARGETS:
if env['LINKING'] == 'static':
mapnik = lib_env_final.StaticLibrary(env['MAPNIK_NAME'], source)
else:
mapnik = lib_env_final.SharedLibrary(env['MAPNIK_NAME'], source)
result = env.Install(target_path, mapnik)
env.Alias(target='install', source=result)
env['create_uninstall_target'](env, os.path.join(target_path,env.subst(env['MAPNIK_LIB_NAME'])))
else:
# Symlink command, only works if both files are in same directory
def symlink(env, target, source):
trgt = str(target[0])
src = str(source[0])
if os.path.islink(trgt) or os.path.exists(trgt):
os.remove(trgt)
os.symlink(os.path.basename(src), trgt)
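    # The install steps below create the usual shared-library symlink chain,
    # e.g. (illustrative, assuming ABI 3.0 and version 3.0.9):
    #   libmapnik.so -> libmapnik.so.3.0 -> libmapnik.so.3.0.9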
major, minor, micro = ABI_VERSION
soFile = "%s.%d.%d.%d" % (os.path.basename(env.subst(env['MAPNIK_LIB_NAME'])), int(major), int(minor), int(micro))
target = os.path.join(env['MAPNIK_LIB_BASE_DEST'], soFile)
if 'uninstall' not in COMMAND_LINE_TARGETS:
if env['LINKING'] == 'static':
mapnik = lib_env_final.StaticLibrary(env['MAPNIK_NAME'], source)
else:
mapnik = lib_env_final.SharedLibrary(env['MAPNIK_NAME'], source)
result = env.InstallAs(target=target, source=mapnik)
env.Alias(target='install', source=result)
if result:
env.AddPostAction(result, ldconfig)
# Install symlinks
target1 = os.path.join(env['MAPNIK_LIB_BASE_DEST'], "%s.%d.%d" % \
(os.path.basename(env.subst(env['MAPNIK_LIB_NAME'])),int(major), int(minor)))
target2 = os.path.join(env['MAPNIK_LIB_BASE_DEST'], os.path.basename(env.subst(env['MAPNIK_LIB_NAME'])))
if 'uninstall' not in COMMAND_LINE_TARGETS:
link1 = env.Command(target1, target, symlink)
env.Alias(target='install', source=link1)
link2 = env.Command(target2, target1, symlink)
env.Alias(target='install', source=link2)
# delete in reverse order..
env['create_uninstall_target'](env, target2)
env['create_uninstall_target'](env, target1)
env['create_uninstall_target'](env, target)
# to enable local testing
lib_major_minor = "%s.%d.%d" % (os.path.basename(env.subst(env['MAPNIK_LIB_NAME'])), int(major), int(minor))
local_lib = os.path.basename(env.subst(env['MAPNIK_LIB_NAME']))
if os.path.islink(lib_major_minor) or os.path.exists(lib_major_minor):
os.remove(lib_major_minor)
os.symlink(local_lib,lib_major_minor)
    Clean(mapnik, lib_major_minor)
if not env['RUNTIME_LINK'] == 'static':
Depends(mapnik, env.subst('../deps/agg/libagg.a'))
| qianwenming/mapnik | src/build.py | Python | lgpl-2.1 | 15,637 |
import flask.views
from flask import request
TAM_PAGINA=5
def calculoPrimeraPag(cant):
"""
Setea los valores correspondientes a la primera pagina de un paginar,
recibe la cantidad de items, fases o proyectos que existen para dicho paginar
"""
    if(cant!=0):# if something was returned
t=cant/TAM_PAGINA
mod=cant%TAM_PAGINA
if mod>0:
            t=int(t)+1# total number of pages
elif int(t)==0:
t=1
        m=flask.session['pagina']# the page I am currently on
infopag="Pagina "+ str(m) +" de " + str(t)
if m<t:
flask.session['haynext']=True
else:
flask.session['haynext']=False
if m==1:
flask.session['hayprev']=False
else:
flask.session['hayprev']=True
    else:# otherwise an empty page, without next or previous buttons
flask.session['haynext']=False
flask.session['hayprev']=False
infopag="Pagina 1 de 1"
return infopag
def calculoDeSiguiente(cant):
"""
Esta funcion es llamada cuando se hace click en el boton "Siguiente" del paginar
Setea los valores correspondientes para la siguiente pagina, calcula si hay siguiente para mostrar o no el boton de "Siguiente"
"""
t=cant/TAM_PAGINA
mod=cant%TAM_PAGINA
if mod>0:
        t=int(t)+1# total number of pages
elif int(t)==0:
t=1
flask.session['pagina']=flask.session['pagina']+1
sobran=cant-flask.session['pagina']* TAM_PAGINA
if sobran>0:
flask.session['haynext']=True
else:
flask.session['haynext']=False
if flask.session['pagina']==1:
flask.session['hayprev']=False
else:
flask.session['hayprev']=True
m=flask.session['pagina']
infopag="Pagina "+ str(m) +" de " + str(t)
return infopag
def calculoDeAnterior(cant):
"""
Esta funcion es llamada cuando se hace click en el boton "Anterior" del paginar
Setea los valores correspondientes para la siguiente pagina, calcula si hay siguiente para mostrar o no el boton de "Siguiente" y "Anterior"
"""
    t=cant/TAM_PAGINA # find out how many pages are needed
    mod=cant%TAM_PAGINA # remainder
    if mod>0: # if the remainder is not zero, the division is not exact
        t=int(t)+1# pages needed = integer part of the division plus one
elif int(t)==0:
t=1
flask.session['pagina']=flask.session['pagina']-1
pag=flask.session['pagina']
if pag==1:
flask.session['hayprev']=False
else:
flask.session['hayprev']=True
if cant>(pag*TAM_PAGINA):
flask.session['haynext']=True
    m=flask.session['pagina']# the page I am currently on
infopag="Pagina "+ str(m) +" de " + str(t)
return infopag
def totalPaginas(cant):
"""
Cacula cuantas paginas en total hay para la cant pasada como parametro
"""
t=cant/TAM_PAGINA
mod=cant%TAM_PAGINA
    if mod>0:
        t=int(t)+1# total number of pages
    elif int(t)==0:
        t=1
return t
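# Illustrative example (with TAM_PAGINA == 5 as defined above):
#   totalPaginas(12) -> 3 pages (5 + 5 + 2); totalPaginas(3) -> 1 page.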
| diegocrzt/pmspy | pms/vista/paginar.py | Python | lgpl-2.1 | 2,986 |
"core.Pixmap tests"
from unittest import SkipTest
from testutils import *
from gfxprim.core import Pixmap
from gfxprim import gfx, core
def test_gfx_submodule_loads():
"gfx is present in a Pixmap"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
assert c.gfx
def test_gfx_submodule_has_C():
"gfx contains C"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
assert c.gfx.C
assert gfx.C
# These set the param types of the functions in GFX
gfx_params = {
'arc_segment': 'IIIIIFFP',
'circle': 'IIIP',
'ellipse': 'IIIIP',
'fill': 'P',
'fill_circle': 'IIIP',
'fill_ellipse': 'IIIIP',
'fill_polygon': ([(0,0),(1,1),(1,0)], 0, {}),
'fill_rect': 'IIIIP',
'fill_ring': 'IIIIP',
'fill_tetragon': 'IIIIIIIIP',
'fill_triangle': 'IIIIIIP',
'hline': 'IIIP',
'hline_aa': 'IIIP', # Fixpoint, originally 'FFFP'
'line': 'IIIIP',
'line_aa': 'IIIIP', # Fixpoint, originally 'FFFFP'
'polygon': ([(0,0),(1,1),(1,0)], 0, {}),
'putpixel_aa': 'IIP', # Fixpoint, originally 'FFP'
'rect': 'IIIIP',
'ring': 'IIIIP',
'tetragon': 'IIIIIIIIP',
'triangle': 'IIIIIIP',
'vline': 'IIIP',
'vline_aa': 'IIIP', # Fixpoint, originally 'FFFP'
}
def test_all_methods_are_known():
"All methods of gfx submodule have known param types in this test"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
for name in dir(c.gfx):
if name[0] != '_' and name not in ['C', 'ctx']:
assert name in gfx_params
def gen_dummy_args(params):
"""
Generate dummy parameter tuple according to characters in the given string.
0 - 0
S - String ("")
I - Int (1)
F - Float (0.5)
P - Pixel (0)
"""
args = []
for t in params:
if t == '0':
args.append(0)
elif t == 'I':
args.append(1)
elif t == 'P':
args.append(0)
elif t == 'F':
args.append(0.5)
elif t == 'S':
args.append("")
else:
assert False
return tuple(args)
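# Illustrative examples of the mapping above (not part of the original tests):
#   gen_dummy_args('IIIP') == (1, 1, 1, 0)    # e.g. dummy args for 'hline'
#   gen_dummy_args('FFP')  == (0.5, 0.5, 0)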
@for_each_case(gfx_params)
def test_method_call(n, params):
"Calling with dummy parameters:"
c = PixmapRand(10, 10, core.C.PIXEL_RGB888)
if isinstance(params, str):
c.gfx.__getattribute__(n)(*gen_dummy_args(params))
else:
assert isinstance(params, tuple) and isinstance(params[-1], dict)
c.gfx.__getattribute__(n)(*params[:-1], **params[-1])
def test_Polygon():
"Polygon() works"
c0 = PixmapRand(13, 12, core.C.PIXEL_RGB888, seed=42)
c1 = PixmapRand(13, 12, core.C.PIXEL_RGB888, seed=42)
c2 = PixmapRand(13, 12, core.C.PIXEL_RGB888, seed=42)
assert c1 == c0
c1.gfx.polygon([1,2,0,4,7,9,5,4,3,2], 43)
c2.gfx.polygon([(1,2),(0,4),(7,9),(5,4),(3,2)], 43)
assert c1 == c2
assert c1 != c0
def test_FillPolygon():
"FillPolygon() works"
c0 = PixmapRand(13, 9, core.C.PIXEL_RGB888, seed=41)
c1 = PixmapRand(13, 9, core.C.PIXEL_RGB888, seed=41)
c2 = PixmapRand(13, 9, core.C.PIXEL_RGB888, seed=41)
assert c1 == c0
c1.gfx.fill_polygon([1,2,0,4,7,9,5,4,3,2], 0)
c2.gfx.fill_polygon([(1,2),(0,4),(7,9),(5,4),(3,2)], 0)
assert c1 == c2
assert c1 != c0
| gfxprim/gfxprim | tests/pylib/test_gfx.py | Python | lgpl-2.1 | 3,030 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Shows how to use BitmapManager to asynchronously load a Bitmap from a file.
Run this snippet providing a list of filenames of (high resolution) pictures:
$ ./asyncload.py /path/to/mypics/*.jpg anotherpic.png nonexistent.png
Press space to sequentially load the pictures. A rotating rectangle appears
during the time the picture file is being loaded to show how the main thread
is not affected by the load operation.
Press 'f' to display the frame time graph, which should show no significant
glitches while loading
'''
import sys
import libavg
from libavg import player
APP_RESOLUTION = (640, 480)
class AsyncLoadApp(libavg.AVGApp):
def init(self):
'''
Create placeholders for the example. A single ImageNode is used to show
the pictures.
'''
self.__imageNode = libavg.avg.ImageNode(pos=(10, 20), parent=self._parentNode)
self.__spinner = libavg.avg.RectNode(color='222222',
fillopacity=1, size=(40, 40), active=False,
pos=(10, self._parentNode.size.y - 50), parent=self._parentNode)
self.__infoNode = libavg.avg.WordsNode(text='Press space to load the first image',
fontsize=11, pos=(10, 5), parent=self._parentNode)
self.__pics = sys.argv[1:]
self.__currentPic = -1
player.subscribe(player.ON_FRAME, self.__onFrame)
def onKeyDown(self, event):
'''
Intercept a space keypress and trigger the request.
'''
if event.keystring == 'space':
self.__requestNextBitmap()
def __requestNextBitmap(self):
'''
Ask the BitmapManager to load a new file. loadBitmap() call returns immediately.
'''
self.__currentPic = (self.__currentPic + 1) % len(self.__pics)
libavg.avg.BitmapManager.get().loadBitmap(self.__pics[self.__currentPic],
self.__onBitmapLoaded)
self.__spinner.active = True
self.__spinner.angle = 0
def __onBitmapLoaded(self, bmp):
'''
This callback is invoked by BitmapManager, 'bmp' can be either a Bitmap instance
or a RuntimeError instance (hence checking for Exception is consistent).
'''
self.__spinner.active = False
if isinstance(bmp, Exception):
self.__infoNode.text = ('Error loading '
'image %s : %s' % (self.__pics[self.__currentPic], str(bmp)))
self.__imageNode.href = ''
else:
self.__infoNode.text = ('Loaded %s, '
'press space for the next one' % self.__pics[self.__currentPic])
self.__setBitmapAndResize(bmp)
def __setBitmapAndResize(self, bmp):
originalSize = bmp.getSize()
if originalSize.x > originalSize.y:
ratio = (APP_RESOLUTION[0] - 20) / originalSize.x
else:
ratio = (APP_RESOLUTION[1] - 40) / originalSize.y
self.__imageNode.setBitmap(bmp)
self.__imageNode.size = originalSize * ratio
def __onFrame(self):
if self.__spinner.active:
self.__spinner.angle += 0.05
if len(sys.argv) == 1:
print 'Usage: %s <filename> [<filename> [<filename> [..]]]' % sys.argv[0]
sys.exit(1)
AsyncLoadApp.start(resolution=APP_RESOLUTION)
| pararthshah/libavg-vaapi | src/samples/asyncload.py | Python | lgpl-2.1 | 3,390 |
# Copyright (C) 2010, 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from webkit import model
def combine_condition(conditions):
if conditions:
if len(conditions) == 1:
return conditions[0]
else:
return bracket_if_needed(' && '.join(map(bracket_if_needed, conditions)))
else:
return None
def bracket_if_needed(condition):
if re.match(r'.*(&&|\|\|).*', condition):
return '(%s)' % condition
else:
return condition
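# Illustrative examples (not part of the original script):
#   combine_condition(["ENABLE(A)"])
#       -> "ENABLE(A)"
#   combine_condition(["ENABLE(A)", "ENABLE(B) || ENABLE(C)"])
#       -> "(ENABLE(A) && (ENABLE(B) || ENABLE(C)))"
# i.e. nested conditions are parenthesised only when they contain && or ||.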
def parse(file):
receiver_attributes = None
destination = None
messages = []
conditions = []
master_condition = None
superclass = []
for line in file:
match = re.search(r'messages -> (?P<destination>[A-Za-z_0-9]+) \s*(?::\s*(?P<superclass>.*?) \s*)?(?:(?P<attributes>.*?)\s+)?{', line)
if match:
receiver_attributes = parse_attributes_string(match.group('attributes'))
if match.group('superclass'):
superclass = match.group('superclass')
if conditions:
master_condition = conditions
conditions = []
destination = match.group('destination')
continue
if line.startswith('#'):
trimmed = line.rstrip()
if line.startswith('#if '):
conditions.append(trimmed[4:])
elif line.startswith('#endif') and conditions:
conditions.pop()
elif line.startswith('#else') or line.startswith('#elif'):
raise Exception("ERROR: '%s' is not supported in the *.in files" % trimmed)
continue
match = re.search(r'([A-Za-z_0-9]+)\((.*?)\)(?:(?:\s+->\s+)\((.*?)\))?(?:\s+(.*))?', line)
if match:
name, parameters_string, reply_parameters_string, attributes_string = match.groups()
if parameters_string:
parameters = parse_parameters_string(parameters_string)
for parameter in parameters:
parameter.condition = combine_condition(conditions)
else:
parameters = []
attributes = parse_attributes_string(attributes_string)
if reply_parameters_string:
reply_parameters = parse_parameters_string(reply_parameters_string)
for reply_parameter in reply_parameters:
reply_parameter.condition = combine_condition(conditions)
elif reply_parameters_string == '':
reply_parameters = []
else:
reply_parameters = None
messages.append(model.Message(name, parameters, reply_parameters, attributes, combine_condition(conditions)))
return model.MessageReceiver(destination, superclass, receiver_attributes, messages, combine_condition(master_condition))
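# A minimal sketch of the input format parse() accepts (illustrative only; the
# receiver and message names below are made up):
#
#   messages -> WebExamplePage {
#   #if ENABLE(EXAMPLE_FEATURE)
#       DoSomething(uint64_t frameID)
#   #endif
#       LoadURL(String url) -> (bool result)
#   }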
def parse_attributes_string(attributes_string):
if not attributes_string:
return None
return attributes_string.split()
def split_parameters_string(parameters_string):
parameters = []
current_parameter_string = ''
nest_level = 0
for character in parameters_string:
if character == ',' and nest_level == 0:
parameters.append(current_parameter_string)
current_parameter_string = ''
continue
if character == '<':
nest_level += 1
elif character == '>':
nest_level -= 1
current_parameter_string += character
parameters.append(current_parameter_string)
return parameters
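# Illustrative example (not part of the original script): commas nested inside
# template brackets are not treated as parameter separators:
#   split_parameters_string("uint64_t frameID, HashMap<String, Vector<String>> values")
#   == ["uint64_t frameID", " HashMap<String, Vector<String>> values"]
# (the leading space is stripped later by parse_parameters_string's regex).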
def parse_parameters_string(parameters_string):
parameters = []
for parameter_string in split_parameters_string(parameters_string):
match = re.search(r'\s*(?:\[(?P<attributes>.*?)\]\s+)?(?P<type_and_name>.*)', parameter_string)
attributes_string, type_and_name_string = match.group('attributes', 'type_and_name')
split = type_and_name_string.rsplit(' ', 1)
parameter_kind = 'class'
if split[0].startswith('struct '):
parameter_kind = 'struct'
split[0] = split[0][7:]
parameter_type = split[0]
parameter_name = split[1]
parameters.append(model.Parameter(kind=parameter_kind, type=parameter_type, name=parameter_name, attributes=parse_attributes_string(attributes_string)))
return parameters
| annulen/qtwebkit-snapshots | Source/WebKit2/Scripts/webkit/parser.py | Python | lgpl-2.1 | 5,618 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import run_tests # set sys.path
import tempfile
import os
from kobo.http import *
class TestPOSTTransport(unittest.TestCase):
def setUp(self):
self.postt = POSTTransport()
def test_get_content_type(self):
tf0 = tempfile.mkstemp()[1]
tf1 = tempfile.mkstemp(suffix=".txt")[1]
tf2 = tempfile.mkstemp(suffix=".rtf")[1]
tf3 = tempfile.mkstemp(suffix=".avi")[1]
self.assertEqual(self.postt.get_content_type(tf0), "application/octet-stream")
self.assertEqual(self.postt.get_content_type(tf1), "text/plain")
# *.rtf: py2.7 returns 'application/rtf'; py2.4 returns 'text/rtf'
self.assertEqual(self.postt.get_content_type(tf2).split("/")[1], "rtf")
self.assertTrue(self.postt.get_content_type(tf2) in ("application/rtf", "text/rtf"))
self.assertEqual(self.postt.get_content_type(tf3), "video/x-msvideo")
def test_add_file(self):
tf1 = tempfile.mkstemp()[1]
tf2 = tempfile.mkstemp()[1]
tf3 = open(tempfile.mkstemp()[1])
os.unlink(tf1)
self.assertRaises(OSError, self.postt.add_file, "file", tf1)
self.assertEqual(self.postt.add_file("file", tf2), None)
self.assertRaises(TypeError, self.postt.add_file, "file", tf3)
if __name__ == '__main__':
unittest.main()
| pombredanne/https-git.fedorahosted.org-git-kobo | tests/test_http.py | Python | lgpl-2.1 | 1,375 |
import unittest
from charm.toolbox.symcrypto import SymmetricCryptoAbstraction,AuthenticatedCryptoAbstraction, MessageAuthenticator
from charm.toolbox.pairinggroup import PairingGroup,GT
from charm.core.math.pairing import hashPair as sha1
class SymmetricCryptoAbstractionTest(unittest.TestCase):
def testAESCBC(self):
self.MsgtestAESCBC(b"hello world")
def testAESCBCLong(self):
self.MsgtestAESCBC(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")
def testAESCBC_Seperate(self):
self.MsgTestAESCBCSeperate(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")
def MsgtestAESCBC(self,msg):
groupObj = PairingGroup('SS512')
a = SymmetricCryptoAbstraction(sha1(groupObj.random(GT)))
ct = a.encrypt(msg)
dmsg = a.decrypt(ct);
assert msg == dmsg , 'o: =>%s\nm: =>%s' % (msg, dmsg)
def MsgTestAESCBCSeperate(self,msg):
groupObj = PairingGroup('SS512')
ran = groupObj.random(GT)
a = SymmetricCryptoAbstraction(sha1(ran))
ct = a.encrypt(msg)
b = SymmetricCryptoAbstraction(sha1(ran))
dmsg = b.decrypt(ct);
assert msg == dmsg , 'o: =>%s\nm: =>%s' % (msg, dmsg)
class AuthenticatedCryptoAbstractionTest(unittest.TestCase):
def testAESCBC(self):
self.MsgtestAESCBC(b"hello world")
def testAESCBCLong(self):
self.MsgtestAESCBC(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")
def testAESCBC_Seperate(self):
self.MsgTestAESCBCSeperate(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")
def MsgtestAESCBC(self,msg):
groupObj = PairingGroup('SS512')
a = AuthenticatedCryptoAbstraction(sha1(groupObj.random(GT)))
ct = a.encrypt(msg)
dmsg = a.decrypt(ct);
assert msg == dmsg , 'o: =>%s\nm: =>%s' % (msg, dmsg)
def MsgTestAESCBCSeperate(self,msg):
groupObj = PairingGroup('SS512')
ran = groupObj.random(GT)
a = AuthenticatedCryptoAbstraction(sha1(ran))
ct = a.encrypt(msg)
b = AuthenticatedCryptoAbstraction(sha1(ran))
dmsg = b.decrypt(ct);
assert msg == dmsg , 'o: =>%s\nm: =>%s' % (msg, dmsg)
class MessageAuthenticatorTest(unittest.TestCase):
def testSelfVerify(self):
key = sha1(PairingGroup('SS512').random(GT))
m = MessageAuthenticator(key)
a = m.mac('hello world')
assert m.verify(a), "expected message to verify";
def testSeperateVerify(self):
key = sha1(PairingGroup('SS512').random(GT))
m = MessageAuthenticator(key)
a = m.mac('hello world')
m1 = MessageAuthenticator(key)
assert m1.verify(a), "expected message to verify";
def testTamperData(self):
key = sha1(PairingGroup('SS512').random(GT))
m = MessageAuthenticator(key)
a = m.mac('hello world')
m1 = MessageAuthenticator(key)
a["msg"]= "tampered"
        assert not m1.verify(a), "expected tampered message not to verify";
def testTamperMac(self):
key = sha1(PairingGroup('SS512').random(GT))
m = MessageAuthenticator(key)
a = m.mac('hello world')
m1 = MessageAuthenticator(key)
a["digest"]= "tampered"
        assert not m1.verify(a), "expected tampered message not to verify";
def testTamperAlg(self):
key = sha1(PairingGroup('SS512').random(GT))
m = MessageAuthenticator(key)
a = m.mac('hello world')
m1 = MessageAuthenticator(key)
m1._algorithm = "alg" # bypassing the algorithm check to verify the mac is over the alg + data
a["alg"]= "alg"
        assert not m1.verify(a), "expected tampered message not to verify";
if __name__ == "__main__":
unittest.main()
| lferr/charm | charm/test/toolbox/symcrypto_test.py | Python | lgpl-3.0 | 4,362 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-11 11:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('equinox_api', '0004_operation_description'),
]
operations = [
migrations.AddField(
model_name='application',
name='new_att',
field=models.BooleanField(default=True),
),
]
| ivanprjcts/equinox-spring16-API | equinox_spring16_api/equinox_api/migrations/0005_application_new_att.py | Python | lgpl-3.0 | 463 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from FastBitSet import FastBitSet
import math
import mmh3
class FastBloomFilter(object):
mask32 = 0xffffffff
mask64 = 0xffffffffffffffff
mask128 = 0xffffffffffffffffffffffffffffffff
seeds = [2, 3, 5, 7, 11,
13, 17, 19, 23, 29,
31, 37, 41, 43, 47,
53, 59, 61, 67, 71,
73, 79, 83, 89, 97,
101, 103, 107, 109, 113,
127, 131, 137, 139, 149,
151, 157, 163, 167, 173,
179, 181, 191, 193, 197,
199, 211, 223, 227, 229,
233, 239, 241, 251, 257,
263, 269, 271, 277, 281,
283, 293, 307, 311, 313,
317, 331, 337, 347, 349,
353, 359, 367, 373, 379,
383, 389, 397, 401, 409,
419, 421, 431, 433, 439,
443, 449, 457, 461, 463,
467, 479, 487, 491, 499,
503, 509, 521, 523, 541,
547, 557, 563, 569, 571,
577, 587, 593, 599, 601,
607, 613, 617, 619, 631,
641, 643, 647, 653, 659,
661, 673, 677, 683, 691]
def __init__(self, n, fpr=0.00001):
m = -1 * math.log(fpr, math.e) * n / math.pow(math.log(2, math.e), 2)
k = (m / n) * math.log(2, math.e)
self.n = int(math.ceil(n))
self.fpr = fpr
self.m = int(math.ceil(m))
self.k = int(k)
self.bsUnitSize = 64
self.bsCap = int(math.ceil(self.m / 64))
self.bitSet = FastBitSet(self.bsCap, self.bsUnitSize)
self.bitSetLength = self.bitSet.length
def append(self, s):
self.bitSet.setList(self.hashs(s, self.k))
def exists(self, s):
bites = self.bitSet.getList(self.hashs(s, self.k))
return not (0 in bites)
def remove(self, s):
self.bitSet.setList(self.hashs(s, self.k), False)
def clear(self):
self.bitSet.clear()
def hashs(self, s, k):
bitSetLength = self.bitSetLength
#mask = self.mask32
mask = self.mask128
seeds = self.seeds
hashs = []
for i in range(k):
#print(mmh3.hash64(s, seeds[i]))
#hashs.append((mmh3.hash(s, seeds[i]) & mask) % bitSetLength)
hashs.append((mmh3.hash128(s, seeds[i]) & mask) % bitSetLength)
return hashs
    def hashs2(self, s, k):
        # Alternative double-hashing scheme (not used by append/exists above).
        # mmh3.hash64 returns a pair of 64-bit values, so only the first is used.
        bitSetLength = self.bitSetLength
        mask = self.mask32
        hashs = []
        hash1 = mmh3.hash64(s, 0)[0]
        hash2 = mmh3.hash64(s, hash1 & mask)[0]
        for i in range(k):
            hashs.append(((hash1 + i * hash2) % bitSetLength) & mask)
        return hashs
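# Illustrative usage sketch (not part of the original module); it assumes the
# FastBitSet and mmh3 dependencies imported above are available.
if __name__ == "__main__":
    bf = FastBloomFilter(100000, fpr=0.001)   # ~1.44 million bits, k truncated to 9 hashes
    bf.append("hello")
    print(bf.exists("hello"))     # True
    print(bf.exists("goodbye"))   # False with very high probability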
| nealzhang/util | FastBloomFilter.py | Python | lgpl-3.0 | 2,828 |
fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/Survey.xml'
fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/bratton farm-2.0.xml'
# fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/test.xml'
fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/Portsmouth Heights.xml'
fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/FreeCAD_Document.xml'
# demo files
# http://www.landxml.org/webapps/LandXMLSamples.aspx
# http://landxml.org/schema/LandXML-2.0/samples/Carlson Software/corridor from CD3-2.0.xml
import geodat
import re
from geodat.say import say
import PySide
from PySide import QtGui
import FreeCADGui as Gui
import FreeCAD
import Part
import numpy as np
class node():
def __init__(self,typ):
# print("erzuegen node,type ",typ)
self.typ=typ
self.params={}
self.content=[]
def getParam(self,param):
return self.params[param]
def getNodes(self,typ):
ns=[]
for c in self.content:
if c.typ==typ:
ns += [c]
return ns
def addContent(self,c):
self.content += [c]
def __str__(self):
return self.typ
def getiterator(self,typ):
rc=[]
for obj in self.content:
if obj.typ==typ:
rc += [obj]
rc += obj.getiterator(typ)
return rc
def parseParams(string):
params={}
s=string
while s!="":
res = re.search(r"(\S+)=\"([^\"]*)\"\s+(\S.*)", s)
if res != None:
assert len(res.groups())==3
k,v,s=res.group(1),res.group(2),res.group(3)
params[k]=v
continue
res = re.search(r"(\S+)=\"(.*)\"", s)
if res != None:
assert len(res.groups())==2
k,v,s=res.group(1),res.group(2),""
params[k]=v
continue
else:
raise Exception("parse Params Fehler:"+ s)
s=""
return params
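# Illustrative example (not part of the original module):
#   parseParams('id="P1" desc="corner point"')
#   == {'id': 'P1', 'desc': 'corner point'}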
def getData(fn,pb=None):
if pb==None:
pb=QtGui.QProgressBar()
pb.show()
stack=[0,0]*4
stackpointer=-1
objs=[]
say("Read data from cache file ...")
say(fn)
f=open(fn,"r")
content=f.readlines()
c2=[]
cl=len(content)
# FreeCAD File hack
if content[2].startswith(" FreeCAD Document, see http://www.freecadweb.org"):
content=content[4:]
cl=len(content)
say(cl)
i=-1
pb.setMaximum(cl)
while i<cl-1:
pb.setValue(i)
i += 1
line=content[i].strip()
j=0
while re.search(r">\s*$", line) == None and j<60:
i += 1
j += 1
line += content[i]
c2 +=[line]
line=''
content=c2
pb.setMaximum(len(content))
for lc,line in enumerate(content):
if "<TextureHexString>" in line:
say ("break A")
continue
pb.setValue(lc)
# if lc%100 == 0:
# say(lc)
# Gui.updateGui()
# if stackpointer != -1:
# print (res.groups())
# print (stackpointer)
# print ("\n-------------NEXT:")
# print(line)
# print ("--- PARSE IT------------------------")
if re.search(r"^\s*$",line):
continue
        # a complete element on a single line
res = re.search(r"^\s*<(\S+)\s+([^<]*)/>\s*$", line)
if res != None:
# print ("complete! ",res.groups())
assert len(res.groups())==2
typ=res.group(1)
obj=node(typ)
paramstring=res.group(2)
obj.params=parseParams(paramstring)
objs += [obj]
if stackpointer != -1:
stack[stackpointer].content += [obj]
# print stack[stackpointer]
# for c in stack[stackpointer].content:
# print c,",",
# print
continue
res = re.search(r"^\s*<(\S+)\s+([^<]*)>\s*$", line)
if res != None:
# print ("!start! ",res.groups())
assert len(res.groups())==2
typ=res.group(1)
obj=node(typ)
paramstring=res.group(2)
obj.params=parseParams(paramstring)
objs += [obj]
if stackpointer != -1:
stack[stackpointer].content += [obj]
# for c in stack[stackpointer].content:
# print c,
stackpointer += 1
stack[stackpointer]=obj
continue
res = re.search(r"^\s*</([^<]*)>\s*$", line)
if res != None:
# print ("!ende---------STACKPOINTER down! ",res.groups())
assert len(res.groups())==1
stackpointer -= 1
continue
res = re.search(r"^\s*<([^<\s]*)>\s*$", line)
if res != None:
# print ("!simple start! ",res.groups())
assert len(res.groups())==1
typ=res.group(1)
obj=node(typ)
if stackpointer != -1:
stack[stackpointer].content += [obj]
stackpointer += 1
stack[stackpointer] = obj
continue
        # element that opens and closes on the same line
res = re.search(r"^\s*<(\S+)\s*([^<]*)>(.*)</([^<]+)>\s*$", line)
if res != None:
# print ("!alles! ",res.groups())
assert len(res.groups())==4
typ=res.group(1)
obj=node(typ)
paramstring=res.group(2)
obj.params=parseParams(paramstring)
obj.text=res.group(3)
objs += [obj]
if stackpointer != -1:
stack[stackpointer].content += [obj]
# for c in stack[stackpointer].content:
# print c,
# stackpointer += 1
# stack[stackpointer]=obj
continue
raise Exception("unerwartet :" +line +":")
# x = re.findall('<([^<]*)>', line)
# for xl in x:
# print(xl)
# say("done getit--------")
FreeCAD.stackpointer=stackpointer
FreeCAD.stack=stack
FreeCAD.objs=objs
return stack[0]
if 0:
#----------------------------
# import landxml
pb=QtGui.QProgressBar()
pb.show()
# progressbar.setValue(0)
#import geodat.my_xmlparser
#reload (geodat.my_xmlparser)
from say import *
# tree=geodat.my_xmlparser.getData(fn)
tree=getData(fn)
# tree=FreeCAD.stack[0]
say("import done")
Gui.updateGui()
Ps={}
pnodes=tree.getiterator('P')
pb.setMaximum(len(pnodes))
for i,element in enumerate(pnodes):
pb.setValue(i)
# say((element.params,element.text))
_coords=element.text.split(' ')
Ps[element.params['id']]=FreeCAD.Vector(float(_coords[0]),float(_coords[1]),float(_coords[2]))
import Points
ptsa=Ps.values()
Points.show(Points.Points(ptsa))
App.activeDocument().recompute()
Gui.SendMsgToActiveView("ViewFit")
Gui.updateGui()
if 0:
for element in tree.getiterator('PntList3D')[:4]:
say((element.params,element.text))
say("Some Faces")
for element in tree.getiterator('F')[:4]:
say((element.params,element.text))
say("BREAKLINES")
for element in tree.getiterator('Breakline')[:3]:
# say((element.typ,element.params))
# say(element.content[0].text)
_coords=element.content[0].text.split(' ')
coords=np.array([float(a) for a in _coords])
coords=coords.reshape(len(_coords)/3,3)
pts=[FreeCAD.Vector(p) for p in coords]
Part.show(Part.makePolygon(pts))
App.ActiveDocument.ActiveObject.Label=element.params['desc']
Gui.updateGui()
for element in tree.getiterator('Boundary')[:10]:
say((element.typ,element.params))
# say("relations")
# for element in tree.getiterator('relation'):
# say(element.params)
1/0
col=[]
for element in tree.getiterator('F'):
say((element.params,element.text))
ixs=element.text.split(' ')
ptsf=[Ps[ix] for ix in ixs]
ptsf += [ptsf[0]]
col +=[Part.makePolygon(ptsf)]
Part.show(Part.Compound(col))
def showFace(rbf,rbf2,x,y,gridsize,shapeColor,bound):
import Draft
makeLoft=False
grids=gridsize
ws=[]
pts2=[]
xi, yi = np.linspace(np.min(x), np.max(x), grids), np.linspace(np.min(y), np.max(y), grids)
for ix in xi:
points=[]
for iy in yi:
iz=float(rbf(ix,iy))
#---------------------- special hacks #+#
if bound>0:
if iz > bound: iz = bound
if iz < -bound: iz = -bound
points.append(FreeCAD.Vector(iy,ix,iz))
if makeLoft:
w=Draft.makeWire(points,closed=False,face=False,support=None)
ws.append(w)
pts2.append(points)
if makeLoft:
ll=FreeCAD.activeDocument().addObject('Part::Loft','elevation')
ll.Sections=ws
ll.Ruled = True
ll.ViewObject.ShapeColor = shapeColor
ll.ViewObject.LineColor = (0.00,0.67,0.00)
for w in ws:
w.ViewObject.Visibility=False
ll.Label="Interpolation Gitter " + str(grids)
bs=Part.BSplineSurface()
bs.interpolate(pts2)
Part.show(bs.toShape())
import scipy.interpolate
def interpolate(x,y,z, gridsize,mode='thin_plate',rbfmode=True,shape=None):
mode=str(mode)
grids=gridsize
dx=np.max(x)-np.min(x)
dy=np.max(y)-np.min(y)
if dx>dy:
gridx=grids
gridy=int(round(dy/dx*grids))
else:
gridy=grids
gridx=int(round(dx/dy*grids))
if shape != None:
(gridy,gridx)=shape
xi, yi = np.linspace(np.min(x), np.max(x), gridx), np.linspace(np.min(y), np.max(y), gridy)
xi, yi = np.meshgrid(xi, yi)
if rbfmode:
rbf = scipy.interpolate.Rbf(x, y, z, function=mode)
rbf2 = scipy.interpolate.Rbf( y,x, z, function=mode)
else:
sayErr("interp2d nicht implementiert")
x=np.array(x)
y=np.array(y)
z=np.array(z)
xi, yi = np.linspace(np.min(x), np.max(x), gridx), np.linspace(np.min(y), np.max(y), gridy)
rbf = scipy.interpolate.interp2d(x, y, z, kind=mode)
rbf2 = scipy.interpolate.interp2d(y, x, z, kind=mode)
zi=rbf2(yi,xi)
return [rbf,xi,yi,zi]
def createsurface(pts,mode='thin_plate',rbfmode=True,gridCount=20,zfactor=1,bound=10**5,matplot=False):
modeColor={
'linear' : ( 1.0, 0.3, 0.0),
'thin_plate' : (0.0, 1.0, 0.0),
'cubic' : (0.0, 1.0, 1.0),
'inverse' : (1.0, 1.0, 0.0),
'multiquadric' : (1.0, .0, 1.0),
'gaussian' : (1.0, 1.0, 1.0),
'quintic' :(0.5,1.0, 0.0)
}
x=[v[1] for v in pts]
y=[v[0] for v in pts]
z=[zfactor*v[2] for v in pts]
x=np.array(x)
y=np.array(y)
z=np.array(z)
gridsize=gridCount
rbf,xi,yi,zi1 = interpolate(x,y,z, gridsize,mode,rbfmode)
    # auxiliary plane for the second interpolator
xe=[100,-100,100,-100]
ye=[100,100,-100,-100]
ze=[20,10,20,5]
rbf2,xi2,yi2,zi2 = interpolate(xe,ye,ze, gridsize,mode,rbfmode,zi1.shape)
zi=zi1
color=(1.0,0.0,0.0)
showFace(rbf,rbf2,x,y,gridsize,color,bound)
App.ActiveDocument.ActiveObject.Label=mode + " ZFaktor " + str(zfactor) + " #"
rc=App.ActiveDocument.ActiveObject
if 0:
createsurface(ptsa,mode='linear')
if 0:
pn=ptsa[000:2000]
Points.show(Points.Points(pn))
createsurface(pn,mode='linear')
| microelly2/geodata | geodat/my_xmlparser.py | Python | lgpl-3.0 | 9,730 |
# coding=utf-8
"""
CERMMorse : test_config
5/7/2017 : 11:32 PM
Author : James L. Key
"""
from unittest import TestCase
from readconfig import Config
__author__ = 'James L. Key'
__project__ = 'CERMMorse'
class TestConfig(TestCase):
def setUp(self):
self.conf = Config(configpath='../data/config.json')
self.conf.getconfig()
def evalcolor(self):
color = self.conf.Color
r = color[0]
g = color[1]
b = color[2]
if (r not in range(0, 2)) | (g not in range(0, 2)) | (b not in range(0, 2)):
return False
else:
return True
def test_getconfig(self):
self.assertIsInstance(self.conf.LCDPin1, int, 'Config LCDPin1 is not an Integer!!')
self.assertIn(self.conf.LCDPin1, range(0, 4), 'Config LCDPin1 is not in I2C Range!!')
self.assertIsInstance(self.conf.LCDPin2, int, 'Config LCDPin2 is not an Integer!!')
self.assertIn(self.conf.LCDPin2, range(0, 4), 'Config LCDPin1 is not in I2C Range!!')
self.assertIsInstance(self.conf.RelayPin, int, 'Config RelayPin is not an Integer!!')
self.assertIn(self.conf.RelayPin, range(0, 27), 'Config LCDPin1 is not in GPIO Range!!')
self.assertIsInstance(self.conf.MotionDetPin, int, 'Config MotionDetPin is not an Integer!!')
self.assertIn(self.conf.MotionDetPin, range(0, 27), 'Config LCDPin1 is not in GPIO Range!!')
self.assertIsInstance(self.conf.WPM, int, 'Config WPM is not an Integer!!')
self.assertGreaterEqual(self.conf.WPM, 1, 'Config WPM is not Greater than 1!!')
self.assertIsInstance(self.conf.MaxWPM, int, 'Config MaxWPM is not an Integer!!')
self.assertGreaterEqual(self.conf.MaxWPM, self.conf.WPM, 'Config MaxWPM is not Greater or Equal to WPM!!')
self.assertLess(self.conf.MaxWPM, 31, 'Config MaxWPM is Greater than 30WPM -- Seriously? !!')
self.assertIsInstance(self.conf.SpeedAdjust, bool, 'Config SpeedAdjust is not Boolean!!')
self.assertIsInstance(self.conf._Colorstr, str, 'Config Stored Color String is not a String!!')
self.assertTrue(self.evalcolor(),
'Parsed Color is not valid - value of number is not (0 or 1) and in form (#, #, #)')
self.assertIsInstance(self.conf.ParagraphSep, str, 'Config ParagraphSep is not a String!!')
| jameslkey/CERM-Morse-Code-Exhibit | tests/test_config.py | Python | lgpl-3.0 | 2,346 |
from openerp import models, api, _
from openerp.exceptions import UserError
class ExportMoveExport(models.TransientModel):
_name = 'export.move.export'
_description = 'Export Moves'
@api.multi
def create_export_file(self):
context = dict(self._context or {})
moves = self.env['export.move'].browse(context.get('active_ids'))
export_to_create = self.env['export.move']
for move in moves:
if move.state == 'created':
export_to_create += move
if not export_to_create:
raise UserError(_('There is no posted move item to create a Export-File.'))
export_to_create.action_create_export_file()
return {'type': 'ir.actions.act_window_close'}
| mncoon/odoo-addons | syscoon_finance_export/wizard/move_export.py | Python | lgpl-3.0 | 748 |
__author__ = 'beau'
import serial
ser = serial.Serial('/dev/tty.usbmodem1422',9600, timeout=1)
import time
import sys
def get_angle():
print "requesting angle"
start = time.time()
ser.write('nofddfp\n')
print "..."
reply = ser.readline()
deltaT = time.time()-start
print reply
print "round-trip time {}".format(deltaT)
sys.stdout.flush()
while True:
get_angle()
print "-------------"
#time.sleep(0.1) | B3AU/micropython | robocam/main.py | Python | lgpl-3.0 | 452 |
#!env python
# Copyright 2008 Simon Edwards <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import re
import os.path
import glob
import kbindinggenerator.cmakeparser as cmakeparser
def ExtractInstallFiles(filename=None,input=None,variables=None):
if variables is None:
variables = {}
else:
variables = variables.copy()
install_list = []
if filename is not None:
variables['cmake_current_source_dir'] = [os.path.dirname(filename)]
ExtractInstallFilesWithContext(variables, install_list, filename,input)
# print(repr(variables))
# print(repr(install_list))
return install_list
def ExtractInstallFilesWithContext(variables, install_list, filename=None, input=None, fileprefix=""):
inputstring = ""
currentdir = ""
if input:
inputstring = input
elif filename:
currentdir = os.path.dirname(filename)
fhandle = open(filename)
inputstring= fhandle.read()
fhandle.close()
parser = cmakeparser.CMakeParser()
command_list = parser.parse(inputstring, filename)
include_dirs = []
for commandobject in command_list:
command = commandobject.command().lower()
args = [arg.value() for arg in commandobject.arguments()]
if command=="set":
variables[args[0].lower()] = ExpandArgs(variables, args[1:], filename)
elif command=="install":
install_args = ExpandArgs(variables, args, filename)
for arg in install_args:
if arg.endswith('.h'):
for basepath in [currentdir, fileprefix] + include_dirs:
fullpath = os.path.join(basepath, arg)
# print(fullpath)
if os.path.exists(fullpath):
install_list.append(fullpath)
break
else:
fullpath = os.path.join(currentdir, basepath, arg)
if os.path.exists(fullpath):
install_list.append(fullpath)
break
else:
print("Unable to find header file " + arg)
elif command=="include":
if filename is not None:
command_args = ExpandArgs(variables, args, filename)
this_dir = os.path.dirname(filename)
for arg in command_args:
if len(arg.strip())!=0:
include_filename = os.path.join(this_dir,arg)
if os.path.exists(include_filename):
ExtractInstallFilesWithContext(variables, install_list, include_filename)
elif command=="add_subdirectory":
if filename is not None:
command_args = ExpandArgs(variables, args, filename)
this_dir = os.path.dirname(filename)
for arg in command_args:
if len(arg.strip())!=0:
include_filename = os.path.join(this_dir,arg,"CMakeLists.txt")
if os.path.exists(include_filename):
ExtractInstallFilesWithContext(variables, install_list, include_filename, fileprefix=os.path.join(fileprefix,arg))
elif command=="file":
# This is just a basic cmake FILE() implementation. It just does GLOB.
command_args = ExpandArgs(variables, args, filename)
varname = None
result = None
try:
                it = iter(command_args)
                arg = next(it)
                if arg.lower()=='glob' and filename is not None:
                    arg = next(it)
                    varname = arg
                    arg = next(it)
                    relative_dir = os.path.dirname(filename)
                    if arg.lower()=='relative':
                        arg = next(it)
                        relative_dir = arg
                        arg = next(it)
                    if not relative_dir.endswith('/'):
                        relative_dir += '/'
                    result = []
                    current_dir = variables['cmake_current_source_dir'][0]
                    while True:
                        for x in glob.iglob(os.path.join(current_dir, arg)):
                            if x.startswith(relative_dir):
                                x = x[len(relative_dir):]
                            result.append(x)
                        arg = next(it)
except StopIteration:
if varname is not None and result is not None:
variables[varname.lower()] = result
elif command=="ecm_generate_headers":
header_args = ExpandArgs(variables, args, filename)
# print("ecm_generate_headers:"+repr(header_args))
prefix=""
if "RELATIVE" in header_args:
prefix = header_args[header_args.index("RELATIVE")+1]
for item in header_args:
if item == "REQUIRED_HEADERS" or item == "RELATIVE":
break
headername = os.path.join(currentdir, prefix, item.lower() + ".h")
if os.path.exists(headername):
install_list.append(headername)
elif command == "target_include_directories":
include_args = ExpandArgs(variables, args, filename)
if "PUBLIC" in include_args:
for item in include_args[include_args.index("PUBLIC")+1:]:
include_dirs.append(item)
#print("include dirs:",repr(include_dirs))
def ExpandArgs(variables, args, filename=None):
rex = re.compile(r'(\$\{[^\}]+\})')
fixed_args = []
for arg in args:
fixed_parts = []
if arg.startswith("$<BUILD_INTERFACE:"):
arg = arg[len("$<BUILD_INTERFACE:"): -1]
parts = rex.split(arg)
for part in parts:
if part.startswith("${"):
name = part[2:-1].lower()
if name in variables:
value = variables[name]
if len(value)==1:
fixed_parts.append(variables[name][0])
else:
fixed_args.extend(value)
else:
print("Undefined cmake variable '" + name + "' in " + filename)
else:
fixed_parts.append(part)
fixed_args.append(''.join(fixed_parts))
return fixed_args
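# Illustrative example (not part of the original script), using a made-up
# variable table:
#   ExpandArgs({'cmake_current_source_dir': ['/src/mylib']},
#              ['${CMAKE_CURRENT_SOURCE_DIR}/foo.h'])
#   == ['/src/mylib/foo.h']
# Variable names are looked up lower-cased; a variable holding several values
# is spliced into the result list rather than joined into a single string.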
def __FetchCommands(lexer):
topmode = True
command_list = []
command = None
args = []
tok = lexer.token()
while 1:
if not tok:
if command:
command_list.append( (command,args) )
break # No more input
if topmode:
if tok.type=="COMMAND":
command = tok.value
topmode = False
else:
print("Fail")
# Fail
tok = lexer.token()
else:
# Grab arguments
if tok.type=="COMMAND":
if command:
command_list.append( (command,args) )
command = None
args = []
topmode = True
continue
args.append(tok.value)
tok = lexer.token()
return command_list
if __name__=="__main__":
#print("Testing")
#lexer = cmakelexer.CMakeLexer()
print(ExtractInstallFiles(filename="/home/sbe/devel/svn/kde/trunk/KDE/kdeedu/marble/src/lib/CMakeLists.txt"))
def foo():
ExtractInstallFiles(input="""
find_package(KDE4 REQUIRED)
include (KDE4Defaults)
include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${KDEBASE_WORKSPACE_SOURCE_DIR}/libs ${CMAKE_CURRENT_SOURCE_DIR}/.. ${KDE4_INCLUDES} ${OPENGL_INCLUDE_DIR})
add_subdirectory(tests)
add_definitions(-DKDE_DEFAULT_DEBUG_AREA=1209)
########### next target ###############
set(plasmagik_SRCS
packagemetadata.cpp
packagestructure.cpp
package.cpp
)
set(plasma_LIB_SRCS
${plasmagik_SRCS}
abstractrunner.cpp
animationdriver.cpp
animator.cpp
applet.cpp
appletbrowser.cpp
appletbrowser/customdragtreeview.cpp
appletbrowser/kcategorizeditemsview.cpp
appletbrowser/kcategorizeditemsviewdelegate.cpp
appletbrowser/kcategorizeditemsviewmodels.cpp
appletbrowser/openwidgetassistant.cpp
appletbrowser/plasmaappletitemmodel.cpp
configxml.cpp
containment.cpp
corona.cpp
datacontainer.cpp
dataengine.cpp
dataenginemanager.cpp
delegate.cpp
dialog.cpp
extender.cpp
extenderitem.cpp
paintutils.cpp
panelsvg.cpp
plasma.cpp
popupapplet.cpp
private/applethandle.cpp
private/datacontainer_p.cpp
private/desktoptoolbox.cpp
private/nativetabbar.cpp
private/packages.cpp
private/paneltoolbox.cpp
private/toolbox.cpp
private/tooltip.cpp
querymatch.cpp
runnercontext.cpp
runnermanager.cpp
scripting/appletscript.cpp
scripting/dataenginescript.cpp
scripting/runnerscript.cpp
scripting/scriptengine.cpp
service.cpp
servicejob.cpp
svg.cpp
theme.cpp
tooltipmanager.cpp
uiloader.cpp
version.cpp
view.cpp
wallpaper.cpp
widgets/checkbox.cpp
widgets/combobox.cpp
widgets/flash.cpp
widgets/frame.cpp
widgets/groupbox.cpp
widgets/icon.cpp
widgets/label.cpp
widgets/lineedit.cpp
widgets/meter.cpp
widgets/pushbutton.cpp
widgets/radiobutton.cpp
widgets/signalplotter.cpp
widgets/slider.cpp
widgets/tabbar.cpp
widgets/textedit.cpp
widgets/webcontent.cpp
)
kde4_add_ui_files (
plasma_LIB_SRCS
appletbrowser/kcategorizeditemsviewbase.ui
)
if(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
MESSAGE(STATUS "Adding support for OpenGL applets to libplasma")
set(plasma_LIB_SRCS
${plasma_LIB_SRCS}
glapplet.cpp)
endif(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
kde4_add_library(plasma SHARED ${plasma_LIB_SRCS})
target_link_libraries(plasma ${KDE4_KIO_LIBS} ${KDE4_KFILE_LIBS} ${KDE4_KNEWSTUFF2_LIBS}
${QT_QTUITOOLS_LIBRARY} ${QT_QTWEBKIT_LIBRARY}
${KDE4_THREADWEAVER_LIBRARIES} ${KDE4_SOLID_LIBS} ${X11_LIBRARIES})
if(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
target_link_libraries(plasma ${QT_QTOPENGL_LIBRARY} ${OPENGL_gl_LIBRARY})
endif(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
set_target_properties(plasma PROPERTIES
VERSION 3.0.0
SOVERSION 3
${KDE4_DISABLE_PROPERTY_}LINK_INTERFACE_LIBRARIES "${KDE4_KDEUI_LIBS}"
)
install(TARGETS plasma ${INSTALL_TARGETS_DEFAULT_ARGS})
########### install files ###############
set(plasmagik_HEADERS
packagemetadata.h
packagestructure.h
package.h
)
install(FILES ${plasmagik_HEADERS} DESTINATION ${INCLUDE_INSTALL_DIR}/plasma/ COMPONENT Devel)
set(plasma_LIB_INCLUDES
abstractrunner.h
animationdriver.h
animator.h
applet.h
appletbrowser.h
configxml.h
containment.h
corona.h
datacontainer.h
dataengine.h
dataenginemanager.h
delegate.h
dialog.h
extender.h
extenderitem.h
paintutils.h
panelsvg.h
plasma.h
plasma_export.h
popupapplet.h
querymatch.h
runnercontext.h
runnermanager.h
service.h
servicejob.h
svg.h
theme.h
tooltipmanager.h
uiloader.h
tooltipmanager.h
version.h
view.h
wallpaper.h)
if(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
set(plasma_LIB_INCLUDES
${plasma_LIB_INCLUDES}
glapplet.h)
endif(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
install(FILES
${plasma_LIB_INCLUDES}
DESTINATION ${INCLUDE_INSTALL_DIR}/plasma COMPONENT Devel)
install(FILES
widgets/checkbox.h
widgets/combobox.h
widgets/flash.h
widgets/frame.h
widgets/groupbox.h
widgets/icon.h
widgets/label.h
widgets/lineedit.h
widgets/meter.h
widgets/pushbutton.h
widgets/radiobutton.h
widgets/signalplotter.h
widgets/slider.h
widgets/tabbar.h
widgets/textedit.h
widgets/webcontent.h
DESTINATION ${INCLUDE_INSTALL_DIR}/plasma/widgets COMPONENT Devel)
install(FILES
scripting/appletscript.h
scripting/dataenginescript.h
scripting/runnerscript.h
scripting/scriptengine.h
DESTINATION ${INCLUDE_INSTALL_DIR}/plasma/scripting COMPONENT Devel)
install(FILES
includes/AbstractRunner
includes/AnimationDriver
includes/Animator
includes/Applet
includes/AppletBrowser
includes/AppletScript
includes/CheckBox
includes/ComboBox
includes/ConfigXml
includes/Containment
includes/Corona
includes/DataContainer
includes/DataEngine
includes/DataEngineManager
includes/DataEngineScript
includes/Delegate
includes/Dialog
includes/Extender
includes/ExtenderItem
includes/Flash
includes/GroupBox
includes/Icon
includes/Label
includes/LineEdit
includes/Meter
includes/Package
includes/PackageMetadata
includes/PackageStructure
includes/PaintUtils
includes/PanelSvg
includes/Plasma
includes/PopupApplet
includes/PushButton
includes/QueryMatch
includes/RadioButton
includes/RunnerContext
includes/RunnerManager
includes/RunnerScript
includes/ScriptEngine
includes/Service
includes/ServiceJob
includes/SignalPlotter
includes/Slider
includes/Svg
includes/TabBar
includes/TextEdit
includes/ToolTipManager
includes/Theme
includes/UiLoader
includes/View
includes/Version
includes/Wallpaper
includes/WebContent
DESTINATION ${INCLUDE_INSTALL_DIR}/KDE/Plasma COMPONENT Devel)
if(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
install(FILES
includes/GLApplet
DESTINATION ${INCLUDE_INSTALL_DIR}/KDE/Plasma COMPONENT Devel)
endif(QT_QTOPENGL_FOUND AND OPENGL_FOUND)
install(FILES
servicetypes/plasma-animator.desktop
servicetypes/plasma-applet.desktop
servicetypes/plasma-containment.desktop
servicetypes/plasma-dataengine.desktop
servicetypes/plasma-packagestructure.desktop
servicetypes/plasma-runner.desktop
servicetypes/plasma-scriptengine.desktop
servicetypes/plasma-wallpaper.desktop
DESTINATION ${SERVICETYPES_INSTALL_DIR})
install(FILES scripting/plasmoids.knsrc DESTINATION ${CONFIG_INSTALL_DIR})
""")
# Tokenize
#while 1:
# tok = lexer.token()
# if not tok: break # No more input
# print tok
#while 1:
# tok = cmakelexer.lex.token()
# if not tok: break # No more input
# print tok
| KDE/twine2 | kbindinggenerator/cmake.py | Python | lgpl-3.0 | 15,357 |
import sys, os
from pythonparser import diagnostic
from ...language.environment import ProcessArgumentManager
from ...master.databases import DeviceDB, DatasetDB
from ...master.worker_db import DeviceManager, DatasetManager
from ..module import Module
from ..embedding import Stitcher
from ..targets import OR1KTarget
from . import benchmark
def main():
if not len(sys.argv) == 2:
print("Expected exactly one module filename", file=sys.stderr)
exit(1)
def process_diagnostic(diag):
print("\n".join(diag.render()), file=sys.stderr)
if diag.level in ("fatal", "error"):
exit(1)
engine = diagnostic.Engine()
engine.process = process_diagnostic
with open(sys.argv[1]) as f:
testcase_code = compile(f.read(), f.name, "exec")
testcase_vars = {'__name__': 'testbench'}
exec(testcase_code, testcase_vars)
device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
device_mgr = DeviceManager(DeviceDB(device_db_path))
dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))
argument_mgr = ProcessArgumentManager({})
def embed():
experiment = testcase_vars["Benchmark"]((device_mgr, dataset_mgr, argument_mgr))
stitcher = Stitcher(core=experiment.core, dmgr=device_mgr)
stitcher.stitch_call(experiment.run, (), {})
stitcher.finalize()
return stitcher
stitcher = embed()
module = Module(stitcher)
target = OR1KTarget()
llvm_ir = target.compile(module)
elf_obj = target.assemble(llvm_ir)
elf_shlib = target.link([elf_obj])
benchmark(lambda: embed(),
"ARTIQ embedding")
benchmark(lambda: Module(stitcher),
"ARTIQ transforms and validators")
benchmark(lambda: target.compile(module),
"LLVM optimizations")
benchmark(lambda: target.assemble(llvm_ir),
"LLVM machine code emission")
benchmark(lambda: target.link([elf_obj]),
"Linking")
benchmark(lambda: target.strip(elf_shlib),
"Stripping debug information")
if __name__ == "__main__":
main()
| JQIamo/artiq | artiq/compiler/testbench/perf_embedding.py | Python | lgpl-3.0 | 2,231 |
from forgery_py import *
for x in range(20):
randPerson=name.first_name(),name.last_name(),personal.gender(),name.location(),address.phone()
randCV=lorem_ipsum.title(),lorem_ipsum.sentence()
randAddr=address.city(),address.state(),address.country(),address.continent()
randEmail=internet.email_address()
randColor=basic.hex_color()
randComment=basic.text(200)
randDate=date.date()
print("name: {}\n gender: {}\n home: {}\n phone: {}\n email: {}".
format(randPerson[:2],randPerson[2],randPerson[3],randPerson[4],randEmail))
print(f" CV: {randCV}")
print(f" favourite color: {randColor}")
print(f" comment: {randComment}")
print("handout date: {:#^50s}".format(str(randDate))) | WZQ1397/automatic-repo | python/modules/ForgeryPyGenerateVirtualData.py | Python | lgpl-3.0 | 706 |
from twisted.web import server, resource
from Tribler.Core.Modules.restapi.util import convert_db_channel_to_json, convert_search_torrent_to_json, \
fix_unicode_dict
from Tribler.Core.simpledefs import (NTFY_CHANNELCAST, SIGNAL_CHANNEL, SIGNAL_ON_SEARCH_RESULTS, SIGNAL_TORRENT,
NTFY_UPGRADER, NTFY_STARTED, NTFY_WATCH_FOLDER_CORRUPT_TORRENT, NTFY_INSERT,
NTFY_NEW_VERSION, NTFY_FINISHED, NTFY_TRIBLER, NTFY_UPGRADER_TICK, NTFY_CHANNEL,
NTFY_DISCOVERED, NTFY_TORRENT, NTFY_ERROR, NTFY_DELETE, NTFY_MARKET_ON_ASK,
NTFY_UPDATE, NTFY_MARKET_ON_BID, NTFY_MARKET_ON_TRANSACTION_COMPLETE,
NTFY_MARKET_ON_ASK_TIMEOUT, NTFY_MARKET_ON_BID_TIMEOUT,
NTFY_MARKET_ON_PAYMENT_RECEIVED, NTFY_MARKET_ON_PAYMENT_SENT,
SIGNAL_RESOURCE_CHECK, SIGNAL_LOW_SPACE, NTFY_CREDIT_MINING, STATE_SHUTDOWN)
import Tribler.Core.Utilities.json_util as json
from Tribler.Core.version import version_id
class EventsEndpoint(resource.Resource):
"""
Important events in Tribler are returned over the events endpoint. This connection is held open. Each event is
pushed over this endpoint in the form of a JSON dictionary. Each JSON dictionary contains a type field that
indicates the type of the event. Individual events are separated by a newline character (\n).
Currently, the following events are implemented:
- events_start: An indication that the event socket is opened and that the server is ready to push events. This
includes information about whether Tribler has started already or not and the version of Tribler used.
- search_result_channel: This event dictionary contains a search result with a channel that has been found.
- search_result_torrent: This event dictionary contains a search result with a torrent that has been found.
- upgrader_started: An indication that the Tribler upgrader has started.
- upgrader_finished: An indication that the Tribler upgrader has finished.
- upgrader_tick: An indication that the state of the upgrader has changed. The dictionary contains a human-readable
string with the new state.
- watch_folder_corrupt_torrent: This event is emitted when a corrupt .torrent file in the watch folder is found.
The dictionary contains the name of the corrupt torrent file.
- new_version_available: This event is emitted when a new version of Tribler is available.
- tribler_started: An indicator that Tribler has completed the startup procedure and is ready to use.
- channel_discovered: An indicator that Tribler has discovered a new channel. The event contains the name,
description and dispersy community id of the discovered channel.
- torrent_discovered: An indicator that Tribler has discovered a new torrent. The event contains the infohash, name,
list of trackers, list of files with name and size, and the dispersy community id of the discovered torrent.
- torrent_removed_from_channel: An indicator that a torrent has been removed from a channel. The event contains
the infohash and the dispersy id of the channel which contained the removed torrent.
- torrent_finished: A specific torrent has finished downloading. The event includes the infohash and name of the
torrent that has finished downloading.
- torrent_error: An error has occurred during the download process of a specific torrent. The event includes the
infohash and a readable string of the error message.
- tribler_exception: An exception has occurred in Tribler. The event includes a readable string of the error.
- market_ask: Tribler learned about a new ask in the market. The event includes information about the ask.
- market_bid: Tribler learned about a new bid in the market. The event includes information about the bid.
- market_ask_timeout: An ask has expired. The event includes information about the ask.
- market_bid_timeout: An bid has expired. The event includes information about the bid.
- market_transaction_complete: A transaction has been completed in the market. The event contains the transaction
that was completed.
- market_payment_received: We received a payment in the market. The events contains the payment information.
- market_payment_sent: We sent a payment in the market. The events contains the payment information.
- market_iom_input_required: The Internet-of-Money modules requires user input (like a password or challenge
response).
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
self.events_requests = []
self.infohashes_sent = set()
self.channel_cids_sent = set()
self.session.add_observer(self.on_search_results_channels, SIGNAL_CHANNEL, [SIGNAL_ON_SEARCH_RESULTS])
self.session.add_observer(self.on_search_results_torrents, SIGNAL_TORRENT, [SIGNAL_ON_SEARCH_RESULTS])
self.session.add_observer(self.on_upgrader_started, NTFY_UPGRADER, [NTFY_STARTED])
self.session.add_observer(self.on_upgrader_finished, NTFY_UPGRADER, [NTFY_FINISHED])
self.session.add_observer(self.on_upgrader_tick, NTFY_UPGRADER_TICK, [NTFY_STARTED])
self.session.add_observer(self.on_watch_folder_corrupt_torrent,
NTFY_WATCH_FOLDER_CORRUPT_TORRENT, [NTFY_INSERT])
self.session.add_observer(self.on_new_version_available, NTFY_NEW_VERSION, [NTFY_INSERT])
self.session.add_observer(self.on_tribler_started, NTFY_TRIBLER, [NTFY_STARTED])
self.session.add_observer(self.on_channel_discovered, NTFY_CHANNEL, [NTFY_DISCOVERED])
self.session.add_observer(self.on_torrent_discovered, NTFY_TORRENT, [NTFY_DISCOVERED])
self.session.add_observer(self.on_torrent_removed_from_channel, NTFY_TORRENT, [NTFY_DELETE])
self.session.add_observer(self.on_torrent_finished, NTFY_TORRENT, [NTFY_FINISHED])
self.session.add_observer(self.on_torrent_error, NTFY_TORRENT, [NTFY_ERROR])
self.session.add_observer(self.on_market_ask, NTFY_MARKET_ON_ASK, [NTFY_UPDATE])
self.session.add_observer(self.on_market_bid, NTFY_MARKET_ON_BID, [NTFY_UPDATE])
self.session.add_observer(self.on_market_ask_timeout, NTFY_MARKET_ON_ASK_TIMEOUT, [NTFY_UPDATE])
self.session.add_observer(self.on_market_bid_timeout, NTFY_MARKET_ON_BID_TIMEOUT, [NTFY_UPDATE])
self.session.add_observer(self.on_market_transaction_complete,
NTFY_MARKET_ON_TRANSACTION_COMPLETE, [NTFY_UPDATE])
self.session.add_observer(self.on_market_payment_received, NTFY_MARKET_ON_PAYMENT_RECEIVED, [NTFY_UPDATE])
self.session.add_observer(self.on_market_payment_sent, NTFY_MARKET_ON_PAYMENT_SENT, [NTFY_UPDATE])
self.session.add_observer(self.on_resource_event, SIGNAL_RESOURCE_CHECK, [SIGNAL_LOW_SPACE])
self.session.add_observer(self.on_credit_minig_error, NTFY_CREDIT_MINING, [NTFY_ERROR])
self.session.add_observer(self.on_shutdown, NTFY_TRIBLER, [STATE_SHUTDOWN])
def write_data(self, message):
"""
Write data over the event socket if it's open.
"""
try:
message_str = json.dumps(message)
except UnicodeDecodeError:
# The message contains invalid characters; fix them
message_str = json.dumps(fix_unicode_dict(message))
if len(self.events_requests) == 0:
return
else:
[request.write(message_str + '\n') for request in self.events_requests]
def start_new_query(self):
self.infohashes_sent = set()
self.channel_cids_sent = set()
def on_search_results_channels(self, subject, changetype, objectID, results):
"""
Returns the channel search results over the events endpoint.
"""
query = ' '.join(results['keywords'])
for channel in results['result_list']:
channel_json = convert_db_channel_to_json(channel, include_rel_score=True)
if self.session.config.get_family_filter_enabled() and \
self.session.lm.category.xxx_filter.isXXX(channel_json['name']):
continue
if channel_json['dispersy_cid'] not in self.channel_cids_sent:
self.write_data({"type": "search_result_channel", "event": {"query": query, "result": channel_json}})
self.channel_cids_sent.add(channel_json['dispersy_cid'])
def on_search_results_torrents(self, subject, changetype, objectID, results):
"""
Returns the torrent search results over the events endpoint.
"""
query = ' '.join(results['keywords'])
for torrent in results['result_list']:
torrent_json = convert_search_torrent_to_json(torrent)
torrent_name = torrent_json['name']
torrent_json['relevance_score'] = torrent_json['relevance_score'] if 'relevance_score' in torrent_json \
else self.session.lm.torrent_db.relevance_score_remote_torrent(torrent_name)
if self.session.config.get_family_filter_enabled() and torrent_json['category'] == 'xxx':
continue
if 'infohash' in torrent_json and torrent_json['infohash'] not in self.infohashes_sent:
self.write_data({"type": "search_result_torrent", "event": {"query": query, "result": torrent_json}})
self.infohashes_sent.add(torrent_json['infohash'])
def on_upgrader_started(self, subject, changetype, objectID, *args):
self.write_data({"type": "upgrader_started"})
def on_upgrader_finished(self, subject, changetype, objectID, *args):
self.write_data({"type": "upgrader_finished"})
def on_upgrader_tick(self, subject, changetype, objectID, *args):
self.write_data({"type": "upgrader_tick", "event": {"text": args[0]}})
def on_watch_folder_corrupt_torrent(self, subject, changetype, objectID, *args):
self.write_data({"type": "watch_folder_corrupt_torrent", "event": {"name": args[0]}})
def on_new_version_available(self, subject, changetype, objectID, *args):
self.write_data({"type": "new_version_available", "event": {"version": args[0]}})
def on_tribler_started(self, subject, changetype, objectID, *args):
self.write_data({"type": "tribler_started"})
def on_channel_discovered(self, subject, changetype, objectID, *args):
self.write_data({"type": "channel_discovered", "event": args[0]})
def on_torrent_discovered(self, subject, changetype, objectID, *args):
self.write_data({"type": "torrent_discovered", "event": args[0]})
def on_torrent_removed_from_channel(self, subject, changetype, objectID, *args):
self.write_data({"type": "torrent_removed_from_channel", "event": args[0]})
def on_torrent_finished(self, subject, changetype, objectID, *args):
self.write_data({"type": "torrent_finished", "event": {"infohash": objectID.encode('hex'), "name": args[0]}})
def on_torrent_error(self, subject, changetype, objectID, *args):
self.write_data({"type": "torrent_error", "event": {"infohash": objectID.encode('hex'), "error": args[0]}})
def on_tribler_exception(self, exception_text):
self.write_data({"type": "tribler_exception", "event": {"text": exception_text}})
def on_market_ask(self, subject, changetype, objectID, *args):
self.write_data({"type": "market_ask", "event": args[0]})
def on_market_bid(self, subject, changetype, objectID, *args):
self.write_data({"type": "market_bid", "event": args[0]})
def on_market_ask_timeout(self, subject, changetype, objectID, *args):
self.write_data({"type": "market_ask_timeout", "event": args[0]})
def on_market_bid_timeout(self, subject, changetype, objectID, *args):
self.write_data({"type": "market_bid_timeout", "event": args[0]})
def on_market_transaction_complete(self, subject, changetype, objectID, *args):
self.write_data({"type": "market_transaction_complete", "event": args[0]})
def on_market_payment_received(self, subject, changetype, objectID, *args):
self.write_data({"type": "market_payment_received", "event": args[0]})
def on_market_payment_sent(self, subject, changetype, objectID, *args):
self.write_data({"type": "market_payment_sent", "event": args[0]})
def on_resource_event(self, subject, changetype, objectID, *args):
self.write_data({"type": changetype, "event": args[0]})
def on_credit_minig_error(self, subject, changetype, ojbectID, *args):
self.write_data({"type": "credit_mining_error", "event": args[0]})
def on_shutdown(self, subject, changetype, objectID, *args):
self.write_data({"type": "shutdown", "event": args[0]})
def render_GET(self, request):
"""
.. http:get:: /events
A GET request to this endpoint will open the event connection.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/events
"""
def on_request_finished(_):
self.events_requests.remove(request)
self.events_requests.append(request)
request.notifyFinish().addCallbacks(on_request_finished, on_request_finished)
request.write(json.dumps({"type": "events_start", "event": {
"tribler_started": self.session.lm.initComplete, "version": version_id}}) + '\n')
return server.NOT_DONE_YET
| Captain-Coder/tribler | Tribler/Core/Modules/restapi/events_endpoint.py | Python | lgpl-3.0 | 13,739 |
"""Connectors"""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import abc as _abc
import re as _re
from ... import plain as _plain
from .. import _std as _std_http
_BASIC_USER_TOKENS = ('user', 'password')
class HttpBasicClerk(_std_http.HttpStandardClerk):
"""An authentication clerk for HTTP Basic authentication"""
__metaclass__ = _abc.ABCMeta
_BASIC_USER_TOKENS = _BASIC_USER_TOKENS
def _inputs(self, upstream_affordances, downstream_affordances):
return ((),)
def _append_response_auth_challenge(self, realm, input=None,
affordances=None):
self._append_response_auth_challenge_header('Basic realm="{}"'
.format(realm))
def _outputs(self, upstream_affordances, downstream_affordances):
return (_BASIC_USER_TOKENS,)
def _provisionsets(self, upstream_affordances, downstream_affordances):
return (_plain.PlainAuth.PROVISIONS,)
class HttpBasicScanner(_std_http.HttpStandardScanner):
"""An authentication scanner for HTTP Basic authentication"""
__metaclass__ = _abc.ABCMeta
_AUTHORIZATION_HEADER_RE = \
_re.compile(r'\s*Basic\s*(?P<creds_base64>[^\s]*)')
_BASIC_USER_TOKENS = _BASIC_USER_TOKENS
def _outputs(self, upstream_affordances, downstream_affordances):
return (self._BASIC_USER_TOKENS,)
def _provisionsets(self, upstream_affordances, downstream_affordances):
return (_plain.PlainAuth.PROVISIONS,)
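# Editor's sketch (not part of bedframe): how a Basic ``Authorization`` header
# value decomposes into the ('user', 'password') tokens declared above. The
# credentials are generated on the fly, so nothing real is hard-coded.
def _example_basic_header_roundtrip():
    import base64
    creds_base64 = base64.b64encode('user:password'.encode('ascii')).decode('ascii')
    match = HttpBasicScanner._AUTHORIZATION_HEADER_RE.match('Basic ' + creds_base64)
    decoded = base64.b64decode(match.group('creds_base64').encode('ascii')).decode('ascii')
    user, _, password = decoded.partition(':')
    assert (user, password) == ('user', 'password')
    return user, password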
| nisavid/bedframe | bedframe/auth/http/_basic/_connectors.py | Python | lgpl-3.0 | 1,567 |
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :func:`iris.analysis.maths.add` function."""
from __future__ import (absolute_import, division, print_function)
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import operator
from iris.analysis.maths import add
from iris.tests.unit.analysis.maths import \
CubeArithmeticBroadcastingTestMixin, CubeArithmeticMaskingTestMixin
class TestBroadcasting(tests.IrisTest, CubeArithmeticBroadcastingTestMixin):
@property
def data_op(self):
return operator.add
@property
def cube_func(self):
return add
class TestMasking(tests.IrisTest, CubeArithmeticMaskingTestMixin):
@property
def data_op(self):
return operator.add
@property
def cube_func(self):
return add
if __name__ == "__main__":
tests.main()
| Jozhogg/iris | lib/iris/tests/unit/analysis/maths/test_add.py | Python | lgpl-3.0 | 1,586 |
from collections import defaultdict
from matplotlib import pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
from msaf.models import Marker
def summarize_alleles2( analytical_sets, temp_dir = None ):
""" return a tuple of (report, plot)
"""
allele_plots = {}
allele_reports = {}
for analytical_set in analytical_sets:
allele_df = analytical_set.get_allele_df()
report, plot = summarize_alleles3( allele_df )
allele_reports[ analytical_set.get_label() ] = report
allele_plots[ analytical_set.get_colour() ] = plot
# create plots
if temp_dir:
plot_file = make_allele_plot2( allele_plots, temp_dir + 'allele_plot.pdf',
analytical_sets )
else:
plot_file = None
return (allele_reports, plot_file)
def summarize_alleles3( allele_df ):
""" return a tuple of (dict, dict):
1dict: alleles: [ (allele, freq, count, mean_height, min_size, max_size, delta), ...]
2dict: marker: ( [ size, ...], [ height, ....] )
"""
allele_list = defaultdict(list)
allele_plot = defaultdict(lambda x = None: ([], []))
grouped = allele_df.groupby( ['marker_id', 'value'] )
for (marker_id, allele), df in grouped:
allele_list[marker_id].append(
(allele, len(df), np.mean( df['height'] ), min(df['size']), max(df['size']),
list(df['sample_id']), np.mean( df['size'] ))
)
code = Marker.get(marker_id).code
allele_plot[code][0].extend( df['size'] )
allele_plot[code][1].extend( df['height'] )
# calculate other stuff
results = {}
for marker_id in allele_list:
alleles = allele_list[marker_id]
total_allele = sum( x[1] for x in alleles )
allele_params = [
(allele, count/total_allele, count, mean_height, min_size, max_size,
max_size - min_size, sample_ids, mean_size )
for (allele, count, mean_height, min_size, max_size, sample_ids, mean_size )
in alleles
]
delta_status = check_delta( allele_params)
results[marker_id] = dict(
code = Marker.get(marker_id).code,
unique_allele = len(allele_params),
total_allele = total_allele,
alleles = allele_params,
delta_status = delta_status )
return (results, allele_plot)
def make_allele_plot2( data_plots, filename, analytical_sets = None ):
n = len(data_plots) # number of distinct colors
markers = set() # number of markers
for d in data_plots:
markers.update( list(data_plots[d].keys()) )
m = len(markers) + 1
fig = plt.figure( figsize=(21, 4 * m), dpi=600 )
axes = []
for idx, marker in enumerate( sorted(markers) ):
ax = fig.add_subplot( m, 1, idx + 1 )
for colour in data_plots:
data = data_plots[colour][marker]
ax.vlines( data[0], [0], data[1], colors = [ colour ] )
ax.get_xaxis().set_tick_params( which='both', direction='out' )
ax.get_yaxis().set_tick_params( which='both', direction='out' )
minor_locator = MultipleLocator(1)
major_locator = MultipleLocator(5)
ax.get_xaxis().set_major_locator( major_locator )
ax.get_xaxis().set_minor_locator( minor_locator )
for label in ax.get_xticklabels():
label.set_size( 'xx-small' )
for label in ax.get_yticklabels():
label.set_size( 'xx-small' )
ax.set_ylabel( marker )
ax.set_ylim(0)
#ax.set_xlim(min(data[0]), max(data[0]))
ax.set_xlim(auto = True)
axes.append( ax )
# create the legend plot by creating dummy
if analytical_sets:
lx = fig.add_subplot( m, 1, m )
for analytical_set in analytical_sets:
lx.vlines( [0,0], [0], [0,0],
colors = [ analytical_set.get_colour() ],
label = analytical_set.get_label() )
leg = lx.legend(ncol = n )
#lx.set_ylabel( 'Legend' )
lx.set_axis_off()
fig.tight_layout()
fig.savefig( filename )
plt.close()
return filename
def summarize_alleles_xxx( allele_df, temp_dir = None ):
""" return a dict containing:
alleles: [ (allele, freq, count, mean_height, min_size, max_size, delta), ...]
"""
allele_list = defaultdict(list)
allele_plot = defaultdict(lambda x = None: ([], []))
grouped = allele_df.groupby( ['marker_id', 'value'] )
for (marker_id, allele), df in grouped:
allele_list[marker_id].append(
(allele, len(df), np.mean( df['height'] ), min(df['size']), max(df['size']), list(df['sample_id']))
)
if temp_dir:
code = Marker.get(marker_id).code
allele_plot[code][0].extend( df['size'] )
allele_plot[code][1].extend( df['height'] )
# calculate other stuff
results = {}
for marker_id in allele_list:
alleles = allele_list[marker_id]
total_allele = sum( x[1] for x in alleles )
allele_params = [
(allele, count/total_allele, count, mean_height, min_size, max_size,
max_size - min_size, sample_ids )
for (allele, count, mean_height, min_size, max_size, sample_ids) in alleles ]
delta_status = check_delta( allele_params)
results[marker_id] = dict(
code = Marker.get(marker_id).code,
unique_allele = len(allele_params),
total_allele = total_allele,
alleles = allele_params,
delta_status = delta_status )
if temp_dir:
plot_file = make_allele_plot( allele_plot, temp_dir + 'allele_plot.pdf' )
else:
plot_file = None
return (results, plot_file)
def check_delta( alleles ):
# check if only single allele
if len(alleles) <= 1:
return [ True ]
threshold = 1
delta_status = []
if alleles[1][0] - alleles[0][0] <= threshold:
delta_status.append( False )
else:
delta_status.append( True )
for i in range(1, len(alleles) - 1):
if ( alleles[i][0] - alleles[i-1][0] <= threshold or
alleles[i+1][0] - alleles[i][0] <= threshold ):
delta_status.append( False )
else:
delta_status.append( True )
    # alleles are sorted ascending, so compare the last allele against its predecessor
    if alleles[-1][0] - alleles[-2][0] <= threshold:
delta_status.append( False )
else:
delta_status.append( True )
return delta_status
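# Editor's sketch: how check_delta() flags alleles that sit within one repeat
# unit of a neighbour. The tuples are hypothetical; only index 0 (the allele
# value) is inspected.
def _example_check_delta():
    alleles = [(100,), (101,), (105,)]
    # 100 and 101 differ by exactly the threshold of 1, so both are flagged False;
    # 105 is well clear of its predecessor and stays True.
    assert check_delta(alleles) == [False, False, True]
    return check_delta(alleles)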
def make_allele_plot( data_plots, filename ):
n = len(data_plots)
fig = plt.figure( figsize=(21, 4 * n), dpi=600 )
axes = []
for idx, key in enumerate( sorted(data_plots) ):
data = data_plots[key]
        ax = fig.add_subplot( n, 1, idx + 1 )  # subplot indices are 1-based
ax.vlines( data[0], [0], data[1] )
ax.get_xaxis().set_tick_params( which='both', direction='out' )
ax.get_yaxis().set_tick_params( which='both', direction='out' )
minor_locator = MultipleLocator(1)
major_locator = MultipleLocator(5)
ax.get_xaxis().set_major_locator( major_locator )
ax.get_xaxis().set_minor_locator( minor_locator )
for label in ax.get_xticklabels():
label.set_size( 'xx-small' )
for label in ax.get_yticklabels():
label.set_size( 'xx-small' )
ax.set_ylabel( key )
axes.append( ax )
fig.savefig( filename )
plt.close()
return filename
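# Editor's sketch: the shape of ``data_plots`` expected by make_allele_plot,
# one marker code mapping to parallel lists of fragment sizes and peak heights.
# The marker name, numbers and output path below are made up for illustration.
def _example_make_allele_plot(temp_dir='/tmp/'):
    data_plots = {'MS1': ([150.2, 152.1, 154.0], [1200, 800, 430])}
    return make_allele_plot(data_plots, temp_dir + 'example_allele_plot.pdf')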
| trmznt/msaf | msaf/lib/tools/allele.py | Python | lgpl-3.0 | 7,523 |
from io.endeios.example.javascripts import ABean, TestInterface
from java.lang import Long
def myecho(a):
return a
def greet():
return "Hello Python"
class MyClass(TestInterface):
def __init__(self):
pass
def getBean(self):
retVal =ABean()
retVal.setName("python")
retVal.setNumber(101)
retVal.setReady(True)
return retVal
def isActive(self):
return True
def getNum(self):
# NOTE Long.parseLong("101") returns a java.math.BigIntger on java side...
return Long(101)
def serviceableResult(self):
listOfStrings = service.getStrings()
print("List is "+listOfStrings.toString()+"\n")
retString = ""
for part in listOfStrings:
print("partString is "+part+"\n")
retString = retString + part
print("retString is "+retString+"\n")
return retString
def gimmeMyObject():
return MyClass()
| Endeios/javascripts | src/main/python/functions.py | Python | lgpl-3.0 | 863 |
import androidhelper
import curses
droid = androidhelper.Android()
win=curses.initscr()
result="No result"
try:
win.box()
w,h=win.getmaxyx()
win.addstr(2,2,"Curses Test %sx%s" % (w,h))
win.addstr(10,10,"Hit a key")
win.getch()
finally:
curses.endwin()
print("Result=",result)
| gc313/Learning-Python | testcurses.py | Python | lgpl-3.0 | 292 |
#!/usr/bin/python
# Quick PoC template for HTTP POST form brute force, with anti-CSRF token
# Target: DVWA v1.10
# Date: 2015-10-19
# Author: g0tmi1k ~ https://blog.g0tmi1k.com/
# Source: https://blog.g0tmi1k.com/2015/10/dvwa-login/
import requests
import sys
import re
from BeautifulSoup import BeautifulSoup
# Variables
target = 'http://192.168.1.33/DVWA'
user_list = '/usr/share/seclists/Usernames/top_shortlist.txt'
pass_list = '/usr/share/seclists/Passwords/rockyou.txt'
# Value to look for in response header (Whitelisting)
success = 'index.php'
# Get the anti-CSRF token
def csrf_token():
try:
# Make the request to the URL
print "\n[i] URL: %s/login.php" % target
r = requests.get("{0}/login.php".format(target), allow_redirects=False)
except:
# Feedback for the user (there was an error) & Stop execution of our request
print "\n[!] csrf_token: Failed to connect (URL: %s/login.php).\n[i] Quitting." % (target)
sys.exit(-1)
# Extract anti-CSRF token
soup = BeautifulSoup(r.text)
user_token = soup("input", {"name": "user_token"})[0]["value"]
print "[i] user_token: %s" % user_token
# Extract session information
session_id = re.match("PHPSESSID=(.*?);", r.headers["set-cookie"])
session_id = session_id.group(1)
print "[i] session_id: %s\n" % session_id
return session_id, user_token
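# Editor's sketch: the PHPSESSID extraction used above, run against a made-up
# Set-Cookie value so the parsing step can be checked in isolation.
def _example_session_id():
    header = "PHPSESSID=0123456789abcdef; path=/; HttpOnly"
    return re.match("PHPSESSID=(.*?);", header).group(1)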
# Make the request to do the brute force
def url_request(username, password, session_id, user_token):
# POST data
data = {
"username": username,
"password": password,
"user_token": user_token,
"Login": "Login"
}
# Cookie data
cookie = {
"PHPSESSID": session_id
}
try:
# Make the request to the URL
#print "\n[i] URL: %s/vulnerabilities/brute/" % target
#print "[i] Data: %s" % data
#print "[i] Cookie: %s" % cookie
r = requests.post("{0}/login.php".format(target), data=data, cookies=cookie, allow_redirects=False)
except:
# Feedback for the user (there was an error) & Stop execution of our request
print "\n\n[!] url_request: Failed to connect (URL: %s/vulnerabilities/brute/).\n[i] Quitting." % (target)
sys.exit(-1)
# Wasn't it a redirect?
if r.status_code != 301 and r.status_code != 302:
# Feedback for the user (there was an error again) & Stop execution of our request
print "\n\n[!] url_request: Page didn't response correctly (Response: %s).\n[i] Quitting." % (r.status_code)
sys.exit(-1)
# We have what we need
return r.headers["Location"]
# Main brute force loop
def brute_force(user_token, session_id):
# Load in wordlists files
with open(pass_list) as password:
password = password.readlines()
with open(user_list) as username:
username = username.readlines()
# Counter
i = 0
# Loop around
for PASS in password:
for USER in username:
USER = USER.rstrip('\n')
PASS = PASS.rstrip('\n')
# Increase counter
i += 1
# Feedback for the user
print ("[i] Try %s: %s // %s" % (i, USER, PASS))
# Fresh CSRF token each time?
#user_token, session_id = csrf_token()
# Make request
attempt = url_request(USER, PASS, session_id, user_token)
#print attempt
# Check response
if attempt == success:
print ("\n\n[i] Found!")
print "[i] Username: %s" % (USER)
print "[i] Password: %s" % (PASS)
return True
return False
# Get initial CSRF token
session_id, user_token = csrf_token()
# Start brute forcing
brute_force(user_token, session_id)
| cybert79/HaXor | boot2root-scripts/dvwa-login-bruteforce-http-post-csrf.py | Python | unlicense | 3,803 |
# coding=utf-8
# Lecture: http://uneex.ru/LecturesCMC/PythonIntro2014/03_DataTypes
import decimal
import random
# print decimal.Decimal(1.1) + decimal.Decimal(1.1)
# print decimal.Decimal("1.1") + decimal.Decimal("1.1")
# print dir(random)
a = []
for j in range(0, 10):
a.append(random.randrange(100))
print a
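# Editor's sketch: what the two commented-out Decimal lines near the top show.
# Decimal(1.1) inherits the binary floating-point error, while Decimal("1.1")
# is exact, so only the string form sums to exactly 2.2.
assert decimal.Decimal("1.1") + decimal.Decimal("1.1") == decimal.Decimal("2.2")
assert decimal.Decimal(1.1) + decimal.Decimal(1.1) != decimal.Decimal("2.2")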
| pasko-evg/Python-2014 | Lecture03/lecture_03.py | Python | unlicense | 321 |
import unittest
import itertools
class Solution:
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
if not A:
return 0
n = len(A)
sum_ = sum(A)
f = sum(i * num for i, num in enumerate(A))
max_ = f
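        # Editor's note on the update below: writing F(k) for the rotation
        # function after k left-rotations, F(k) = F(k-1) + n*A[k-1] - sum(A),
        # because the wrapped element A[k-1] gains a coefficient of n-1 while
        # every other coefficient drops by one. Left- and right-rotations cover
        # the same n arrangements, so the maximum is unchanged.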
for num in itertools.islice(A, n - 1):
f += n * num - sum_
if f > max_:
max_ = f
return max_
class Test(unittest.TestCase):
def test(self):
self._test([4, 3, 2, 6], 26)
def _test(self, A, expected):
actual = Solution().maxRotateFunction(A)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| chrisxue815/leetcode_python | problems/test_0396.py | Python | unlicense | 723 |
from util import *
import random, string
from random import randint
import time
class KonačniAutomat(types.SimpleNamespace):
"""Automat koji prepoznaje regularni jezik."""
@classmethod
def iz_komponenti(klasa, stanja, abeceda, prijelaz, početno, završna):
"""Sipser page 35 definition 1.5 - konstrukcija iz petorke."""
assert abeceda # nije prazna
assert početno in stanja
assert završna <= stanja
assert funkcija(prijelaz, Kartezijev_produkt(stanja, abeceda), stanja)
return klasa(**vars())
@classmethod
def iz_tablice(klasa, tablica):
"""Parsiranje tabličnog zapisa konačnog automata (Sipser page 36).
Pogledati funkciju util.parsiraj_tablicu_KA za detalje."""
return klasa.iz_komponenti(*parsiraj_tablicu_KA(tablica))
@property
def komponente(M):
"""Sipser page 35 definition 1.5 - rastav u petorku."""
return M.stanja, M.abeceda, M.prijelaz, M.početno, M.završna
def prihvaća(automat, ulaz):
"""Prihvaća li konačni automat zadani ulaz?"""
stanje = automat.početno
for znak in ulaz:
stanje = automat.prijelaz[stanje, znak]
return stanje in automat.završna
def izračunavanje(automat, ulaz):
"""Stanja kroz koja automat prolazi čitajući ulaz (Sipser page 40)."""
stanje = automat.početno
yield stanje
print("poč.stanje u fji izr",stanje)
for znak in ulaz:
stanje = automat.prijelaz[stanje, znak]
print("stanje u fji,znak",stanje,znak)
yield stanje
def prirodni(automat):
"""Zamjenjuje stanja prirodnim brojevima, radi preglednosti."""
Q, Σ, δ, q0, F = automat.komponente
rječnik = {q:i for i, q in enumerate(Q, 1)}
QN = set(rječnik.values())
δN = {(rječnik[polazno], znak): rječnik[dolazno]
for (polazno, znak), dolazno in δ.items()}
q0N = rječnik[q0]
FN = {rječnik[završno] for završno in F}
return KonačniAutomat.iz_komponenti(QN, Σ, δN, q0N, FN)
def crtaj(automat):
"""Ispisuje na ekran dijagram automata u DOT formatu.
Dobiveni string može se kopirati u sandbox.kidstrythisathome.com/erdos
ili u www.webgraphviz.com."""
NedeterminističkiKonačniAutomat.iz_konačnog_automata(automat).crtaj()
def unija(M1, M2):
"""Konačni automat za L(M1)∪L(M2)."""
assert M1.abeceda == M2.abeceda
Q1, Σ, δ1, q1, F1 = M1.komponente
Q2, Σ, δ2, q2, F2 = M2.komponente
Q = Kartezijev_produkt(Q1, Q2)
δ = {((r1,r2), α): (δ1[r1,α], δ2[r2,α]) for r1,r2 in Q for α in Σ}
F = Kartezijev_produkt(Q1, F2) | Kartezijev_produkt(F1, Q2)
return KonačniAutomat.iz_komponenti(Q, Σ, δ, (q1,q2), F)
def presjek(M1, M2):
"""Konačni automat za L(M1)∩L(M2)."""
M = M1.unija(M2)
M.završna = Kartezijev_produkt(M1.završna, M2.završna)
return M
def komplement(M):
"""Konačni automat za (M.abeceda)*\L(M)."""
Q, Σ, δ, q0, F = M.komponente
return KonačniAutomat.iz_komponenti(Q, Σ, δ, q0, Q - F)
def razlika(M1, M2):
"""Konačni automat za L(M1)\L(M2)."""
return M1.presjek(M2.komplement())
def simetrična_razlika(M1, M2):
"""Konačni automat za L(M1)△L(M2)."""
return M1.razlika(M2).unija(M2.razlika(M1))
def optimizirana_simetrična_razlika(M1, M2):
"""Konačni automat za L(M1)△L(M2), s |M1.stanja|·|M2.stanja| stanja."""
M = M1.razlika(M2)
M.završna |= Kartezijev_produkt(M1.stanja - M1.završna, M2.završna)
return M
def lema_o_napuhavanju(automat):
p = input("Unesi broj p: ")
print("Unešen broj je:", p)
riječ = input("Unesi riječ: ")
print("Unesena riječ je:", riječ)
def partition(lst,duljina,znakPonavljanja):
x = lst[:duljina]
yz = lst[duljina:]
y = yz[:znakPonavljanja]
z = yz[znakPonavljanja:]
return x,y,z
def konkatenacija_riječi(riječ,koliko):
lista=[]
lista.extend([riječ for i in range(int(koliko))])
riječNova =''.join(lista)
return riječNova
lista = list(automat.stanja)
duljinaListe = len(automat.stanja)
def generirajRječnik(lista,duljinaListe):
rječnik = dict()
for i in lista:
rječnik[i] = 0
return rječnik
brojPonavljanjaStanja = generirajRječnik(lista,duljinaListe)
a = KonačniAutomat.izračunavanje(automat,riječ)
j = 0
for i in a:
break
stanjePonavljanja = ''
for i in a:
j = j + 1
brojPonavljanjaStanja[i] +=1
if brojPonavljanjaStanja[i] == 2 and j<= int(p):
stanjePonavljanja = i
mjestoNapuhavanja = j-1
break
kolikoPuta = 0
count = 0
b = KonačniAutomat.izračunavanje(automat,riječ)
for k in b:
if k == stanjePonavljanja:
kolikoPuta += 1
count = 1
if k != stanjePonavljanja and count == 1:
break
x,y,z = partition(riječ,mjestoNapuhavanja,kolikoPuta-1)
print("x,y,z=",x,y,z)
i = input("Unesi broj i: ")
print("Unešen broj je:", i)
yi = konkatenacija_riječi(y,i)
print("y^i", yi)
riječ= []
riječ.append(x)
riječ.append(yi)
riječ.append(z)
print("riječ", riječ)
napuhanaRiječ = ''.join(riječ)
print("napuhanaRiječ", napuhanaRiječ)
return automat.prihvaća(napuhanaRiječ)
def dohvatljiva(δ, S, α):
"""Stanja do kojih je moguće doći iz stanja iz S čitanjem znaka α."""
return unija_familije(δ[q, α] for q in S)
def ε_ljuska(δ, S):
"""Stanja do kojih je moguće doći iz stanja iz S bez čitanja znaka."""
while True:
S_novi = dohvatljiva(δ, S, ε) | S
if S_novi == S:
return S_novi
S = S_novi
def ε_proširenje(Σ):
"""Σ∪{ε}"""
return disjunktna_unija(Σ, {ε})
class NedeterminističkiKonačniAutomat(types.SimpleNamespace):
"""Nedeterministički automat koji prepoznaje regularni jezik."""
@classmethod
def iz_komponenti(klasa, stanja, abeceda, prijelaz, početno, završna):
"""Relacijska definicija: Δ⊆Q×(Σ∪{ε})×Q"""
assert abeceda # nije prazna
assert početno in stanja
assert završna <= stanja
assert relacija(prijelaz, stanja, ε_proširenje(abeceda), stanja)
return klasa(**vars())
@classmethod
def iz_funkcije(klasa, stanja, abeceda, f_prijelaza, početno, završna):
"""Funkcijska definicija: δ:Q×(Σ∪{ε})→℘(Q) (Sipser page 53 def.1.37)"""
prijelaz = relacija_iz_funkcije(f_prijelaza)
return klasa.iz_komponenti(stanja, abeceda, prijelaz, početno, završna)
@classmethod
def iz_konačnog_automata(klasa, konačni_automat):
"""Pretvorba iz determinističkog KA u nedeterministički."""
Q, Σ, δ, q0, F = konačni_automat.komponente
Δ = {(q, α, δ[q, α]) for q in Q for α in Σ}
return klasa.iz_komponenti(Q, Σ, Δ, q0, F)
@classmethod
def iz_tablice(klasa, tablica):
"""Parsiranje tabličnog zapisa nedeterminističkog KA (Sipser page 54).
Pogledati funkciju util.parsiraj_tablicu_NKA za detalje."""
return klasa.iz_komponenti(*parsiraj_tablicu_NKA(tablica))
@property
def komponente(M):
"""Relacijska definicija - rastav u petorku."""
return M.stanja, M.abeceda, M.prijelaz, M.početno, M.završna
@property
def funkcija_prijelaza(automat):
"""Relacija prijelaza pretvorena u funkciju."""
return funkcija_iz_relacije(automat.prijelaz,
automat.stanja, ε_proširenje(automat.abeceda))
def prihvaća(automat, ulaz):
"""Prihvaća li automat zadani ulaz?"""
δ = automat.funkcija_prijelaza
moguća = ε_ljuska(δ, {automat.početno})
for znak in ulaz: moguća = ε_ljuska(δ, dohvatljiva(δ, moguća, znak))
return not moguća.isdisjoint(automat.završna)
def crtaj(automat):
"""Ispisuje na ekran dijagram automata u DOT formatu.
Dobiveni string može se kopirati u sandbox.kidstrythisathome.com/erdos
ili u www.webgraphviz.com."""
print(DOT_NKA(automat))
def izračunavanje(nka, ulaz):
"""Generator niza skupova mogućih stanja kroz koja nedeterministički
konačni automat prolazi čitajući zadani ulaz."""
return nka.optimizirana_partitivna_konstrukcija().izračunavanje(ulaz)
def partitivna_konstrukcija(nka):
"""Ekvivalentni KA zadanom NKA, s 2^|nka.stanja| stanja."""
Q, Σ, Δ, q0, F = nka.komponente
δ = nka.funkcija_prijelaza
PQ = partitivni_skup(Q)
δ_KA = {(S,α): ε_ljuska(δ, dohvatljiva(δ, S, α)) for S in PQ for α in Σ}
F_KA = {S for S in PQ if S & F}
q0_KA = ε_ljuska(δ, {q0})
return KonačniAutomat.iz_komponenti(PQ, Σ, δ_KA, q0_KA, F_KA)
def optimizirana_partitivna_konstrukcija(nka):
"""Ekvivalentni KA zadanom NKA, samo s dostižnim stanjima."""
Q, Σ, Δ, q0, F = nka.komponente
δ = nka.funkcija_prijelaza
Q_KA = set()
δ_KA = {}
F_KA = set()
q0_KA = ε_ljuska(δ, {q0})
red = collections.deque([q0_KA])
while red:
stanje = red.popleft()
if stanje not in Q_KA:
for α in Σ:
novo_stanje = ε_ljuska(δ, dohvatljiva(δ, stanje, α))
δ_KA[stanje, α] = novo_stanje
red.append(novo_stanje)
Q_KA.add(stanje)
if not stanje.isdisjoint(F):
F_KA.add(stanje)
return KonačniAutomat.iz_komponenti(Q_KA, Σ, δ_KA, q0_KA, F_KA)
def označi(nka, l):
"""Označava stanja danog NKA dodatnom oznakom l, radi disjunktnosti."""
Q, Σ, Δ, q0, F = nka.komponente
Ql = {označi1(q, l) for q in Q}
Δl = {(označi1(p, l), α, označi1(q, l)) for p, α, q in Δ}
q0l = označi1(q0, l)
Fl = {označi1(q, l) for q in F}
return NedeterminističkiKonačniAutomat.iz_komponenti(Ql, Σ, Δl, q0l, Fl)
def unija(N1, N2):
"""Nedeterministički konačni automat koji prepoznaje L(N1)∪L(N2)."""
assert N1.abeceda == N2.abeceda
if not N1.stanja.isdisjoint(N2.stanja):
N1 = N1.označi(1)
N2 = N2.označi(2)
Q1, Σ, Δ1, q1, F1 = N1.komponente
Q2, Σ, Δ2, q2, F2 = N2.komponente
q0 = novo('q0', Q1 | Q2)
Q = disjunktna_unija(Q1, Q2, {q0})
F = disjunktna_unija(F1, F2)
Δ = disjunktna_unija(Δ1, Δ2, {(q0, ε, q1), (q0, ε, q2)})
return NedeterminističkiKonačniAutomat.iz_komponenti(Q, Σ, Δ, q0, F)
def konkatenacija(N1, N2):
"""Nedeterministički konačni automat koji prepoznaje L(N1)L(N2)."""
assert N1.abeceda == N2.abeceda
if not N1.stanja.isdisjoint(N2.stanja):
N1 = N1.označi(3)
N2 = N2.označi(4)
Q1, Σ, Δ1, q1, F1 = N1.komponente
Q2, Σ, Δ2, q2, F2 = N2.komponente
Q = disjunktna_unija(Q1, Q2)
Δ = disjunktna_unija(Δ1, Δ2, {(p1, ε, q2) for p1 in F1})
return NedeterminističkiKonačniAutomat.iz_komponenti(Q, Σ, Δ, q1, F2)
def plus(N):
"""Nedeterministički konačni automat za Kleenejev plus od L(N)."""
Q, Σ, Δ, q0, F = N.komponente
Δp = Δ | {(p, ε, q0) for p in F}
return NedeterminističkiKonačniAutomat.iz_komponenti(Q, Σ, Δp, q0, F)
def zvijezda(N):
"""Nedeterministički konačni automat za Kleenejevu zvijezdu od L(N)."""
Q, Σ, Δ, q0, F = N.plus().komponente
start = novo('start', Q)
return NedeterminističkiKonačniAutomat.iz_komponenti(
Q | {start}, Σ, Δ | {(start, ε, q0)}, start, F | {start})
def prirodni(automat):
"""Zamjenjuje stanja prirodnim brojevima, radi preglednosti."""
Q, Σ, δ, q0, F = automat.komponente
rječnik = {q:i for i, q in enumerate(Q, 1)}
QN = set(rječnik.values())
δN = {(rječnik[polazno],znak,rječnik[dolazno])
for (polazno, znak, dolazno) in list(δ)}
q0N = rječnik[q0]
FN = {rječnik[završno] for završno in F}
return NedeterminističkiKonačniAutomat.iz_komponenti(QN, Σ, δN, q0N, FN)
def ciklus(automat,r1):
"""ε_ciklus"""
Q, Σ, δ, q0, F = automat.komponente
rječnikPrirodni = {q:i for i, q in enumerate(Q, 1)}
stanje = rječnikPrirodni[r1]
QN = set(rječnikPrirodni.values())
δN = {(rječnikPrirodni[polazno],znak,rječnikPrirodni[dolazno])
for (polazno, znak, dolazno) in list(δ)}
rječnikPrirodniPrijelazi = dict()
for (polazno, znak, dolazno) in list(δN):
if(znak == ε):
if polazno in rječnikPrirodniPrijelazi:
rječnikPrirodniPrijelazi[polazno].append(dolazno)
else:
rječnikPrirodniPrijelazi[polazno] = [dolazno]
def postoji_ciklus(rječnikPrirodniPrijelazi, stanje):
boja = { stanje : "bijela" for stanje in rječnikPrirodniPrijelazi }
nasaoCiklus = [False]
if boja[stanje] == "bijela":
dfs_posjeti(rječnikPrirodniPrijelazi, stanje, boja, nasaoCiklus)
if nasaoCiklus[0]:
return nasaoCiklus[0]
def dfs_posjeti(rječnikPrirodniPrijelazi, stanje, boja, nasaoCiklus):
početni = stanje
if nasaoCiklus[0]:
return
boja[stanje] = "siva"
for prijelaz in rječnikPrirodniPrijelazi[stanje]:
if boja[prijelaz] == "siva":
if(prijelaz == početni and len(rječnikPrirodniPrijelazi[stanje]) > 1):
rječnikPrirodniPrijelazi[stanje].remove(prijelaz)
boja[stanje] = "bijela"
else:
nasaoCiklus[0] = True
return
if boja[prijelaz] == "bijela":
dfs_posjeti(rječnikPrirodniPrijelazi, prijelaz, boja, nasaoCiklus)
boja[stanje] = "crna"
return postoji_ciklus(rječnikPrirodniPrijelazi, stanje)
def beskonačna_petlja(automat):
"""beskonačna_petlja ako je unos neka riječ abecede automata"""
Q, Σ, δ, q0, F = automat.komponente
duljinaRiječi = len(list(Σ))*10
def random_string(duljina):
return ''.join(random.choice(list(Σ)) for i in range(duljina))
ulaz = random_string(randint(1,duljinaRiječi))
'''
def prihvaća(ulaz):
"""Prihvaća li automat zadani ulaz?"""
print("automat: ", automat)
δ = automat.funkcija_prijelaza
print("--------------------------------------------------------")
moguća = ε_ljuska(δ, {automat.početno})
print("moguća: ", moguća)
def ciklus_tražim(automat,r1):
"""ε_ciklus"""
Q, Σ, δ, q0, F = automat.komponente
rječnikPrirodni = {q:i for i, q in enumerate(Q, 1)}
početakCiklusa = rječnikPrirodni[r1]
print("p",početakCiklusa)
QN = set(rječnikPrirodni.values())
δN = {(rječnikPrirodni[polazno],znak,rječnikPrirodni[dolazno])
for (polazno, znak, dolazno) in list(δ)}
rječnikPrirodniPrijelazi = dict()
for (polazno, znak, dolazno) in list(δN):
#if(znak == ε):
if polazno in rječnikPrirodniPrijelazi:
rječnikPrirodniPrijelazi[polazno].append(dolazno)
else:
rječnikPrirodniPrijelazi[polazno] = [dolazno]
#tražiStanjeIzKOjegSeMožeUćiUECiklus,vratiGaKaoPOčetakCiklusa
print(rječnikPrirodni)
print(rječnikPrirodniPrijelazi)
def postoji_ciklus(rječnikPrirodniPrijelazi, stanje):
boja = { stanje : "bijela" for stanje in rječnikPrirodniPrijelazi }
nasaoCiklus = 0
print("postoji_ciklus", stanje)
print("bijela", boja[stanje], stanje)
if boja[stanje] == "bijela":
dfs_posjeti(rječnikPrirodniPrijelazi, stanje, boja, nasaoCiklus)
if not nasaoCiklus == 0:
return nasaoCiklus
def dfs_posjeti(rječnikPrirodniPrijelazi, stanje, boja, nasaoCiklus):
početni = stanje
print("dfs_posjeti")
print("nasaoCiklus", nasaoCiklus)
if not nasaoCiklus == 0:
return
boja[stanje] = "siva"
print("boja[stanje]",boja[stanje], stanje)
for prijelaz in rječnikPrirodniPrijelazi[stanje]:
print("prijelaz in rječnikPrirodniPrijelazi[stanje]",prijelaz, rječnikPrirodniPrijelazi[stanje])
time.sleep(2)
print("boja[priojelaz]", boja[prijelaz])
if boja[prijelaz] == "siva":
print("SIVVVA")
if(prijelaz == početni and len(rječnikPrirodniPrijelazi[stanje]) > 1):
rječnikPrirodniPrijelazi[stanje].remove(prijelaz)
boja[stanje] = "bijela"
print("prviIF")
else:
nasaoCiklus = stanje
print("else:NASAOCIKLUS",nasaoCiklus)
return
if boja[prijelaz] == "bijela":
dfs_posjeti(rječnikPrirodniPrijelazi, prijelaz, boja, nasaoCiklus)
boja[stanje] = "crna"
print("boja[stanje]=crna", boja[stanje])
#return postoji_ciklus(rječnikPrirodniPrijelazi, stanje)
print("stanje prije povratka", stanje)
return stanje
for znak, broj in rječnikPrirodni.items():
print("stanje u foru,znak,broj", znak,broj)
#početakCiklusa = postoji_ciklus(rječnikPrirodniPrijelazi,broj)
print("broj,PočetakCiklusa", broj,početakCiklusa)
if broj == početakCiklusa:
return znak
return None
#početakPetlje = ciklus_tražim(automat,automat.početno)
#print("početakPetlje", početakPetlje)
while True:
for znak in ulaz:
print("ulaz", ulaz)
print("znak", znak)
print("dohvatljiva, ",dohvatljiva(δ, moguća, znak))
moguća = set(ε_ljuska(δ, dohvatljiva(δ, moguća, znak)))
if(len(moguća) == 0):return None
print("moguća u true", moguća)
#if početakPetlje in moguća: #moguća beskonačna petlja
#return ulaz
return None
return ulaz
return prihvaća('0') #prihvaća(ulaz)'''
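# Editor's sketch (hedged): a two-state DFA over {'0', '1'} that accepts words
# with an odd number of ones. It assumes util.funkcija and util.Kartezijev_produkt
# perform the usual "is this dict a total function on Q x Sigma" check, so treat
# this as illustrative rather than authoritative.
def _primjer_dfa_neparan_broj_jedinica():
    Q = {'paran', 'neparan'}
    Σ = {'0', '1'}
    δ = {('paran', '0'): 'paran', ('paran', '1'): 'neparan',
         ('neparan', '0'): 'neparan', ('neparan', '1'): 'paran'}
    M = KonačniAutomat.iz_komponenti(Q, Σ, δ, 'paran', {'neparan'})
    assert M.prihvaća('10')       # one '1'  -> accepted
    assert not M.prihvaća('101')  # two '1's -> rejected
    return M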
| silvahaberl/Jezici-gramatike-automati- | KA.py | Python | unlicense | 16,900 |
from django.conf.urls import url, include
from snippets import views
from rest_framework.routers import DefaultRouter
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r'snippets', views.SnippetViewSet)
router.register(r'users', views.UserViewSet)
# The API URLs are now determined automatically by the router.
# Additionally, we include the login URLs for the browsable API.
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| bertrandvidal/stuff | djangoprojects/django_rest_framework/tutorial/snippets/urls.py | Python | unlicense | 568 |
#!/usr/bin/env python
#encoding=utf-8
from onefile import *
def two():
print "at two\n",
def second():
print "at second\n",
if __name__ == '__main__':
two()
#one()
#first()
| licongyu95/learning_python | core_python_programming/cap2/two.py | Python | unlicense | 180 |
import fechbase
class Records(fechbase.RecordsBase):
def __init__(self):
fechbase.RecordsBase.__init__(self)
self.fields = [
{'name': 'FORM TYPE', 'number': '1'},
{'name': 'FILER FEC CAND ID', 'number': '2'},
{'name': 'CANDIDATE NAME', 'number': '3'},
{'name': 'STREET 1', 'number': '4'},
{'name': 'STREET 2', 'number': '5'},
{'name': 'CITY', 'number': '6'},
{'name': 'STATE', 'number': '7'},
{'name': 'ZIP', 'number': '8'},
{'name': 'PTY/CODE', 'number': '9'},
{'name': 'CAN/OFFICE', 'number': '10'},
{'name': 'CAN/STATE', 'number': '11'},
{'name': 'CAN/DIST', 'number': '12'},
{'name': 'YEAR OF ELECTION 1900-2999', 'number': '13'},
{'name': 'FEC COMMITTEE ID NUMBER (PCC)', 'number': '14'},
{'name': 'COMMITTEE NAME (PCC)', 'number': '15'},
{'name': 'STREET 1', 'number': '16'},
{'name': 'STREET 2', 'number': '17'},
{'name': 'CITY', 'number': '18'},
{'name': 'STATE', 'number': '19'},
{'name': 'ZIP', 'number': '20'},
{'name': 'FEC COMMITTEE ID NUMBER (Auth)', 'number': '21'},
{'name': 'COMMITTEE NAME (Auth)', 'number': '22'},
{'name': 'STREET 1', 'number': '23'},
{'name': 'STREET 2', 'number': '24'},
{'name': 'CITY', 'number': '25'},
{'name': 'STATE', 'number': '26'},
{'name': 'ZIP', 'number': '27'},
{'name': 'NAME/CAN (as signed)', 'number': '28'},
{'name': 'Signed', 'number': '29-'},
{'name': 'PRI PERSONAL FUNDS DECLARED', 'number': '30'},
{'name': 'GEN PERSONAL FUNDS DECLARED', 'number': '31'},
]
self.fields_names = self.hash_names(self.fields)
| h4ck3rm1k3/FEC-Field-Documentation | fec/version/v5_0/F2.py | Python | unlicense | 1,867 |
from collections import deque
import gym
from gym import spaces
import numpy as np
from ray.rllib.utils.images import rgb2gray, resize
def is_atari(env):
if (
hasattr(env.observation_space, "shape")
and env.observation_space.shape is not None
and len(env.observation_space.shape) <= 2
):
return False
return hasattr(env, "unwrapped") and hasattr(env.unwrapped, "ale")
def get_wrapper_by_cls(env, cls):
"""Returns the gym env wrapper of the given class, or None."""
currentenv = env
while True:
if isinstance(currentenv, cls):
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
return None
class MonitorEnv(gym.Wrapper):
def __init__(self, env=None):
"""Record episodes stats prior to EpisodicLifeEnv, etc."""
gym.Wrapper.__init__(self, env)
self._current_reward = None
self._num_steps = None
self._total_steps = None
self._episode_rewards = []
self._episode_lengths = []
self._num_episodes = 0
self._num_returned = 0
def reset(self, **kwargs):
obs = self.env.reset(**kwargs)
if self._total_steps is None:
self._total_steps = sum(self._episode_lengths)
if self._current_reward is not None:
self._episode_rewards.append(self._current_reward)
self._episode_lengths.append(self._num_steps)
self._num_episodes += 1
self._current_reward = 0
self._num_steps = 0
return obs
def step(self, action):
obs, rew, done, info = self.env.step(action)
self._current_reward += rew
self._num_steps += 1
self._total_steps += 1
return (obs, rew, done, info)
def get_episode_rewards(self):
return self._episode_rewards
def get_episode_lengths(self):
return self._episode_lengths
def get_total_steps(self):
return self._total_steps
def next_episode_results(self):
for i in range(self._num_returned, len(self._episode_rewards)):
yield (self._episode_rewards[i], self._episode_lengths[i])
self._num_returned = len(self._episode_rewards)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
def reset(self, **kwargs):
"""Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset.
For environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == "FIRE"
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames,
            # so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, dim):
"""Warp frames to the specified size (dim x dim)."""
gym.ObservationWrapper.__init__(self, env)
self.width = dim
self.height = dim
self.observation_space = spaces.Box(
low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8
)
def observation(self, frame):
frame = rgb2gray(frame)
frame = resize(frame, height=self.height, width=self.width)
return frame[:, :, None]
# TODO: (sven) Deprecated class. Remove once traj. view is the norm.
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[0], shp[1], shp[2] * k),
dtype=env.observation_space.dtype,
)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return np.concatenate(self.frames, axis=2)
class FrameStackTrajectoryView(gym.ObservationWrapper):
def __init__(self, env):
"""No stacking. Trajectory View API takes care of this."""
gym.Wrapper.__init__(self, env)
shp = env.observation_space.shape
assert shp[2] == 1
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1]), dtype=env.observation_space.dtype
)
def observation(self, observation):
return np.squeeze(observation, axis=-1)
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(
low=0, high=1, shape=env.observation_space.shape, dtype=np.float32
)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
def wrap_deepmind(env, dim=84, framestack=True):
"""Configure environment for DeepMind-style Atari.
Note that we assume reward clipping is done outside the wrapper.
Args:
env (EnvType): The env object to wrap.
dim (int): Dimension to resize observations to (dim x dim).
framestack (bool): Whether to framestack observations.
"""
env = MonitorEnv(env)
env = NoopResetEnv(env, noop_max=30)
if env.spec is not None and "NoFrameskip" in env.spec.id:
env = MaxAndSkipEnv(env, skip=4)
env = EpisodicLifeEnv(env)
if "FIRE" in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env, dim)
# env = ScaledFloatFrame(env) # TODO: use for dqn?
# env = ClipRewardEnv(env) # reward clipping is handled by policy eval
# 4x image framestacking.
if framestack is True:
env = FrameStack(env, 4)
return env
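# Editor's sketch (not part of RLlib): typical use of wrap_deepmind. The env id
# and the availability of the Atari ROMs are assumptions of this example, as is
# the classic single-value gym reset() API that the wrappers above target.
def _example_wrap_deepmind():
    env = gym.make("PongNoFrameskip-v4")
    env = wrap_deepmind(env, dim=84, framestack=True)
    obs = env.reset()  # expected shape: (84, 84, 4) = dim x dim x framestack
    return env, obs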
| ray-project/ray | rllib/env/wrappers/atari_wrappers.py | Python | apache-2.0 | 10,098 |
import numpy as np
import pandas as pd
import pytest
ROWID_ZERO_INDEXED_BACKENDS = ('omniscidb',)
@pytest.mark.parametrize(
'column',
[
'string_col',
'double_col',
'date_string_col',
pytest.param('timestamp_col', marks=pytest.mark.skip(reason='hangs')),
],
)
@pytest.mark.xfail_unsupported
def test_distinct_column(backend, alltypes, df, column):
expr = alltypes[column].distinct()
result = expr.execute()
expected = df[column].unique()
assert set(result) == set(expected)
@pytest.mark.xfail_unsupported
def test_rowid(con, backend):
t = con.table('functional_alltypes')
result = t[t.rowid()].execute()
first_value = 0 if backend.name() in ROWID_ZERO_INDEXED_BACKENDS else 1
expected = pd.Series(
range(first_value, first_value + len(result)),
dtype=np.int64,
name='rowid',
)
pd.testing.assert_series_equal(result.iloc[:, 0], expected)
@pytest.mark.xfail_unsupported
def test_named_rowid(con, backend):
t = con.table('functional_alltypes')
result = t[t.rowid().name('number')].execute()
first_value = 0 if backend.name() in ROWID_ZERO_INDEXED_BACKENDS else 1
expected = pd.Series(
range(first_value, first_value + len(result)),
dtype=np.int64,
name='number',
)
pd.testing.assert_series_equal(result.iloc[:, 0], expected)
| cloudera/ibis | ibis/backends/tests/test_column.py | Python | apache-2.0 | 1,384 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
import os
from rqalpha.const import ORDER_TYPE, SIDE, POSITION_EFFECT
from .data_dict import TickDict, PositionDict, AccountDict, InstrumentDict, OrderDict, TradeDict, CommissionDict
from ..vnpy import *
from ..utils import make_order_book_id
ORDER_TYPE_MAPPING = {
ORDER_TYPE.MARKET: defineDict["THOST_FTDC_OPT_AnyPrice"],
ORDER_TYPE.LIMIT: defineDict["THOST_FTDC_OPT_LimitPrice"],
}
SIDE_MAPPING = {
SIDE.BUY: defineDict['THOST_FTDC_D_Buy'],
SIDE.SELL: defineDict['THOST_FTDC_D_Sell'],
}
POSITION_EFFECT_MAPPING = {
POSITION_EFFECT.OPEN: defineDict['THOST_FTDC_OF_Open'],
POSITION_EFFECT.CLOSE: defineDict['THOST_FTDC_OF_Close'],
POSITION_EFFECT.CLOSE_TODAY: defineDict['THOST_FTDC_OF_CloseToday'],
}
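# Editor's sketch: how an rqalpha order's enums are looked up before being sent
# to CTP in sendOrder() further below. The concrete flag values come from vnpy's
# defineDict, so none are hard-coded here.
def _example_order_flags():
    direction = SIDE_MAPPING[SIDE.BUY]                      # CTP direction flag
    offset = POSITION_EFFECT_MAPPING[POSITION_EFFECT.OPEN]  # CTP open/close flag
    price_type = ORDER_TYPE_MAPPING[ORDER_TYPE.LIMIT]       # CTP price type flag
    return direction, offset, price_type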
def query_in_sync(func):
@wraps(func)
def wrapper(api, data, error, n, last):
api.req_id = max(api.req_id, n)
result = func(api, data, last)
if last:
api.gateway.on_query(api.api_name, n, result)
return wrapper
class CtpMdApi(MdApi):
def __init__(self, gateway, temp_path, user_id, password, broker_id, address, api_name='ctp_md'):
super(CtpMdApi, self).__init__()
self.gateway = gateway
self.temp_path = temp_path
self.req_id = 0
self.connected = False
self.logged_in = False
self.user_id = user_id
self.password = password
self.broker_id = broker_id
self.address = address
self.api_name = api_name
def onFrontConnected(self):
"""服务器连接"""
self.connected = True
self.login()
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connected = False
self.logged_in = False
def onHeartBeatWarning(self, n):
"""心跳报警"""
pass
def onRspError(self, error, n, last):
"""错误回报"""
self.gateway.on_err(error)
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
if error['ErrorID'] == 0:
self.logged_in = True
else:
self.gateway.on_err(error)
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
if error['ErrorID'] == 0:
self.logged_in = False
else:
self.gateway.on_err(error)
def onRspSubMarketData(self, data, error, n, last):
"""订阅合约回报"""
pass
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
pass
def onRtnDepthMarketData(self, data):
"""行情推送"""
tick_dict = TickDict(data)
if tick_dict.is_valid:
self.gateway.on_tick(tick_dict)
def onRspSubForQuoteRsp(self, data, error, n, last):
"""订阅期权询价"""
pass
def onRspUnSubForQuoteRsp(self, data, error, n, last):
"""退订期权询价"""
pass
def onRtnForQuoteRsp(self, data):
"""期权询价推送"""
pass
def connect(self):
"""初始化连接"""
if not self.connected:
if not os.path.exists(self.temp_path):
os.makedirs(self.temp_path)
self.createFtdcMdApi(self.temp_path)
self.registerFront(self.address)
self.init()
else:
self.login()
def subscribe(self, order_book_id):
"""订阅合约"""
ins_dict = self.gateway.get_ins_dict(order_book_id)
if ins_dict is None:
return None
instrument_id = ins_dict.instrument_id
if instrument_id:
self.subscribeMarketData(str(instrument_id))
def login(self):
"""登录"""
if not self.logged_in:
req = {
'UserID': self.user_id,
'Password': self.password,
'BrokerID': self.broker_id,
}
self.req_id += 1
self.reqUserLogin(req, self.req_id)
return self.req_id
def close(self):
"""关闭"""
self.exit()
class CtpTdApi(TdApi):
def __init__(self, gateway, temp_path, user_id, password, broker_id, address, auth_code, user_production_info, api_name='ctp_td'):
super(CtpTdApi, self).__init__()
self.gateway = gateway
self.temp_path = temp_path
self.req_id = 0
self.connected = False
self.logged_in = False
self.authenticated = False
self.user_id = user_id
self.password = password
self.broker_id = broker_id
self.address = address
self.auth_code = auth_code
self.user_production_info = user_production_info
self.front_id = 0
self.session_id = 0
self.require_authentication = False
self.pos_cache = {}
self.ins_cache = {}
self.order_cache = {}
self.api_name = api_name
def onFrontConnected(self):
"""服务器连接"""
self.connected = True
if self.require_authentication:
self.authenticate()
else:
self.login()
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connected = False
self.logged_in = False
def onHeartBeatWarning(self, n):
"""心跳报警"""
pass
def onRspAuthenticate(self, data, error, n, last):
"""验证客户端回报"""
if error['ErrorID'] == 0:
self.authenticated = True
self.login()
else:
self.gateway.on_err(error)
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
if error['ErrorID'] == 0:
self.front_id = str(data['FrontID'])
self.session_id = str(data['SessionID'])
self.logged_in = True
self.qrySettlementInfoConfirm()
else:
self.gateway.on_err(error)
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
if error['ErrorID'] == 0:
self.logged_in = False
else:
self.gateway.on_err(error)
def onRspUserPasswordUpdate(self, data, error, n, last):
""""""
pass
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass
def onRspOrderInsert(self, data, error, n, last):
"""发单错误(柜台)"""
order_dict = OrderDict(data, rejected=True)
if order_dict.is_valid:
self.gateway.on_order(order_dict)
def onRspParkedOrderInsert(self, data, error, n, last):
""""""
pass
def onRspParkedOrderAction(self, data, error, n, last):
""""""
pass
def onRspOrderAction(self, data, error, n, last):
"""撤单错误(柜台)"""
self.gateway.on_err(error)
def onRspQueryMaxOrderVolume(self, data, error, n, last):
""""""
pass
def onRspSettlementInfoConfirm(self, data, error, n, last):
"""确认结算信息回报"""
pass
def onRspRemoveParkedOrder(self, data, error, n, last):
""""""
pass
def onRspRemoveParkedOrderAction(self, data, error, n, last):
""""""
pass
def onRspExecOrderInsert(self, data, error, n, last):
""""""
pass
def onRspExecOrderAction(self, data, error, n, last):
""""""
pass
def onRspForQuoteInsert(self, data, error, n, last):
""""""
pass
def onRspQuoteInsert(self, data, error, n, last):
""""""
pass
def onRspQuoteAction(self, data, error, n, last):
""""""
pass
def onRspLockInsert(self, data, error, n, last):
""""""
pass
def onRspCombActionInsert(self, data, error, n, last):
""""""
pass
@query_in_sync
def onRspQryOrder(self, data, last):
"""报单回报"""
order_dict = OrderDict(data)
if order_dict.is_valid:
self.order_cache[order_dict.order_id] = order_dict
if last:
return self.order_cache
def onRspQryTrade(self, data, error, n, last):
""""""
pass
@query_in_sync
def onRspQryInvestorPosition(self, data, last):
"""持仓查询回报"""
if data['InstrumentID']:
order_book_id = make_order_book_id(data['InstrumentID'])
if order_book_id not in self.pos_cache:
self.pos_cache[order_book_id] = PositionDict(data, self.gateway.get_ins_dict(order_book_id))
else:
self.pos_cache[order_book_id].update_data(data)
if last:
return self.pos_cache
@query_in_sync
def onRspQryTradingAccount(self, data, last):
"""资金账户查询回报"""
return AccountDict(data)
def onRspQryInvestor(self, data, error, n, last):
""""""
pass
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass
def onRspQryInstrumentMarginRate(self, data, error, n, last):
""""""
pass
@query_in_sync
def onRspQryInstrumentCommissionRate(self, data, last):
"""请求查询合约手续费率响应"""
return CommissionDict(data)
def onRspQryExchange(self, data, error, n, last):
""""""
pass
def onRspQryProduct(self, data, error, n, last):
""""""
pass
@query_in_sync
def onRspQryInstrument(self, data, last):
"""合约查询回报"""
ins_dict = InstrumentDict(data)
if ins_dict.is_valid:
self.ins_cache[ins_dict.order_book_id] = ins_dict
if last:
return self.ins_cache
def onRspQryDepthMarketData(self, data, error, n, last):
""""""
pass
def onRspQrySettlementInfo(self, data, error, n, last):
""""""
pass
def onRspQryTransferBank(self, data, error, n, last):
""""""
pass
def onRspQryInvestorPositionDetail(self, data, error, n, last):
""""""
pass
def onRspQryNotice(self, data, error, n, last):
""""""
pass
def onRspQrySettlementInfoConfirm(self, data, error, n, last):
""""""
pass
def onRspQryInvestorPositionCombineDetail(self, data, error, n, last):
""""""
pass
def onRspQryCFMMCTradingAccountKey(self, data, error, n, last):
""""""
pass
def onRspQryEWarrantOffset(self, data, error, n, last):
""""""
pass
def onRspQryInvestorProductGroupMargin(self, data, error, n, last):
""""""
pass
def onRspQryExchangeMarginRate(self, data, error, n, last):
""""""
pass
def onRspQryExchangeMarginRateAdjust(self, data, error, n, last):
""""""
pass
def onRspQryExchangeRate(self, data, error, n, last):
""""""
pass
def onRspQrySecAgentACIDMap(self, data, error, n, last):
""""""
pass
def onRspQryProductExchRate(self, data, error, n, last):
""""""
pass
def onRspQryProductGroup(self, data, error, n, last):
""""""
pass
def onRspQryOptionInstrTradeCost(self, data, error, n, last):
""""""
pass
def onRspQryOptionInstrCommRate(self, data, error, n, last):
""""""
pass
def onRspQryExecOrder(self, data, error, n, last):
""""""
pass
def onRspQryForQuote(self, data, error, n, last):
""""""
pass
def onRspQryQuote(self, data, error, n, last):
""""""
pass
def onRspQryLock(self, data, error, n, last):
""""""
pass
def onRspQryLockPosition(self, data, error, n, last):
""""""
pass
def onRspQryInvestorLevel(self, data, error, n, last):
""""""
pass
def onRspQryExecFreeze(self, data, error, n, last):
""""""
pass
def onRspQryCombInstrumentGuard(self, data, error, n, last):
""""""
pass
def onRspQryCombAction(self, data, error, n, last):
""""""
pass
def onRspQryTransferSerial(self, data, error, n, last):
""""""
pass
def onRspQryAccountregister(self, data, error, n, last):
""""""
pass
def onRspError(self, error, n, last):
"""错误回报"""
self.gateway.on_err(error)
def onRtnOrder(self, data):
"""报单回报"""
order_dict = OrderDict(data)
if order_dict.is_valid:
self.gateway.on_order(order_dict)
def onRtnTrade(self, data):
"""成交回报"""
trade_dict = TradeDict(data)
self.gateway.on_trade(trade_dict)
def onErrRtnOrderInsert(self, data, error):
"""发单错误回报(交易所)"""
self.gateway.on_err(error)
order_dict = OrderDict(data, rejected=True)
if order_dict.is_valid:
self.gateway.on_order(order_dict)
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
self.gateway.on_err(error)
def onRtnInstrumentStatus(self, data):
""""""
pass
def onRtnTradingNotice(self, data):
""""""
pass
def onRtnErrorConditionalOrder(self, data):
""""""
pass
def onRtnExecOrder(self, data):
""""""
pass
def onErrRtnExecOrderInsert(self, data, error):
""""""
pass
def onErrRtnExecOrderAction(self, data, error):
""""""
pass
def onErrRtnForQuoteInsert(self, data, error):
""""""
pass
def onRtnQuote(self, data):
""""""
pass
def onErrRtnQuoteInsert(self, data, error):
""""""
pass
def onErrRtnQuoteAction(self, data, error):
""""""
pass
def onRtnForQuoteRsp(self, data):
""""""
pass
def onRtnCFMMCTradingAccountToken(self, data):
""""""
pass
def onRtnLock(self, data):
""""""
pass
def onErrRtnLockInsert(self, data, error):
""""""
pass
def onRtnCombAction(self, data):
""""""
pass
def onErrRtnCombActionInsert(self, data, error):
""""""
pass
def onRspQryContractBank(self, data, error, n, last):
""""""
pass
def onRspQryParkedOrder(self, data, error, n, last):
""""""
pass
def onRspQryParkedOrderAction(self, data, error, n, last):
""""""
pass
def onRspQryTradingNotice(self, data, error, n, last):
""""""
pass
def onRspQryBrokerTradingParams(self, data, error, n, last):
""""""
pass
def onRspQryBrokerTradingAlgos(self, data, error, n, last):
""""""
pass
def onRspQueryCFMMCTradingAccountToken(self, data, error, n, last):
""""""
pass
def onRtnFromBankToFutureByBank(self, data):
""""""
pass
def onRtnFromFutureToBankByBank(self, data):
""""""
pass
def onRtnRepealFromBankToFutureByBank(self, data):
""""""
pass
def onRtnRepealFromFutureToBankByBank(self, data):
""""""
pass
def onRtnFromBankToFutureByFuture(self, data):
""""""
pass
def onRtnFromFutureToBankByFuture(self, data):
""""""
pass
def onRtnRepealFromBankToFutureByFutureManual(self, data):
""""""
pass
def onRtnRepealFromFutureToBankByFutureManual(self, data):
""""""
pass
def onRtnQueryBankBalanceByFuture(self, data):
""""""
pass
def onErrRtnBankToFutureByFuture(self, data, error):
""""""
pass
def onErrRtnFutureToBankByFuture(self, data, error):
""""""
pass
def onErrRtnRepealBankToFutureByFutureManual(self, data, error):
""""""
pass
def onErrRtnRepealFutureToBankByFutureManual(self, data, error):
""""""
pass
def onErrRtnQueryBankBalanceByFuture(self, data, error):
""""""
pass
def onRtnRepealFromBankToFutureByFuture(self, data):
""""""
pass
def onRtnRepealFromFutureToBankByFuture(self, data):
""""""
pass
def onRspFromBankToFutureByFuture(self, data, error, n, last):
""""""
pass
def onRspFromFutureToBankByFuture(self, data, error, n, last):
""""""
pass
def onRspQueryBankAccountMoneyByFuture(self, data, error, n, last):
""""""
pass
def onRtnOpenAccountByBank(self, data):
""""""
pass
def onRtnCancelAccountByBank(self, data):
""""""
pass
def onRtnChangeAccountByBank(self, data):
""""""
pass
def connect(self):
"""初始化连接"""
if not self.connected:
if not os.path.exists(self.temp_path):
os.makedirs(self.temp_path)
self.createFtdcTraderApi(self.temp_path)
self.subscribePrivateTopic(0)
self.subscribePublicTopic(0)
self.registerFront(self.address)
self.init()
else:
if self.require_authentication:
self.authenticate()
else:
self.login()
def login(self):
"""连接服务器"""
if not self.logged_in:
req = {
'UserID': self.user_id,
'Password': self.password,
'BrokerID': self.broker_id,
}
self.req_id += 1
self.reqUserLogin(req, self.req_id)
return self.req_id
def authenticate(self):
"""申请验证"""
if self.authenticated:
req = {
'UserID': self.user_id,
'BrokerID': self.broker_id,
'AuthCode': self.auth_code,
'UserProductInfo': self.user_production_info,
}
self.req_id += 1
self.reqAuthenticate(req, self.req_id)
else:
self.login()
return self.req_id
def qrySettlementInfoConfirm(self):
req = {
'BrokerID': self.broker_id,
'InvestorID': self.user_id,
}
self.req_id += 1
self.reqSettlementInfoConfirm(req, self.req_id)
return self.req_id
def qryInstrument(self):
self.ins_cache = {}
self.req_id += 1
self.reqQryInstrument({}, self.req_id)
return self.req_id
def qryCommission(self, order_book_id):
self.req_id += 1
ins_dict = self.gateway.get_ins_dict(order_book_id)
if ins_dict is None:
return None
req = {
'InstrumentID': ins_dict.instrument_id,
'InvestorID': self.user_id,
'BrokerID': self.broker_id,
'ExchangeID': ins_dict.exchange_id,
}
self.reqQryInstrumentCommissionRate(req, self.req_id)
return self.req_id
def qryAccount(self):
"""查询账户"""
self.req_id += 1
self.reqQryTradingAccount({}, self.req_id)
return self.req_id
def qryPosition(self):
"""查询持仓"""
self.pos_cache = {}
self.req_id += 1
req = {
'BrokerID': self.broker_id,
'InvestorID': self.user_id,
}
self.reqQryInvestorPosition(req, self.req_id)
return self.req_id
def qryOrder(self):
"""订单查询"""
self.order_cache = {}
self.req_id += 1
req = {
'BrokerID': self.broker_id,
'InvestorID': self.user_id,
}
self.reqQryOrder(req, self.req_id)
return self.req_id
def sendOrder(self, order):
"""发单"""
ins_dict = self.gateway.get_ins_dict(order.order_book_id)
if ins_dict is None:
return None
req = {
'InstrumentID': ins_dict.instrument_id,
'LimitPrice': order.price,
'VolumeTotalOriginal': order.quantity,
'OrderPriceType': ORDER_TYPE_MAPPING.get(order.type, ''),
'Direction': SIDE_MAPPING.get(order.side, ''),
'CombOffsetFlag': POSITION_EFFECT_MAPPING.get(order.position_effect, ''),
'OrderRef': str(order.order_id),
'InvestorID': self.user_id,
'UserID': self.user_id,
'BrokerID': self.broker_id,
'CombHedgeFlag': defineDict['THOST_FTDC_HF_Speculation'], # 投机单
'ContingentCondition': defineDict['THOST_FTDC_CC_Immediately'], # 立即发单
'ForceCloseReason': defineDict['THOST_FTDC_FCC_NotForceClose'], # 非强平
'IsAutoSuspend': 0, # 非自动挂起
'TimeCondition': defineDict['THOST_FTDC_TC_GFD'], # 今日有效
'VolumeCondition': defineDict['THOST_FTDC_VC_AV'], # 任意成交量
'MinVolume': 1, # 最小成交量为1
}
self.req_id += 1
self.reqOrderInsert(req, self.req_id)
return self.req_id
def cancelOrder(self, order):
"""撤单"""
ins_dict = self.gateway.get_ins_dict(order.order_book_id)
if ins_dict is None:
return None
self.req_id += 1
req = {
'InstrumentID': ins_dict.instrument_id,
'ExchangeID': ins_dict.exchange_id,
'OrderRef': str(order.order_id),
'FrontID': int(self.front_id),
'SessionID': int(self.session_id),
'ActionFlag': defineDict['THOST_FTDC_AF_Delete'],
'BrokerID': self.broker_id,
'InvestorID': self.user_id,
}
self.reqOrderAction(req, self.req_id)
return self.req_id
def close(self):
"""关闭"""
self.exit()
| ricequant/rqalpha-mod-vnpy | rqalpha_mod_vnpy/ctp/api.py | Python | apache-2.0 | 22,615 |