repo_name (stringlengths 5 to 100) | path (stringlengths 4 to 299) | copies (stringclasses, 990 values) | size (stringlengths 4 to 7) | content (stringlengths 666 to 1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
snsokolov/contests | codeforces/558C_chem_2.py | 1 | 4460 | #!/usr/bin/env python3
# 558C_chem.py - Codeforces.com/problemset/problem/558/C Chem quiz by Sergey 2015
# Standard modules
import unittest
import sys
# Additional modules
import itertools
###############################################################################
# Chem Class
###############################################################################
class Chem:
""" Chem representation """
MAX = 100001
def __init__(self, args):
""" Default constructor """
self.n = args[0]
self.list = args[1]
def calculate(self):
""" Main calcualtion function of the class """
valid = set()
stp = [0] * self.MAX
first = 1
for vol in self.list:
new_valid = set()
steps = 0
prev = 1
while vol > 0:
if (first or vol in valid):
if prev % 2 == 1:
ssteps = steps + 1
svol = vol << 1
while svol <= self.MAX:
new_valid.add(svol)
stp[svol] += ssteps
svol <<= 1
ssteps += 1
new_valid.add(vol)
stp[vol] += steps
prev = vol
vol >>= 1
steps += 1
if not first:
valid.intersection_update(new_valid)
else:
valid.update(new_valid)
first = 0
result = None
for n in valid:
if result is None or result > stp[n]:
result = stp[n]
return str(result)
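# Illustrative result (taken from test_sample_tests below, not new data):
# Chem([3, [4, 8, 2]]).calculate() returns "2"; the algorithm appears to count
# the minimal total number of halving/doubling steps needed to equalize volumes.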
###############################################################################
# Helping classes
###############################################################################
###############################################################################
# Executable code
###############################################################################
def get_inputs(test_inputs=None):
it = iter(test_inputs.split("\n")) if test_inputs else None
def uinput():
""" Unit-testable input function wrapper """
if it:
return next(it)
else:
return sys.stdin.readline()
# Getting string inputs. Place all uinput() calls here
num = int(uinput())
nums = list(map(int, uinput().split(), itertools.repeat(10, num)))
# Decoding inputs into a list
return [num, nums]
def calculate(test_inputs=None):
""" Base class calculate method wrapper """
return Chem(get_inputs(test_inputs)).calculate()
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
def test_sample_tests(self):
""" Quiz sample tests. Add \n to separate lines """
# Sample test 1
test = "3\n4 8 2"
self.assertEqual(calculate(test), "2")
self.assertEqual(get_inputs(test)[0], 3)
self.assertEqual(list(get_inputs(test)[1]), [4, 8, 2])
# Sample test 2
test = "3\n3 5 6"
self.assertEqual(calculate(test), "5")
# Sample test 3
test = "3\n10 4 4"
self.assertEqual(calculate(test), "3")
def test_timelimit_tests(self):
# Time limit test
imax = 10000
test = str(imax)
nums = [random.randint(1, 100000) for i in range(imax)]
test += "\n" + " ".join(map(str, nums))
import timeit
start = timeit.default_timer()
args = get_inputs(test)
init = timeit.default_timer()
d = Chem(args)
calc = timeit.default_timer()
d.calculate()
stop = timeit.default_timer()
print("\nTime Test: {0:.3f} (inp {1:.3f} init {2:.3f} calc {3:.3f})".
format(stop-start, init-start, calc-init, stop-calc))
def test_Chem_class__basic_functions(self):
""" Chem class basic functions testing """
# Constructor test
d = Chem([3, [4, 8, 2]])
self.assertEqual(d.list[0], 4)
if __name__ == "__main__":
# Avoiding recursion limitations
sys.setrecursionlimit(100000)
if sys.argv[-1] == "-ut":
import random
unittest.main(argv=[" "])
# Print the result string
sys.stdout.write(calculate())
| unlicense | 4,462,464,134,741,659,000 | 26.361963 | 81 | 0.440135 | false |
zooba/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/idlelib/idle_test/test_window.py | 11 | 1075 | "Test window, coverage 47%."
from idlelib import window
import unittest
from test.support import requires
from tkinter import Tk
class WindowListTest(unittest.TestCase):
def test_init(self):
wl = window.WindowList()
self.assertEqual(wl.dict, {})
self.assertEqual(wl.callbacks, [])
# Further tests need mock Window.
class ListedToplevelTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
window.registry = set()
requires('gui')
cls.root = Tk()
cls.root.withdraw()
@classmethod
def tearDownClass(cls):
window.registry = window.WindowList()
cls.root.update_idletasks()
## for id in cls.root.tk.call('after', 'info'):
## cls.root.after_cancel(id) # Need for EditorWindow.
cls.root.destroy()
del cls.root
def test_init(self):
win = window.ListedToplevel(self.root)
self.assertIn(win, window.registry)
self.assertEqual(win.focused_widget, win)
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 | -6,704,342,921,100,253,000 | 22.888889 | 65 | 0.631628 | false |
jkpr/qlang | qlang/borrow.py | 1 | 4800 | """
Supply source XLSForm
get the translations from different file(s)
"""
import xlsxwriter
import argparse
import re
import qlang
from qlang import QlangError
import spreadsheet
class TranslationDict():
"""
Intermediate product is a dictionary
{
"eng-string1" : {
"language1" : ["found1", "found2", ...]
"language2" : ["found1", "found2", ...]
},
"eng-string2" : ...
}
"""
def __init__(self):
self.data = {}
self.languages = set()
def add_translation(self, eng, foreign, lang):
eng = self.clean_string(eng)
foreign = self.clean_string(foreign)
try:
this_dict = self.data[eng]
if lang in this_dict:
this_dict[lang].append(foreign)
else:
this_dict[lang] = [foreign]
except KeyError:
self.data[eng] = {lang: [foreign]}
self.languages.add(lang)
def update(self, other):
if isinstance(other, TranslationDict):
for k in other:
try:
this_dict = self.data[k]
other_dict = other[k]
for lang in other_dict:
if lang in this_dict:
this_dict[lang].extend(other_dict[lang])
else:
this_dict[lang] = other_dict[lang]
except KeyError:
self.data[k] = other[k]
def write_out(self, path):
languages = list(self.languages)
wb = xlsxwriter.Workbook(path)
ws = wb.add_worksheet('translations')
ws.write_row(0, 0, ['English'] + languages)
for i, k in enumerate(self.data):
ws.write(i+1, 0, k)
translations = self.data[k]
for j, lang in enumerate(languages):
try:
# assumed intent of the unfinished loop: write the first translation
# recorded for this language next to the English string
ws.write(i + 1, j + 1, translations[lang][0])
except KeyError:
pass
wb.close()
@staticmethod
def clean_string(s):
s = s.replace('\r', '\n')
s = s.strip()
s = qlang.space_newline_fix(s)
number_re = r'^([A-Z]|(\S*\d+[a-z]?))(\.|:)\s+'
m = re.match(number_re, s)
if m:
s = s[m.span()[1]:]
s = s.strip()
return s
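# Example sketch (hypothetical strings, assuming qlang.space_newline_fix leaves
# single-line input unchanged):
#   TranslationDict.clean_string("301a. How many beds?") -> "How many beds?"
#   TranslationDict.clean_string("A: Name of facility") -> "Name of facility"
# i.e. the regex strips leading question-number prefixes such as "301a." or "A:".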
def __str__(self):
return str(self.data)
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
def extract_line_translations(line, english, others, translations):
result = TranslationDict()
for col, name in english:
eng = line[col]
if eng == '':
continue
these_translations = translations[name]
for lang in others:
try:
this_col = these_translations[lang]
foreign = line[this_col]
result.add_translation(eng, foreign, lang)
except KeyError:
# This language not found... unlikely
pass
return result
# Given a worksheet, return a TranslationDict of all translations found in it.
def create_translation_dict(ws):
result = TranslationDict()
header = ws[0]
try:
english, others, translations = qlang.preprocess_header(header)
for line in ws[1:]:
found = extract_line_translations(line, english, others, translations)
result.update(found)
except QlangError:
# English not found, do nothing
pass
print(result)
return result
if __name__ == '__main__':
prog_desc = 'Grab translations from existing XLSForms'
parser = argparse.ArgumentParser(description=prog_desc)
file_help = 'One or more paths to source XLSForms containing translations.'
parser.add_argument('xlsxfile', nargs='+', help=file_help)
merge_help = 'An XLSForm that receives the translations from source files.'
parser.add_argument('-m', '--merge', help=merge_help)
out_help = 'Path to write output.'
parser.add_argument('-o', '--outpath', help=out_help)
args = parser.parse_args()
print("Received")
print("Files: ", args.xlsxfile)
print("Merge: ", args.merge)
print("Outpath: ", args.outpath)
f = [
'test/files/GHR5-SDP-Questionnaire-v13-jkp.xlsx',
'/Users/jpringle/Downloads/KER5-SDP-Questionnaire-v2-jef.xlsx'
]
wb = spreadsheet.Workbook(f[1])
create_translation_dict(wb['survey'])
# If just input files: create a dictionary of sorts that can be used later
# If input files and out: write that dictionary to out
# If input files and merge: add translations to merge, overwrite where needed, add entire dictionary in new worksheet
# If input files and merge and out: above plus specified outfile
| mit | -6,599,520,850,606,766,000 | 28.447853 | 121 | 0.554375 | false |
tjcorona/PyFR | pyfr/solvers/navstokes/system.py | 1 | 2757 | # -*- coding: utf-8 -*-
from pyfr.solvers.baseadvecdiff import BaseAdvectionDiffusionSystem
from pyfr.solvers.navstokes.elements import NavierStokesElements
from pyfr.solvers.navstokes.inters import (NavierStokesBaseBCInters,
NavierStokesIntInters,
NavierStokesMPIInters)
class NavierStokesSystem(BaseAdvectionDiffusionSystem):
name = 'navier-stokes'
elementscls = NavierStokesElements
intinterscls = NavierStokesIntInters
mpiinterscls = NavierStokesMPIInters
bbcinterscls = NavierStokesBaseBCInters
def rhs(self, t, uinbank, foutbank):
runall = self.backend.runall
q1, q2 = self._queues
kernels = self._kernels
self.eles_scal_upts_inb.active = uinbank
self.eles_scal_upts_outb.active = foutbank
q1 << kernels['eles', 'disu']()
q1 << kernels['mpiint', 'scal_fpts_pack']()
runall([q1])
q1 << kernels['iint', 'con_u']()
q1 << kernels['bcint', 'con_u'](t=t)
q1 << kernels['eles', 'tgradpcoru_upts']()
if ('eles', 'avis') in kernels:
q1 << kernels['eles', 'avis']()
q1 << kernels['mpiint', 'avis_fpts_pack']()
q2 << kernels['mpiint', 'scal_fpts_send']()
q2 << kernels['mpiint', 'scal_fpts_recv']()
q2 << kernels['mpiint', 'scal_fpts_unpack']()
runall([q1, q2])
q1 << kernels['mpiint', 'con_u']()
q1 << kernels['eles', 'tgradcoru_upts']()
q1 << kernels['eles', 'gradcoru_upts']()
q1 << kernels['eles', 'gradcoru_fpts']()
q1 << kernels['mpiint', 'vect_fpts_pack']()
if ('eles', 'avis') in kernels:
q2 << kernels['mpiint', 'avis_fpts_send']()
q2 << kernels['mpiint', 'avis_fpts_recv']()
q2 << kernels['mpiint', 'avis_fpts_unpack']()
runall([q1, q2])
if ('eles', 'gradcoru_qpts') in kernels:
q1 << kernels['eles', 'gradcoru_qpts']()
q1 << kernels['eles', 'tdisf']()
q1 << kernels['eles', 'tdivtpcorf']()
q1 << kernels['iint', 'comm_flux']()
q1 << kernels['bcint', 'comm_flux'](t=t)
q2 << kernels['mpiint', 'vect_fpts_send']()
q2 << kernels['mpiint', 'vect_fpts_recv']()
q2 << kernels['mpiint', 'vect_fpts_unpack']()
runall([q1, q2])
q1 << kernels['mpiint', 'comm_flux']()
q1 << kernels['eles', 'tdivtconf']()
if ('eles', 'tdivf_qpts') in kernels:
q1 << kernels['eles', 'tdivf_qpts']()
q1 << kernels['eles', 'negdivconf'](t=t)
q1 << kernels['eles', 'divf_upts']()
else:
q1 << kernels['eles', 'negdivconf'](t=t)
runall([q1])
| bsd-3-clause | 4,086,353,880,125,991,000 | 35.276316 | 68 | 0.540443 | false |
GNOME/pygtk | tests/test_gdk.py | 6 | 2483 | import unittest
import gc
from common import gtk, gobject
class CallOnDel:
def __init__(self, callback):
self.callback = callback
def __del__(self):
self.callback()
class GdkTest(unittest.TestCase):
def testBitmapCreateFromData(self):
gtk.gdk.bitmap_create_from_data(None, '\x00', 1, 1)
#def testPixmapCreateFromData(self):
# black = gtk.gdk.color_parse('black')
# gtk.gdk.pixmap_create_from_data(None, '\x00', 1, 1, 1,
# black, black)
def _testWindow(self):
common = {'finalized': False}
def on_finalize():
common['finalized'] = True
w = gtk.gdk.Window(None, 200, 200, gtk.gdk.WINDOW_TEMP, 0, 0)
w.set_data('foo', CallOnDel(on_finalize))
w.destroy()
while gtk.events_pending():
gtk.main_iteration(block=False)
del w
# Note that this depends on the mainloop processing an X event so
# it might fail if the timing is off
while gc.collect():
pass
assert common['finalized']
def testDrawIndexedImage(self):
w = gtk.Window()
w.realize()
w.window.draw_indexed_image(gtk.gdk.GC(w.window),
0, 0,
1, 2,
gtk.gdk.RGB_DITHER_NONE,
'\x00\x01',
1,
[0xdeadbe, 0xbebabe])
def _collect(self):
cnt = 0
while True:
x = gc.collect()
cnt += x
if x == 0:
break
return cnt
def testDisplay(self):
while gc.collect():
pass
display = gtk.gdk.Display(None)
if gobject.pygobject_version >= (2,13):
dispref = display.weak_ref()
del display
self.assertEqual(dispref(), None)
else:
del display
self.assertEquals(self._collect(), 1)
display = gtk.gdk.Display(None)
self.assertEquals(display.__grefcount__, 1)
display.close()
self.assertEquals(display.__grefcount__, 1)
if gobject.pygobject_version >= (2,13):
dispref = display.weak_ref()
del display
self.assertEqual(dispref(), None)
else:
del display
self.assertEquals(self._collect(), 1)
| lgpl-2.1 | -1,882,351,300,071,696,100 | 28.915663 | 73 | 0.499799 | false |
encbladexp/makeblog | makeblog/test_tools.py | 1 | 2244 | # makeblog - A simple offline Blog.
# Copyright (C) 2013-2019 Stefan J. Betz <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest import TestCase
from os import access, rmdir, mkdir, unlink, F_OK
from makeblog.tools import slugify, directorymaker, newfile, parse_args
class TestSlugify(TestCase):
def test_simpleslug(self):
self.assertEqual(slugify("Test"), "test")
def test_umlaute(self):
self.assertEqual(slugify("ä"), "ae")
self.assertEqual(slugify("ü"), "ue")
self.assertEqual(slugify("ö"), "oe")
self.assertEqual(slugify("ß"), "ss")
def test_whitespace(self):
self.assertEqual(slugify(" "), "-")
def test_slug_exclude(self):
self.assertEqual(slugify(":"), "")
class TestDirectorymaker(TestCase):
def test_simpledir(self):
self.assertEqual(directorymaker("test"), "dst/test")
self.assertTrue(access("dst", F_OK))
self.assertFalse(access("dst/test", F_OK))
def tearDown(self):
rmdir("dst")
class TestNewfile(TestCase):
def setUp(self):
mkdir("posts")
mkdir("drafts")
parse_args()
def test_newfile(self):
self.assertEqual(newfile("test"), "posts/1-test.html")
with open("posts/1-test.html", "w") as f:
f.write("test")
self.assertEqual(newfile("test"), "posts/2-test.html")
def test_draft(self):
self.assertEqual(newfile("test", True), "drafts/1-test.html")
def tearDown(self):
if access("posts/1-test.html", F_OK):
unlink("posts/1-test.html")
rmdir("posts")
rmdir("drafts")
| gpl-3.0 | -8,729,568,014,015,002,000 | 32.432836 | 71 | 0.658036 | false |
MatthewShao/mitmproxy | mitmproxy/tools/console/keybindings.py | 8 | 4686 | import urwid
import blinker
import textwrap
from mitmproxy.tools.console import layoutwidget
from mitmproxy.tools.console import signals
HELP_HEIGHT = 5
keybinding_focus_change = blinker.Signal()
class KeyItem(urwid.WidgetWrap):
def __init__(self, walker, binding, focused):
self.walker, self.binding, self.focused = walker, binding, focused
super().__init__(None)
self._w = self.get_widget()
def get_widget(self):
cmd = textwrap.dedent(self.binding.command).strip()
parts = [
(4, urwid.Text([("focus", ">> " if self.focused else " ")])),
(10, urwid.Text([("title", self.binding.key)])),
(12, urwid.Text([("highlight", "\n".join(self.binding.contexts))])),
urwid.Text([("text", cmd)]),
]
return urwid.Columns(parts)
def get_edit_text(self):
return self._w[1].get_edit_text()
def selectable(self):
return True
def keypress(self, size, key):
return key
class KeyListWalker(urwid.ListWalker):
def __init__(self, master):
self.master = master
self.index = 0
self.focusobj = None
self.bindings = list(master.keymap.list("all"))
self.set_focus(0)
signals.keybindings_change.connect(self.sig_modified)
def sig_modified(self, sender):
self.bindings = list(self.master.keymap.list("all"))
self.set_focus(min(self.index, len(self.bindings) - 1))
self._modified()
def get_edit_text(self):
return self.focus_obj.get_edit_text()
def _get(self, pos):
binding = self.bindings[pos]
return KeyItem(self, binding, pos == self.index)
def get_focus(self):
return self.focus_obj, self.index
def set_focus(self, index):
binding = self.bindings[index]
self.index = index
self.focus_obj = self._get(self.index)
keybinding_focus_change.send(binding.help or "")
def get_next(self, pos):
if pos >= len(self.bindings) - 1:
return None, None
pos = pos + 1
return self._get(pos), pos
def get_prev(self, pos):
pos = pos - 1
if pos < 0:
return None, None
return self._get(pos), pos
class KeyList(urwid.ListBox):
def __init__(self, master):
self.master = master
self.walker = KeyListWalker(master)
super().__init__(self.walker)
def keypress(self, size, key):
if key == "m_select":
foc, idx = self.get_focus()
# Act here
elif key == "m_start":
self.set_focus(0)
self.walker._modified()
elif key == "m_end":
self.set_focus(len(self.walker.bindings) - 1)
self.walker._modified()
return super().keypress(size, key)
class KeyHelp(urwid.Frame):
def __init__(self, master):
self.master = master
super().__init__(self.widget(""))
self.set_active(False)
keybinding_focus_change.connect(self.sig_mod)
def set_active(self, val):
h = urwid.Text("Key Binding Help")
style = "heading" if val else "heading_inactive"
self.header = urwid.AttrWrap(h, style)
def widget(self, txt):
cols, _ = self.master.ui.get_cols_rows()
return urwid.ListBox(
[urwid.Text(i) for i in textwrap.wrap(txt, cols)]
)
def sig_mod(self, txt):
self.set_body(self.widget(txt))
class KeyBindings(urwid.Pile, layoutwidget.LayoutWidget):
title = "Key Bindings"
keyctx = "keybindings"
def __init__(self, master):
oh = KeyHelp(master)
super().__init__(
[
KeyList(master),
(HELP_HEIGHT, oh),
]
)
self.master = master
def get_focused_binding(self):
if self.focus_position != 0:
return None
f = self.widget_list[0]
return f.walker.get_focus()[0].binding
def keypress(self, size, key):
if key == "m_next":
self.focus_position = (
self.focus_position + 1
) % len(self.widget_list)
self.widget_list[1].set_active(self.focus_position == 1)
key = None
# This is essentially a copypasta from urwid.Pile's keypress handler.
# So much for "closed for modification, but open for extension".
item_rows = None
if len(size) == 2:
item_rows = self.get_item_rows(size, focus = True)
i = self.widget_list.index(self.focus_item)
tsize = self.get_item_size(size, i, True, item_rows)
return self.focus_item.keypress(tsize, key)
| mit | 5,222,438,098,474,063,000 | 28.471698 | 80 | 0.569996 | false |
Nick-Hall/gramps | gramps/gen/plug/docgen/paragraphstyle.py | 5 | 11461 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .fontstyle import FontStyle
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".paragraphstyle")
#-------------------------------------------------------------------------
#
# Paragraph alignment
#
#-------------------------------------------------------------------------
PARA_ALIGN_CENTER = 0
PARA_ALIGN_LEFT = 1
PARA_ALIGN_RIGHT = 2
PARA_ALIGN_JUSTIFY = 3
#------------------------------------------------------------------------
#
# ParagraphStyle
#
#------------------------------------------------------------------------
class ParagraphStyle:
"""
Defines the characteristics of a paragraph. The characteristics are:
font (a :class:`.FontStyle` instance), right margin, left margin,
first indent, top margin, bottom margin, alignment, level, top border,
bottom border, right border, left border, padding, and background color.
"""
def __init__(self, source=None):
"""
:param source: if not None, then the ParagraphStyle is created using the
values of the source instead of the default values.
"""
if source:
self.font = FontStyle(source.font)
self.rmargin = source.rmargin
self.lmargin = source.lmargin
self.first_indent = source.first_indent
self.tmargin = source.tmargin
self.bmargin = source.bmargin
self.align = source.align
self.level = source.level
self.top_border = source.top_border
self.bottom_border = source.bottom_border
self.right_border = source.right_border
self.left_border = source.left_border
self.pad = source.pad
self.bgcolor = source.bgcolor
self.description = source.description
self.tabs = source.tabs
else:
self.font = FontStyle()
self.rmargin = 0
self.lmargin = 0
self.tmargin = 0
self.bmargin = 0
self.first_indent = 0
self.align = PARA_ALIGN_LEFT
self.level = 0
self.top_border = 0
self.bottom_border = 0
self.right_border = 0
self.left_border = 0
self.pad = 0
self.bgcolor = (255, 255, 255)
self.description = ""
self.tabs = []
def set_description(self, text):
"""
Set the description of the paragraph
"""
self.description = text
def get_description(self):
"""
Return the description of the paragraph
"""
return self.description
def set(self, rmargin=None, lmargin=None, first_indent=None,
tmargin=None, bmargin=None, align=None,
tborder=None, bborder=None, rborder=None, lborder=None,
pad=None, bgcolor=None, font=None):
"""
Allows the values of the object to be set.
:param rmargin: right indent in centimeters
:param lmargin: left indent in centimeters
:param first_indent: first line indent in centimeters
:param tmargin: space above paragraph in centimeters
:param bmargin: space below paragraph in centimeters
:param align: alignment type (PARA_ALIGN_LEFT, PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER, or PARA_ALIGN_JUSTIFY)
:param tborder: non zero indicates that a top border should be used
:param bborder: non zero indicates that a bottom border should be used
:param rborder: non zero indicates that a right border should be used
:param lborder: non zero indicates that a left border should be used
:param pad: padding in centimeters
:param bgcolor: background color of the paragraph as an RGB tuple.
:param font: FontStyle instance that defines the font
"""
if font is not None:
self.font = FontStyle(font)
if pad is not None:
self.set_padding(pad)
if tborder is not None:
self.set_top_border(tborder)
if bborder is not None:
self.set_bottom_border(bborder)
if rborder is not None:
self.set_right_border(rborder)
if lborder is not None:
self.set_left_border(lborder)
if bgcolor is not None:
self.set_background_color(bgcolor)
if align is not None:
self.set_alignment(align)
if rmargin is not None:
self.set_right_margin(rmargin)
if lmargin is not None:
self.set_left_margin(lmargin)
if first_indent is not None:
self.set_first_indent(first_indent)
if tmargin is not None:
self.set_top_margin(tmargin)
if bmargin is not None:
self.set_bottom_margin(bmargin)
def set_header_level(self, level):
"""
Set the header level for the paragraph. This is useful for
numbered paragraphs. A value of 1 indicates a header level
format of X, a value of two implies X.X, etc. A value of zero
means no header level.
"""
self.level = level
def get_header_level(self):
"Return the header level of the paragraph"
return self.level
def set_font(self, font):
"""
Set the font style of the paragraph.
:param font: :class:`.FontStyle` object containing the font definition
to use.
"""
self.font = FontStyle(font)
def get_font(self):
"Return the :class:`.FontStyle` of the paragraph"
return self.font
def set_padding(self, val):
"""
Set the paragraph padding in centimeters
:param val: floating point value indicating the padding in centimeters
"""
self.pad = val
def get_padding(self):
"""Return a the padding of the paragraph"""
return self.pad
def set_top_border(self, val):
"""
Set the presence or absence of top border.
:param val: True indicates a border should be used, False indicates
no border.
"""
self.top_border = val
def get_top_border(self):
"Return 1 if a top border is specified"
return self.top_border
def set_bottom_border(self, val):
"""
Set the presence or absence of bottom border.
:param val: True indicates a border should be used, False
indicates no border.
"""
self.bottom_border = val
def get_bottom_border(self):
"Return 1 if a bottom border is specified"
return self.bottom_border
def set_left_border(self, val):
"""
Set the presence or absence of left border.
:param val: True indicates a border should be used, False
indicates no border.
"""
self.left_border = val
def get_left_border(self):
"Return 1 if a left border is specified"
return self.left_border
def set_right_border(self, val):
"""
Set the presence or absence of right border.
:param val: True indicates a border should be used, False
indicates no border.
"""
self.right_border = val
def get_right_border(self):
"Return 1 if a right border is specified"
return self.right_border
def get_background_color(self):
"""
Return a tuple indicating the RGB components of the background
color
"""
return self.bgcolor
def set_background_color(self, color):
"""
Set the background color of the paragraph.
:param color: tuple representing the RGB components of a color
(0,0,0) to (255,255,255)
"""
self.bgcolor = color
def set_alignment(self, align):
"""
Set the paragraph alignment.
:param align: PARA_ALIGN_LEFT, PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER,
or PARA_ALIGN_JUSTIFY
"""
self.align = align
def get_alignment(self):
"Return the alignment of the paragraph"
return self.align
def get_alignment_text(self):
"""
Return a text string representing the alignment, either 'left',
'right', 'center', or 'justify'
"""
if self.align == PARA_ALIGN_LEFT:
return "left"
elif self.align == PARA_ALIGN_CENTER:
return "center"
elif self.align == PARA_ALIGN_RIGHT:
return "right"
elif self.align == PARA_ALIGN_JUSTIFY:
return "justify"
return "unknown"
def set_left_margin(self, value):
"sets the left indent in centimeters"
self.lmargin = value
def set_right_margin(self, value):
"sets the right indent in centimeters"
self.rmargin = value
def set_first_indent(self, value):
"sets the first line indent in centimeters"
self.first_indent = value
def set_top_margin(self, value):
"sets the space above paragraph in centimeters"
self.tmargin = value
def set_bottom_margin(self, value):
"sets the space below paragraph in centimeters"
self.bmargin = value
def get_left_margin(self):
"returns the left indent in centimeters"
return self.lmargin
def get_right_margin(self):
"returns the right indent in centimeters"
return self.rmargin
def get_first_indent(self):
"returns the first line indent in centimeters"
return self.first_indent
def get_top_margin(self):
"returns the space above paragraph in centimeters"
return self.tmargin
def get_bottom_margin(self):
"returns the space below paragraph in centimeters"
return self.bmargin
def set_tabs(self, tab_stops):
assert isinstance(tab_stops, list)
self.tabs = tab_stops
def get_tabs(self):
return self.tabs
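# Minimal usage sketch (not part of the original module):
#
# style = ParagraphStyle()
# style.set(lmargin=1.0, tmargin=0.25, align=PARA_ALIGN_JUSTIFY)
# style.set_header_level(2)       # numbered as X.X
# copy = ParagraphStyle(style)    # the copy constructor keeps all settings
# assert copy.get_alignment_text() == "justify"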
| gpl-2.0 | 3,227,552,544,895,539,700 | 31.933908 | 114 | 0.566966 | false |
MPIBGC-TEE/CompartmentalSystems | src/CompartmentalSystems/helpers_reservoir.py | 1 | 44364 | # vim:set ff=unix expandtab ts=4 sw=4:
from typing import Callable, Tuple, Sequence, Set, Dict
from functools import lru_cache, _CacheInfo, _lru_cache_wrapper
import numpy as np
import matplotlib.pyplot as plt
import inspect
from collections import namedtuple
from numbers import Number
from scipy.integrate import odeint, quad
from scipy.interpolate import lagrange
from scipy.optimize import brentq
from scipy.stats import norm
from string import Template
from sympy import (
gcd,
diag,
lambdify,
DiracDelta,
solve,
Matrix,
Symbol,
Expr,
diff,
simplify,
eye
)
from sympy.polys.polyerrors import PolynomialError
from sympy.core.function import UndefinedFunction, Function, sympify
from sympy import Symbol
from collections.abc import Iterable
import networkx as nx
import igraph as ig
from frozendict import frozendict
from .BlockOde import BlockOde
from .myOdeResult import solve_ivp_pwc
ALPHA_14C = 1.18e-12
DECAY_RATE_14C_YEARLY = np.log(2) / 5730
DECAY_RATE_14C_DAILY = DECAY_RATE_14C_YEARLY / 365.25
def combine(m1, m2, m1_to_m2, m2_to_m1, intersect=False):
m1_sv_set, m1_in_fluxes, m1_out_fluxes, m1_internal_fluxes = m1
m2_sv_set, m2_in_fluxes, m2_out_fluxes, m2_internal_fluxes = m2
intersect_sv_set = m1_sv_set & m2_sv_set
if intersect_sv_set and not intersect:
raise(ValueError("How to handle pools %s?" % str(intersect_sv_set)))
sv_set = m1_sv_set | m2_sv_set
# create external in_fluxes
in_fluxes = dict()
# add all external in_fluxes of m1
for k, v in m1_in_fluxes.items():
if k in in_fluxes.keys():
in_fluxes[k] += v
else:
in_fluxes[k] = v
# remove flux from in_flux if it becomes internal
for pool_to in in_fluxes.keys():
for (pool_from, a), flux in m2_to_m1.items():
if a == pool_to:
in_fluxes[pool_to] -= flux
# add all external in_fluxes of m2
for k, v in m2_in_fluxes.items():
if k in in_fluxes.keys():
in_fluxes[k] += v
else:
in_fluxes[k] = v
# remove flux from in_flux if it becomes internal
for pool_to in in_fluxes.keys():
for (pool_from, a), flux in m1_to_m2.items():
if a == pool_to:
in_fluxes[pool_to] -= flux
# create external out_fluxes
out_fluxes = dict()
# add all external out_fluxes from m1
for k, v in m1_out_fluxes.items():
if k in out_fluxes.keys():
out_fluxes[k] += v
else:
out_fluxes[k] = v
# remove flux from out_flux if it becomes internal
for pool_from in out_fluxes.keys():
for (a, pool_to), flux in m1_to_m2.items():
if a == pool_from:
out_fluxes[pool_from] -= flux
# add all external out_fluxes from m2
for k, v in m2_out_fluxes.items():
if k in out_fluxes.keys():
out_fluxes[k] += v
else:
out_fluxes[k] = v
# remove flux from out_flux if it becomes internal
for pool_from in out_fluxes.keys():
for (a, pool_to), flux in m2_to_m1.items():
if a == pool_from:
out_fluxes[pool_from] -= flux
# create internal fluxes
internal_fluxes = dict()
dicts = [m1_internal_fluxes, m2_internal_fluxes, m1_to_m2, m2_to_m1]
for d in dicts:
for k, v in d.items():
if k in internal_fluxes.keys():
internal_fluxes[k] += v
else:
internal_fluxes[k] = v
# overwrite in_fluxes and out_fluxes for intersection pools
for sv in intersect_sv_set:
in_fluxes[sv] = intersect[0][sv]
out_fluxes[sv] = intersect[1][sv]
clean_in_fluxes = {k: v for k, v in in_fluxes.items() if v != 0}
clean_out_fluxes = {k: v for k, v in out_fluxes.items() if v != 0}
clean_internal_fluxes = {k: v for k, v in internal_fluxes.items() if v != 0}
return sv_set, clean_in_fluxes, clean_out_fluxes, clean_internal_fluxes
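# Worked sketch (hypothetical symbols, not from the original tests): combining
# m1 = ({x}, {x: u}, {x: k1*x}, {}) with m2 = ({y}, {}, {y: k2*y}, {}) via
# m1_to_m2 = {(x, y): k1*x} and m2_to_m1 = {} yields
# ({x, y}, {x: u}, {y: k2*y}, {(x, y): k1*x}):
# the former outflux of x becomes an internal flux and is removed from the
# external out_fluxes.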
def extract(m, sv_set, ignore_other_pools=False, supersede=False):
m_sv_set, m_in_fluxes, m_out_fluxes, m_internal_fluxes = m
assert(sv_set.issubset(m_sv_set))
in_fluxes = {pool: flux for pool, flux in m_in_fluxes.items() if pool in sv_set}
out_fluxes = {pool: flux for pool, flux in m_out_fluxes.items() if pool in sv_set}
internal_fluxes = {
(pool_from, pool_to): flux
for (pool_from, pool_to), flux in m_internal_fluxes.items()
if (pool_from in sv_set) and (pool_to in sv_set)
}
for (pool_from, pool_to), flux in m_internal_fluxes.items():
# internal flux becomes influx if not ignored
if not ignore_other_pools:
if (pool_from not in sv_set) and (pool_to in sv_set):
if pool_to in in_fluxes.keys():
in_fluxes[pool_to] += flux
else:
in_fluxes[pool_to] = flux
# internal flux becomes outflux if not ignored
if not ignore_other_pools:
if (pool_from in sv_set) and (pool_to not in sv_set):
if pool_from in out_fluxes.keys():
out_fluxes[pool_from] += flux
else:
out_fluxes[pool_from] = flux
# overwrite in_fluxes and out_fluxes if desired
if supersede:
for sv, flux in supersede[0].items():
in_fluxes[sv] = flux
for sv, flux in supersede[1].items():
out_fluxes[sv] = flux
for (pool_from, pool_to), flux in supersede[2].items():
internal_fluxes[pool_from, pool_to] = flux
clean_in_fluxes = {k: v for k, v in in_fluxes.items() if v != 0}
clean_out_fluxes = {k: v for k, v in out_fluxes.items() if v != 0}
clean_internal_fluxes = {k: v for k, v in internal_fluxes.items() if v != 0}
return sv_set, clean_in_fluxes, clean_out_fluxes, clean_internal_fluxes
def nxgraphs(
state_vector: Tuple[Symbol],
in_fluxes: Dict[Symbol, Expr],
internal_fluxes: Dict[Tuple[Symbol, Symbol], Expr],
out_fluxes: Dict[Symbol, Expr],
) -> nx.DiGraph:
G = nx.DiGraph()
node_names = [str(sv) for sv in state_vector]
G.add_nodes_from(node_names)
in_flux_targets, out_flux_sources = [
[str(k) for k in d.keys()]
for d in (in_fluxes, out_fluxes)
]
virtual_in_flux_sources = ["virtual_in_" + str(t) for t in in_flux_targets]
for n in virtual_in_flux_sources:
G.add_node(n, virtual=True)
for i in range(len(in_flux_targets)):
G.add_edge(
virtual_in_flux_sources[i],
in_flux_targets[i],
expr=in_fluxes[Symbol(in_flux_targets[i])]
)
virtual_out_flux_targets = [
"virtual_out_" + str(t)
for t in out_flux_sources
]
for n in virtual_out_flux_targets:
G.add_node(n, virtual=True)
for i in range(len(out_flux_sources)):
G.add_edge(
out_flux_sources[i],
virtual_out_flux_targets[i],
expr=out_fluxes[Symbol(out_flux_sources[i])]
)
#for c in internal_connections:
for c in internal_fluxes.keys():
G.add_edge(str(c[0]), str(c[1]),expr=internal_fluxes[c])
return G
def igraph_func_plot(
Gnx: nx.DiGraph, # note that Gnx has to have a 'virtual' attribute on some verteces
node_color_func: Callable[[nx.DiGraph,str],str],
edge_color_func: Callable[[nx.DiGraph,str,str],str],
) -> ig.drawing.Plot:
G = ig.Graph.from_networkx(Gnx)
vertex_size = [1 if v['virtual'] else 50 for v in G.vs]
vertex_color= [node_color_func(Gnx,v) for v in Gnx.nodes]
vertex_label = [v['_nx_name'] if not v['virtual'] else '' for v in G.vs]
edge_color = [edge_color_func(Gnx,s,t) for s, t in Gnx.edges]
edge_label= [Gnx.get_edge_data(s,t)['expr'] for s, t in Gnx.edges]
layout = G.layout('sugiyama')
pl = ig.plot(
G,
layout=layout,
vertex_size=vertex_size,
vertex_label=vertex_label,
vertex_color=vertex_color,
vertex_label_size=9,
edge_color=edge_color,
edge_label=edge_label,
edge_label_size=4,
)
return pl
def igraph_plot(
state_vector: Matrix,
in_fluxes: frozendict,
internal_fluxes: frozendict,
out_fluxes: frozendict
) -> ig.drawing.Plot:
Gnx = nxgraphs(state_vector, in_fluxes, internal_fluxes, out_fluxes)
def n_color(
G: nx.DiGraph,
node_name: str
) -> str:
return 'grey'
def e_color(
G: nx.DiGraph,
s: str,
t: str
) -> str:
return "blue" if G.in_degree(s) ==0 else (
'red' if G.out_degree(t) == 0 else 'black'
)
return igraph_func_plot(
Gnx,
node_color_func=n_color,
edge_color_func=e_color
)
def igraph_part_plot(
state_vector: Tuple[Symbol],
in_fluxes: Dict[Symbol, Expr],
internal_fluxes: Dict[Tuple[Symbol, Symbol], Expr],
out_fluxes: Dict[Symbol, Expr],
part_dict: Dict[Set[str], str]
) -> ig.drawing.Plot:
Gnx = nxgraphs(state_vector, in_fluxes, internal_fluxes, out_fluxes)
def n_color(G,node_name):
cs=set({})
for var_set, color in part_dict.items():
var_set_str = frozenset({str(v) for v in var_set})
# we could have multicolored nodes if the variable sets overlap
# but igraph does not support it
cs = cs.union(set({color})) if node_name in var_set_str else cs
return 'grey' if len(cs) == 0 else list(cs)[0]
def e_color(
G: nx.DiGraph,
s: str,
t: str
) -> str:
return "blue" if G.in_degree(s) ==0 else (
'red' if G.out_degree(t) == 0 else 'black'
)
return igraph_func_plot(
Gnx,
node_color_func=n_color,
edge_color_func=e_color
)
def to_int_keys_1(flux_by_sym, state_vector):
return {list(state_vector).index(k):v for k,v in flux_by_sym.items()}
def to_int_keys_2(fluxes_by_sym_tup, state_vector):
return{
(list(state_vector).index(k[0]),list(state_vector).index(k[1])):v
for k,v in fluxes_by_sym_tup.items()
}
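# Example sketch (hypothetical symbols): with state_vector = Matrix([x, y]),
# to_int_keys_1({y: k*y}, state_vector) == {1: k*y} and
# to_int_keys_2({(x, y): k*x}, state_vector) == {(0, 1): k*x}.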
def in_or_out_flux_tuple(
state_vector,
in_or_out_fluxes_by_symbol
):
keys = in_or_out_fluxes_by_symbol.keys()
def f(ind):
v = state_vector[ind]
return in_or_out_fluxes_by_symbol[v] if v in keys else 0
return Matrix([f(ind) for ind in range(len(state_vector))])
def release_operator_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
):
decomp_rates = []
for pool in range(len(state_vector)):
if pool in out_fluxes_by_index.keys():
decomp_flux = out_fluxes_by_index[pool]
else:
decomp_flux = 0
decomp_flux += sum([flux for (i,j), flux in internal_fluxes_by_index.items()
if i == pool])
decomp_rates.append(simplify(decomp_flux/state_vector[pool]))
R = diag(*decomp_rates)
return R
def release_operator_2(
out_fluxes_by_symbol,
internal_fluxes_by_symbol,
state_vector
):
return release_operator_1(
to_int_keys_1(out_fluxes_by_symbol, state_vector),
to_int_keys_2(internal_fluxes_by_symbol,state_vector),
state_vector
)
def tranfer_operator_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
):
R = release_operator_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
)
# calculate transition operator
return transfer_operator_3(
internal_fluxes_by_index,
R,
state_vector
)
def transfer_operator_2(
out_fluxes_by_symbol,
internal_fluxes_by_symbol,
state_vector
):
return tranfer_operator_1(
to_int_keys_1( out_fluxes_by_symbol, state_vector),
to_int_keys_2( internal_fluxes_by_symbol, state_vector),
state_vector
)
def transfer_operator_3(
# this is just a shortcut if we know R already
internal_fluxes_by_index,
release_operator,
state_vector
):
# calculate transition operator
T = -eye(len(state_vector))
for (i, j), flux in internal_fluxes_by_index.items():
T[j, i] = flux/state_vector[i]/release_operator[i, i]
return T
def compartmental_matrix_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
):
C = -1*release_operator_1(
out_fluxes_by_index,
internal_fluxes_by_index,
state_vector
)
for (i, j), flux in internal_fluxes_by_index.items():
C[j, i] = flux/state_vector[i]
return C
def compartmental_matrix_2(
out_fluxes_by_symbol,
internal_fluxes_by_symbol,
state_vector
):
return compartmental_matrix_1(
to_int_keys_1( out_fluxes_by_symbol, state_vector),
to_int_keys_2( internal_fluxes_by_symbol, state_vector),
state_vector
)
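# Worked sketch (hypothetical 2-pool model): with state_vector = Matrix([x, y]),
# out_fluxes_by_symbol = {y: d*y} and internal_fluxes_by_symbol = {(x, y): a*x},
# compartmental_matrix_2(...) returns Matrix([[-a, 0], [a, -d]]):
# diagonal entries are the negative total release rates of each pool and
# off-diagonal entries are the transfer rates.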
def in_fluxes_by_index(state_vector, u):
return {
pool_nr: u[pool_nr]
for pool_nr in range(state_vector.rows)
}
def in_fluxes_by_symbol(state_vector,u):
return {
state_vector[pool_nr]: u[pool_nr]
for pool_nr in range(state_vector.rows)
if u[pool_nr] != 0
}
def out_fluxes_by_index(state_vector,B):
output_fluxes = dict()
# calculate outputs
for pool in range(state_vector.rows):
outp = -sum(B[:, pool]) * state_vector[pool]
s_outp = simplify(outp)
if s_outp:
output_fluxes[pool] = s_outp
return output_fluxes
def out_fluxes_by_symbol(state_vector,B):
fbi = out_fluxes_by_index(state_vector,B)
return {
state_vector[pool_nr]: flux
for pool_nr, flux in fbi.items()
}
def internal_fluxes_by_index(state_vector,B):
# calculate internal fluxes
internal_fluxes = dict()
pipes = [(i,j) for i in range(state_vector.rows)
for j in range(state_vector.rows) if i != j]
for pool_from, pool_to in pipes:
flux = B[pool_to, pool_from] * state_vector[pool_from]
s_flux = simplify(flux)
if s_flux:
internal_fluxes[(pool_from, pool_to)] = s_flux
return internal_fluxes
def internal_fluxes_by_symbol(state_vector,B):
fbi = internal_fluxes_by_index(state_vector,B)
return {
(state_vector[tup[0]],state_vector[tup[1]]): flux
for tup,flux in fbi.items()
}
#def fluxes_by_symbol(state_vector, fluxes_by_index):
# internal_fluxes, out_fluxes = fluxes_by_index
def warning(txt):
print('############################################')
calling_frame = inspect.getouterframes(inspect.currentframe(), 2)
func_name = calling_frame[1][3]
print("Warning in function {0}:".format(func_name))
print(txt)
def deprecation_warning(txt):
print('############################################')
calling_frame = inspect.getouterframes(inspect.currentframe(), 2)
func_name = calling_frame[1][3]
print("The function {0} is deprecated".format(func_name))
print(txt)
def flux_dict_string(d, indent=0):
s = ""
for k, val in d.items():
s += ' '*indent+str(k)+": "+str(val)+"\n"
return s
def func_subs(t, Func_expr, func, t0):
"""
returns the function part_func
where part_func(_, _, ...) = func(_, t0, _, ...) (func partially applied to t0)
The position of argument t in the argument list is found
by examining the Func_expr argument.
Args:
t (sympy.symbol): the symbol to be replaced by t0
t0 (value) : the value to which the function will be applied
func (function) : a python function
Func_expr (sympy.Function) : an expression involving an undefined Function
"""
assert(isinstance(type(Func_expr), UndefinedFunction))
pos = Func_expr.args.index(t)
def frozen(*args):
# tuples are immutable
L = list(args)
L.insert(pos, t0)
new_args = tuple(L)
return func(*new_args)
return frozen
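# Example sketch (hypothetical numeric callable, not from the original module):
#   t, x = Symbol('t'), Symbol('x')
#   g = func_subs(t, Function('F')(x, t), lambda xv, tv: xv + tv, 2.0)
#   g(3.0)  # == 5.0, i.e. the numeric function with t frozen to 2.0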
def jacobian(vec, state_vec):
dim1 = vec.rows
dim2 = state_vec.rows
return Matrix(dim1, dim2, lambda i, j: diff(vec[i], state_vec[j]))
# fixme: test
def has_pw(expr):
if expr.is_Matrix:
for c in list(expr):
if has_pw(c):
return True
return False
if expr.is_Piecewise:
return True
for a in expr.args:
if has_pw(a):
return True
return False
def is_DiracDelta(expr):
"""Check if expr is a Dirac delta function."""
if len(expr.args) != 1:
return False
arg = expr.args[0]
return DiracDelta(arg) == expr
def parse_input_function(u_i, time_symbol):
"""Return an ordered list of jumps in the input function u.
Args:
u (SymPy expression): input function in :math:`\\dot{x} = B\\,x + u`
Returns:
ascending list of jumps in u
"""
impulse_times = []
pieces = []
def rek(expr, imp_t, p):
if hasattr(expr, 'args'):
for arg in expr.args:
if is_DiracDelta(arg):
dirac_arg = arg.args[0]
zeros = solve(dirac_arg)
imp_t += zeros
if arg.is_Piecewise:
for pw_arg in arg.args:
cond = pw_arg[1]
# 'if not cond' led to strange behavior
if cond != True: # noqa: E712
atoms = cond.args
zeros = solve(atoms[0] - atoms[1])
p += zeros
rek(arg, imp_t, p)
rek(u_i, impulse_times, pieces)
impulses = []
impulse_times = sorted(impulse_times)
for impulse_time in impulse_times:
intensity = u_i.coeff(DiracDelta(impulse_time-time_symbol))
impulses.append({'time': impulse_time, 'intensity': intensity})
jump_times = sorted(pieces + impulse_times)
return (impulses, jump_times)
def factor_out_from_matrix(M):
if has_pw(M):
return(1)
try:
return gcd(list(M))
except(PolynomialError):
# print('no factoring out possible')
# fixme: does not work if a function of X, t is in the expressions,
# we could make it work...if we really wanted to
return 1
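# Example sketch (hypothetical symbols): factor_out_from_matrix(Matrix([2*k*x, 4*k*y]))
# should return 2*k, the gcd of the entries, and returns 1 whenever the matrix
# contains Piecewise expressions.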
def numerical_function_from_expression(expr, tup, parameter_dict, func_set):
# the function returns a function that given numeric arguments
# returns a numeric result.
# This is a more specific requirement than a function returned by lambdify
# which can still return symbolic
# results if the tuple argument to lambdify does not contain all free
# symbols of the lambdified expression.
# To avoid this case here we check this.
expr_par = expr.subs(parameter_dict)
ss_expr = expr_par.free_symbols
ss_tup = set([s for s in tup])
if not(ss_expr.issubset(ss_tup)):
raise Exception(
"""The free symbols of the expression: ${0} are not a subset of
the symbols in the tuple argument:${1}
""".format(ss_expr, ss_tup))
cut_func_set = make_cut_func_set(func_set)
# expr_par=expr.subs(parameter_dict)
expr_func = lambdify(tup, expr_par, modules=[cut_func_set, 'numpy'])
def expr_func_safe_0_over_0(*val):
with np.errstate(invalid='raise'):
try:
res = expr_func(*val)
except FloatingPointError as e:
if e.args[0] == 'invalid value encountered in double_scalars':
with np.errstate(invalid='ignore'):
res = expr_func(*val)
res = np.nan_to_num(res, copy=False)
return res
return expr_func_safe_0_over_0
def numerical_rhs(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_dict
):
FL = numerical_function_from_expression(
rhs,
(time_symbol,)+tuple(state_vector),
parameter_dict,
func_dict
)
# 2.) Write a wrapper that transforms Matrices to numpy.ndarrays and
# accepts array instead of the separate arguments for the states)
def num_rhs(t, X):
# we need the arguments to be numpy arrays to be able to catch 0/0
# Holger: I made 0/0 being caught by a subfunction in
# numerical_function_from_expression
# Y = (np.array([x]) for x in X) # Markus' version, led to
# deprecation warnings, so I rewrote it to (Holger):
Y = np.array([x for x in X]) #
Fval = FL(t, *Y)
return Fval.reshape(X.shape,)
return num_rhs
def numerical_rhs_old(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_set,
times
):
FL = numerical_function_from_expression(
rhs,
tuple(state_vector) + (time_symbol,),
parameter_dict,
func_set
)
# 2.) Write a wrapper that transforms Matrices to numpy.ndarrays and accepts
# array instead of the separate arguments for the states)
def num_rhs(X, t):
Fval = FL(*X, t)
return Fval.reshape(X.shape,)
def bounded_num_rhs(X, t):
# fixme 1:
# maybe odeint (or another integrator)
# can be told >>not<< to look outside
# the interval
# fixme 2:
# actually the times vector is not the smallest
# possible allowed set but the intersection of
# all the intervals where the
# time dependent functions are defined
# this should be tested in init
t_max = times[-1]
# fixme: we should die hard here, because now we think we can compute
# the state transition operator till any time in the future,
# but it is actually biased by the fact that we use the last value
# over and over again
# and hence assume some "constant" future
if t > t_max:
res = num_rhs(X, t_max)
else:
res = num_rhs(X, t)
# print('brhs', 't', t, 'X', X, 'res', res)
# print('t', t)
return res
return bounded_num_rhs
def numsol_symbolic_system_old(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_set,
start_values,
times
):
nr_pools = len(state_vector)
if times[0] == times[-1]:
return start_values.reshape((1, nr_pools))
num_rhs = numerical_rhs_old(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_set,
times
)
return odeint(num_rhs, start_values, times, mxstep=10000)
def numsol_symbolical_system(
state_vector,
time_symbol,
rhs,
parameter_dicts,
func_dicts,
start_values,
times,
disc_times=()
):
assert(isinstance(parameter_dicts, Iterable))
assert(isinstance(func_dicts, Iterable))
nr_pools = len(state_vector)
t_min = times[0]
t_max = times[-1]
if times[0] == times[-1]:
return start_values.reshape((1, nr_pools))
num_rhss = tuple(
numerical_rhs(
state_vector,
time_symbol,
rhs,
parameter_dict,
func_dict
)
for parameter_dict, func_dict in zip(parameter_dicts, func_dicts)
)
res = solve_ivp_pwc(
rhss=num_rhss,
t_span=(t_min, t_max),
y0=start_values,
t_eval=tuple(times),
disc_times=disc_times
)
# adapt to the old ode_int interface
# since our code at the moment expects it
values = np.rollaxis(res.y, -1, 0)
return (values, res.sol)
def arrange_subplots(n):
if n <= 3:
rows = 1
cols = n
if n == 4:
rows = 2
cols = 2
if n >= 5:
rows = n // 3
if n % 3 != 0:
rows += 1
cols = 3
return (rows, cols)
def melt(ndarr, identifiers=None):
shape = ndarr.shape
if identifiers is None:
identifiers = [range(shape[dim]) for dim in range(len(shape))]
def rek(struct, ids, melted_list, dim):
if type(struct) != np.ndarray:
melted_list.append(ids + [struct])
else:
shape = struct.shape
for k in range(shape[0]):
rek(struct[k], ids + [identifiers[dim][k]], melted_list, dim+1)
melted_list = []
rek(ndarr, [], melted_list, 0)
rows = len(melted_list)
cols = len(melted_list[0])
melted = np.array(melted_list).reshape((rows, cols))
return melted
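# Example sketch: melt(np.array([[1., 2.], [3., 4.]])) returns rows of
# [row_index, col_index, value], i.e.
# array([[0., 0., 1.], [0., 1., 2.], [1., 0., 3.], [1., 1., 4.]]);
# custom identifiers replace the default range indices position-wise.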
# fixme: test
# compute inverse of CDF at u for quantiles or generation of random variables
#def generalized_inverse_CDF(CDF, u, start_dist=1e-4, tol=1e-8):
def generalized_inverse_CDF(CDF, u, x1=0.0, tol=1e-8):
y1 = -1
def f(a):
# print("HR 398", x1, y1, u)
return u-CDF(a)
x0 = 0.0
y1 = f(x1)
if (y1 <= 0):
if x1 == 0.0:
# print("schon fertig", "\n"*200)
return x1
else:
x1 = 0.0
y1 = f(x1)
if y1 <= 0:
return x1
# go so far to the right such that CDF(x1) > u, the bisect in
# interval [0, x1]
while y1 >= 0:
x0 = x1
x1 = x1*2 + 0.1
y1 = f(x1)
if np.isnan(y1):
res = np.nan
else:
res, root_results = brentq(f, x0, x1, xtol=tol, full_output=True)
if not root_results.converged:
print("quantile convegence failed")
# if f(res) > tol: res = np.nan
# print('gi_res', res)
# print('finished', method_f.__name__, 'on [0,', x1, ']')
return res
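# Example sketch (norm is imported from scipy.stats above):
# generalized_inverse_CDF(norm.cdf, 0.975) is approximately 1.96, the 97.5%
# quantile of the standard normal distribution recovered via brentq.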
# draw a random variable with given CDF
def draw_rv(CDF):
return generalized_inverse_CDF(CDF, np.random.uniform())
# return function g, such that g(normally distributed sv) is distributed
# according to CDF
def stochastic_collocation_transform(M, CDF):
# collocation points for normal distribution,
# taken from Table 10 in Appendix 3 of Grzelak2015SSRN
cc_data = {
2: [1],
3: [0.0, 1.7321],
4: [0.7420, 2.3344],
5: [0.0, 1.3556, 2.8570],
6: [0.6167, 1.8892, 3.3243],
7: [0.0, 1.1544, 2.3668, 3.7504],
8: [0.5391, 1.6365, 2.8025, 4.1445],
9: [0.0, 1.0233, 2.0768, 3.2054, 4.5127],
10: [0.4849, 1.4660, 2.8463, 3.5818, 4.8595], # noqa: E131
11: [0.0, 0.9289, 1.8760, 2.8651, 3.9362, 5.1880] # noqa: E131
}
if M not in cc_data.keys():
return None
cc_points = [-x for x in reversed(cc_data[M]) if x != 0.0] + cc_data[M]
cc_points = np.array(cc_points)
# print('start computing collocation transform')
ys = np.array(
[generalized_inverse_CDF(CDF, norm.cdf(x)) for x in cc_points]
)
# print('ys', ys)
# print('finished computing collocation transform')
return lagrange(cc_points, ys)
# Metropolis-Hastings sampling for PDFs with nonnegative support
# no thinning, no burn-in period
def MH_sampling(N, PDF, start=1.0):
xvec = np.ndarray((N,))
x = start
PDF_x = PDF(x)
norm_cdf_x = norm.cdf(x)
for i in range(N):
xs = -1.0
while xs <= 0:
xs = x + np.random.normal()
PDF_xs = PDF(xs)
A1 = PDF_xs/PDF_x
norm_cdf_xs = norm.cdf(xs)
A2 = norm_cdf_x/norm_cdf_xs
A = A1 * A2
if np.random.uniform() < A:
x = xs
PDF_x = PDF_xs
norm_cdf_x = norm_cdf_xs
xvec[i] = x
return xvec
def save_csv(filename, melted, header):
np.savetxt(
filename,
melted,
header=header,
delimiter=',',
fmt="%10.8f",
comments=''
)
def load_csv(filename):
return np.loadtxt(filename, skiprows=1, delimiter=',')
def tup2str(tup):
# uses for stoichiometric models
string = Template("${f}_${s}").substitute(f=tup[0], s=tup[1])
return(string)
# use only every (k_1,k_2,...,k_n)th element of the n-dimensional numpy array
# data,
# strides is a list of k_j of length n
# always include first and last elements
def stride(data, strides):
if isinstance(strides, int):
strides = [strides]
index_list = []
for dim in range(data.ndim):
n = data.shape[dim]
stride = strides[dim]
ind = np.arange(0, n, stride).tolist()
if (n-1) % stride != 0:
ind.append(n-1)
index_list.append(ind)
return data[np.ix_(*index_list)]
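# Example sketch: stride(np.arange(11), [3]) returns array([0, 3, 6, 9, 10]);
# every 3rd element is kept and the last element is always included.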
def is_compartmental(M):
gen = range(M.shape[0])
return all(
[
M.is_square,
all([M[j, j] <= 0 for j in gen]),
all([sum(M[:, j]) <= 0 for j in gen])
]
)
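# Example sketch: is_compartmental(Matrix([[-1, 0], [0.5, -2]])) is True, since
# the matrix is square, its diagonal entries are non-positive and every column
# sum is non-positive.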
def make_cut_func_set(func_set):
def unify_index(expr):
# for the case Function('f'):f_numeric
if isinstance(expr, UndefinedFunction):
res = str(expr)
# for the case {f(x, y): f_numeric} f(x, y)
elif isinstance(expr, Symbol):
res = str(expr)
elif isinstance(expr, Function):
res = str(type(expr))
elif isinstance(expr, str):
expr = sympify(expr)
res = unify_index(expr)
else:
print(type(expr))
raise(TypeError(
"""
funcset indices should be indexed by instances of
sympy.core.functions.UndefinedFunction
"""
))
return res
cut_func_set = {unify_index(key): val for key, val in func_set.items()}
return cut_func_set
def f_of_t_maker(sol_funcs, ol):
def ot(t):
sv = [sol_funcs[i](t) for i in range(len(sol_funcs))]
tup = tuple(sv) + (t,)
res = ol(*tup)
return res
return ot
def const_of_t_maker(const):
def const_arr_fun(possible_vec_arg):
if isinstance(possible_vec_arg, Number):
return const # also a number
else:
return const*np.ones_like(possible_vec_arg)
return const_arr_fun
def x_phi_ode(
srm,
parameter_dicts,
func_dicts,
x_block_name='x',
phi_block_name='phi',
disc_times=()
):
nr_pools = srm.nr_pools
sol_rhss = []
for pd, fd in zip(parameter_dicts, func_dicts):
sol_rhs = numerical_rhs(
srm.state_vector,
srm.time_symbol,
srm.F,
pd,
fd
)
sol_rhss.append(sol_rhs)
B_sym = srm.compartmental_matrix
tup = (srm.time_symbol,) + tuple(srm.state_vector)
B_funcs_non_lin = []
for pd, fd in zip(parameter_dicts, func_dicts):
B_func_non_lin = numerical_function_from_expression(
B_sym,
tup,
pd,
fd
)
B_funcs_non_lin.append(B_func_non_lin)
def Phi_rhs_maker(B_func_non_lin):
def Phi_rhs(t, x, Phi_2d):
return np.matmul(B_func_non_lin(t, *x), Phi_2d)
return Phi_rhs
Phi_rhss = []
for B_func_non_lin in B_funcs_non_lin:
Phi_rhss.append(Phi_rhs_maker(B_func_non_lin))
functionss = []
for sol_rhs, Phi_rhs in zip(sol_rhss, Phi_rhss):
functions = [
(sol_rhs, [srm.time_symbol.name, x_block_name]),
(Phi_rhs, [srm.time_symbol.name, x_block_name, phi_block_name])
]
functionss.append(functions)
return BlockOde(
time_str=srm.time_symbol.name,
block_names_and_shapes=[
(x_block_name, (nr_pools,)),
(phi_block_name, (nr_pools, nr_pools,))
],
functionss=functionss,
disc_times=disc_times
)
def integrate_array_func_for_nested_boundaries(
f: Callable[[float], np.ndarray],
integrator: Callable[
[
Callable[[float], np.ndarray],
float,
float
],
np.ndarray
], # e.g. array_quad_result
tuples: Sequence[Tuple[float, float]]
) -> Sequence[float]:
# we assume that the first (a,b) tuple contains the second,
# the second the third and so on from outside to inside
def compute(f, tuples, results: Sequence[float]):
(a_out, b_out), *tail = tuples
if len(tail) == 0:
# number=quad(f, a_out, b_out)[0]
arr = integrator(f, a_out, b_out)
else:
(a_in, b_in) = tail[0]
results = compute(f, tail, results)
arr = (
integrator(f, a_out, a_in)
+ results[0]
+ integrator(f, b_in, b_out)
)
results = [arr] + results
return results
return compute(f, tuples, [])
def array_quad_result(
f: Callable[[float], np.ndarray],
a: float,
b: float,
epsrel=1e-3, # epsabs would be a dangerous default
*args,
**kwargs
) -> np.ndarray:
# integrates a vector-valued function of a single argument:
# we transform the result array of the function into a one-dimensional
# vector, compute the result for every component,
# and reshape the result to the form of the integrand
test = f(a)
n = len(test.flatten())
vec = np.array(
[quad(
lambda t:f(t).reshape(n,)[i],
a,
b,
*args,
epsrel=epsrel,
**kwargs
)[0] for i in range(n)]
)
return vec.reshape(test.shape)
def array_integration_by_ode(
f: Callable[[float], np.ndarray],
a: float,
b: float,
*args,
**kwargs
) -> np.ndarray:
# another integrator like array_quad_result
test = f(a)
n = len(test.flatten())
def rhs(tau, X):
# although we do not need X we have to provide a
# right-hand side suitable for solve_ivp
# avoid overshooting if the solver
# tries to look where the integrand might not be defined
if tau < a or tau > b:
return 0
else:
return f(tau).flatten()
ys = solve_ivp_pwc(
rhss=(rhs,),
y0=np.zeros(n),
t_span=(a, b)
).y
val = ys[:, -1].reshape(test.shape)
return val
def array_integration_by_values(
f: Callable[[float], np.ndarray],
taus: np.ndarray,
*args,
**kwargs,
) -> np.ndarray:
# only allow taus as vector
assert(len(taus.shape) == 1)
assert(len(taus) > 0)
test = f(taus[0])
# create a big 2 dimensional array suitable for trapz
integrand_vals = np.stack([f(tau).flatten() for tau in taus], 1)
vec = np.trapz(y=integrand_vals, x=taus)
return vec.reshape(test.shape)
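# Example sketch: with f = lambda tau: np.array([tau, tau**2]) and
# taus = np.linspace(0, 1, 101), array_integration_by_values(f, taus) is
# approximately array([0.5, 0.3333]) (trapezoidal rule over the sampled values).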
def x_phi_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name):
x_s = np.array(x_s)
nr_pools = len(x_s)
start_Phi_2d = np.identity(nr_pools)
start_blocks = [
(x_block_name, x_s),
(phi_block_name, start_Phi_2d)
]
blivp = block_ode.blockIvp(start_blocks)
return blivp
def phi_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name):
blivp = x_phi_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name)
phi_func = blivp.block_solve_functions(t_span=(s, t_max))[phi_block_name]
return phi_func
@lru_cache()
def x_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name):
blivp = x_phi_tmax(s, t_max, block_ode, x_s, x_block_name, phi_block_name)
x_func = blivp.block_solve_functions(t_span=(s, t_max))[x_block_name]
return x_func
_CacheStats = namedtuple(
'CacheStats',
['hitss', 'missess', 'currsizes', 'hitting_ratios']
)
def custom_lru_cache_wrapper(maxsize=None, typed=False, stats=False):
if stats:
hitss = []
missess = []
currsizes = []
hitting_ratios = []
def decorating_function(user_function):
func = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
def wrapper(*args, **kwargs):
nonlocal stats, hitss, missess, currsizes, hitting_ratios
result = func(*args, **kwargs)
if stats:
hitss.append(func.cache_info().hits)
missess.append(func.cache_info().misses)
currsizes.append(func.cache_info().currsize)
hitting_ratios.append(
round(hitss[-1]/(hitss[-1]+missess[-1])*100.0, 2)
)
return result
wrapper.cache_info = func.cache_info
if stats:
def cache_stats():
nonlocal hitss, missess, currsizes
return _CacheStats(hitss, missess, currsizes, hitting_ratios)
wrapper.cache_stats = cache_stats
def plot_hitss():
nonlocal hitss
plt.plot(range(len(hitss)), hitss)
plt.title('Hitss')
plt.show()
wrapper.plot_hitss = plot_hitss
def plot_hit_history():
nonlocal hitss
plt.scatter(
range(len(hitss)-1),
np.diff(hitss),
s=1,
alpha=0.2
)
plt.title('Hit history')
plt.show()
wrapper.plot_hit_history = plot_hit_history
def plot_hitting_ratios():
nonlocal hitss, hitting_ratios
plt.plot(
range(len(hitss)),
hitting_ratios
)
plt.title('Hitting ratios')
plt.show()
wrapper.plot_hitting_ratios = plot_hitting_ratios
def plot_currsizes():
nonlocal currsizes
plt.plot(
range(len(currsizes)),
currsizes
)
plt.title('Currsizes')
plt.show()
wrapper.plot_currsizes = plot_currsizes
def plot_hitting_ratios_over_currsizes():
nonlocal hitting_ratios, currsizes
plt.plot(
range(len(hitting_ratios)),
[hitting_ratios[i]/currsizes[i]
for i in range(len(hitting_ratios))]
)
plt.title('Hitting ratios over currsizes')
plt.show()
wrapper.plot_hitting_ratios_over_currsizes =\
plot_hitting_ratios_over_currsizes
def plot_hitting_ratios_vs_currsizes():
nonlocal hitting_ratios, currsizes
plt.plot(
currsizes,
hitting_ratios
)
plt.title('Hitting ratios vs currsizes')
plt.show()
wrapper.plot_hitting_ratios_vs_currsizes =\
plot_hitting_ratios_vs_currsizes
def cache_clear():
nonlocal hitss, missess, currsizes
hitss = []
missess = []
currsizes = []
func.cache_clear()
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
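# Hypothetical usage sketch (the decorated function below is invented for
# illustration): with stats=True the wrapper records hits/misses after every
# call and exposes the plotting helpers defined above.
def _example_cached_square_stats():
    cached_square = custom_lru_cache_wrapper(maxsize=None, stats=True)(lambda x: x * x)
    for x in (1, 2, 1, 2, 3):
        cached_square(x)
    return cached_square.cache_stats()  # histories of hits, misses, currsizes, ratios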
def print_quantile_error_statisctics(qs_ode, qs_pi):
print('ODE :', ['{: 7.2f}'.format(v) for v in qs_ode])
print('Expl. :', ['{: 7.2f}'.format(v) for v in qs_pi])
abs_err = np.abs(qs_ode-qs_pi)
print('abs. err. :', ['{: 7.2f}'.format(v) for v in abs_err])
rel_err = np.abs(qs_ode-qs_pi)/qs_pi * 100
print('rel. err. (%):', ['{: 7.2f}'.format(v) for v in rel_err])
print()
print('mean abs. err :', '{: 6.2f}'.format(abs_err.mean()))
print('mean rel. err (%):', '{: 6.2f}'.format(rel_err.mean()))
print('max. abs. err :', '{: 6.2f}'.format(np.max(abs_err)))
print('max. rel. err (%):', '{: 6.2f}'.format(np.max(rel_err)))
print()
def net_Fs_from_discrete_Bs_and_xs(Bs, xs):
nr_pools = xs.shape[1]
nt = len(Bs)
net_Fs = np.zeros((nt, nr_pools, nr_pools))
for k in range(nt):
for j in range(nr_pools):
for i in range(nr_pools):
if i != j:
net_Fs[k, i, j] = Bs[k, i, j] * xs[k, j]
return net_Fs
def net_Rs_from_discrete_Bs_and_xs(Bs, xs, decay_corr=None):
nr_pools = xs.shape[1]
nt = len(Bs)
if decay_corr is None:
decay_corr = np.ones((nt,))
net_Rs = np.zeros((nt, nr_pools))
for k in range(nt):
for j in range(nr_pools):
net_Rs[k, j] = (1-sum(Bs[k, :, j])*decay_corr[k]) * xs[k, j]
return net_Rs
def net_Us_from_discrete_Bs_and_xs(Bs, xs):
nr_pools = xs.shape[1]
nt = len(Bs)
net_Us = np.zeros((nt, nr_pools))
for k in range(nt):
net_Us[k] = xs[k+1] - Bs[k] @ xs[k]
return net_Us
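# The three helpers above reconstruct discrete quantities from Bs and xs:
#   net internal flux from pool j to pool i over step k: F_k[i, j] = B_k[i, j] * x_k[j]  (i != j)
#   net release from pool j: R_k[j] = (1 - decay_corr[k] * sum_i B_k[i, j]) * x_k[j]
#   net external input: u_k = x_{k+1} - B_k @ x_k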
def check_parameter_dict_complete(model, parameter_dict, func_set):
    """Check if the parameter set and the function set are complete
    enough to enable a model run.
Args:
model (:class:`~.smooth_reservoir_model.SmoothReservoirModel`):
The reservoir model on which the model run bases.
parameter_dict (dict): ``{x: y}`` with ``x`` being a SymPy symbol
and ``y`` being a numerical value.
func_set (dict): ``{f: func}`` with ``f`` being a SymPy symbol and
``func`` being a Python function. Defaults to ``dict()``.
Returns:
        free_names (set): set of names of free symbols that are covered
            neither by ``parameter_dict`` nor by ``func_set``; the inputs
            are complete if this set is empty
"""
free_symbols = model.F.subs(parameter_dict).free_symbols
# print('fs', free_symbols)
free_symbols -= {model.time_symbol}
# print(free_symbols)
free_symbols -= set(model.state_vector)
# print(free_symbols)
    # remove function names, which are given as strings
free_names = set([symbol.name for symbol in free_symbols])
func_names = set([key for key in func_set.keys()])
free_names = free_names - func_names
return free_names
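# Hypothetical usage (the symbols and functions are invented for illustration):
#
#     leftover = check_parameter_dict_complete(srm, {k_1: 0.5}, {u_sym: u_func})
#     if leftover:
#         raise ValueError("model run is missing values for: %s" % leftover)
#
# An empty set means every non-state, non-time symbol is covered.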
def F_Delta_14C(C12, C14, alpha=None):
if alpha is None:
alpha = ALPHA_14C
C12[C12 == 0] = np.nan
return (C14/C12/alpha - 1) * 1000
def densities_to_distributions(
densities: Callable[[float],np.ndarray],
nr_pools: int
)->Callable[[float],np.ndarray]:
    def distributions(A: float) -> np.ndarray:
return np.array(
[
quad(
lambda a:densities(a)[i],
-np.inf,
A
)[0]
for i in range(nr_pools)
]
)
return distributions
def pool_wise_bin_densities_from_smooth_densities_and_index(
densities: Callable[[float],np.ndarray],
nr_pools: int,
dt: float,
)->Callable[[int],np.ndarray]:
def content(ai:int)->np.ndarray:
da = dt
return np.array(
[
quad(
lambda a:densities(a)[i],
ai*da,
(ai+1)*da
)[0] / da
for i in range(nr_pools)
]
)
return content
def pool_wise_bin_densities_from_smooth_densities_and_indices(
densities: Callable[[float],np.ndarray],
nr_pools: int,
dt: float,
)->Callable[[np.ndarray],np.ndarray]:
bin_density = pool_wise_bin_densities_from_smooth_densities_and_index(
densities,
nr_pools,
dt
)
# vectorize it
def bin_densities(age_bin_indices: np.ndarray)->np.ndarray:
return np.stack(
[
bin_density(ai)
for ai in age_bin_indices
],
axis=1
)
return bin_densities
def negative_indicies_to_zero(
f: Callable[[np.ndarray],np.ndarray]
)->Callable[[np.ndarray],np.ndarray]:
def wrapper(age_bin_indices):
arr_true = f(age_bin_indices)
nr_pools = arr_true.shape[0]
return np.stack(
[
np.where(
age_bin_indices >=0,
arr_true[ip,:],
0
)
for ip in range(nr_pools)
]
)
return wrapper
# make sure that the start age distribution
# yields zero for negative ages or indices
def p0_maker(
start_age_densities_of_bin: Callable[[int],np.ndarray],
):
def p0(ai):
res = start_age_densities_of_bin(ai)
if ai >= 0:
return res
else:
return np.zeros_like(res)
return p0
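# Hypothetical end-to-end sketch (the exponential start-age density is an assumed
# example): discretise a smooth two-pool density into age bins of width dt and use
# p0_maker so that negative bin indices yield zeros.
def _example_start_age_bins(dt=0.5):
    dens = lambda a: np.exp(-a) * np.ones(2)
    bin_dens = pool_wise_bin_densities_from_smooth_densities_and_index(dens, 2, dt)
    p0 = p0_maker(bin_dens)
    return p0(-1), p0(0)  # zeros for the negative bin, averaged density for bin 0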
| mit | 7,113,966,077,673,015,000 | 27.329502 | 88 | 0.557952 | false |
yuxiang-zhou/deepmachine | deepmachine/contrib/training/Recognition.py | 1 | 4013 | # basic library
import os
import shutil
import math
import time
import h5py
import menpo.io as mio
import menpo3d.io as m3io
import numpy as np
from pathlib import Path
from functools import partial
# deepmachine
import keras
import tensorflow as tf
import deepmachine as dm
from deepmachine.utils.machine import multi_gpu_model
# flag definitions
from deepmachine.flags import FLAGS
def format_folder(FLAGS):
post_fix = 'lr{:.5f}_d{:.3f}_b{:03d}'.format(
FLAGS.lr, FLAGS.lr_decay, FLAGS.batch_size
)
logdir = FLAGS.logdir if 'model_' in FLAGS.logdir else "{}/model_{}".format(
FLAGS.logdir, post_fix
)
return logdir
def main():
tf.reset_default_graph()
BATCH_SIZE = FLAGS.batch_size
INPUT_SHAPE = 112
INPUT_CHANNELS = 3
NF = 64
N_CLASSES = 8631
LR = FLAGS.lr
LOGDIR = format_folder(FLAGS)
# Dataset
def build_data():
features = dm.utils.union_dict([
dm.data.provider.features.image_feature(),
dm.data.provider.features.tensor_feature('uv'),
dm.data.provider.features.array_feature('label'),
dm.data.provider.features.lms_feature('landmarks'),
])
dataset = dm.data.provider.TFRecordProvider(
FLAGS.dataset_path,
features,
resolvers={
'image': partial(dm.data.provider.resolvers.image_resolver, output_shape=[INPUT_SHAPE, INPUT_SHAPE]),
'uv': partial(dm.data.provider.resolvers.tensor_resolver, input_shape=[INPUT_SHAPE,INPUT_SHAPE, 2]),
'landmarks': partial(dm.data.provider.resolvers.heatmap_resolver, n_lms=5, output_shape=[INPUT_SHAPE, INPUT_SHAPE]),
'label': partial(dm.data.provider.resolvers.label_resolver, input_shape=[1], n_class=N_CLASSES),
}
)
dataset = dm.data.provider.DatasetQueue(
dataset, n_proccess=FLAGS.no_thread, batch_size=BATCH_SIZE)
tf_data = dataset.get('image', 'uv', 'landmarks', 'label')
# batch_input = tf.concat([
# tf_data['image'], tf_data['uv']
# ], axis=-1)
batch_input = tf_data['image']
label = tf_data['label']
label = tf.squeeze(label)
return [batch_input, label], [label, label]
# Model
def build_model():
input_image = dm.layers.Input(
shape=[INPUT_SHAPE, INPUT_SHAPE, INPUT_CHANNELS], name='input_image')
embeding, softmax = dm.networks.ArcFace(
[input_image], 512, nf=NF, n_classes=N_CLASSES,
batch_norm='BatchNormalization'
)
train_model = dm.DeepMachine(
inputs=[input_image], outputs=[embeding, softmax])
n_gpu = len(FLAGS.gpu.split(','))
if n_gpu > 1:
train_model = multi_gpu_model(train_model, gpus=n_gpu)
def arc_loss(y_true, y_pred, s=64., m1=1., m2=0.3, m3=0.):
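            # y_pred is assumed to hold cosine similarities cos(theta) per class;
            # multiplying by the one-hot y_true keeps only the target-class cosine,
            # and the transform below applies s * (cos(m1*theta + m2) - m3)
            # elementwise. With the defaults (m1=1, m2=0.3, m3=0) this is the
            # ArcFace-style additive angular margin cos(theta + 0.3).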
# arc feature
arc = y_pred * y_true
arc = tf.acos(arc)
arc = tf.cos(arc * m1 + m2) - m3
arc = arc * s
# softmax
pred_softmax = dm.K.softmax(arc)
return dm.losses.categorical_crossentropy(y_true, pred_softmax)
train_model.compile(
optimizer=dm.optimizers.Adam(lr=LR),
loss=[dm.losses.dummy, arc_loss],
)
return train_model
arcface = build_model()
def lr_sch_fn(epoch):
new_lr = LR
if epoch >= 15:
new_lr /= 10.
if epoch >= 22:
new_lr /= 10.
if epoch >= 26:
new_lr /= 10.
return new_lr
lr_sch = dm.callbacks.LearningRateScheduler(
schedule=lr_sch_fn)
lr_sch.set_model(arcface)
arcface.fit(
build_data(),
epochs=30,
step_per_epoch=N_CLASSES * 50 // BATCH_SIZE,
logdir=LOGDIR,
lr_decay=0,
verbose=2,
callbacks=[
lr_sch
],
)
if __name__ == '__main__':
main()
| mit | 7,890,401,361,579,792,000 | 27.062937 | 132 | 0.570645 | false |
Cinntax/home-assistant | homeassistant/components/alexa/state_report.py | 1 | 5879 | """Alexa state report code."""
import asyncio
import json
import logging
import aiohttp
import async_timeout
from homeassistant.const import MATCH_ALL
from .const import API_CHANGE, Cause
from .entities import ENTITY_ADAPTERS
from .messages import AlexaResponse
_LOGGER = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
async def async_enable_proactive_mode(hass, smart_home_config):
"""Enable the proactive mode.
Proactive mode makes this component report state changes to Alexa.
"""
# Validate we can get access token.
await smart_home_config.async_get_access_token()
async def async_entity_state_listener(changed_entity, old_state, new_state):
if not new_state:
return
if new_state.domain not in ENTITY_ADAPTERS:
return
if not smart_home_config.should_expose(changed_entity):
_LOGGER.debug("Not exposing %s because filtered by config", changed_entity)
return
alexa_changed_entity = ENTITY_ADAPTERS[new_state.domain](
hass, smart_home_config, new_state
)
for interface in alexa_changed_entity.interfaces():
if interface.properties_proactively_reported():
await async_send_changereport_message(
hass, smart_home_config, alexa_changed_entity
)
return
return hass.helpers.event.async_track_state_change(
MATCH_ALL, async_entity_state_listener
)
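# Hypothetical usage sketch (the call site is illustrative only): the returned
# value is the listener remover from async_track_state_change.
#
#     unsub = await async_enable_proactive_mode(hass, smart_home_config)
#     ...
#     unsub()  # stop reporting state changes to Alexa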
async def async_send_changereport_message(
hass, config, alexa_entity, *, invalidate_access_token=True
):
"""Send a ChangeReport message for an Alexa entity.
https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-with-changereport-events
"""
token = await config.async_get_access_token()
headers = {"Authorization": f"Bearer {token}"}
endpoint = alexa_entity.alexa_id()
# this sends all the properties of the Alexa Entity, whether they have
# changed or not. this should be improved, and properties that have not
# changed should be moved to the 'context' object
properties = list(alexa_entity.serialize_properties())
payload = {
API_CHANGE: {"cause": {"type": Cause.APP_INTERACTION}, "properties": properties}
}
message = AlexaResponse(name="ChangeReport", namespace="Alexa", payload=payload)
message.set_endpoint_full(token, endpoint)
message_serialized = message.serialize()
session = hass.helpers.aiohttp_client.async_get_clientsession()
try:
with async_timeout.timeout(DEFAULT_TIMEOUT):
response = await session.post(
config.endpoint,
headers=headers,
json=message_serialized,
allow_redirects=True,
)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout sending report to Alexa.")
return
response_text = await response.text()
_LOGGER.debug("Sent: %s", json.dumps(message_serialized))
_LOGGER.debug("Received (%s): %s", response.status, response_text)
if response.status == 202:
return
response_json = json.loads(response_text)
if (
response_json["payload"]["code"] == "INVALID_ACCESS_TOKEN_EXCEPTION"
and not invalidate_access_token
):
config.async_invalidate_access_token()
return await async_send_changereport_message(
hass, config, alexa_entity, invalidate_access_token=False
)
_LOGGER.error(
"Error when sending ChangeReport to Alexa: %s: %s",
response_json["payload"]["code"],
response_json["payload"]["description"],
)
async def async_send_add_or_update_message(hass, config, entity_ids):
"""Send an AddOrUpdateReport message for entities.
https://developer.amazon.com/docs/device-apis/alexa-discovery.html#add-or-update-report
"""
token = await config.async_get_access_token()
headers = {"Authorization": f"Bearer {token}"}
endpoints = []
for entity_id in entity_ids:
domain = entity_id.split(".", 1)[0]
if domain not in ENTITY_ADAPTERS:
continue
alexa_entity = ENTITY_ADAPTERS[domain](hass, config, hass.states.get(entity_id))
endpoints.append(alexa_entity.serialize_discovery())
payload = {"endpoints": endpoints, "scope": {"type": "BearerToken", "token": token}}
message = AlexaResponse(
name="AddOrUpdateReport", namespace="Alexa.Discovery", payload=payload
)
message_serialized = message.serialize()
session = hass.helpers.aiohttp_client.async_get_clientsession()
return await session.post(
config.endpoint, headers=headers, json=message_serialized, allow_redirects=True
)
async def async_send_delete_message(hass, config, entity_ids):
    """Send a DeleteReport message for entities.
https://developer.amazon.com/docs/device-apis/alexa-discovery.html#deletereport-event
"""
token = await config.async_get_access_token()
headers = {"Authorization": f"Bearer {token}"}
endpoints = []
for entity_id in entity_ids:
domain = entity_id.split(".", 1)[0]
if domain not in ENTITY_ADAPTERS:
continue
alexa_entity = ENTITY_ADAPTERS[domain](hass, config, hass.states.get(entity_id))
endpoints.append({"endpointId": alexa_entity.alexa_id()})
payload = {"endpoints": endpoints, "scope": {"type": "BearerToken", "token": token}}
message = AlexaResponse(
name="DeleteReport", namespace="Alexa.Discovery", payload=payload
)
message_serialized = message.serialize()
session = hass.helpers.aiohttp_client.async_get_clientsession()
return await session.post(
config.endpoint, headers=headers, json=message_serialized, allow_redirects=True
)
| apache-2.0 | 4,879,994,860,031,726,000 | 30.607527 | 129 | 0.662528 | false |
Azulinho/ansible | lib/ansible/plugins/cache/__init__.py | 11 | 9280 | # (c) 2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import errno
from abc import ABCMeta, abstractmethod
from collections import MutableMapping
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import with_metaclass
from ansible.module_utils._text import to_bytes
from ansible.plugins.loader import cache_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class BaseCacheModule(with_metaclass(ABCMeta, object)):
# Backwards compat only. Just import the global display instead
_display = display
@abstractmethod
def get(self, key):
pass
@abstractmethod
def set(self, key, value):
pass
@abstractmethod
def keys(self):
pass
@abstractmethod
def contains(self, key):
pass
@abstractmethod
def delete(self, key):
pass
@abstractmethod
def flush(self):
pass
@abstractmethod
def copy(self):
pass
class BaseFileCacheModule(BaseCacheModule):
"""
A caching module backed by file based storage.
"""
def __init__(self, *args, **kwargs):
self.plugin_name = self.__module__.split('.')[-1]
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self._cache = {}
self._cache_dir = None
if C.CACHE_PLUGIN_CONNECTION:
# expects a dir path
self._cache_dir = os.path.expanduser(os.path.expandvars(C.CACHE_PLUGIN_CONNECTION))
if not self._cache_dir:
raise AnsibleError("error, '%s' cache plugin requires the 'fact_caching_connection' config option "
"to be set (to a writeable directory path)" % self.plugin_name)
if not os.path.exists(self._cache_dir):
try:
os.makedirs(self._cache_dir)
except (OSError, IOError) as e:
raise AnsibleError("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e)))
else:
for x in (os.R_OK, os.W_OK, os.X_OK):
if not os.access(self._cache_dir, x):
raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (
self.plugin_name, self._cache_dir))
    def get(self, key):
        """ This checks the in-memory cache first, as the fact was not expired at 'gather time',
        and it would be problematic if the key expired after some long-running tasks and the
        user got an 'undefined' error in the same play """
if key not in self._cache:
if self.has_expired(key) or key == "":
raise KeyError
cachefile = "%s/%s" % (self._cache_dir, key)
try:
value = self._load(cachefile)
self._cache[key] = value
except ValueError as e:
display.warning("error in '%s' cache plugin while trying to read %s : %s. "
"Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e)))
self.delete(key)
raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
"It has been removed, so you can re-run your command now." % cachefile)
except (OSError, IOError) as e:
display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
raise KeyError
except Exception as e:
raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e)))
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
cachefile = "%s/%s" % (self._cache_dir, key)
try:
self._dump(value, cachefile)
except (OSError, IOError) as e:
display.warning("error in '%s' cache plugin while trying to write to %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
def has_expired(self, key):
if self._timeout == 0:
return False
cachefile = "%s/%s" % (self._cache_dir, key)
try:
st = os.stat(cachefile)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
return False
if time.time() - st.st_mtime <= self._timeout:
return False
if key in self._cache:
del self._cache[key]
return True
def keys(self):
keys = []
for k in os.listdir(self._cache_dir):
if not (k.startswith('.') or self.has_expired(k)):
keys.append(k)
return keys
def contains(self, key):
cachefile = "%s/%s" % (self._cache_dir, key)
if key in self._cache:
return True
if self.has_expired(key):
return False
try:
os.stat(cachefile)
return True
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
def delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
os.remove("%s/%s" % (self._cache_dir, key))
except (OSError, IOError):
pass # TODO: only pass on non existing?
def flush(self):
self._cache = {}
for key in self.keys():
self.delete(key)
def copy(self):
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
@abstractmethod
def _load(self, filepath):
"""
Read data from a filepath and return it as a value
:arg filepath: The filepath to read from.
:returns: The value stored in the filepath
This method reads from the file on disk and takes care of any parsing
and transformation of the data before returning it. The value
returned should be what Ansible would expect if it were uncached data.
.. note:: Filehandles have advantages but calling code doesn't know
whether this file is text or binary, should be decoded, or accessed via
a library function. Therefore the API uses a filepath and opens
the file inside of the method.
"""
pass
@abstractmethod
def _dump(self, value, filepath):
"""
Write data to a filepath
:arg value: The value to store
:arg filepath: The filepath to store it at
"""
pass
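# Minimal illustrative subclass (not an actual Ansible cache plugin; JSON on disk
# is an assumed example format) showing how _load and _dump are meant to be
# implemented by concrete file-backed caches.
import json as _example_json
class _ExampleJsonFileCache(BaseFileCacheModule):
    def _load(self, filepath):
        with open(filepath, 'r') as f:
            return _example_json.load(f)
    def _dump(self, value, filepath):
        with open(filepath, 'w') as f:
            _example_json.dump(value, f)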
class FactCache(MutableMapping):
def __init__(self, *args, **kwargs):
self._plugin = cache_loader.get(C.CACHE_PLUGIN)
if not self._plugin:
raise AnsibleError('Unable to load the facts cache plugin (%s).' % (C.CACHE_PLUGIN))
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# in memory cache so plugins don't expire keys mid run
self._cache = {}
def __getitem__(self, key):
if not self._plugin.contains(key):
raise KeyError
return self._plugin.get(key)
def __setitem__(self, key, value):
self._plugin.set(key, value)
def __delitem__(self, key):
self._plugin.delete(key)
def __contains__(self, key):
return self._plugin.contains(key)
def __iter__(self):
return iter(self._plugin.keys())
def __len__(self):
return len(self._plugin.keys())
def copy(self):
""" Return a primitive copy of the keys and values from the cache. """
return dict(self)
def keys(self):
return self._plugin.keys()
def flush(self):
""" Flush the fact cache of all keys. """
self._plugin.flush()
def update(self, key, value):
host_cache = self._plugin.get(key)
host_cache.update(value)
self._plugin.set(key, host_cache)
| gpl-3.0 | 8,739,842,255,107,215,000 | 31.447552 | 156 | 0.589009 | false |
decause/decauseblog | conf.py | 1 | 37820 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is used when you don't want that setting translated.
# ! Option (b) is used for settings that are different in different languages.
# Data about this site
BLOG_AUTHOR = "decause" # (translatable)
BLOG_TITLE = "decauseblog" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "http://blog-decause.rhcloud.com"
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "http://blog-decause.rhcloud.com"
BLOG_EMAIL = "[email protected]"
BLOG_DESCRIPTION = "Hackademia & FLOSSophy" # (translatable)
# Nikola is multilingual!
#
# Currently supported languages are:
#
# en English
# ar Arabic
# bg Bulgarian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# da Danish
# de German
# el Greek [NOT gr]
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# hi Hindi
# hr Croatian
# id Indonesian
# it Italian
# ja Japanese [NOT jp]
# ko Korean
# nb Norwegian Bokmål
# nl Dutch
# pl Polish
# pt_br Portuguese (Brasil)
# ru Russian
# sk Slovak
# sl Slovene
# sr Serbian (Cyrillic)
# sv Swedish
# tr Turkish [NOT tr_TR]
# ur Urdu
# zh_cn Chinese (Simplified)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
}
# Someday, we'll blog in esperanto, espanol, and francais... but not today
#TRANSLATIONS = {
# DEFAULT_LANG: "",
# "es": "./es",
# "fr": "./fr",
# "eo": "./eo",
#}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}"
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
#
# For regular links:
# ('http://getnikola.com/', 'Nikola Homepage')
#
# For submenus:
# (
# (
# ('http://apple.com/', 'Apple'),
# ('http://orange.com/', 'Orange'),
# ),
# 'Fruits'
# )
#
# WARNING: Support for submenus is theme-dependent.
# Only one level of submenus is supported.
# WARNING: Some themes, including the default Bootstrap 3 theme,
# may present issues if the menu is too large.
# (in bootstrap3, the navbar can grow too large and cover contents.)
# WARNING: If you link to directories, make sure to follow
# ``STRIP_INDEXES``. If it’s set to ``True``, end your links
# with a ``/``, otherwise end them with ``/index.html`` — or
# else they won’t be highlighted when active.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
("/archive.html", "Archive"),
("/categories/index.html", "Tags"),
("/stories/fedmsg.html", "fedmsg"),
("/rss.xml", "RSS feed"),
),
}
# Name of the theme to use.
THEME = "custom"
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (e.g. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00)
TIMEZONE = "UTC"
# If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola
# (especially in new_post), set this to True.
# Note that this does not affect DATE_FORMAT.
# FORCE_ISO8601 = False
# Date format used to display post dates.
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# Date format used to display post dates, if local dates are used.
# (str used by moment.js)
# JS_DATE_FORMAT = 'YYYY-MM-DD HH:mm'
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE
# 1 = using JS_DATE_FORMAT and local user time (via moment.js)
# 2 = using a string like “2 days ago”
#
# Your theme must support it, bootstrap and bootstrap3 already do.
# DATE_FANCINESS = 0
# While Nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, for example
# "en_US.utf8" in Unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
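# Example (illustrative values only):
# LOCALES = {'en': 'en_US.utf8', 'es': 'es_ES.utf8'}
# LOCALE_FALLBACK = 'en_US.utf8'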
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.es.txt and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.md", "posts", "post.tmpl"),
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.md", "stories", "story.tmpl"),
("stories/*.rst", "stories", "story.tmpl"),
("stories/*.txt", "stories", "story.tmpl"),
("stories/*.html", "stories", "story.tmpl"),
)
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing listings to be processed and stored into
# the output. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is HTML and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ('.php',),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# Formerly known as HIDE_UNTRANSLATED_POSTS (inverse)
# SHOW_UNTRANSLATED_POSTS = True
# Nikola supports logo display. If you have one, you can put the URL here.
# Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">.
# The URL may be relative to the site root.
# LOGO_URL = 'assets/img/decausevatar-50px.png'
# If you want to hide the title of your website (for example, if your logo
# already contains the text), set this to False.
# SHOW_BLOG_TITLE = True
# Writes tag cloud data in form of tag_cloud_data.json.
# Warning: this option will change its default value to False in v8!
WRITE_TAG_CLOUD = True
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = False
# Set descriptions for tag pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the tag list or index page’s title.
# TAG_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
#}
# Only include tags on the tag list/overview page if there are at least
# TAGLIST_MINIMUM_POSTS number of posts or more with every tag. Every tag
# page is still generated, linked from posts, and included in the sitemap.
# However, more obscure tags can be hidden from the tag index page.
# TAGLIST_MINIMUM_POSTS = 1
# Final locations are:
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html (list of categories)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.html (list of posts for a category)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.xml (RSS feed for a category)
# CATEGORY_PATH = "categories"
# CATEGORY_PREFIX = "cat_"
# If CATEGORY_PAGES_ARE_INDEXES is set to True, each category's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# CATEGORY_PAGES_ARE_INDEXES = False
# Set descriptions for category pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the category list or index page’s title.
# CATEGORY_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
#}
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Create year, month, and day archives each with a (long) list of posts
# (overrides both CREATE_MONTHLY_ARCHIVE and CREATE_SINGLE_ARCHIVE)
# CREATE_FULL_ARCHIVES = False
# If monthly archives or full archives are created, adds also one archive per day
# CREATE_DAILY_ARCHIVE = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / DAY / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# If ARCHIVES_ARE_INDEXES is set to True, each archive page which contains a list
# of posts will contain the posts themselves. If set to False, it will be just a
# list of links.
# ARCHIVES_ARE_INDEXES = False
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Number of posts in RSS feeds
FEED_LENGTH = 100
# Slug the Tag URL. Easier for users to type; special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ [email protected]:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
DEPLOY_COMMANDS = {
'default': [
"cp -R output/* ../blog/",
]
}
# For user.github.io OR organization.github.io pages, the DEPLOY branch
# MUST be 'master', and 'gh-pages' for other repositories.
# GITHUB_SOURCE_BRANCH = 'master'
# GITHUB_DEPLOY_BRANCH = 'gh-pages'
# The name of the remote where you wish to push to, using github_deploy.
# GITHUB_REMOTE_NAME = 'origin'
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A directory where the keys are either: a file extensions, or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, only .php files uses filters to inject PHP into
# Nikola’s templates. All other filters must be enabled through FILTERS.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <http://getnikola.com/handbook.html#post-processing-filters>
#
# from nikola import filters
# FILTERS = {
# ".html": [filters.typogrify],
# ".js": [filters.closure_compiler],
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return a "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# Compiler to process LESS files.
# LESS_COMPILER = 'lessc'
# A list of options to pass to the LESS compiler.
# Final command is: LESS_COMPILER LESS_OPTIONS file.less
# LESS_OPTIONS = []
# Compiler to process Sass files.
# SASS_COMPILER = 'sass'
# A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss
# SASS_OPTIONS = []
# #############################################################################
# Image Gallery Options
# #############################################################################
# One or more folders containing galleries. The format is a dictionary of
# {"source": "relative_destination"}, where galleries are looked for in
# "source/" and the results will be located in
# "OUTPUT_PATH/relative_destination/gallery_name"
# Default is:
# GALLERY_FOLDERS = {"galleries": "galleries"}
# More gallery options:
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
#
# Folders containing images to be used in normal posts or
# pages. Images will be scaled down according to IMAGE_THUMBNAIL_SIZE
# and MAX_IMAGE_SIZE options, but will have to be referenced manually
# to be visible on the site. The format is a dictionary of {source:
# relative destination}.
#
# IMAGE_FOLDERS = {'images': ''}
# IMAGE_THUMBNAIL_SIZE = 400
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to ' old posts, page %d' or ' page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
#
# (translatable) If the following is empty, defaults to BLOG_TITLE:
# INDEXES_TITLE = ""
#
# (translatable) If the following is empty, defaults to ' [old posts,] page %d' (see above):
# INDEXES_PAGES = ""
#
# If the following is True, INDEXES_PAGES is also displayed on the main (the
# newest) index page (index.html):
# INDEXES_PAGES_MAIN = False
#
# If the following is True, index-1.html has the oldest posts, index-2.html the
# second-oldest posts, etc., and index.html has the newest posts. This ensures
# that all posts on index-x.html will forever stay on that page, no matter how
# many new posts are added.
# If False, index-1.html has the second-newest posts, index-2.html the third-newest,
# and index-n.html the oldest posts. When this is active, old posts can be moved
# to other index pages when new posts are added.
# INDEXES_STATIC = True
#
# (translatable) If PRETTY_URLS is set to True, this setting will be used to create
# more pretty URLs for index pages, such as page/2/index.html instead of index-2.html.
# Valid values for this settings are:
# * False,
# * a list or tuple, specifying the path to be generated,
# * a dictionary mapping languages to lists or tuples.
# Every list or tuple must consist of strings which are used to combine the path;
# for example:
# ['page', '{number}', '{index_file}']
# The replacements
# {number} --> (logical) page number;
# {old_number} --> the page number inserted into index-n.html before (zero for
# the main page);
# {index_file} --> value of option INDEX_FILE
# are made.
# Note that in case INDEXES_PAGES_MAIN is set to True, a redirection will be created
# for the full URL with the page number of the main page to the normal (shorter) main
# page URL.
# INDEXES_PRETTY_PAGE_URL = False
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of autumn borland bw colorful default emacs friendly fruity manni
# monokai murphy native pastie perldoc rrt tango trac vim vs
# CODE_COLOR_SCHEME = 'default'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions
# between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# FAVICONS contains (name, file, size) tuples.
# Used for create favicon link like this:
# <link rel="name" href="file" sizes="size"/>
# FAVICONS = {
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# }
# Show only teasers in the index pages? Defaults to False.
# INDEX_TEASERS = False
# HTML fragments with the Read more... links.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {reading_time} An estimate of how long it will take to read the post.
# {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser.
# {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language.
# {paragraph_count} The amount of paragraphs in the post.
# {remaining_paragraph_count} The amount of paragraphs in the post, sans the teaser.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the RSS_FEED, if RSS_TEASERS is True (translatable)
RSS_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the RSS_READ_MORE_LINK and the //rss/item/link in
# RSS feeds. Minimum example for Piwik "pk_campaign=rss" and Google Analytics
# "utm_source=rss&utm_medium=rss&utm_campaign=rss". Advanced option used for
# traffic source tracking.
RSS_LINKS_APPEND_QUERY = False
# A HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = """
<a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">Creative Commons Attribution-ShareAlike 4.0 International License</a>.
"""
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="http://getnikola.com" rel="nofollow">Nikola</a> {license}'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello'), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, googleplus, intensedebate, isso, livefyre, muut
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = "decauseblog"
# Enable annotations using annotateit.org?
# If set to False, you can still enable them for individual posts and pages
# setting the "annotations" metadata.
# If set to True, you can disable them for individual posts and pages using
# the "noannotations" metadata.
ANNOTATIONS = False
# Create index.html for page (story) folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the STORY_INDEX
# will not be generated for that directory.
# STORY_INDEX = False
# Enable comments on story pages?
COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
# Default = False
# STRIP_INDEXES = False
# Should the sitemap list directories which only include other directories
# and no files.
# Default to True
# If this is False
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html)... add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in
# <slug>/index.html. Also enables STRIP_INDEXES
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata
# PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, use the scheduling rule to all posts by default
# SCHEDULE_ALL = False
# Do you want a add a Mathjax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite']
# Extra options to pass to the pandoc command.
# By default it's empty; it is a list of strings, for example
# ['-F', 'pandoc-citeproc', '--bibliography=/Users/foo/references.bib']
# PANDOC_OPTIONS = []
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty.
# (translatable)
SOCIAL_BUTTONS_CODE = """
"""
# Show link to source for the posts?
# Formerly known as HIDE_SOURCELINK (inverse)
SHOW_SOURCELINK = True
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
# COPY_SOURCES = True
# Modify the number of Post per Index Page
# Defaults to 10
INDEX_DISPLAY_POST_COUNT = 100
# By default, Nikola generates RSS files for the website and for tags, and
# links to it. Set this to False to disable everything RSS-related.
# GENERATE_RSS = True
# RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a FeedBurner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Default to True
# RSS_TEASERS = True
# Strip HTML in the RSS feed? Default to False
# RSS_PLAIN = False
# A search form to search this site, for the sidebar. You can use a Google
# custom search (http://www.google.com/cse/)
# Or a DuckDuckGo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = """
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
SEARCH_FORM = """
<!-- Custom search -->
<form method="get" id="search" action="//duckduckgo.com/"
class="navbar-form pull-right">
<input type="hidden" name="sites" value="%s"/>
<input type="hidden" name="k8" value="#444444"/>
<input type="hidden" name="k9" value="#D51920"/>
<input type="hidden" name="kt" value="h"/>
<input type="text" name="q" maxlength="255"
placeholder="Search…" class="span2" style="margin-top: 4px;"/>
<input type="submit" style="visibility: hidden; width:3em;" value="Go" />
</form>
<!-- End of custom search -->
""" % SITE_URL
#
# If you prefer a Google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Custom search with Google-->
# <form id="search" action="//www.google.com/search" method="get" class="navbar-form pull-left">
# <input type="hidden" name="q" value="site:%s" />
# <input type="text" name="q" maxlength="255" results="0" placeholder="Search"/>
# </form>
# <!-- End of custom search -->
#""" % SITE_URL
# Use content distribution networks for jQuery, twitter-bootstrap css and js,
# and html5shiv (for older versions of Internet Explorer)
# If this is True, jQuery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Check for USE_CDN compatibility.
# If you are using custom themes, have configured the CSS properly and are
# receiving warnings about incompatibility but believe they are incorrect, you
# can set this to False.
# USE_CDN_WARNING = True
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
BODY_END = """
<!-- Piwik -->
<script type="text/javascript">
var _paq = _paq || [];
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="//piwik-decause.rhcloud.com/";
_paq.push(['setTrackerUrl', u+'piwik.php']);
_paq.push(['setSiteId', 1]);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.type='text/javascript'; g.async=true; g.defer=true; g.src=u+'piwik.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<noscript><p><img src="//piwik-decause.rhcloud.com/piwik.php?idsite=1" style="border:0;" alt="" /></p></noscript>
<!-- End Piwik Code -->
"""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
# If you hate "Filenames with Capital Letters and Spaces.md", you should
# set this to true.
UNSLUGIFY_TITLES = True
# Additional metadata that is added to a post when creating a new_post
ADDITIONAL_METADATA = {"LICENSE": "CC-BY-SA-4.0", }
# Nikola supports Open Graph Protocol data for enhancing link sharing and
# discoverability of your site on Facebook, Google+, and other services.
# Open Graph is enabled by default.
# USE_OPEN_GRAPH = True
# Nikola supports Twitter Card summaries, but they are disabled by default.
# They make it possible for you to attach media to Tweets that link
# to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit https://cards-dev.twitter.com/validator
#
# Uncomment and modify to following lines to match your accounts.
# Images displayed come from the `previewimage` meta tag.
# You can specify the card type by using the `card` parameter in TWITTER_CARD.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards
# # 'card': 'summary', # Card type, you can also use 'summary_large_image',
# # see https://dev.twitter.com/cards/types
# # 'site': '@website', # twitter nick for the website
# # 'creator': '@username', # Username for the content creator / author.
# }
# If webassets is installed, bundle JS and CSS to make site loading faster
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# If you don’t like slugified file names ([a-z0-9] and a literal dash) and
# would prefer to use all the characters your file system allows, set this
# to False.
# USE WITH CARE! This is also not guaranteed to be perfect, and may
# sometimes crash Nikola, your web server, or eat your cat.
# USE_SLUGIFY = True
# You can configure the logging handlers installed as plugins or change the
# log level of the default stderr handler.
# WARNING: The stderr handler allows only the loglevels of 'INFO' and 'DEBUG'.
# This is done for safety reasons, as blocking out anything other
# than 'DEBUG' may hide important information and break the user
# experience!
LOGGING_HANDLERS = {
'stderr': {'loglevel': 'INFO', 'bubble': True},
# 'smtp': {
# 'from_addr': '[email protected]',
    #     'recipients': ('[email protected]',),
# 'credentials':('testusername', 'password'),
# 'server_addr': ('127.0.0.1', 25),
# 'secure': (),
# 'level': 'DEBUG',
# 'bubble': True
# }
}
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
| agpl-3.0 | -5,818,107,910,771,448,000 | 37.755897 | 371 | 0.690317 | false |
karlht/services-tools | fxa-l10n/genPrintL10nEmails.py | 2 | 1038 | langs = [ "ca",
"cs",
"cy",
"da",
"de",
"en-US",
"es",
"es-CL",
"et",
"eu",
"fr",
"fy",
"he",
"hu",
"id",
"it",
"ja",
"ko",
"lt",
"nb-NO",
"nl",
"pa",
"pl",
"pt",
"pt-BR",
"rm",
"ru",
"sk",
"sl",
"sq",
"sr",
"sr-LATN",
"sv",
"tr",
"zh-CN",
"zh-TW",
"xx"]
print '#!/bin/sh'
print 'epoch=$(date +%s)'
print 'export COMMAND="./ve/bin/fxa-client --password 12345678"'
for lang in langs:
print "$COMMAND --email %[email protected] --lang %s create &> /dev/null" % (lang, lang)
print 'sleep 5'
for lang in langs:
print "./getRestmailText %[email protected]" % lang
for lang in langs:
print "$COMMAND --email %[email protected] destroy &> /dev/null" % (lang)
| mpl-2.0 | 7,665,612,473,063,577,000 | 18.961538 | 95 | 0.360308 | false |
NigelCleland/pdtools | docs/conf.py | 1 | 9188 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# -- General configuration -----------------------------------------------------
# Create Mock Modules to get around C dependencies on Read the Docs
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
MOCK_MODULES = ['pandas.io.sql', 'pandas.tseries.offsets', 'matplotlib.pyplot', 'tseries', 'offsets', 'tseries.offsets', 'io', 'sql', 'io.sql', 'pyplot', 'numpy', 'pandas', 'matplotlib', 'pytz']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
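# With the stubs above registered in sys.modules, any import of a module
# named in MOCK_MODULES that Sphinx autodoc triggers while importing pdtools
# resolves to a Mock instance instead of the real C-backed package.
# Illustrative only; the behaviour follows from the Mock class above:
#
#   import pandas          # -> the Mock() registered above
#   pandas.DataFrame       # -> a dynamically created placeholder type
#   pandas.read_csv()      # -> another Mock(), lowercase names just recurse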
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pdtools
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'numpydoc',
'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pdtools'
copyright = u'2013, Nigel Cleland'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pdtools.__version__
# The full version, including alpha/beta/rc tags.
release = pdtools.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pdtoolsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pdtools.tex', u'pdtools Documentation',
u'Nigel Cleland', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pdtools', u'pdtools Documentation',
[u'Nigel Cleland'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pdtools', u'pdtools Documentation',
u'Nigel Cleland', 'pdtools', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | bsd-3-clause | -1,587,073,996,493,870,300 | 31.242105 | 195 | 0.692643 | false |
SaschaMester/delicium | testing/legion/rpc_server.py | 5 | 4274 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The task RPC server code.
This server is an XML-RPC server which serves code from
rpc_methods.RPCMethods.
This server will run until shutdown is called on the server object. This can
be achieved in 2 ways:
- Calling the Quit RPC method defined in RPCMethods
- Not receiving any calls within the idle_timeout_secs time.
"""
import logging
import threading
import time
import xmlrpclib
import SimpleXMLRPCServer
import SocketServer
#pylint: disable=relative-import
import common_lib
import rpc_methods
import ssl_util
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
"""Restricts access to only specified IP address.
This call assumes the server is RPCServer.
"""
def do_POST(self):
"""Verifies the task is authorized to perform RPCs."""
if self.client_address[0] != self.server.authorized_address:
      logging.error('Received unauthorized RPC request from %s',
                    self.client_address[0])
self.send_response(403)
response = 'Forbidden'
self.send_header('Content-type', 'text/plain')
self.send_header('Content-length', str(len(response)))
self.end_headers()
self.wfile.write(response)
else:
return SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.do_POST(self)
class RpcServer(ssl_util.SslRpcServer,
SocketServer.ThreadingMixIn):
"""Restricts all endpoints to only specified IP addresses."""
def __init__(self, authorized_address,
idle_timeout_secs=common_lib.DEFAULT_TIMEOUT_SECS):
ssl_util.SslRpcServer.__init__(
self, (common_lib.SERVER_ADDRESS, common_lib.SERVER_PORT),
allow_none=True, logRequests=False,
requestHandler=RequestHandler)
self.authorized_address = authorized_address
self.idle_timeout_secs = idle_timeout_secs
self.register_instance(rpc_methods.RPCMethods(self))
self._shutdown_requested_event = threading.Event()
self._rpc_received_event = threading.Event()
self._idle_thread = threading.Thread(target=self._CheckForIdleQuit)
def shutdown(self):
"""Shutdown the server.
This overloaded method sets the _shutdown_requested_event to allow the
idle timeout thread to quit.
"""
self._shutdown_requested_event.set()
SimpleXMLRPCServer.SimpleXMLRPCServer.shutdown(self)
logging.info('Server shutdown complete')
def serve_forever(self, poll_interval=0.5):
"""Serve forever.
This overloaded method starts the idle timeout thread before calling
serve_forever. This ensures the idle timer thread doesn't get started
without the server running.
Args:
poll_interval: The interval to poll for shutdown.
"""
logging.info('RPC server starting')
self._idle_thread.start()
SimpleXMLRPCServer.SimpleXMLRPCServer.serve_forever(self, poll_interval)
def _dispatch(self, method, params):
"""Dispatch the call to the correct method with the provided params.
This overloaded method adds logging to help trace connection and
call problems.
Args:
method: The method name to call.
params: A tuple of parameters to pass.
Returns:
The result of the parent class' _dispatch method.
"""
logging.debug('Calling %s%s', method, params)
self._rpc_received_event.set()
return SimpleXMLRPCServer.SimpleXMLRPCServer._dispatch(self, method, params)
def _CheckForIdleQuit(self):
"""Check for, and exit, if the server is idle for too long.
This method must be run in a separate thread to avoid a deadlock when
calling server.shutdown.
"""
timeout = time.time() + self.idle_timeout_secs
while time.time() < timeout:
if self._shutdown_requested_event.is_set():
# An external source called shutdown()
return
elif self._rpc_received_event.is_set():
logging.debug('Resetting the idle timeout')
timeout = time.time() + self.idle_timeout_secs
self._rpc_received_event.clear()
time.sleep(1)
# We timed out, kill the server
logging.warning('Shutting down the server due to the idle timeout')
self.shutdown()
| bsd-3-clause | -6,239,894,375,502,028,000 | 32.390625 | 80 | 0.708236 | false |
SydneyUniLibrary/auto-holds | autoholds/sierrasettings-sample.py | 1 | 1874 | # Copyright 2016 Susan Bennett, David Mitchell, Jim Nicholls
#
# This file is part of AutoHolds.
#
# AutoHolds is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AutoHolds is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AutoHolds. If not, see <http://www.gnu.org/licenses/>.
# TODO: Copy this file to sierrasettings.py and
#       edit the settings towards the bottom of this file
class SierraApiSettings:
def __init__(self, **kwargs):
self.base_url = kwargs['base_url']
self.client_key = kwargs['client_key']
self.client_secret = kwargs['client_secret']
def __str__(self):
return str(self.base_url)
def __repr__(self):
return 'SierraApiSettings(base_url=%r)' % self.base_url
class SierraSqlSettings:
def __init__(self, **kwargs):
self.host = kwargs['host']
self.port = kwargs['port']
self.user = kwargs['user']
self.password = kwargs['password']
def __str__(self):
return str(self.host)
def __repr__(self):
return 'SierraSqlSettings(host=%r, port=%r)' % (self.host, self.port)
SIERRA_API = SierraApiSettings(
base_url='''https://sierra-app-server.mylibrary.url/iii/sierra-api''',
client_key='''key''',
client_secret='''secret''',
)
SIERRA_SQL = SierraSqlSettings(
host='''sierra-db-server.mylibrary.url''',
port=1032,
user='''autoholds''',
password='''pass''',
)
| gpl-3.0 | -6,940,413,179,614,745,000 | 28.28125 | 78 | 0.66222 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/structuredefinition.py | 1 | 11857 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/StructureDefinition) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class StructureDefinition(domainresource.DomainResource):
""" Structural Definition.
A definition of a FHIR structure. This resource is used to describe the
underlying resources, data types defined in FHIR, and also for describing
extensions, and constraints on resources and data types.
"""
resource_name = "StructureDefinition"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.abstract = None
""" Whether the structure is abstract.
Type `bool`. """
self.base = None
""" Structure that this set of constraints applies to.
Type `str`. """
self.code = None
""" Assist with indexing and finding.
List of `Coding` items (represented as `dict` in JSON). """
self.constrainedType = None
""" Any datatype or resource, including abstract ones.
Type `str`. """
self.contact = None
""" Contact details of the publisher.
List of `StructureDefinitionContact` items (represented as `dict` in JSON). """
self.context = None
""" Where the extension can be used in instances.
List of `str` items. """
self.contextType = None
""" resource | datatype | mapping | extension.
Type `str`. """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date for this version of the StructureDefinition.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Natural language description of the StructureDefinition.
Type `str`. """
self.differential = None
""" Differential view of the structure.
Type `StructureDefinitionDifferential` (represented as `dict` in JSON). """
self.display = None
""" Use this name when displaying the value.
Type `str`. """
self.experimental = None
""" If for testing purposes, not real usage.
Type `bool`. """
self.fhirVersion = None
""" FHIR Version this StructureDefinition targets.
Type `str`. """
self.identifier = None
""" Other identifiers for the StructureDefinition.
List of `Identifier` items (represented as `dict` in JSON). """
self.kind = None
""" datatype | resource | logical.
Type `str`. """
self.mapping = None
""" External specification that the content is mapped to.
List of `StructureDefinitionMapping` items (represented as `dict` in JSON). """
self.name = None
""" Informal name for this StructureDefinition.
Type `str`. """
self.publisher = None
""" Name of the publisher (Organization or individual).
Type `str`. """
self.requirements = None
""" Scope and Usage this structure definition is for.
Type `str`. """
self.snapshot = None
""" Snapshot view of the structure.
Type `StructureDefinitionSnapshot` (represented as `dict` in JSON). """
self.status = None
""" draft | active | retired.
Type `str`. """
self.url = None
""" Absolute URL used to reference this StructureDefinition.
Type `str`. """
self.useContext = None
""" Content intends to support these contexts.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.version = None
""" Logical id for this version of the StructureDefinition.
Type `str`. """
super(StructureDefinition, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureDefinition, self).elementProperties()
js.extend([
("abstract", "abstract", bool, False, None, True),
("base", "base", str, False, None, False),
("code", "code", coding.Coding, True, None, False),
("constrainedType", "constrainedType", str, False, None, False),
("contact", "contact", StructureDefinitionContact, True, None, False),
("context", "context", str, True, None, False),
("contextType", "contextType", str, False, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("differential", "differential", StructureDefinitionDifferential, False, None, False),
("display", "display", str, False, None, False),
("experimental", "experimental", bool, False, None, False),
("fhirVersion", "fhirVersion", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("kind", "kind", str, False, None, True),
("mapping", "mapping", StructureDefinitionMapping, True, None, False),
("name", "name", str, False, None, True),
("publisher", "publisher", str, False, None, False),
("requirements", "requirements", str, False, None, False),
("snapshot", "snapshot", StructureDefinitionSnapshot, False, None, False),
("status", "status", str, False, None, True),
("url", "url", str, False, None, True),
("useContext", "useContext", codeableconcept.CodeableConcept, True, None, False),
("version", "version", str, False, None, False),
])
return js
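# Illustrative sketch (not part of the generated model): a minimal instance
# can be built from a JSON dict carrying the elements flagged as non-optional
# above (abstract, kind, name, status, url), and serialized back with
# as_json(), which the shared fhirabstractbase base class provides:
#
#   sd = StructureDefinition({
#       'name': 'MyProfile', 'kind': 'resource', 'abstract': False,
#       'status': 'draft',
#       'url': 'http://example.org/StructureDefinition/my-profile'})
#   sd.as_json()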
from . import backboneelement
class StructureDefinitionContact(backboneelement.BackboneElement):
""" Contact details of the publisher.
Contacts to assist a user in finding and communicating with the publisher.
"""
resource_name = "StructureDefinitionContact"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name of a individual to contact.
Type `str`. """
self.telecom = None
""" Contact details for individual or publisher.
List of `ContactPoint` items (represented as `dict` in JSON). """
super(StructureDefinitionContact, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureDefinitionContact, self).elementProperties()
js.extend([
("name", "name", str, False, None, False),
("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
])
return js
class StructureDefinitionDifferential(backboneelement.BackboneElement):
""" Differential view of the structure.
A differential view is expressed relative to the base StructureDefinition -
a statement of differences that it applies.
"""
resource_name = "StructureDefinitionDifferential"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.element = None
""" Definition of elements in the resource (if no StructureDefinition).
List of `ElementDefinition` items (represented as `dict` in JSON). """
super(StructureDefinitionDifferential, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureDefinitionDifferential, self).elementProperties()
js.extend([
("element", "element", elementdefinition.ElementDefinition, True, None, True),
])
return js
class StructureDefinitionMapping(backboneelement.BackboneElement):
""" External specification that the content is mapped to.
An external specification that the content is mapped to.
"""
resource_name = "StructureDefinitionMapping"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.comments = None
""" Versions, Issues, Scope limitations etc..
Type `str`. """
self.identity = None
""" Internal id when this mapping is used.
Type `str`. """
self.name = None
""" Names what this mapping refers to.
Type `str`. """
self.uri = None
""" Identifies what this mapping refers to.
Type `str`. """
super(StructureDefinitionMapping, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureDefinitionMapping, self).elementProperties()
js.extend([
("comments", "comments", str, False, None, False),
("identity", "identity", str, False, None, True),
("name", "name", str, False, None, False),
("uri", "uri", str, False, None, False),
])
return js
class StructureDefinitionSnapshot(backboneelement.BackboneElement):
""" Snapshot view of the structure.
A snapshot view is expressed in a stand alone form that can be used and
interpreted without considering the base StructureDefinition.
"""
resource_name = "StructureDefinitionSnapshot"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.element = None
""" Definition of elements in the resource (if no StructureDefinition).
List of `ElementDefinition` items (represented as `dict` in JSON). """
super(StructureDefinitionSnapshot, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureDefinitionSnapshot, self).elementProperties()
js.extend([
("element", "element", elementdefinition.ElementDefinition, True, None, True),
])
return js
from . import codeableconcept
from . import coding
from . import contactpoint
from . import elementdefinition
from . import fhirdate
from . import identifier
| bsd-3-clause | -8,748,287,118,281,944,000 | 37.125402 | 110 | 0.604706 | false |
NewpTone/hotzenplotz | hotzenplotz/openstack/common/rpc/matchmaker.py | 1 | 7587 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import contextlib
import itertools
import json
from hotzenplotz.openstack.common import cfg
from hotzenplotz.openstack.common.gettextutils import _
from hotzenplotz.openstack.common import log as logging
matchmaker_opts = [
# Matchmaker ring file
cfg.StrOpt('matchmaker_ringfile',
default='/etc/nova/matchmaker_ring.json',
help='Matchmaker ring file (JSON)'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
contextmanager = contextlib.contextmanager
class MatchMakerException(Exception):
"""Signified a match could not be found."""
message = _("Match not found by MatchMaker.")
class Exchange(object):
"""
Implements lookups.
Subclass this to support hashtables, dns, etc.
"""
def __init__(self):
pass
def run(self, key):
raise NotImplementedError()
class Binding(object):
"""
A binding on which to perform a lookup.
"""
def __init__(self):
pass
def test(self, key):
raise NotImplementedError()
class MatchMakerBase(object):
"""Match Maker Base Class."""
def __init__(self):
# Array of tuples. Index [2] toggles negation, [3] is last-if-true
self.bindings = []
def add_binding(self, binding, rule, last=True):
self.bindings.append((binding, rule, False, last))
#NOTE(ewindisch): kept the following method in case we implement the
# underlying support.
#def add_negate_binding(self, binding, rule, last=True):
# self.bindings.append((binding, rule, True, last))
def queues(self, key):
workers = []
# bit is for negate bindings - if we choose to implement it.
# last stops processing rules if this matches.
for (binding, exchange, bit, last) in self.bindings:
if binding.test(key):
workers.extend(exchange.run(key))
# Support last.
if last:
return workers
return workers
class DirectBinding(Binding):
"""
Specifies a host in the key via a '.' character
Although dots are used in the key, the behavior here is
that it maps directly to a host, thus direct.
"""
def test(self, key):
if '.' in key:
return True
return False
class TopicBinding(Binding):
"""
    Matches a 'bare' key, one without dots.
    AMQP generally considers topic exchanges to be those *with* dots,
    but we deviate here in terminology, as the behavior here matches
    that of a topic exchange (whereas where there are dots, behavior
    matches that of a direct exchange).
"""
def test(self, key):
if '.' not in key:
return True
return False
class FanoutBinding(Binding):
"""Match on fanout keys, where key starts with 'fanout.' string."""
def test(self, key):
if key.startswith('fanout~'):
return True
return False
class StubExchange(Exchange):
"""Exchange that does nothing."""
def run(self, key):
return [(key, None)]
class RingExchange(Exchange):
"""
Match Maker where hosts are loaded from a static file containing
a hashmap (JSON formatted).
__init__ takes optional ring dictionary argument, otherwise
    loads the ringfile from CONF.matchmaker_ringfile.
"""
def __init__(self, ring=None):
super(RingExchange, self).__init__()
if ring:
self.ring = ring
else:
fh = open(CONF.matchmaker_ringfile, 'r')
self.ring = json.load(fh)
fh.close()
self.ring0 = {}
for k in self.ring.keys():
self.ring0[k] = itertools.cycle(self.ring[k])
def _ring_has(self, key):
if key in self.ring0:
return True
return False
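# The ringfile loaded above is a plain JSON hashmap from topic name to a list
# of hosts, for example (illustrative contents for
# /etc/nova/matchmaker_ring.json):
#
#   {
#       "compute": ["host-1", "host-2"],
#       "scheduler": ["host-1"]
#   }
#
# The subclasses below either cycle through a topic's hosts (round robin) or
# fan a message out to all of them.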
class RoundRobinRingExchange(RingExchange):
"""A Topic Exchange based on a hashmap."""
def __init__(self, ring=None):
super(RoundRobinRingExchange, self).__init__(ring)
def run(self, key):
if not self._ring_has(key):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
)
return []
host = next(self.ring0[key])
return [(key + '.' + host, host)]
class FanoutRingExchange(RingExchange):
"""Fanout Exchange based on a hashmap."""
def __init__(self, ring=None):
super(FanoutRingExchange, self).__init__(ring)
def run(self, key):
# Assume starts with "fanout~", strip it for lookup.
nkey = key.split('fanout~')[1:][0]
if not self._ring_has(nkey):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
)
return []
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
class LocalhostExchange(Exchange):
"""Exchange where all direct topics are local."""
def __init__(self):
        super(LocalhostExchange, self).__init__()
def run(self, key):
return [(key.split('.')[0] + '.localhost', 'localhost')]
class DirectExchange(Exchange):
"""
Exchange where all topic keys are split, sending to second half.
i.e. "compute.host" sends a message to "compute" running on "host"
"""
def __init__(self):
        super(DirectExchange, self).__init__()
def run(self, key):
b, e = key.split('.', 1)
return [(b, e)]
class MatchMakerRing(MatchMakerBase):
"""
Match Maker where hosts are loaded from a static hashmap.
"""
def __init__(self, ring=None):
super(MatchMakerRing, self).__init__()
self.add_binding(FanoutBinding(), FanoutRingExchange(ring))
self.add_binding(DirectBinding(), DirectExchange())
self.add_binding(TopicBinding(), RoundRobinRingExchange(ring))
class MatchMakerLocalhost(MatchMakerBase):
"""
Match Maker where all bare topics resolve to localhost.
Useful for testing.
"""
def __init__(self):
super(MatchMakerLocalhost, self).__init__()
self.add_binding(FanoutBinding(), LocalhostExchange())
self.add_binding(DirectBinding(), DirectExchange())
self.add_binding(TopicBinding(), LocalhostExchange())
class MatchMakerStub(MatchMakerBase):
"""
Match Maker where topics are untouched.
Useful for testing, or for AMQP/brokered queues.
Will not work where knowledge of hosts is known (i.e. zeromq)
"""
def __init__(self):
        super(MatchMakerStub, self).__init__()
self.add_binding(FanoutBinding(), StubExchange())
self.add_binding(DirectBinding(), StubExchange())
self.add_binding(TopicBinding(), StubExchange())
| apache-2.0 | -4,742,951,009,940,241,000 | 28.406977 | 78 | 0.619481 | false |
toshywoshy/ansible | test/units/modules/network/fortios/test_fortios_log_fortianalyzer_override_filter.py | 21 | 9758 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_log_fortianalyzer_override_filter
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_log_fortianalyzer_override_filter.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_log_fortianalyzer_override_filter_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_fortianalyzer_override_filter': {
'anomaly': 'enable',
'dlp_archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_12,',
'netscan_vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_fortianalyzer_override_filter.fortios_log_fortianalyzer(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dlp-archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_12,',
'netscan-vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.fortianalyzer', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_log_fortianalyzer_override_filter_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_fortianalyzer_override_filter': {
'anomaly': 'enable',
'dlp_archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_12,',
'netscan_vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_fortianalyzer_override_filter.fortios_log_fortianalyzer(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dlp-archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_12,',
'netscan-vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.fortianalyzer', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_log_fortianalyzer_override_filter_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_fortianalyzer_override_filter': {
'anomaly': 'enable',
'dlp_archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_12,',
'netscan_vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_fortianalyzer_override_filter.fortios_log_fortianalyzer(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dlp-archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_12,',
'netscan-vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.fortianalyzer', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_log_fortianalyzer_override_filter_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_fortianalyzer_override_filter': {
'random_attribute_not_valid': 'tag',
'anomaly': 'enable',
'dlp_archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_12,',
'netscan_vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_fortianalyzer_override_filter.fortios_log_fortianalyzer(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dlp-archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_12,',
'netscan-vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.fortianalyzer', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 | 1,994,078,615,659,859,200 | 36.102662 | 133 | 0.602992 | false |
JaneliaSciComp/osgpyplusplus | examples/rough_translated1/osgfpdepth.py | 1 | 35481 | #!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgfpdepth"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgText
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osgfpdepth.cpp'
# OpenSceneGraph example, osgfpdepth.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osg/ColorMask>
#include <osg/CullFace>
#include <osg/Depth>
#include <osg/FrameBufferObject>
#include <osg/Geode>
#include <osg/Geometry>
#include <osg/GLExtensions>
#include <osg/Node>
#include <osg/NodeCallback>
#include <osg/Notify>
#include <osg/observer_ptr>
#include <osg/Projection>
#include <osg/Switch>
#include <osg/Texture2D>
#include <osgDB/ReadFile>
#include <osgGA/GUIEventHandler>
#include <osgUtil/Optimizer>
#include <osgText/Text>
#include <osgViewer/Renderer>
#include <osgViewer/Viewer>
#include <osgViewer/ViewerEventHandlers>
#include <iostream>
#include <sstream>
# Demonstration of floating point depth buffers. The most basic way to use
# * a floating point depth buffer in OpenGL is to create a frame buffer
# * object, attach a color and floating point depth texture, render,
# * and then copy the color texture to the screen. When doing
# * multisampling we can't use textures directly, so we have to create
# * render buffers with the proper format. Then we let OSG handle the
# * details of resolving the multisampling.
# *
# * When using a floating point depth buffer, it's advantageous to
# * reverse the depth buffer range (and the depth test, of course) so
# * that 0.0 corresponds to the far plane. See
# * e.g. http:#www.humus.name/index.php?ID=25 for details.
#
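# A minimal sketch of that reversed-range setup (assuming the osgpypp
# bindings mirror the C++ osg::Depth / osg::Camera API; DepthHandler below
# toggles the same state interactively):
#
#   camera.setClearDepth(0.0)                       # far plane now clears to 0.0
#   depth = osg.Depth(osg.Depth.GEQUAL, 1.0, 0.0)   # inverted test and range
#   camera.getOrCreateStateSet().setAttributeAndModes(depth)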
# using namespace osg
# using namespace std
# createFBO() and destroyFBO(), and the supporting classes and
# * functions below, are only used to test possible valid frame buffer
# * configurations at startup. They wouldn't be used in a normal OSG
# * program unless we wanted to enumerate all the valid FBO
# * combinations and let the user choose between them.
#
# Properties of an FBO that we will try to create
class FboConfig :
FboConfig()
: colorFormat(0), depthFormat(0), redbits(0), depthBits(0),
depthSamples(0), coverageSamples(0)
FboConfig( string name_, GLenum colorFormat_, GLenum depthFormat_,
int redbits_, int depthBits_, int depthSamples_ = 0,
coverageSamples_ = 0)
: name(name_), colorFormat(colorFormat_), depthFormat(depthFormat_),
redbits(redbits_), depthBits(depthBits_), depthSamples(depthSamples_),
coverageSamples(coverageSamples_)
name = string()
colorFormat = GLenum()
depthFormat = GLenum()
redbits = int()
depthBits = int()
depthSamples = int()
coverageSamples = int()
# Properties of a buffer
class BufferConfig :
BufferConfig()
BufferConfig( string name_, GLenum format_, int bits_)
: name(name_), format(format_), bits(bits_)
name = string()
format = GLenum()
bits = int()
typedef vector<BufferConfig> BufferConfigList
validConfigs = vector<FboConfig>()
# Ugly global variables for the viewport width and height
width, height = 0, 0
# This is only used when testing possible frame buffer configurations
# to find valid ones.
class FboData :
tex = ref_ptr<Texture2D>() # color texture
depthTex = ref_ptr<Texture2D>() # depth texture
fb = ref_ptr<FrameBufferObject>() # render framebuffer
resolveFB = ref_ptr<FrameBufferObject>() # multisample resolve target
# C++ forward declaration from the original source (not needed in Python):
#   Texture2D* makeDepthTexture(int width, int height, GLenum internalFormat)
# Assemble lists of the valid buffer configurations, along with the
# possibilities for multisample coverage antialiasing, if any.
def getPossibleConfigs(gc, colorConfigs, depthConfigs, coverageConfigs):
maxSamples = 0
coverageSampleConfigs = 0
contextID = gc.getState().getContextID()
colorConfigs.push_back(BufferConfig("RGBA8", GL_RGBA8, 8))
depthConfigs.push_back(BufferConfig("D24", GL_DEPTH_COMPONENT24, 24))
fboe = FBOExtensions.instance(contextID, True)
if not fboe.isSupported() :
return
if fboe.isMultisampleSupported() :
glGetIntegerv(GL_MAX_SAMPLES_EXT, maxSamples)
# isMultisampleCoverageSupported
if isGLExtensionSupported(contextID,
"GL_NV_framebuffer_multisample_coverage") :
glGetIntegerv(GL_MAX_MULTISAMPLE_COVERAGE_MODES_NV,
coverageSampleConfigs)
coverageConfigs.resize(coverageSampleConfigs * 2 + 4)
glGetIntegerv(GL_MULTISAMPLE_COVERAGE_MODES_NV, coverageConfigs[0])
if isGLExtensionSupported(contextID, "GL_ARB_depth_buffer_float") :
depthConfigs.push_back(BufferConfig("D32F", GL_DEPTH_COMPONENT32F, 32))
elif isGLExtensionSupported(contextID, "GL_NV_depth_buffer_float") :
depthConfigs.push_back(BufferConfig("D32F", GL_DEPTH_COMPONENT32F_NV,
32))
def checkFramebufferStatus(gc, silent):
    state = gc.getState()
contextID = state.getContextID()
fboe = FBOExtensions.instance(contextID, True)
switch(fboe.glCheckFramebufferStatus(GL_FRAMEBUFFER_EXT))
case GL_FRAMEBUFFER_COMPLETE_EXT:
break
case GL_FRAMEBUFFER_UNSUPPORTED_EXT:
if not silent :
print "Unsupported framebuffer format\n"
return False
case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT:
if not silent :
print "Framebuffer incomplete, missing attachment\n"
return False
case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT:
if not silent :
print "Framebuffer incomplete, duplicate attachment\n"
return False
case GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT:
if not silent :
print "Framebuffer incomplete, attached images must have same dimensions\n"
return False
case GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT:
if not silent :
print "Framebuffer incomplete, attached images must have same format\n"
return False
case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT:
if not silent :
print "Framebuffer incomplete, missing draw buffer\n"
return False
case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT:
if not silent :
print "Framebuffer incomplete, missing read buffer\n"
return False
default:
return False
return True
# Attempt to create an FBO with a certain configuration. If the FBO
# is created with fewer bits in any of its parameters, the creation
# is deemed to have failed. Even though the result is a valid FBO,
# we're only interested in discrete, valid configurations.
def createFBO(gc, config, data):
result = True
multisample = config.depthSamples > 0
csaa = config.coverageSamples > config.depthSamples
data.fb = FrameBufferObject()
    texWidth, texHeight = 512, 512
data.tex = Texture2D()
data.tex.setTextureSize(texWidth, texHeight)
data.tex.setInternalFormat(config.colorFormat)
data.tex.setSourceFormat(GL_RGBA)
data.tex.setSourceType(GL_FLOAT)
data.tex.setFilter(Texture.MIN_FILTER, Texture.LINEAR_MIPMAP_LINEAR)
data.tex.setFilter(Texture.MAG_FILTER, Texture.LINEAR)
data.tex.setWrap(Texture.WRAP_S, Texture.CLAMP_TO_EDGE)
data.tex.setWrap(Texture.WRAP_T, Texture.CLAMP_TO_EDGE)
colorRB = 0
depthRB = 0
if multisample :
data.resolveFB = FrameBufferObject()
data.resolveFB.setAttachment(Camera.COLOR_BUFFER,
FrameBufferAttachment(data.tex))
colorRB = RenderBuffer(texWidth, texHeight, config.colorFormat,
config.coverageSamples, config.depthSamples)
data.fb.setAttachment(Camera.COLOR_BUFFER,
FrameBufferAttachment(colorRB))
depthRB = RenderBuffer(texWidth, texHeight, config.depthFormat,
config.coverageSamples, config.depthSamples)
data.fb.setAttachment(Camera.DEPTH_BUFFER,
FrameBufferAttachment(depthRB))
else:
data.depthTex = makeDepthTexture(texWidth, texHeight,
config.depthFormat)
data.fb.setAttachment(Camera.COLOR_BUFFER,
FrameBufferAttachment(data.tex))
data.fb.setAttachment(Camera.DEPTH_BUFFER,
FrameBufferAttachment(data.depthTex))
    state = gc.getState()
contextID = state.getContextID()
fboe = FBOExtensions.instance(contextID, True)
data.fb.apply(state)
result = checkFramebufferStatus(gc, True)
if not result :
fboe.glBindFramebuffer(GL_FRAMEBUFFER_EXT, 0)
return False
query = int()
if multisample :
colorRBID = colorRB.getObjectID(contextID, fboe)
fboe.glBindRenderbuffer(GL_RENDERBUFFER_EXT, colorRBID)
if csaa :
fboe.glGetRenderbufferParameteriv(GL_RENDERBUFFER_EXT,
GL_RENDERBUFFER_COVERAGE_SAMPLES_NV,
query)
if query < config.coverageSamples :
result = False
else:
config.coverageSamples = query
fboe.glGetRenderbufferParameteriv(GL_RENDERBUFFER_EXT,
GL_RENDERBUFFER_COLOR_SAMPLES_NV,
query)
if query < config.depthSamples :
result = False
else:
config.depthSamples = query # report back the actual number
else:
fboe.glGetRenderbufferParameteriv(GL_RENDERBUFFER_EXT,
GL_RENDERBUFFER_SAMPLES_EXT,
query)
if query < config.depthSamples :
result = False
else:
config.depthSamples = query
glGetIntegerv( GL_RED_BITS, query)
    if query != config.redbits :
result = False
glGetIntegerv(GL_DEPTH_BITS, query)
    if query != config.depthBits :
result = False
if result and multisample and data.resolveFB.valid() :
data.resolveFB.apply(state)
result = checkFramebufferStatus(gc, True)
if result :
glGetIntegerv( GL_RED_BITS, query)
            if query != config.redbits :
result = False
fboe.glBindFramebuffer(GL_FRAMEBUFFER_EXT, 0)
return result
def destroyFBO(gc, data):
data.tex = 0
data.depthTex = 0
data.fb = 0
data.resolveFB = 0
    state = gc.getState()
availableTime = 100.0
RenderBuffer.flushDeletedRenderBuffers(state.getContextID(), 0.0,
availableTime)
availableTime = 100.0
FrameBufferObject.flushDeletedFrameBufferObjects(state.getContextID(),
0.0, availableTime)
# C++ forward declarations from the original source (not needed in Python):
#   void setAttachmentsFromConfig(Camera* camera, FboConfig config)
#   Switch* makeTexturesAndGeometry(int width, int height, Switch* sw = 0)
# Application state accessed from event handlers and main function
# contains state that can be changed by the user and the OSG classes
# used to display / indicate that state.
#
# camera - Camera with fbo, using either fp depth buffer or fixed
# switch child 0 - texture containing rendering of scene
# switch child 1 - fp depth buffer as texture
# switch child 2 - integer depth buffer as texture
# textNotAvailable- "not available" text if texture isn't valid.
class AppState (Referenced) :
AppState(osgViewer.Viewer* viewer_)
setStateFromConfig = void( FboConfig config)
advanceConfig = void(int increment)
updateDisplayedTexture = void()
updateNear = void()
virtual ~AppState()
sw = ref_ptr<Switch>() # switch between displayed texture
displayScene = bool()
invertRange = bool()
currentConfig = int()
viewer = osgViewer.Viewer*()
zNear = double()
camera = ref_ptr<Camera>()
# text displayed on the screen showing the user's choices
textProjection = ref_ptr<Projection>()
configText = ref_ptr<osgText.Text>()
zNearText = ref_ptr<osgText.Text>()
textNotAvailable = ref_ptr<Geode>()
textInverted = ref_ptr<Geode>()
AppState.AppState(osgViewer.Viewer* viewer_)
: displayScene(True), invertRange(True), currentConfig(0),
viewer(viewer_), zNear(0.03125)
sw = Switch()
fontName = string("fonts/arial.ttf")
# Text description of current config
configText = osgText.Text()
configText.setDataVariance(Object.DYNAMIC)
configText.setFont(fontName)
configText.setPosition(Vec3(50.0, 50.0, 0.0))
configText.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
textGeode = Geode()
textGeode.addDrawable(configText)
# Text for the near plane distance
zNearText = osgText.Text()
zNearText.setDataVariance(Object.DYNAMIC)
zNearText.setFont(fontName)
zNearText.setPosition(Vec3(1230.0, 50.0, 0.0))
zNearText.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
zNearText.setAlignment(osgText.Text.RIGHT_BASE_LINE)
textGeode.addDrawable(zNearText)
# Projection that lets the text be placed in pixels.
textProjection = Projection()
textProjection.setMatrix(Matrix.ortho2D(0,1280,0,1024))
textProjection.addChild(textGeode)
# "texture not available" text displayed when the user trys to
# display the depth texture while multisampling.
noCanDo = osgText.Text()
noCanDo.setFont(fontName)
noCanDo.setPosition(Vec3(512.0, 384.0, 0.0))
noCanDo.setColor(Vec4(1.0, 0.0, 0.0, 1.0))
noCanDo.setText("not available")
textNotAvailable = Geode()
textNotAvailable.addDrawable(noCanDo)
textProjection.addChild(textNotAvailable)
    # Is the depth test inverted?
    inverted = osgText.Text()
inverted.setFont(fontName)
inverted.setPosition(Vec3(512.0, 50.0, 0.0))
inverted.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
inverted.setText("inverted depth test")
textInverted = Geode()
textInverted.addDrawable(inverted)
textInverted.setNodeMask(~0u)
textProjection.addChild(textInverted)
textProjection.getOrCreateStateSet().setRenderBinDetails(11, "RenderBin")
void AppState.setStateFromConfig( FboConfig config)
camera = viewer.getSlave(0)._camera
setAttachmentsFromConfig(camera, config)
renderer = dynamic_cast<osgViewer.Renderer*>(camera.getRenderer())
if renderer :
renderer.setCameraRequiresSetUp(True)
if configText.valid() :
configText.setText(validConfigs[currentConfig].name)
configText.update()
updateDisplayedTexture()
void AppState.advanceConfig(int increment)
currentConfig = (currentConfig + increment) % validConfigs.size()
setStateFromConfig(validConfigs[currentConfig])
void AppState.updateDisplayedTexture()
if displayScene :
sw.setSingleChildOn(0)
elif validConfigs[currentConfig].depthSamples > 0
or validConfigs[currentConfig].coverageSamples > 0 :
sw.setAllChildrenOff()
elif validConfigs[currentConfig].depthFormat != GL_DEPTH_COMPONENT24 :
    sw.setSingleChildOn(1)
else:
    sw.setSingleChildOn(2)
if displayScene
or (validConfigs[currentConfig].depthSamples == 0
and validConfigs[currentConfig].coverageSamples == 0) :
textNotAvailable.setNodeMask(0u)
else:
textNotAvailable.setNodeMask(~0u)
void AppState.updateNear()
# Assume that the viewing frustum is symmetric.
double fovy, aspectRatio, cNear, cFar
viewer.getCamera().getProjectionMatrixAsPerspective(fovy, aspectRatio,
cNear, cFar)
viewer.getCamera().setProjectionMatrixAsPerspective(fovy, aspectRatio,
zNear, cFar)
nearStream = stringstream()
nearStream, "near: ", zNear
zNearText.setText(nearStream.str())
zNearText.update()
class ConfigHandler (osgGA.GUIEventHandler) :
ConfigHandler(AppState* appState)
: _appState(appState)
virtual bool handle( osgGA.GUIEventAdapter ea,
osgGA.GUIActionAdapter aa,
Object*, NodeVisitor* #nv)
if ea.getHandled() : return False
viewer = dynamic_cast<osgViewer.Viewer*>(aa)
if not viewer : return False
switch(ea.getEventType())
case osgGA.GUIEventAdapter.KEYUP:
if ea.getKey()==ord("d") :
_appState.displayScene = not _appState.displayScene
_appState.updateDisplayedTexture()
return True
elif ea.getKey()==osgGA.GUIEventAdapter.KEY_Right or
ea.getKey()==osgGA.GUIEventAdapter.KEY_KP_Right :
_appState.advanceConfig(1)
return True
elif ea.getKey()==osgGA.GUIEventAdapter.KEY_Left or
ea.getKey()==osgGA.GUIEventAdapter.KEY_KP_Left :
_appState.advanceConfig(-1)
return True
break
default:
break
return False
def getUsage(usage):
usage.addKeyboardMouseBinding("d", "display depth texture")
usage.addKeyboardMouseBinding("right arrow",
"next frame buffer configuration")
usage.addKeyboardMouseBinding("left arrow",
"previous frame buffer configuration")
virtual ~ConfigHandler()
_appState = ref_ptr<AppState>()
class DepthHandler (osgGA.GUIEventHandler) :
DepthHandler(AppState *appState, Depth* depth)
: _appState(appState), _depth(depth)
depth.setDataVariance(Object.DYNAMIC)
virtual bool handle( osgGA.GUIEventAdapter ea,
osgGA.GUIActionAdapter #aa,
Object*, NodeVisitor* #nv)
if ea.getHandled() : return False
depth = ref_ptr<Depth>()
if not _depth.lock(depth) : return False
switch(ea.getEventType())
case(osgGA.GUIEventAdapter.KEYUP):
if ea.getKey() == ord("i") :
_appState.invertRange = not _appState.invertRange
if not _appState.invertRange :
_appState.camera.setClearDepth(1.0)
depth.setFunction(Depth.LESS)
depth.setRange(0.0, 1.0)
_appState.textInverted.setNodeMask(0u)
else:
_appState.camera.setClearDepth(0.0)
depth.setFunction(Depth.GEQUAL)
depth.setRange(1.0, 0.0)
_appState.textInverted.setNodeMask(~0u)
return True
elif ea.getKey()==osgGA.GUIEventAdapter.KEY_Up or
ea.getKey()==osgGA.GUIEventAdapter.KEY_KP_Up :
_appState.zNear *= 2.0
_appState.updateNear()
return True
elif ea.getKey()==osgGA.GUIEventAdapter.KEY_Down or
ea.getKey()==osgGA.GUIEventAdapter.KEY_KP_Down :
_appState.zNear *= .5
_appState.updateNear()
return True
break
default:
break
return False
def getUsage(usage):
usage.addKeyboardMouseBinding("i", "invert depth buffer range")
usage.addKeyboardMouseBinding("up arrow",
"double near plane distance")
usage.addKeyboardMouseBinding("down arrow",
"half near plane distance")
virtual ~DepthHandler()
_appState = ref_ptr<AppState>()
_depth = observer_ptr<Depth>()
def createTextureQuad(texture):
vertices = Vec3Array()
vertices.push_back(Vec3(-1.0, -1.0, 0.0))
vertices.push_back(Vec3(1.0, -1.0, 0.0))
vertices.push_back(Vec3(1.0, 1.0, 0.0))
vertices.push_back(Vec3(-1.0, 1.0, 0.0))
texcoord = Vec2Array()
texcoord.push_back(Vec2(0.0, 0.0))
texcoord.push_back(Vec2(1.0, 0.0))
texcoord.push_back(Vec2(1.0, 1.0))
texcoord.push_back(Vec2(0.0, 1.0))
geom = Geometry()
geom.setVertexArray(vertices)
geom.setTexCoordArray(0, texcoord)
geom.addPrimitiveSet(DrawArrays(GL_QUADS, 0, 4))
geode = Geode()
geode.addDrawable(geom)
geode.getOrCreateStateSet().setTextureAttributeAndModes(0, texture, StateAttribute.ON)
return geode
class ResizedCallback (osg.GraphicsContext.ResizedCallback) :
ResizedCallback(AppState* appState)
: _appState(appState)
resizedImplementation = void(GraphicsContext* gc, int x, int y, int width,
int height)
_appState = ref_ptr<AppState>()
void ResizedCallback.resizedImplementation(GraphicsContext* gc, int x, int y,
int width, int height)
gc.resizedImplementation(x, y, width, height)
makeTexturesAndGeometry(width, height, _appState.sw)
_appState.setStateFromConfig(validConfigs[_appState
.currentConfig])
viewer = _appState.viewer
vp = viewer.getSlave(0)._camera.getViewport()
if vp :
oldWidth, oldHeight = vp.width(), vp.height()
aspectRatioChange = (width / oldWidth) / (height / oldHeight)
vp.setViewport(0, 0, width, height)
if aspectRatioChange != 1.0 :
master = viewer.getCamera()
switch (master.getProjectionResizePolicy())
case Camera.HORIZONTAL:
master.getProjectionMatrix()
*= Matrix.scale(1.0/aspectRatioChange,1.0,1.0)
break
case Camera.VERTICAL:
master.getProjectionMatrix()
*= Matrix.scale(1.0, aspectRatioChange,1.0)
break
default:
break
# Prefer GL_DEPTH_COMPONENT32F, otherwise use
# GL_DEPTH_COMPONENT32F_NV if available
depthTextureEnum = 0
# Standard OSG code for initializing osgViewer.Viewer with explicit
# creation of own graphics context. This is also a good time to test
# for valid frame buffer configurations we have a valid graphics
# context, but multithreading hasn't started, etc.
def setupGC(viewer, arguments):
x, y, width, height = -1, -1, -1, -1
while arguments.read("--window",x,y,width,height) :
wsi = GraphicsContext.getWindowingSystemInterface()
if not wsi :
OSG_NOTIFY(NOTICE), "View.setUpViewOnSingleScreen() : Error, no WindowSystemInterface available, cannot create windows."
return 0
ds = viewer.getDisplaySettings() if viewer.getDisplaySettings() else DisplaySettings.instance()
si = GraphicsContext.ScreenIdentifier()
si.readDISPLAY()
# displayNum has not been set so reset it to 0.
if si.displayNum<0 : si.displayNum = 0
decoration = True
if x < 0 :
unsigned int w, h
wsi.getScreenResolution(si, w, h)
x = 0
y = 0
width = w
height = h
decoration = False
traits = GraphicsContext.Traits(ds)
traits.hostName = si.hostName
traits.displayNum = si.displayNum
traits.screenNum = si.screenNum
traits.x = x
traits.y = y
traits.width = width
traits.height = height
traits.windowDecoration = decoration
traits.doubleBuffer = True
traits.sharedContext = 0
gc = GraphicsContext.createGraphicsContext(traits)
gw = dynamic_cast<osgViewer.GraphicsWindow*>(gc)
if gw :
OSG_NOTIFY(INFO), "View.setUpViewOnSingleScreen - GraphicsWindow has been created successfully."
gw.getEventQueue().getCurrentEventState()
.setWindowRectangle(0, 0, width, height)
else:
OSG_NOTIFY(NOTICE), " GraphicsWindow has not been created successfully."
double fovy, aspectRatio, zNear, zFar
viewer.getCamera().getProjectionMatrixAsPerspective(fovy, aspectRatio,
zNear, zFar)
newAspectRatio = double(traits.width) / double(traits.height)
aspectRatioChange = newAspectRatio / aspectRatio
if aspectRatioChange != 1.0 :
viewer.getCamera().getProjectionMatrix()
*= Matrix.scale(1.0/aspectRatioChange,1.0,1.0)
# Context has to be current to test for extensions
gc.realize()
gc.makeCurrent()
contextID = gc.getState().getContextID()
fboe = FBOExtensions.instance(contextID, True)
if not fboe.isSupported() :
OSG_NOTIFY(NOTICE), "Frame buffer objects are not supported\n"
gc.releaseContext()
gc.close(True)
return 0
if isGLExtensionSupported(contextID, "GL_ARB_depth_buffer_float") :
depthTextureEnum = GL_DEPTH_COMPONENT32F
elif isGLExtensionSupported(contextID, "GL_NV_depth_buffer_float") :
depthTextureEnum = GL_DEPTH_COMPONENT32F_NV
colorConfigs = BufferConfigList()
depthConfigs = BufferConfigList()
coverageConfigs = vector<int>()
getPossibleConfigs(gc, colorConfigs, depthConfigs, coverageConfigs)
coverageSampleConfigs = (coverageConfigs.size() - 4) / 2
print "color configs\nname\tbits\n"
for colorItr in colorConfigs:
    for depthItr in depthConfigs:
root = colorItr.name + " " + depthItr.name
config = FboConfig(root, colorItr.format, depthItr.format,
colorItr.bits, depthItr.bits)
data = FboData()
if createFBO(gc, config, data) :
validConfigs.push_back(config)
destroyFBO(gc, data)
if coverageConfigs.size() > 0 :
#CSAA provides a list of all supported AA modes for
#quick enumeration
for kk in range(coverageSampleConfigs):
msText = stringstream()
msText, root
config.depthSamples = coverageConfigs[kk*2+1]
config.coverageSamples = coverageConfigs[kk*2]
if config.coverageSamples == config.depthSamples :
# Normal antialiasing
msText, " - ", config.depthSamples, " MSAA"
else:
# coverage antialiasing
msText, " - ", config.coverageSamples, "/", config.depthSamples, " CSAA"
config.name = msText.str()
if createFBO(gc, config, data) :
validConfigs.push_back( config)
destroyFBO(gc, data)
if validConfigs.empty() :
print "no valid frame buffer configurations not \n"
return 0
print "valid frame buffer configurations:\n"
for itr in validConfigs:
print itr.name, "\n"
gc.releaseContext()
return gc.release()
colorTexture = ref_ptr<Texture2D>()
depthTexture = ref_ptr<Texture2D>()
depthTexture24 = ref_ptr<Texture2D>()
def makeDepthTexture(width, height, internalFormat):
depthTex = Texture2D()
depthTex.setTextureSize(width, height)
depthTex.setSourceFormat(GL_DEPTH_COMPONENT)
depthTex.setSourceType(GL_FLOAT)
depthTex.setInternalFormat(internalFormat)
depthTex.setFilter(Texture2D.MIN_FILTER, Texture2D.NEAREST)
depthTex.setFilter(Texture2D.MAG_FILTER, Texture2D.NEAREST)
depthTex.setWrap(Texture.WRAP_S, Texture.CLAMP_TO_EDGE)
depthTex.setWrap(Texture.WRAP_T, Texture.CLAMP_TO_EDGE)
return depthTex
def makeRttCamera(gc, width, height):
rttCamera = Camera()
rttCamera.setGraphicsContext(gc)
rttCamera.setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
rttCamera.setClearColor(Vec4(0.0, 0.4, 0.5, 0.0))
# normally the depth test is inverted, although the user can
# change that.
rttCamera.setClearDepth(0.0)
rttCamera.setViewport(0, 0, width, height)
rttCamera.setDrawBuffer(GL_FRONT)
rttCamera.setReadBuffer(GL_FRONT)
rttCamera.setRenderTargetImplementation(Camera.FRAME_BUFFER_OBJECT)
rttCamera.setComputeNearFarMode(CullSettings.DO_NOT_COMPUTE_NEAR_FAR)
return rttCamera
def setAttachmentsFromConfig(camera, config):
# XXX Detaching the old buffers may not be necessary.
if not camera.getBufferAttachmentMap().empty() :
camera.detach(Camera.COLOR_BUFFER)
camera.detach(Camera.DEPTH_BUFFER)
camera.attach(Camera.COLOR_BUFFER, colorTexture, 0, 0, False,
config.coverageSamples, config.depthSamples)
if config.coverageSamples != 0 or config.depthSamples != 0 :
camera.attach(Camera.DEPTH_BUFFER, config.depthFormat)
elif config.depthFormat == GL_DEPTH_COMPONENT24 :
camera.attach(Camera.DEPTH_BUFFER, depthTexture24)
else:
camera.attach(Camera.DEPTH_BUFFER, depthTexture)
# Create the parts of the local scene graph used to display the final
# results.
def makeTexturesAndGeometry(width, height, sw):
if not sw :
sw = Switch()
colorTexture = Texture2D()
colorTexture.setTextureSize(width, height)
colorTexture.setInternalFormat(GL_RGBA)
colorTexture.setFilter(Texture2D.MIN_FILTER, Texture2D.LINEAR)
colorTexture.setFilter(Texture2D.MAG_FILTER, Texture2D.LINEAR)
colorTexture.setWrap(Texture.WRAP_S, Texture.CLAMP_TO_EDGE)
colorTexture.setWrap(Texture.WRAP_T, Texture.CLAMP_TO_EDGE)
colorTexture.setBorderColor(Vec4(0, 0, 0, 0))
depthTexture24 = makeDepthTexture(width, height, GL_DEPTH_COMPONENT24)
if depthTextureEnum :
    depthTexture = makeDepthTexture(width, height, depthTextureEnum)
else:
    depthTexture = depthTexture24
sw.removeChildren(0, sw.getNumChildren())
sw.addChild(createTextureQuad(colorTexture))
sw.addChild(createTextureQuad(depthTexture))
sw.addChild(createTextureQuad(depthTexture24))
sw.setSingleChildOn(0)
return sw
def main(argv):
# use an ArgumentParser object to manage the program arguments.
arguments = ArgumentParser(argv)
arguments.getApplicationUsage().setDescription(arguments.getApplicationName()
+ " demonstrates using a floating point depth buffer.\nThe user can invert the depth buffer range and choose among available multi-sample configurations.")
arguments.getApplicationUsage().setCommandLineUsage(arguments.getApplicationName()+" [options] filename ...")
arguments.getApplicationUsage().addCommandLineOption("--far <number>", "Set far plane value")
# if user request help write it out to cout.
if arguments.read("-h") or arguments.read("--help") :
arguments.getApplicationUsage().write(std.cout)
return 1
zFar = 500.0
while arguments.read("--far", zFar) :
# construct the viewer.
viewer = osgViewer.Viewer()
appState = AppState(viewer)
viewer.addEventHandler(osgViewer.StatsHandler())
viewer.addEventHandler(osgViewer.WindowSizeHandler())
viewer.addEventHandler(osgViewer.ScreenCaptureHandler())
# The aspect ratio is set to the correct ratio for the window in
# setupGC().
viewer.getCamera()
.setProjectionMatrixAsPerspective(40.0, 1.0, appState.zNear, zFar)
gc = setupGC(viewer, arguments)
if not gc :
return 1
gc.setResizedCallback(ResizedCallback(appState))
traits = gc.getTraits()
width = traits.width
height = traits.height
if arguments.argc()<=1 :
arguments.getApplicationUsage().write(std.cout,osg.ApplicationUsage.COMMAND_LINE_OPTION)
return 1
loadedModel = osgDB.readNodeFiles(arguments)
if not loadedModel :
cerr, "couldn't load ", argv[1], "\n"
return 1
optimizer = osgUtil.Optimizer()
optimizer.optimize(loadedModel)
# creates texture to be rendered
sw = makeTexturesAndGeometry(width, height, appState.sw)
rttCamera = makeRttCamera(gc, width, height)
rttCamera.setRenderOrder(Camera.PRE_RENDER)
viewer.addSlave(rttCamera)
appState.camera = rttCamera
# geometry and slave camera to display the result
displayRoot = Group()
displayRoot.addChild(sw)
displayRoot.addChild(appState.textProjection)
displaySS = displayRoot.getOrCreateStateSet()
displaySS.setMode(GL_LIGHTING, StateAttribute.OFF)
displaySS.setMode(GL_DEPTH_TEST, StateAttribute.OFF)
texCamera = Camera()
texCamera.setGraphicsContext(gc)
texCamera.setClearMask(GL_COLOR_BUFFER_BIT)
texCamera.setClearColor(Vec4(0.0, 0.0, 0.0, 0.0))
texCamera.setReferenceFrame(Camera.ABSOLUTE_RF)
texCamera.setViewport(0, 0, width, height)
texCamera.setDrawBuffer(GL_BACK)
texCamera.setReadBuffer(GL_BACK)
texCamera.addChild(displayRoot)
texCamera.setAllowEventFocus(False)
texCamera.setCullingMode(CullSettings.NO_CULLING)
texCamera.setProjectionResizePolicy(Camera.FIXED)
viewer.addSlave(texCamera, Matrixd(), Matrixd(), False)
viewer.addEventHandler(ConfigHandler(appState))
# add model to the viewer.
sceneRoot = Group()
sceneSS = sceneRoot.getOrCreateStateSet()
depth = Depth(Depth.GEQUAL, 1.0, 0.0)
sceneSS.setAttributeAndModes(depth,(StateAttribute.ON
| StateAttribute.OVERRIDE))
#if 0
# Hack to work around Blender osg export bug
sceneSS.setAttributeAndModes(CullFace(CullFace.BACK))
#endif
sceneRoot.addChild(loadedModel)
appState.setStateFromConfig(validConfigs[0])
appState.updateNear()
viewer.addEventHandler(DepthHandler(appState, depth))
# add the help handler
viewer.addEventHandler(osgViewer
.HelpHandler(arguments.getApplicationUsage()))
viewer.setSceneData(sceneRoot)
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | -8,710,005,781,620,259,000 | 38.555184 | 180 | 0.65311 | false |
uniteddiversity/mediadrop | mediadrop/plugin/events.py | 9 | 14024 | # This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""
Abstract events which plugins subscribe to and are called by the app.
"""
from collections import deque
import logging
from sqlalchemy.orm.interfaces import MapperExtension
__all__ = ['Event', 'GeneratorEvent', 'FetchFirstResultEvent', 'observes']
log = logging.getLogger(__name__)
class Event(object):
"""
An arbitrary event that's triggered and observed by different parts of the app.
>>> e = Event()
>>> e.observers.append(lambda x: x)
>>> e('x')
"""
def __init__(self, args=()):
self.args = args and tuple(args) or None
self.pre_observers = deque()
self.post_observers = deque()
@property
def observers(self):
return tuple(self.pre_observers) + tuple(self.post_observers)
def __call__(self, *args, **kwargs):
# This is helpful for events which are triggered explicitly in the code
# (e.g. Environment.loaded)
for observer in self.observers:
observer(*args, **kwargs)
def __iter__(self):
return iter(self.observers)
class GeneratorEvent(Event):
"""
An arbitrary event that yields all results from all observers.
"""
def is_list_like(self, value):
if isinstance(value, basestring):
return False
try:
iter(value)
except TypeError:
return False
return True
def __call__(self, *args, **kwargs):
for observer in self.observers:
result = observer(*args, **kwargs)
if self.is_list_like(result):
for item in result:
yield item
else:
yield result
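# Illustrative usage sketch (not part of the original module): observers of a
# GeneratorEvent may return a single value or an iterable, and the event
# flattens one level when iterated. The observer callables below are made up.
#
#     links = GeneratorEvent()
#     links.post_observers.append(lambda: ['a', 'b'])
#     links.post_observers.append(lambda: 'c')
#     list(links())  # -> ['a', 'b', 'c']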
class FetchFirstResultEvent(Event):
"""
An arbitrary event that return the first result from its observers
"""
def __call__(self, *args, **kwargs):
for observer in self.observers:
result = observer(*args, **kwargs)
if result is not None:
return result
return None
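# Illustrative usage sketch (not part of the original module): the first
# observer returning something other than None wins and later observers are
# not called. The observer callables below are made up.
#
#     title = FetchFirstResultEvent()
#     title.post_observers.append(lambda page: None)
#     title.post_observers.append(lambda page: 'About us')
#     title('about')  # -> 'About us'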
class observes(object):
"""
Register the decorated function as an observer of the given event.
"""
def __init__(self, *events, **kwargs):
self.events = events
self.appendleft = kwargs.pop('appendleft', False)
self.run_before = kwargs.pop('run_before', False)
if kwargs:
first_key = list(kwargs)[0]
raise TypeError('observes() got an unexpected keyword argument %r' % first_key)
def __call__(self, func):
for event in self.events:
observers = event.post_observers
if self.run_before:
observers = event.pre_observers
if self.appendleft:
observers.appendleft(func)
else:
observers.append(func)
return func
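# Illustrative usage sketch (not part of the original module): plugins register
# observers with this decorator; ``run_before`` targets the pre_observers deque
# and ``appendleft`` pushes to the front of the chosen deque.
#
#     @observes(Environment.database_ready)
#     def create_plugin_tables():
#         ...
#
#     @observes(Environment.loaded, run_before=True)
#     def tweak_config(config):
#         ...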
class MapperObserver(MapperExtension):
"""
Fire events whenever the mapper triggers any kind of row modification.
"""
def __init__(self, event_group):
self.event_group = event_group
def after_delete(self, mapper, connection, instance):
self.event_group.after_delete(instance)
def after_insert(self, mapper, connection, instance):
self.event_group.after_insert(instance)
def after_update(self, mapper, connection, instance):
self.event_group.after_update(instance)
def before_delete(self, mapper, connection, instance):
self.event_group.before_delete(instance)
def before_insert(self, mapper, connection, instance):
self.event_group.before_insert(instance)
def before_update(self, mapper, connection, instance):
self.event_group.before_update(instance)
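# Illustrative wiring sketch (not part of the original module): MapperObserver
# is a SQLAlchemy MapperExtension, so it would typically be attached when a
# model is mapped, pointing at one of the event groups defined below.
# ``MediaModel`` and ``media_table`` stand in for the mapped class and table.
#
#     from mediadrop.plugin import events
#     mapper(MediaModel, media_table,
#            extension=events.MapperObserver(events.Media))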
###############################################################################
# Application Setup
class Environment(object):
before_route_setup = Event(['mapper'])
after_route_setup = Event(['mapper'])
# TODO: deprecation warning
routes = after_route_setup
init_model = Event([])
loaded = Event(['config'])
# fires when a new database was initialized (tables created)
database_initialized = Event([])
# an existing database was migrated to a newer DB schema
database_migrated = Event([])
# the environment has been loaded, the database is ready to use
database_ready = Event([])
###############################################################################
# Controllers
class Admin(object):
class CategoriesController(object):
index = Event(['**kwargs'])
bulk = Event(['**kwargs'])
edit = Event(['**kwargs'])
save = Event(['**kwargs'])
class CommentsController(object):
index = Event(['**kwargs'])
save_status = Event(['**kwargs'])
save_edit = Event(['**kwargs'])
class IndexController(object):
index = Event(['**kwargs'])
media_table = Event(['**kwargs'])
class MediaController(object):
bulk = Event(['type=None, ids=None, **kwargs'])
index = Event(['**kwargs'])
edit = Event(['**kwargs'])
save = Event(['**kwargs'])
add_file = Event(['**kwargs'])
edit_file = Event(['**kwargs'])
merge_stubs = Event(['**kwargs'])
save_thumb = Event(['**kwargs'])
update_status = Event(['**kwargs'])
class PodcastsController(object):
index = Event(['**kwargs'])
edit = Event(['**kwargs'])
save = Event(['**kwargs'])
save_thumb = Event(['**kwargs'])
class TagsController(object):
index = Event(['**kwargs'])
edit = Event(['**kwargs'])
save = Event(['**kwargs'])
bulk = Event(['**kwargs'])
class UsersController(object):
index = Event(['**kwargs'])
edit = Event(['**kwargs'])
save = Event(['**kwargs'])
delete = Event(['**kwargs'])
class GroupsController(object):
index = Event(['**kwargs'])
edit = Event(['**kwargs'])
save = Event(['**kwargs'])
delete = Event(['**kwargs'])
class Players(object):
HTML5OrFlashPrefsForm = Event(['form'])
SublimePlayerPrefsForm = Event(['form'])
YoutubeFlashPlayerPrefsForm = Event(['form'])
class PlayersController(object):
delete = Event(['**kwargs'])
disable = Event(['**kwargs'])
edit = Event(['**kwargs'])
enable = Event(['**kwargs'])
index = Event(['**kwargs'])
reorder = Event(['**kwargs'])
class Settings(object):
AdvertisingForm = Event(['form'])
AnalyticsForm = Event(['form'])
APIForm = Event(['form'])
AppearanceForm = Event(['form'])
CommentsForm = Event(['form'])
GeneralForm = Event(['form'])
NotificationsForm = Event(['form'])
PopularityForm = Event(['form'])
SiteMapsForm = Event(['form'])
UploadForm = Event(['form'])
class SettingsController(object):
advertising_save = Event(['**kwargs'])
analytics_save = Event(['**kwargs'])
appearance_save = Event(['**kwargs'])
comments_save = Event(['**kwargs'])
googleapi_save = Event(['**kwargs'])
general_save = Event(['**kwargs'])
notifications_save = Event(['**kwargs'])
popularity_save = Event(['**kwargs'])
# probably this event will be renamed to 'api_save' in a future version
save_api = Event(['**kwargs'])
sitemaps_save = Event(['**kwargs'])
upload_save = Event(['**kwargs'])
class Storage(object):
LocalFileStorageForm = Event(['form'])
FTPStorageForm = Event(['form'])
RemoteURLStorageForm = Event(['form'])
class StorageController(object):
delete = Event(['**kwargs'])
disable = Event(['**kwargs'])
edit = Event(['**kwargs'])
enable = Event(['**kwargs'])
index = Event(['**kwargs'])
class API(object):
class MediaController(object):
index = Event(['**kwargs'])
get = Event(['**kwargs'])
class CategoriesController(object):
index = Event(['**kwargs'])
more = Event(['**kwargs'])
# feed observers (if they are not marked as "run_before=True") must support
# pure string output (from beaker cache) instead of a dict with template
# variables.
feed = Event(['limit', '**kwargs'])
class ErrorController(object):
document = Event(['**kwargs'])
report = Event(['**kwargs'])
class LoginController(object):
login = Event(['**kwargs'])
login_handler = Event(['**kwargs'])
logout_handler = Event(['**kwargs'])
post_login = Event(['**kwargs'])
post_logout = Event(['**kwargs'])
class MediaController(object):
index = Event(['**kwargs'])
comment = Event(['**kwargs'])
explore = Event(['**kwargs'])
embed_player = Event(['xhtml'])
jwplayer_rtmp_mrss = Event(['**kwargs'])
rate = Event(['**kwargs'])
view = Event(['**kwargs'])
class PodcastsController(object):
index = Event(['**kwargs'])
view = Event(['**kwargs'])
feed = Event(['**kwargs'])
class SitemapsController(object):
# observers (if they are not marked as "run_before=True") must support pure
# string output (from beaker cache) instead of a dict with template variables.
google = Event(['page', 'limit', '**kwargs'])
mrss = Event(['**kwargs'])
latest = Event(['limit', 'skip', '**kwargs'])
featured = Event(['limit', 'skip', '**kwargs'])
class UploadController(object):
index = Event(['**kwargs'])
submit = Event(['**kwargs'])
submit_async = Event(['**kwargs'])
success = Event(['**kwargs'])
failure = Event(['**kwargs'])
###############################################################################
# Models
class Media(object):
before_delete = Event(['instance'])
after_delete = Event(['instance'])
before_insert = Event(['instance'])
after_insert = Event(['instance'])
before_update = Event(['instance'])
after_update = Event(['instance'])
# event is triggered when the encoding status changes from 'not encoded' to
# 'encoded'
encoding_done = Event(['instance'])
class MediaFile(object):
before_delete = Event(['instance'])
after_delete = Event(['instance'])
before_insert = Event(['instance'])
after_insert = Event(['instance'])
before_update = Event(['instance'])
after_update = Event(['instance'])
class Podcast(object):
before_delete = Event(['instance'])
after_delete = Event(['instance'])
before_insert = Event(['instance'])
after_insert = Event(['instance'])
before_update = Event(['instance'])
after_update = Event(['instance'])
class Comment(object):
before_delete = Event(['instance'])
after_delete = Event(['instance'])
before_insert = Event(['instance'])
after_insert = Event(['instance'])
before_update = Event(['instance'])
after_update = Event(['instance'])
class Category(object):
before_delete = Event(['instance'])
after_delete = Event(['instance'])
before_insert = Event(['instance'])
after_insert = Event(['instance'])
before_update = Event(['instance'])
after_update = Event(['instance'])
class Tag(object):
before_delete = Event(['instance'])
after_delete = Event(['instance'])
before_insert = Event(['instance'])
after_insert = Event(['instance'])
before_update = Event(['instance'])
after_update = Event(['instance'])
class Setting(object):
before_delete = Event(['instance'])
after_delete = Event(['instance'])
before_insert = Event(['instance'])
after_insert = Event(['instance'])
before_update = Event(['instance'])
after_update = Event(['instance'])
class MultiSetting(object):
before_delete = Event(['instance'])
after_delete = Event(['instance'])
before_insert = Event(['instance'])
after_insert = Event(['instance'])
before_update = Event(['instance'])
after_update = Event(['instance'])
class User(object):
before_delete = Event(['instance'])
after_delete = Event(['instance'])
before_insert = Event(['instance'])
after_insert = Event(['instance'])
before_update = Event(['instance'])
after_update = Event(['instance'])
###############################################################################
# Forms
PostCommentForm = Event(['form'])
UploadForm = Event(['form'])
LoginForm = Event(['form'])
Admin.CategoryForm = Event(['form'])
Admin.CategoryRowForm = Event(['form'])
Admin.EditCommentForm = Event(['form'])
Admin.MediaForm = Event(['form'])
Admin.AddFileForm = Event(['form'])
Admin.EditFileForm = Event(['form'])
Admin.UpdateStatusForm = Event(['form'])
Admin.SearchForm = Event(['form'])
Admin.PodcastForm = Event(['form'])
Admin.PodcastFilterForm = Event(['form'])
Admin.UserForm = Event(['form'])
Admin.GroupForm = Event(['form'])
Admin.TagForm = Event(['form'])
Admin.TagRowForm = Event(['form'])
Admin.ThumbForm = Event(['form'])
###############################################################################
# Miscellaneous... may require refactoring
media_types = GeneratorEvent([])
plugin_settings_links = GeneratorEvent([])
EncodeMediaFile = Event(['media_file'])
page_title = FetchFirstResultEvent('default=None, category=None, \
media=None, podcast=None, upload=None, **kwargs')
meta_keywords = FetchFirstResultEvent('category=None, media=None, \
podcast=None, upload=None, **kwargs')
meta_description = FetchFirstResultEvent('category=None, media=None, \
podcast=None, upload=None, **kwargs')
meta_robots_noindex = FetchFirstResultEvent('categories=None, rss=None, **kwargs')
| gpl-3.0 | 1,169,767,692,310,516,000 | 32.075472 | 102 | 0.59063 | false |
fleeto/SS.Moker | dataloader.py | 1 | 2747 | import re
import os.path
import urllib
import json
import imp
import sys
import string
left_sign = "_|"
right_sign = "|_"
base_path = os.path.dirname(os.path.realpath(sys.argv[0]))
def get_data(method, path):
"""
According to the requested URL and HTTP method, search mock_index.json, then parse the parameters
in the matching data file and, finally, return the result to the client.
:param method: "POST" or "GET"
:param path: "URL"
:return: http_status_code and message_string
"""
f = open(os.path.join(base_path, 'mock_index.json'), 'r')
config_str = f.read()
f.close()
config = json.loads(config_str)
for service in config:
if method in service['method']:
path_pattern = service['path']
if re.match(path_pattern, path):
sys.stderr.write("opening '%s' ....\n" % os.path.join(base_path, 'data', service['datafile']))
fi = open(os.path.join(base_path, 'data', service['datafile']), 'r')
data = fi.read()
data = data.decode('utf-8')
data = parse_data(data)
data = data.encode(service['response']['encoding'])
if service['response']['url']:
data = urllib.quote_plus(data)
return 200, data, service
return 404, ''
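# Illustrative sketch (not part of the repository): one mock_index.json entry
# that get_data() above could match; all field values here are made up.
#
#     [
#         {
#             "method": "GET|POST",
#             "path": "^/api/user/\\d+$",
#             "datafile": "user.json",
#             "response": {"encoding": "utf-8", "url": false}
#         }
#     ]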
def parse_data(data):
"""
Get the parameters from the data file contents, then process each parameter to obtain its value.
:param data: string read from the data file
:return: the data string with all parameters replaced by their values
"""
rex = re.compile(re.escape(left_sign) + "(.*?)" + re.escape(right_sign))
params = rex.findall(data)
result_list = {}
if len(params) > 0:
for param in params:
r_param = string.replace(param, "'", '"')
result_list[left_sign + param + right_sign] = process_param(json.loads(r_param))
for k, v in result_list.items():
data = string.replace(data, k, v)
return data
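# Illustrative sketch (not part of the repository): a data file fragment and
# what parse_data() does with it. The plugin type/method follow the example in
# process_param()'s docstring below.
#
#     {"token": "_|{'type': 'datafile', 'method': 'random'}|_"}
#
# parse_data() grabs the text between "_|" and "|_", swaps single quotes for
# double quotes, loads it as JSON, and replaces the whole marker with whatever
# process_param() returns for it.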
def process_param(param):
"""
Find the right plug-in to process the parameter.
param = {}
param['type'] = 'datafile'        # the parameter type, and the corresponding python file name in the plugins dir.
param['method'] = 'random'        # each plugin can have more than one method to get values.
param['param1'] = 'param1_value'  # any other keys are passed through to the plugin.
:param param: parameter dict parsed from the data file
:return: the parameter's value
"""
plugin_type = param['type']
sys.stderr.write("Plug in %s is started \n" % plugin_type)
param_mod = imp.load_source(plugin_type, os.path.join(base_path, 'plugins', plugin_type + ".py"))
param_value = param_mod.get_value(param)
return param_value
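# Illustrative sketch (not part of the repository): the smallest plugin module
# process_param() can load, e.g. saved as plugins/echo.py ("echo" is a made-up
# type name). It only needs to expose get_value(param) and return a string.
#
#     def get_value(param):
#         # 'method' and any extra keys come straight from the data file marker
#         return str(param.get('param1', ''))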
| lgpl-3.0 | 2,996,529,063,696,241,700 | 35.144737 | 122 | 0.609028 | false |
codocedo/fca | examples/algorithms_addIntent/ex7_ps_partitions.py | 1 | 2623 | """
FCA - Python libraries to support FCA tasks
Copyright (C) 2017 Victor Codocedo
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Kyori code.
from __future__ import print_function
import argparse
from fca.defs.patterns.hypergraphs import PartitionPattern
from fca.algorithms import dict_printer
from fca.algorithms.addIntent import AddIntent
from fca.io import read_representations
from fca.io.transformers import List2IntervalsTransformer
class List2PartitionsTransformer(List2IntervalsTransformer):
"""
Transforms a list of values to a partition containing equivalence classes of indices
[0,1,0,1,1] -> [set([0,2]), set([1,3,4])]
"""
def real_attributes(self, *args):
return list([tuple(sorted(i)) for i in args])
def parse(self, lst):
hashes = {}
for i, j in enumerate(lst):
hashes.setdefault(j, []).append(i)
return [set(i) for i in hashes.values()]
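# Illustrative sketch (not part of the original example): what parse() yields
# for one column of the context, matching the docstring above (the order of
# the equivalence classes may vary).
#
#     List2PartitionsTransformer(int).parse([0, 1, 0, 1, 1])
#     # -> [set([0, 2]), set([1, 3, 4])]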
def exec_ex7(filepath):
"""
Example 7 - Partition Pattern Structures with AddIntent
Generates partitions based on equivalence classes,
using a custom Transformer (List2PartitionsTransformer)
"""
dict_printer(
AddIntent(
read_representations(
filepath,
transformer=List2PartitionsTransformer(int),
transposed=True,
file_manager_params={'style': 'tab'}
),
pattern=PartitionPattern,
lazy=False,
silent=False
).lat
)
if __name__ == '__main__':
__parser__ = argparse.ArgumentParser(description='Example 7 - Partition Pattern Structures with AddIntent:\n Generates partitions based on equivalence classes,\n using a custom Transformer (List2PartitionsTransformer)\n ')
__parser__.add_argument('context_path', metavar='context_path', type=str, help='path to the formal context')
__args__ = __parser__.parse_args()
exec_ex7(__args__.context_path)
# okay decompiling ex7_hyg_pat.pyc
| gpl-3.0 | 3,497,951,857,735,478,300 | 37.014493 | 289 | 0.677087 | false |
bdupharm/sqlalchemy | test/orm/test_joins.py | 2 | 100495 | from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
import operator
from sqlalchemy import *
from sqlalchemy import exc as sa_exc, util
from sqlalchemy.sql import compiler, table, column
from sqlalchemy.engine import default
from sqlalchemy.orm import *
from sqlalchemy.orm import attributes
from sqlalchemy.testing import eq_
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.testing import AssertsCompiledSQL, engines
from sqlalchemy.testing.schema import Column
from test.orm import _fixtures
from sqlalchemy.testing import fixtures
from sqlalchemy.orm.util import join, outerjoin, with_parent
class QueryTest(_fixtures.FixtureTest):
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def setup_mappers(cls):
Node, composite_pk_table, users, Keyword, items, Dingaling, \
order_items, item_keywords, Item, User, dingalings, \
Address, keywords, CompositePk, nodes, Order, orders, \
addresses = cls.classes.Node, \
cls.tables.composite_pk_table, cls.tables.users, \
cls.classes.Keyword, cls.tables.items, \
cls.classes.Dingaling, cls.tables.order_items, \
cls.tables.item_keywords, cls.classes.Item, \
cls.classes.User, cls.tables.dingalings, \
cls.classes.Address, cls.tables.keywords, \
cls.classes.CompositePk, cls.tables.nodes, \
cls.classes.Order, cls.tables.orders, cls.tables.addresses
mapper(User, users, properties={
'addresses':relationship(Address, backref='user', order_by=addresses.c.id),
'orders':relationship(Order, backref='user', order_by=orders.c.id), # o2m, m2o
})
mapper(Address, addresses, properties={
'dingaling':relationship(Dingaling, uselist=False, backref="address") #o2o
})
mapper(Dingaling, dingalings)
mapper(Order, orders, properties={
'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m
'address':relationship(Address), # m2o
})
mapper(Item, items, properties={
'keywords':relationship(Keyword, secondary=item_keywords) #m2m
})
mapper(Keyword, keywords)
mapper(Node, nodes, properties={
'children':relationship(Node,
backref=backref('parent', remote_side=[nodes.c.id])
)
})
mapper(CompositePk, composite_pk_table)
configure_mappers()
class InheritedJoinTest(fixtures.MappedTest, AssertsCompiledSQL):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
Table('companies', metadata,
Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(50)))
Table('people', metadata,
Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('company_id', Integer, ForeignKey('companies.company_id')),
Column('name', String(50)),
Column('type', String(30)))
Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True),
Column('status', String(30)),
Column('engineer_name', String(50)),
Column('primary_language', String(50)),
)
Table('machines', metadata,
Column('machine_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(50)),
Column('engineer_id', Integer, ForeignKey('engineers.person_id')))
Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True),
Column('status', String(30)),
Column('manager_name', String(50))
)
Table('boss', metadata,
Column('boss_id', Integer, ForeignKey('managers.person_id'), primary_key=True),
Column('golf_swing', String(30)),
)
Table('paperwork', metadata,
Column('paperwork_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('description', String(50)),
Column('person_id', Integer, ForeignKey('people.person_id')))
@classmethod
def setup_classes(cls):
paperwork, people, companies, boss, managers, machines, engineers = (cls.tables.paperwork,
cls.tables.people,
cls.tables.companies,
cls.tables.boss,
cls.tables.managers,
cls.tables.machines,
cls.tables.engineers)
class Company(cls.Comparable):
pass
class Person(cls.Comparable):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
class Boss(Manager):
pass
class Machine(cls.Comparable):
pass
class Paperwork(cls.Comparable):
pass
mapper(Company, companies, properties={
'employees':relationship(Person, order_by=people.c.person_id)
})
mapper(Machine, machines)
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person',
properties={
'paperwork':relationship(Paperwork, order_by=paperwork.c.paperwork_id)
})
mapper(Engineer, engineers, inherits=Person, polymorphic_identity='engineer', properties={
'machines':relationship(Machine, order_by=machines.c.machine_id)
})
mapper(Manager, managers,
inherits=Person, polymorphic_identity='manager')
mapper(Boss, boss, inherits=Manager, polymorphic_identity='boss')
mapper(Paperwork, paperwork)
def test_single_prop(self):
Company = self.classes.Company
sess = create_session()
self.assert_compile(
sess.query(Company).join(Company.employees),
"SELECT companies.company_id AS companies_company_id, companies.name AS companies_name "
"FROM companies JOIN people ON companies.company_id = people.company_id"
, use_default_dialect = True
)
def test_force_via_select_from(self):
Company, Engineer = self.classes.Company, self.classes.Engineer
sess = create_session()
self.assert_compile(
sess.query(Company).\
filter(Company.company_id==Engineer.company_id).\
filter(Engineer.primary_language=='java'),
"SELECT companies.company_id AS companies_company_id, companies.name AS companies_name "
"FROM companies, people, engineers "
"WHERE companies.company_id = people.company_id AND engineers.primary_language "
"= :primary_language_1",
use_default_dialect=True
)
self.assert_compile(
sess.query(Company).select_from(Company, Engineer).\
filter(Company.company_id==Engineer.company_id).\
filter(Engineer.primary_language=='java'),
"SELECT companies.company_id AS companies_company_id, companies.name AS companies_name "
"FROM companies, people JOIN engineers ON people.person_id = engineers.person_id "
"WHERE companies.company_id = people.company_id AND engineers.primary_language ="
" :primary_language_1",
use_default_dialect=True
)
def test_single_prop_of_type(self):
Company, Engineer = self.classes.Company, self.classes.Engineer
sess = create_session()
self.assert_compile(
sess.query(Company).join(Company.employees.of_type(Engineer)),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN "
"(people JOIN engineers ON people.person_id = engineers.person_id) "
"ON companies.company_id = people.company_id"
, use_default_dialect = True
)
def test_prop_with_polymorphic_1(self):
Person, Manager, Paperwork = (self.classes.Person,
self.classes.Manager,
self.classes.Paperwork)
sess = create_session()
self.assert_compile(
sess.query(Person).with_polymorphic(Manager).
order_by(Person.person_id).
join('paperwork').filter(Paperwork.description.like('%review%')),
"SELECT people.person_id AS people_person_id, people.company_id AS"
" people_company_id, "
"people.name AS people_name, people.type AS people_type, managers.person_id "
"AS managers_person_id, "
"managers.status AS managers_status, managers.manager_name AS "
"managers_manager_name FROM people "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id JOIN "
"paperwork ON people.person_id = "
"paperwork.person_id WHERE paperwork.description LIKE :description_1 "
"ORDER BY people.person_id"
, use_default_dialect=True
)
def test_prop_with_polymorphic_2(self):
Person, Manager, Paperwork = (self.classes.Person,
self.classes.Manager,
self.classes.Paperwork)
sess = create_session()
self.assert_compile(
sess.query(Person).with_polymorphic(Manager).
order_by(Person.person_id).
join('paperwork', aliased=True).
filter(Paperwork.description.like('%review%')),
"SELECT people.person_id AS people_person_id, people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, managers.person_id "
"AS managers_person_id, "
"managers.status AS managers_status, managers.manager_name AS managers_manager_name "
"FROM people LEFT OUTER JOIN managers ON people.person_id = managers.person_id JOIN "
"paperwork AS paperwork_1 ON people.person_id = paperwork_1.person_id "
"WHERE paperwork_1.description LIKE :description_1 ORDER BY people.person_id"
, use_default_dialect=True
)
def test_explicit_polymorphic_join_one(self):
Company, Engineer = self.classes.Company, self.classes.Engineer
sess = create_session()
self.assert_compile(
sess.query(Company).join(Engineer).filter(Engineer.engineer_name=='vlad'),
"SELECT companies.company_id AS companies_company_id, companies.name AS "
"companies_name "
"FROM companies JOIN (people JOIN engineers "
"ON people.person_id = engineers.person_id) "
"ON "
"companies.company_id = people.company_id "
"WHERE engineers.engineer_name = :engineer_name_1"
, use_default_dialect=True
)
def test_explicit_polymorphic_join_two(self):
Company, Engineer = self.classes.Company, self.classes.Engineer
sess = create_session()
self.assert_compile(
sess.query(Company).join(Engineer, Company.company_id==Engineer.company_id).
filter(Engineer.engineer_name=='vlad'),
"SELECT companies.company_id AS companies_company_id, companies.name "
"AS companies_name "
"FROM companies JOIN "
"(people JOIN engineers ON people.person_id = engineers.person_id) "
"ON "
"companies.company_id = people.company_id "
"WHERE engineers.engineer_name = :engineer_name_1"
, use_default_dialect=True
)
def test_multiple_adaption(self):
"""test that multiple filter() adapters get chained together "
and work correctly within a multiple-entry join()."""
people, Company, Machine, engineers, machines, Engineer = (self.tables.people,
self.classes.Company,
self.classes.Machine,
self.tables.engineers,
self.tables.machines,
self.classes.Engineer)
sess = create_session()
self.assert_compile(
sess.query(Company).join(people.join(engineers), Company.employees).
filter(Engineer.name=='dilbert'),
"SELECT companies.company_id AS companies_company_id, companies.name AS "
"companies_name "
"FROM companies JOIN (people "
"JOIN engineers ON people.person_id = "
"engineers.person_id) ON companies.company_id = "
"people.company_id WHERE people.name = :name_1"
, use_default_dialect = True
)
mach_alias = machines.select()
self.assert_compile(
sess.query(Company).join(people.join(engineers), Company.employees).
join(mach_alias, Engineer.machines, from_joinpoint=True).
filter(Engineer.name=='dilbert').filter(Machine.name=='foo'),
"SELECT companies.company_id AS companies_company_id, companies.name AS "
"companies_name "
"FROM companies JOIN (people "
"JOIN engineers ON people.person_id = "
"engineers.person_id) ON companies.company_id = "
"people.company_id JOIN "
"(SELECT machines.machine_id AS machine_id, machines.name AS name, "
"machines.engineer_id AS engineer_id "
"FROM machines) AS anon_1 ON engineers.person_id = anon_1.engineer_id "
"WHERE people.name = :name_1 AND anon_1.name = :name_2"
, use_default_dialect = True
)
def test_auto_aliasing_multi_link(self):
# test [ticket:2903]
sess = create_session()
Company, Engineer, Manager, Boss = self.classes.Company, \
self.classes.Engineer, \
self.classes.Manager, self.classes.Boss
q = sess.query(Company).\
join(Company.employees.of_type(Engineer)).\
join(Company.employees.of_type(Manager)).\
join(Company.employees.of_type(Boss))
self.assert_compile(q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name FROM companies "
"JOIN (people JOIN engineers ON people.person_id = engineers.person_id) "
"ON companies.company_id = people.company_id "
"JOIN (people AS people_1 JOIN managers AS managers_1 "
"ON people_1.person_id = managers_1.person_id) "
"ON companies.company_id = people_1.company_id "
"JOIN (people AS people_2 JOIN managers AS managers_2 "
"ON people_2.person_id = managers_2.person_id JOIN boss AS boss_1 "
"ON managers_2.person_id = boss_1.boss_id) "
"ON companies.company_id = people_2.company_id",
use_default_dialect=True
)
class JoinOnSynonymTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_mappers(cls):
User = cls.classes.User
Address = cls.classes.Address
users, addresses = (cls.tables.users, cls.tables.addresses)
mapper(User, users, properties={
'addresses': relationship(Address),
'ad_syn': synonym("addresses")
})
mapper(Address, addresses)
def test_join_on_synonym(self):
User = self.classes.User
self.assert_compile(
Session().query(User).join(User.ad_syn),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id"
)
class JoinTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_single_name(self):
User = self.classes.User
sess = create_session()
self.assert_compile(
sess.query(User).join("orders"),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN orders ON users.id = orders.user_id"
)
assert_raises(
sa_exc.InvalidRequestError,
sess.query(User).join, "user",
)
self.assert_compile(
sess.query(User).join("orders", "items"),
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"JOIN orders ON users.id = orders.user_id JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id JOIN items ON items.id = order_items_1.item_id"
)
# test overlapping paths. User->orders is used by both joins, but rendered once.
self.assert_compile(
sess.query(User).join("orders", "items").join("orders", "address"),
"SELECT users.id AS users_id, users.name AS users_name FROM users JOIN orders "
"ON users.id = orders.user_id JOIN order_items AS order_items_1 ON orders.id = "
"order_items_1.order_id JOIN items ON items.id = order_items_1.item_id JOIN addresses "
"ON addresses.id = orders.address_id"
)
def test_invalid_kwarg_join(self):
User = self.classes.User
sess = create_session()
assert_raises_message(
TypeError,
"unknown arguments: bar, foob",
sess.query(User).join, "address", foob="bar", bar="bat"
)
assert_raises_message(
TypeError,
"unknown arguments: bar, foob",
sess.query(User).outerjoin, "address", foob="bar", bar="bat"
)
def test_left_is_none(self):
User = self.classes.User
Address = self.classes.Address
sess = create_session()
assert_raises_message(
sa_exc.InvalidRequestError,
"Don't know how to join from x; please use select_from\(\) to "
"establish the left entity/selectable of this join",
sess.query(literal_column('x'), User).join, Address
)
def test_isouter_flag(self):
User = self.classes.User
self.assert_compile(
create_session().query(User).join('orders', isouter=True),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users LEFT OUTER JOIN orders ON users.id = orders.user_id"
)
def test_full_flag(self):
User = self.classes.User
self.assert_compile(
create_session().query(User).outerjoin('orders', full=True),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users FULL OUTER JOIN orders ON users.id = orders.user_id"
)
def test_multi_tuple_form(self):
"""test the 'tuple' form of join, now superseded
by the two-element join() form.
Not deprecating this style as of yet.
"""
Item, Order, User = (self.classes.Item,
self.classes.Order,
self.classes.User)
sess = create_session()
#assert_raises(
# sa.exc.SADeprecationWarning,
# sess.query(User).join, (Order, User.id==Order.user_id)
#)
self.assert_compile(
sess.query(User).join((Order, User.id == Order.user_id)),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN orders ON users.id = orders.user_id",
)
self.assert_compile(
sess.query(User).join(
(Order, User.id == Order.user_id),
(Item, Order.items)),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN orders ON users.id = orders.user_id "
"JOIN order_items AS order_items_1 ON orders.id = "
"order_items_1.order_id JOIN items ON items.id = "
"order_items_1.item_id",
)
# the old "backwards" form
self.assert_compile(
sess.query(User).join(("orders", Order)),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN orders ON users.id = orders.user_id",
)
def test_single_prop_1(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
self.assert_compile(
sess.query(User).join(User.orders),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN orders ON users.id = orders.user_id"
)
def test_single_prop_2(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
self.assert_compile(
sess.query(User).join(Order.user),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM orders JOIN users ON users.id = orders.user_id"
)
def test_single_prop_3(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
oalias1 = aliased(Order)
self.assert_compile(
sess.query(User).join(oalias1.user),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM orders AS orders_1 JOIN users ON users.id = orders_1.user_id"
)
def test_single_prop_4(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
oalias1 = aliased(Order)
oalias2 = aliased(Order)
# another nonsensical query. (from [ticket:1537]).
# in this case, the contract of "left to right" is honored
self.assert_compile(
sess.query(User).join(oalias1.user).join(oalias2.user),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM orders AS orders_1 JOIN users ON users.id = orders_1.user_id, "
"orders AS orders_2 JOIN users ON users.id = orders_2.user_id"
)
def test_single_prop_5(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
self.assert_compile(
sess.query(User).join(User.orders, Order.items),
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"JOIN orders ON users.id = orders.user_id JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id JOIN items ON items.id = order_items_1.item_id"
)
def test_single_prop_6(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
ualias = aliased(User)
self.assert_compile(
sess.query(ualias).join(ualias.orders),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 JOIN orders ON users_1.id = orders.user_id"
)
def test_single_prop_7(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
# this query is somewhat nonsensical. the old system didn't render a correct
# query for this. In this case its the most faithful to what was asked -
# there's no linkage between User.orders and "oalias", so two FROM elements
# are generated.
oalias = aliased(Order)
self.assert_compile(
sess.query(User).join(User.orders, oalias.items),
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"JOIN orders ON users.id = orders.user_id, "
"orders AS orders_1 JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id "
"JOIN items ON items.id = order_items_1.item_id"
)
def test_single_prop_8(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
# same as before using an aliased() for User as well
ualias = aliased(User)
oalias = aliased(Order)
self.assert_compile(
sess.query(ualias).join(ualias.orders, oalias.items),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users AS users_1 "
"JOIN orders ON users_1.id = orders.user_id, "
"orders AS orders_1 JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id "
"JOIN items ON items.id = order_items_1.item_id"
)
def test_single_prop_9(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
self.assert_compile(
sess.query(User).filter(User.name == 'ed').from_self().
join(User.orders),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users "
"WHERE users.name = :name_1) AS anon_1 JOIN orders ON anon_1.users_id = orders.user_id"
)
def test_single_prop_10(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
self.assert_compile(
sess.query(User).join(User.addresses, aliased=True).
filter(Address.email_address == 'foo'),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id "
"WHERE addresses_1.email_address = :email_address_1"
)
def test_single_prop_11(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
self.assert_compile(
sess.query(User).join(User.orders, Order.items, aliased=True).
filter(Item.id == 10),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN orders AS orders_1 ON users.id = orders_1.user_id "
"JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id "
"JOIN items AS items_1 ON items_1.id = order_items_1.item_id "
"WHERE items_1.id = :id_1"
)
def test_single_prop_12(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
oalias1 = aliased(Order)
# test #1 for [ticket:1706]
ualias = aliased(User)
self.assert_compile(
sess.query(ualias).
join(oalias1, ualias.orders).\
join(Address, ualias.addresses),
"SELECT users_1.id AS users_1_id, users_1.name AS "
"users_1_name FROM users AS users_1 JOIN orders AS orders_1 "
"ON users_1.id = orders_1.user_id JOIN addresses ON users_1.id "
"= addresses.user_id"
)
def test_single_prop_13(self):
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
# test #2 for [ticket:1706]
ualias = aliased(User)
ualias2 = aliased(User)
self.assert_compile(
sess.query(ualias).
join(Address, ualias.addresses).
join(ualias2, Address.user).
join(Order, ualias.orders),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users "
"AS users_1 JOIN addresses ON users_1.id = addresses.user_id JOIN users AS users_2 "
"ON users_2.id = addresses.user_id JOIN orders ON users_1.id = orders.user_id"
)
def test_overlapping_paths(self):
User = self.classes.User
for aliased in (True,False):
# load a user who has an order that contains item id 3 and address id 1 (order 3, owned by jack)
result = create_session().query(User).join('orders', 'items', aliased=aliased).\
filter_by(id=3).join('orders','address', aliased=aliased).filter_by(id=1).all()
assert [User(id=7, name='jack')] == result
def test_overlapping_paths_multilevel(self):
User = self.classes.User
s = Session()
q = s.query(User).\
join('orders').\
join('addresses').\
join('orders', 'items').\
join('addresses', 'dingaling')
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN orders ON users.id = orders.user_id "
"JOIN addresses ON users.id = addresses.user_id "
"JOIN order_items AS order_items_1 ON orders.id = "
"order_items_1.order_id "
"JOIN items ON items.id = order_items_1.item_id "
"JOIN dingalings ON addresses.id = dingalings.address_id"
)
def test_overlapping_paths_outerjoin(self):
User = self.classes.User
result = create_session().query(User).outerjoin('orders', 'items').\
filter_by(id=3).outerjoin('orders','address').filter_by(id=1).all()
assert [User(id=7, name='jack')] == result
def test_raises_on_dupe_target_rel(self):
User = self.classes.User
assert_raises_message(
sa.exc.SAWarning,
"Pathed join target Order.items has already been joined to; "
"skipping",
lambda: create_session().query(User).outerjoin('orders', 'items').\
outerjoin('orders', 'items')
)
def test_from_joinpoint(self):
Item, User, Order = (self.classes.Item,
self.classes.User,
self.classes.Order)
sess = create_session()
for oalias,ialias in [(True, True), (False, False), (True, False), (False, True)]:
eq_(
sess.query(User).join('orders', aliased=oalias).\
join('items',
from_joinpoint=True,
aliased=ialias).\
filter(Item.description == 'item 4').all(),
[User(name='jack')]
)
# use middle criterion
eq_(
sess.query(User).join('orders', aliased=oalias).\
filter(Order.user_id==9).\
join('items', from_joinpoint=True,
aliased=ialias).\
filter(Item.description=='item 4').all(),
[]
)
orderalias = aliased(Order)
itemalias = aliased(Item)
eq_(
sess.query(User).join(orderalias, 'orders').
join(itemalias, 'items', from_joinpoint=True).
filter(itemalias.description == 'item 4').all(),
[User(name='jack')]
)
eq_(
sess.query(User).join(orderalias, 'orders').
join(itemalias, 'items', from_joinpoint=True).
filter(orderalias.user_id==9).\
filter(itemalias.description=='item 4').all(),
[]
)
def test_join_nonmapped_column(self):
"""test that the search for a 'left' doesn't trip on non-mapped cols"""
Order, User = self.classes.Order, self.classes.User
sess = create_session()
# intentionally join() with a non-existent "left" side
self.assert_compile(
sess.query(User.id, literal_column('foo')).join(Order.user),
"SELECT users.id AS users_id, foo FROM "
"orders JOIN users ON users.id = orders.user_id"
)
def test_backwards_join(self):
User, Address = self.classes.User, self.classes.Address
# a more controversial feature. join from
# User->Address, but the onclause is Address.user.
sess = create_session()
eq_(
sess.query(User).join(Address.user).\
filter(Address.email_address=='[email protected]').all(),
[User(id=8,name='ed')]
)
        # it's actually not so controversial if you view it in terms
# of multiple entities.
eq_(
sess.query(User, Address).join(Address.user).filter(Address.email_address=='[email protected]').all(),
[(User(id=8,name='ed'), Address(email_address='[email protected]'))]
)
# this was the controversial part. now, raise an error if the feature is abused.
# before the error raise was added, this would silently work.....
assert_raises(
sa_exc.InvalidRequestError,
sess.query(User).join, Address, Address.user,
)
# but this one would silently fail
adalias = aliased(Address)
assert_raises(
sa_exc.InvalidRequestError,
sess.query(User).join, adalias, Address.user,
)
def test_multiple_with_aliases(self):
Order, User = self.classes.Order, self.classes.User
sess = create_session()
ualias = aliased(User)
oalias1 = aliased(Order)
oalias2 = aliased(Order)
self.assert_compile(
sess.query(ualias).join(oalias1, ualias.orders).
join(oalias2, ualias.orders).
filter(or_(oalias1.user_id==9, oalias2.user_id==7)),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users AS users_1 "
"JOIN orders AS orders_1 ON users_1.id = orders_1.user_id JOIN orders AS orders_2 ON "
"users_1.id = orders_2.user_id WHERE orders_1.user_id = :user_id_1 OR orders_2.user_id = :user_id_2",
use_default_dialect=True
)
def test_select_from_orm_joins(self):
User, Order = self.classes.User, self.classes.Order
sess = create_session()
ualias = aliased(User)
oalias1 = aliased(Order)
oalias2 = aliased(Order)
self.assert_compile(
join(User, oalias2, User.id==oalias2.user_id),
"users JOIN orders AS orders_1 ON users.id = orders_1.user_id",
use_default_dialect=True
)
self.assert_compile(
join(ualias, oalias1, ualias.orders),
"users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id",
use_default_dialect=True
)
self.assert_compile(
sess.query(ualias).select_from(join(ualias, oalias1, ualias.orders)),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users AS users_1 "
"JOIN orders AS orders_1 ON users_1.id = orders_1.user_id",
use_default_dialect=True
)
self.assert_compile(
sess.query(User, ualias).select_from(join(ualias, oalias1, ualias.orders)),
"SELECT users.id AS users_id, users.name AS users_name, users_1.id AS users_1_id, "
"users_1.name AS users_1_name FROM users, users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id",
use_default_dialect=True
)
        # this fails (and we can't quite fix it right now).
if False:
self.assert_compile(
sess.query(User, ualias).\
join(oalias1, ualias.orders).\
join(oalias2, User.id==oalias2.user_id).\
filter(or_(oalias1.user_id==9, oalias2.user_id==7)),
"SELECT users.id AS users_id, users.name AS users_name, users_1.id AS users_1_id, users_1.name AS "
"users_1_name FROM users JOIN orders AS orders_2 ON users.id = orders_2.user_id, "
"users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id "
"WHERE orders_1.user_id = :user_id_1 OR orders_2.user_id = :user_id_2",
use_default_dialect=True
)
# this is the same thing using explicit orm.join() (which now offers multiple again)
self.assert_compile(
sess.query(User, ualias).\
select_from(
join(ualias, oalias1, ualias.orders),
join(User, oalias2, User.id==oalias2.user_id),
).\
filter(or_(oalias1.user_id==9, oalias2.user_id==7)),
"SELECT users.id AS users_id, users.name AS users_name, users_1.id AS users_1_id, users_1.name AS "
"users_1_name FROM users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id, "
"users JOIN orders AS orders_2 ON users.id = orders_2.user_id "
"WHERE orders_1.user_id = :user_id_1 OR orders_2.user_id = :user_id_2",
use_default_dialect=True
)
def test_overlapping_backwards_joins(self):
User, Order = self.classes.User, self.classes.Order
sess = create_session()
oalias1 = aliased(Order)
oalias2 = aliased(Order)
# this is invalid SQL - joins from orders_1/orders_2 to User twice.
# but that is what was asked for so they get it !
self.assert_compile(
sess.query(User).join(oalias1.user).join(oalias2.user),
"SELECT users.id AS users_id, users.name AS users_name FROM orders AS orders_1 "
"JOIN users ON users.id = orders_1.user_id, orders AS orders_2 JOIN users ON users.id = orders_2.user_id",
use_default_dialect=True,
)
def test_replace_multiple_from_clause(self):
"""test adding joins onto multiple FROM clauses"""
User, Order, Address = (self.classes.User,
self.classes.Order,
self.classes.Address)
sess = create_session()
self.assert_compile(
sess.query(Address, User).join(Address.dingaling).join(User.orders, Order.items),
"SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address, users.id AS users_id, "
"users.name AS users_name FROM addresses JOIN dingalings ON addresses.id = dingalings.address_id, "
"users JOIN orders ON users.id = orders.user_id JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id JOIN items ON items.id = order_items_1.item_id",
use_default_dialect = True
)
def test_multiple_adaption(self):
Item, Order, User = (self.classes.Item,
self.classes.Order,
self.classes.User)
sess = create_session()
self.assert_compile(
sess.query(User).join(User.orders, Order.items, aliased=True).filter(Order.id==7).filter(Item.id==8),
"SELECT users.id AS users_id, users.name AS users_name FROM users JOIN orders AS orders_1 "
"ON users.id = orders_1.user_id JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id "
"JOIN items AS items_1 ON items_1.id = order_items_1.item_id WHERE orders_1.id = :id_1 AND items_1.id = :id_2",
use_default_dialect=True
)
def test_onclause_conditional_adaption(self):
Item, Order, orders, order_items, User = (self.classes.Item,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.User)
sess = create_session()
# this is now a very weird test, nobody should really
# be using the aliased flag in this way.
self.assert_compile(
sess.query(User).join(User.orders, aliased=True).
join(Item,
and_(Order.id==order_items.c.order_id, order_items.c.item_id==Item.id),
from_joinpoint=True, aliased=True
),
"SELECT users.id AS users_id, users.name AS users_name FROM users JOIN "
"orders AS orders_1 ON users.id = orders_1.user_id JOIN items AS items_1 "
"ON orders_1.id = order_items.order_id AND order_items.item_id = items_1.id",
use_default_dialect=True
)
oalias = orders.select()
self.assert_compile(
sess.query(User).join(oalias, User.orders).
join(Item,
and_(Order.id==order_items.c.order_id, order_items.c.item_id==Item.id),
from_joinpoint=True
),
"SELECT users.id AS users_id, users.name AS users_name FROM users JOIN "
"(SELECT orders.id AS id, orders.user_id AS user_id, orders.address_id AS address_id, orders.description "
"AS description, orders.isopen AS isopen FROM orders) AS anon_1 ON users.id = anon_1.user_id JOIN items "
"ON anon_1.id = order_items.order_id AND order_items.item_id = items.id",
use_default_dialect=True
)
# query.join(<stuff>, aliased=True).join(target, sql_expression)
# or: query.join(path_to_some_joined_table_mapper).join(target, sql_expression)
def test_pure_expression_error(self):
addresses, users = self.tables.addresses, self.tables.users
sess = create_session()
self.assert_compile(
sess.query(users).join(addresses),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id"
)
def test_orderby_arg_bug(self):
User, users, Order = (self.classes.User,
self.tables.users,
self.classes.Order)
sess = create_session()
# no arg error
result = sess.query(User).join('orders', aliased=True).order_by(Order.id).reset_joinpoint().order_by(users.c.id).all()
def test_no_onclause(self):
Item, User, Order = (self.classes.Item,
self.classes.User,
self.classes.Order)
sess = create_session()
eq_(
sess.query(User).select_from(join(User, Order).join(Item, Order.items)).filter(Item.description == 'item 4').all(),
[User(name='jack')]
)
eq_(
sess.query(User.name).select_from(join(User, Order).join(Item, Order.items)).filter(Item.description == 'item 4').all(),
[('jack',)]
)
eq_(
sess.query(User).join(Order).join(Item, Order.items)
.filter(Item.description == 'item 4').all(),
[User(name='jack')]
)
def test_clause_onclause(self):
Item, Order, users, order_items, User = (self.classes.Item,
self.classes.Order,
self.tables.users,
self.tables.order_items,
self.classes.User)
sess = create_session()
eq_(
sess.query(User).join(Order, User.id==Order.user_id).
join(order_items, Order.id==order_items.c.order_id).
join(Item, order_items.c.item_id==Item.id).
filter(Item.description == 'item 4').all(),
[User(name='jack')]
)
eq_(
sess.query(User.name).join(Order, User.id==Order.user_id).
join(order_items, Order.id==order_items.c.order_id).
join(Item, order_items.c.item_id==Item.id).
filter(Item.description == 'item 4').all(),
[('jack',)]
)
ualias = aliased(User)
eq_(
sess.query(ualias.name).join(Order, ualias.id==Order.user_id).
join(order_items, Order.id==order_items.c.order_id).
join(Item, order_items.c.item_id==Item.id).
filter(Item.description == 'item 4').all(),
[('jack',)]
)
# explicit onclause with from_self(), means
# the onclause must be aliased against the query's custom
# FROM object
eq_(
sess.query(User).order_by(User.id).offset(2).
from_self().
join(Order, User.id==Order.user_id).
all(),
[User(name='fred')]
)
# same with an explicit select_from()
eq_(
sess.query(User).select_entity_from(select([users]).
order_by(User.id).offset(2).alias()).
join(Order, User.id==Order.user_id).
all(),
[User(name='fred')]
)
def test_aliased_classes(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
(user7, user8, user9, user10) = sess.query(User).all()
(address1, address2, address3, address4, address5) = sess.query(Address).all()
expected = [(user7, address1),
(user8, address2),
(user8, address3),
(user8, address4),
(user9, address5),
(user10, None)]
q = sess.query(User)
AdAlias = aliased(Address)
q = q.add_entity(AdAlias).select_from(outerjoin(User, AdAlias))
l = q.order_by(User.id, AdAlias.id).all()
eq_(l, expected)
sess.expunge_all()
q = sess.query(User).add_entity(AdAlias)
l = q.select_from(outerjoin(User, AdAlias)).filter(AdAlias.email_address=='[email protected]').all()
eq_(l, [(user8, address3)])
l = q.select_from(outerjoin(User, AdAlias, 'addresses')).filter(AdAlias.email_address=='[email protected]').all()
eq_(l, [(user8, address3)])
l = q.select_from(outerjoin(User, AdAlias, User.id==AdAlias.user_id)).filter(AdAlias.email_address=='[email protected]').all()
eq_(l, [(user8, address3)])
# this is the first test where we are joining "backwards" - from AdAlias to User even though
# the query is against User
q = sess.query(User, AdAlias)
l = q.join(AdAlias.user).filter(User.name=='ed').order_by(User.id, AdAlias.id)
eq_(l.all(), [(user8, address2),(user8, address3),(user8, address4),])
q = sess.query(User, AdAlias).select_from(join(AdAlias, User, AdAlias.user)).filter(User.name=='ed')
        eq_(q.all(), [(user8, address2),(user8, address3),(user8, address4),])
def test_expression_onclauses(self):
Order, User = self.classes.Order, self.classes.User
sess = create_session()
subq = sess.query(User).subquery()
self.assert_compile(
sess.query(User).join(subq, User.name==subq.c.name),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN (SELECT users.id AS id, users.name "
"AS name FROM users) AS anon_1 ON users.name = anon_1.name",
use_default_dialect=True
)
subq = sess.query(Order).subquery()
self.assert_compile(
sess.query(User).join(subq, User.id==subq.c.user_id),
"SELECT users.id AS users_id, users.name AS users_name FROM "
"users JOIN (SELECT orders.id AS id, orders.user_id AS user_id, "
"orders.address_id AS address_id, orders.description AS "
"description, orders.isopen AS isopen FROM orders) AS "
"anon_1 ON users.id = anon_1.user_id",
use_default_dialect=True
)
self.assert_compile(
sess.query(User).join(Order, User.id==Order.user_id),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN orders ON users.id = orders.user_id",
use_default_dialect=True
)
def test_implicit_joins_from_aliases(self):
Item, User, Order = (self.classes.Item,
self.classes.User,
self.classes.Order)
sess = create_session()
OrderAlias = aliased(Order)
eq_(
sess.query(OrderAlias).join('items').filter_by(description='item 3').\
order_by(OrderAlias.id).all(),
[
Order(address_id=1,description='order 1',isopen=0,user_id=7,id=1),
Order(address_id=4,description='order 2',isopen=0,user_id=9,id=2),
Order(address_id=1,description='order 3',isopen=1,user_id=7,id=3)
]
)
eq_(
sess.query(User, OrderAlias, Item.description).
join(OrderAlias, 'orders').
join('items', from_joinpoint=True).
filter_by(description='item 3').\
order_by(User.id, OrderAlias.id).all(),
[
(User(name='jack',id=7), Order(address_id=1,description='order 1',isopen=0,user_id=7,id=1), 'item 3'),
(User(name='jack',id=7), Order(address_id=1,description='order 3',isopen=1,user_id=7,id=3), 'item 3'),
(User(name='fred',id=9), Order(address_id=4,description='order 2',isopen=0,user_id=9,id=2), 'item 3')
]
)
def test_aliased_classes_m2m(self):
Item, Order = self.classes.Item, self.classes.Order
sess = create_session()
(order1, order2, order3, order4, order5) = sess.query(Order).all()
(item1, item2, item3, item4, item5) = sess.query(Item).all()
expected = [
(order1, item1),
(order1, item2),
(order1, item3),
(order2, item1),
(order2, item2),
(order2, item3),
(order3, item3),
(order3, item4),
(order3, item5),
(order4, item1),
(order4, item5),
(order5, item5),
]
q = sess.query(Order)
q = q.add_entity(Item).select_from(join(Order, Item, 'items')).order_by(Order.id, Item.id)
l = q.all()
eq_(l, expected)
IAlias = aliased(Item)
q = sess.query(Order, IAlias).select_from(join(Order, IAlias, 'items')).filter(IAlias.description=='item 3')
l = q.all()
eq_(l,
[
(order1, item3),
(order2, item3),
(order3, item3),
]
)
def test_joins_from_adapted_entities(self):
User = self.classes.User
# test for #1853
session = create_session()
first = session.query(User)
second = session.query(User)
unioned = first.union(second)
subquery = session.query(User.id).subquery()
join = subquery, subquery.c.id == User.id
joined = unioned.outerjoin(*join)
self.assert_compile(joined,
'SELECT anon_1.users_id AS '
'anon_1_users_id, anon_1.users_name AS '
'anon_1_users_name FROM (SELECT users.id '
'AS users_id, users.name AS users_name '
'FROM users UNION SELECT users.id AS '
'users_id, users.name AS users_name FROM '
'users) AS anon_1 LEFT OUTER JOIN (SELECT '
'users.id AS id FROM users) AS anon_2 ON '
'anon_2.id = anon_1.users_id',
use_default_dialect=True)
first = session.query(User.id)
second = session.query(User.id)
unioned = first.union(second)
subquery = session.query(User.id).subquery()
join = subquery, subquery.c.id == User.id
joined = unioned.outerjoin(*join)
self.assert_compile(joined,
'SELECT anon_1.users_id AS anon_1_users_id '
'FROM (SELECT users.id AS users_id FROM '
'users UNION SELECT users.id AS users_id '
'FROM users) AS anon_1 LEFT OUTER JOIN '
'(SELECT users.id AS id FROM users) AS '
'anon_2 ON anon_2.id = anon_1.users_id',
use_default_dialect=True)
def test_joins_from_adapted_entities_isouter(self):
User = self.classes.User
# test for #1853
session = create_session()
first = session.query(User)
second = session.query(User)
unioned = first.union(second)
subquery = session.query(User.id).subquery()
join = subquery, subquery.c.id == User.id
joined = unioned.join(*join, isouter=True)
self.assert_compile(joined,
'SELECT anon_1.users_id AS '
'anon_1_users_id, anon_1.users_name AS '
'anon_1_users_name FROM (SELECT users.id '
'AS users_id, users.name AS users_name '
'FROM users UNION SELECT users.id AS '
'users_id, users.name AS users_name FROM '
'users) AS anon_1 LEFT OUTER JOIN (SELECT '
'users.id AS id FROM users) AS anon_2 ON '
'anon_2.id = anon_1.users_id',
use_default_dialect=True)
first = session.query(User.id)
second = session.query(User.id)
unioned = first.union(second)
subquery = session.query(User.id).subquery()
join = subquery, subquery.c.id == User.id
joined = unioned.join(*join, isouter=True)
self.assert_compile(joined,
'SELECT anon_1.users_id AS anon_1_users_id '
'FROM (SELECT users.id AS users_id FROM '
'users UNION SELECT users.id AS users_id '
'FROM users) AS anon_1 LEFT OUTER JOIN '
'(SELECT users.id AS id FROM users) AS '
'anon_2 ON anon_2.id = anon_1.users_id',
use_default_dialect=True)
def test_reset_joinpoint(self):
User = self.classes.User
for aliased in (True, False):
# load a user who has an order that contains item id 3 and address id 1 (order 3, owned by jack)
result = create_session().query(User).join('orders', 'items', aliased=aliased).filter_by(id=3).reset_joinpoint().join('orders','address', aliased=aliased).filter_by(id=1).all()
assert [User(id=7, name='jack')] == result
result = create_session().query(User).join('orders', 'items', aliased=aliased, isouter=True).filter_by(id=3).reset_joinpoint().join('orders','address', aliased=aliased, isouter=True).filter_by(id=1).all()
assert [User(id=7, name='jack')] == result
result = create_session().query(User).outerjoin('orders', 'items', aliased=aliased).filter_by(id=3).reset_joinpoint().outerjoin('orders','address', aliased=aliased).filter_by(id=1).all()
assert [User(id=7, name='jack')] == result
def test_overlap_with_aliases(self):
orders, User, users = (self.tables.orders,
self.classes.User,
self.tables.users)
oalias = orders.alias('oalias')
result = create_session().query(User).select_from(users.join(oalias)).filter(oalias.c.description.in_(["order 1", "order 2", "order 3"])).join('orders', 'items').order_by(User.id).all()
assert [User(id=7, name='jack'), User(id=9, name='fred')] == result
result = create_session().query(User).select_from(users.join(oalias)).filter(oalias.c.description.in_(["order 1", "order 2", "order 3"])).join('orders', 'items').filter_by(id=4).all()
assert [User(id=7, name='jack')] == result
def test_aliased(self):
"""test automatic generation of aliased joins."""
Item, Order, User, Address = (self.classes.Item,
self.classes.Order,
self.classes.User,
self.classes.Address)
sess = create_session()
# test a basic aliasized path
q = sess.query(User).join('addresses', aliased=True).filter_by(email_address='[email protected]')
assert [User(id=7)] == q.all()
q = sess.query(User).join('addresses', aliased=True).filter(Address.email_address=='[email protected]')
assert [User(id=7)] == q.all()
q = sess.query(User).join('addresses', aliased=True).filter(or_(Address.email_address=='[email protected]', Address.email_address=='[email protected]'))
assert [User(id=7), User(id=9)] == q.all()
# test two aliasized paths, one to 'orders' and the other to 'orders','items'.
# one row is returned because user 7 has order 3 and also has order 1 which has item 1
        # this tests an o2m join and an m2m join.
q = sess.query(User).join('orders', aliased=True).filter(Order.description=="order 3").join('orders', 'items', aliased=True).filter(Item.description=="item 1")
assert q.count() == 1
assert [User(id=7)] == q.all()
# test the control version - same joins but not aliased. rows are not returned because order 3 does not have item 1
q = sess.query(User).join('orders').filter(Order.description=="order 3").join('orders', 'items').filter(Item.description=="item 1")
assert [] == q.all()
assert q.count() == 0
# the left half of the join condition of the any() is aliased.
q = sess.query(User).join('orders', aliased=True).filter(Order.items.any(Item.description=='item 4'))
assert [User(id=7)] == q.all()
# test that aliasing gets reset when join() is called
q = sess.query(User).join('orders', aliased=True).filter(Order.description=="order 3").join('orders', aliased=True).filter(Order.description=="order 5")
assert q.count() == 1
assert [User(id=7)] == q.all()
def test_aliased_order_by(self):
User = self.classes.User
sess = create_session()
ualias = aliased(User)
eq_(
sess.query(User, ualias).filter(User.id > ualias.id).order_by(desc(ualias.id), User.name).all(),
[
(User(id=10,name='chuck'), User(id=9,name='fred')),
(User(id=10,name='chuck'), User(id=8,name='ed')),
(User(id=9,name='fred'), User(id=8,name='ed')),
(User(id=10,name='chuck'), User(id=7,name='jack')),
(User(id=8,name='ed'), User(id=7,name='jack')),
(User(id=9,name='fred'), User(id=7,name='jack'))
]
)
def test_plain_table(self):
addresses, User = self.tables.addresses, self.classes.User
sess = create_session()
eq_(
sess.query(User.name).join(addresses, User.id==addresses.c.user_id).order_by(User.id).all(),
[('jack',), ('ed',), ('ed',), ('ed',), ('fred',)]
)
def test_no_joinpoint_expr(self):
User, users = self.classes.User, self.tables.users
sess = create_session()
# these are consistent regardless of
# select_from() being present.
assert_raises_message(
sa_exc.InvalidRequestError,
"Can't join table/selectable 'users' to itself",
sess.query(users.c.id).join, User
)
assert_raises_message(
sa_exc.InvalidRequestError,
"Can't join table/selectable 'users' to itself",
sess.query(users.c.id).select_from(users).join, User
)
def test_select_from(self):
"""Test that the left edge of the join can be set reliably with select_from()."""
Item, Order, User = (self.classes.Item,
self.classes.Order,
self.classes.User)
sess = create_session()
self.assert_compile(
sess.query(Item.id).select_from(User).join(User.orders).join(Order.items),
"SELECT items.id AS items_id FROM users JOIN orders ON "
"users.id = orders.user_id JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id JOIN items ON items.id = "
"order_items_1.item_id",
use_default_dialect=True
)
# here, the join really wants to add a second FROM clause
# for "Item". but select_from disallows that
self.assert_compile(
sess.query(Item.id).select_from(User).join(Item, User.id==Item.id),
"SELECT items.id AS items_id FROM users JOIN items ON users.id = items.id",
use_default_dialect=True
)
def test_from_self_resets_joinpaths(self):
"""test a join from from_self() doesn't confuse joins inside the subquery
with the outside.
"""
Item, Keyword = self.classes.Item, self.classes.Keyword
sess = create_session()
self.assert_compile(
sess.query(Item).join(Item.keywords).from_self(Keyword).join(Item.keywords),
"SELECT keywords.id AS keywords_id, keywords.name AS keywords_name FROM "
"(SELECT items.id AS items_id, items.description AS items_description "
"FROM items JOIN item_keywords AS item_keywords_1 ON items.id = "
"item_keywords_1.item_id JOIN keywords ON keywords.id = item_keywords_1.keyword_id) "
"AS anon_1 JOIN item_keywords AS item_keywords_2 ON "
"anon_1.items_id = item_keywords_2.item_id "
"JOIN keywords ON "
"keywords.id = item_keywords_2.keyword_id",
use_default_dialect=True
)
class JoinFromSelectableTest(fixtures.MappedTest, AssertsCompiledSQL):
__dialect__ = 'default'
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
Table('table1', metadata,
Column('id', Integer, primary_key=True)
)
Table('table2', metadata,
Column('id', Integer, primary_key=True),
Column('t1_id', Integer)
)
@classmethod
def setup_classes(cls):
table1, table2 = cls.tables.table1, cls.tables.table2
class T1(cls.Comparable):
pass
class T2(cls.Comparable):
pass
mapper(T1, table1)
mapper(T2, table2)
def test_select_mapped_to_mapped_explicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = Session()
subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
group_by(T2.t1_id).subquery()
self.assert_compile(
sess.query(subq.c.count, T1.id).select_from(subq).join(T1, subq.c.t1_id==T1.id),
"SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
"FROM (SELECT table2.t1_id AS t1_id, "
"count(table2.id) AS count FROM table2 "
"GROUP BY table2.t1_id) AS anon_1 JOIN table1 ON anon_1.t1_id = table1.id"
)
def test_select_mapped_to_mapped_implicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = Session()
subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
group_by(T2.t1_id).subquery()
self.assert_compile(
sess.query(subq.c.count, T1.id).join(T1, subq.c.t1_id==T1.id),
"SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
"FROM (SELECT table2.t1_id AS t1_id, "
"count(table2.id) AS count FROM table2 "
"GROUP BY table2.t1_id) AS anon_1 JOIN table1 ON anon_1.t1_id = table1.id"
)
def test_select_mapped_to_select_explicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = Session()
subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
group_by(T2.t1_id).subquery()
self.assert_compile(
sess.query(subq.c.count, T1.id).select_from(T1).join(subq, subq.c.t1_id==T1.id),
"SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
"FROM table1 JOIN (SELECT table2.t1_id AS t1_id, "
"count(table2.id) AS count FROM table2 GROUP BY table2.t1_id) "
"AS anon_1 ON anon_1.t1_id = table1.id"
)
def test_select_mapped_to_select_implicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = Session()
subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
group_by(T2.t1_id).subquery()
assert_raises_message(
sa_exc.InvalidRequestError,
r"Can't construct a join from ",
sess.query(subq.c.count, T1.id).join, subq, subq.c.t1_id==T1.id,
)
def test_mapped_select_to_mapped_implicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = Session()
subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
group_by(T2.t1_id).subquery()
assert_raises_message(
sa_exc.InvalidRequestError,
"Can't join table/selectable 'table1' to itself",
sess.query(T1.id, subq.c.count).join, T1, subq.c.t1_id == T1.id
)
self.assert_compile(
sess.query(T1.id, subq.c.count).select_from(subq).\
join(T1, subq.c.t1_id == T1.id),
"SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
"FROM (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) AS anon_1 "
"JOIN table1 ON anon_1.t1_id = table1.id"
)
def test_mapped_select_to_mapped_explicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = Session()
subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
group_by(T2.t1_id).subquery()
self.assert_compile(
sess.query(T1.id, subq.c.count).select_from(subq).join(T1, subq.c.t1_id==T1.id),
"SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
"FROM (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) AS anon_1 JOIN table1 "
"ON anon_1.t1_id = table1.id"
)
def test_mapped_select_to_select_explicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = Session()
subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
group_by(T2.t1_id).subquery()
self.assert_compile(
sess.query(T1.id, subq.c.count).select_from(T1).join(subq, subq.c.t1_id==T1.id),
"SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
"FROM table1 JOIN (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) AS anon_1 "
"ON anon_1.t1_id = table1.id"
)
def test_mapped_select_to_select_implicit_left(self):
T1, T2 = self.classes.T1, self.classes.T2
sess = Session()
subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
group_by(T2.t1_id).subquery()
self.assert_compile(
sess.query(T1.id, subq.c.count).join(subq, subq.c.t1_id==T1.id),
"SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
"FROM table1 JOIN (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
"FROM table2 GROUP BY table2.t1_id) AS anon_1 "
"ON anon_1.t1_id = table1.id"
)
class MultiplePathTest(fixtures.MappedTest, AssertsCompiledSQL):
@classmethod
def define_tables(cls, metadata):
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30))
)
t2 = Table('t2', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30))
)
t1t2_1 = Table('t1t2_1', metadata,
Column('t1id', Integer, ForeignKey('t1.id')),
Column('t2id', Integer, ForeignKey('t2.id'))
)
t1t2_2 = Table('t1t2_2', metadata,
Column('t1id', Integer, ForeignKey('t1.id')),
Column('t2id', Integer, ForeignKey('t2.id'))
)
def test_basic(self):
t2, t1t2_1, t1t2_2, t1 = (self.tables.t2,
self.tables.t1t2_1,
self.tables.t1t2_2,
self.tables.t1)
class T1(object):
pass
class T2(object):
pass
mapper(T1, t1, properties={
't2s_1': relationship(T2, secondary=t1t2_1),
't2s_2': relationship(T2, secondary=t1t2_2),
})
mapper(T2, t2)
q = create_session().query(T1).join('t2s_1').filter(t2.c.id==5).reset_joinpoint().join('t2s_2')
self.assert_compile(
q,
"SELECT t1.id AS t1_id, t1.data AS t1_data FROM t1 JOIN t1t2_1 AS t1t2_1_1 "
"ON t1.id = t1t2_1_1.t1id JOIN t2 ON t2.id = t1t2_1_1.t2id JOIN t1t2_2 AS t1t2_2_1 "
"ON t1.id = t1t2_2_1.t1id JOIN t2 ON t2.id = t1t2_2_1.t2id WHERE t2.id = :id_1"
, use_default_dialect=True
)
class SelfRefMixedTest(fixtures.MappedTest, AssertsCompiledSQL):
run_setup_mappers = 'once'
__dialect__ = default.DefaultDialect()
@classmethod
def define_tables(cls, metadata):
nodes = Table('nodes', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('parent_id', Integer, ForeignKey('nodes.id'))
)
sub_table = Table('sub_table', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('node_id', Integer, ForeignKey('nodes.id')),
)
assoc_table = Table('assoc_table', metadata,
Column('left_id', Integer, ForeignKey('nodes.id')),
Column('right_id', Integer, ForeignKey('nodes.id'))
)
@classmethod
def setup_classes(cls):
nodes, assoc_table, sub_table = (cls.tables.nodes,
cls.tables.assoc_table,
cls.tables.sub_table)
class Node(cls.Comparable):
pass
class Sub(cls.Comparable):
pass
mapper(Node, nodes, properties={
'children':relationship(Node, lazy='select', join_depth=3,
backref=backref('parent', remote_side=[nodes.c.id])
),
'subs' : relationship(Sub),
'assoc':relationship(Node,
secondary=assoc_table,
primaryjoin=nodes.c.id==assoc_table.c.left_id,
secondaryjoin=nodes.c.id==assoc_table.c.right_id)
})
mapper(Sub, sub_table)
def test_o2m_aliased_plus_o2m(self):
Node, Sub = self.classes.Node, self.classes.Sub
sess = create_session()
n1 = aliased(Node)
self.assert_compile(
sess.query(Node).join(n1, Node.children).join(Sub, n1.subs),
"SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id "
"FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id "
"JOIN sub_table ON nodes_1.id = sub_table.node_id"
)
self.assert_compile(
sess.query(Node).join(n1, Node.children).join(Sub, Node.subs),
"SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id "
"FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id "
"JOIN sub_table ON nodes.id = sub_table.node_id"
)
def test_m2m_aliased_plus_o2m(self):
Node, Sub = self.classes.Node, self.classes.Sub
sess = create_session()
n1 = aliased(Node)
self.assert_compile(
sess.query(Node).join(n1, Node.assoc).join(Sub, n1.subs),
"SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id "
"FROM nodes JOIN assoc_table AS assoc_table_1 ON nodes.id = "
"assoc_table_1.left_id JOIN nodes AS nodes_1 ON nodes_1.id = "
"assoc_table_1.right_id JOIN sub_table ON nodes_1.id = sub_table.node_id",
)
self.assert_compile(
sess.query(Node).join(n1, Node.assoc).join(Sub, Node.subs),
"SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id "
"FROM nodes JOIN assoc_table AS assoc_table_1 ON nodes.id = "
"assoc_table_1.left_id JOIN nodes AS nodes_1 ON nodes_1.id = "
"assoc_table_1.right_id JOIN sub_table ON nodes.id = sub_table.node_id",
)
class CreateJoinsTest(fixtures.ORMTest, AssertsCompiledSQL):
__dialect__ = 'default'
def _inherits_fixture(self):
m = MetaData()
base = Table('base', m, Column('id', Integer, primary_key=True))
a = Table('a', m,
Column('id', Integer, ForeignKey('base.id'), primary_key=True),
Column('b_id', Integer, ForeignKey('b.id')))
b = Table('b', m,
Column('id', Integer, ForeignKey('base.id'), primary_key=True),
Column('c_id', Integer, ForeignKey('c.id')))
c = Table('c', m,
Column('id', Integer, ForeignKey('base.id'), primary_key=True))
class Base(object):
pass
class A(Base):
pass
class B(Base):
pass
class C(Base):
pass
mapper(Base, base)
mapper(A, a, inherits=Base, properties={'b':relationship(B, primaryjoin=a.c.b_id==b.c.id)})
mapper(B, b, inherits=Base, properties={'c':relationship(C, primaryjoin=b.c.c_id==c.c.id)})
mapper(C, c, inherits=Base)
return A, B, C, Base
def test_double_level_aliased_exists(self):
A, B, C, Base = self._inherits_fixture()
s = Session()
self.assert_compile(
s.query(A).filter(A.b.has(B.c.has(C.id==5))),
"SELECT a.id AS a_id, base.id AS base_id, a.b_id AS a_b_id "
"FROM base JOIN a ON base.id = a.id WHERE "
"EXISTS (SELECT 1 FROM (SELECT base.id AS base_id, b.id AS "
"b_id, b.c_id AS b_c_id FROM base JOIN b ON base.id = b.id) "
"AS anon_1 WHERE a.b_id = anon_1.b_id AND (EXISTS "
"(SELECT 1 FROM (SELECT base.id AS base_id, c.id AS c_id "
"FROM base JOIN c ON base.id = c.id) AS anon_2 "
"WHERE anon_1.b_c_id = anon_2.c_id AND anon_2.c_id = :id_1"
")))"
)
class JoinToNonPolyAliasesTest(fixtures.MappedTest, AssertsCompiledSQL):
"""test joins to an aliased selectable and that we can refer to that
aliased selectable in filter criteria.
    Basically testing that the aliasing that Query applies to with_polymorphic
    targets doesn't leak into non-polymorphic mappers.
"""
__dialect__ = 'default'
run_create_tables = None
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table("parent", metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50)),
)
Table("child", metadata,
Column('id', Integer, primary_key=True),
Column('parent_id', Integer, ForeignKey('parent.id')),
Column('data', String(50))
)
@classmethod
def setup_mappers(cls):
parent, child = cls.tables.parent, cls.tables.child
class Parent(cls.Comparable):
pass
class Child(cls.Comparable):
pass
mp = mapper(Parent, parent)
mapper(Child, child)
derived = select([child]).alias()
npc = mapper(Child, derived, non_primary=True)
cls.npc = npc
cls.derived = derived
mp.add_property("npc", relationship(npc))
def test_join_parent_child(self):
Parent = self.classes.Parent
npc = self.npc
sess = Session()
self.assert_compile(
sess.query(Parent).join(Parent.npc).filter(self.derived.c.data == 'x'),
"SELECT parent.id AS parent_id, parent.data AS parent_data "
"FROM parent JOIN (SELECT child.id AS id, child.parent_id AS parent_id, "
"child.data AS data "
"FROM child) AS anon_1 ON parent.id = anon_1.parent_id "
"WHERE anon_1.data = :data_1"
)
def test_join_parent_child_select_from(self):
Parent = self.classes.Parent
npc = self.npc
sess = Session()
self.assert_compile(
sess.query(npc).select_from(Parent).join(Parent.npc).\
filter(self.derived.c.data == 'x'),
"SELECT anon_1.id AS anon_1_id, anon_1.parent_id "
"AS anon_1_parent_id, anon_1.data AS anon_1_data "
"FROM parent JOIN (SELECT child.id AS id, child.parent_id AS "
"parent_id, child.data AS data FROM child) AS anon_1 ON "
"parent.id = anon_1.parent_id WHERE anon_1.data = :data_1"
)
def test_join_select_parent_child(self):
Parent = self.classes.Parent
npc = self.npc
sess = Session()
self.assert_compile(
sess.query(Parent, npc).join(Parent.npc).filter(
self.derived.c.data == 'x'),
"SELECT parent.id AS parent_id, parent.data AS parent_data, "
"anon_1.id AS anon_1_id, anon_1.parent_id AS anon_1_parent_id, "
"anon_1.data AS anon_1_data FROM parent JOIN "
"(SELECT child.id AS id, child.parent_id AS parent_id, "
"child.data AS data FROM child) AS anon_1 ON parent.id = "
"anon_1.parent_id WHERE anon_1.data = :data_1"
)
class SelfReferentialTest(fixtures.MappedTest, AssertsCompiledSQL):
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('nodes', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('parent_id', Integer, ForeignKey('nodes.id')),
Column('data', String(30)))
@classmethod
def setup_classes(cls):
class Node(cls.Comparable):
def append(self, node):
self.children.append(node)
@classmethod
def setup_mappers(cls):
Node, nodes = cls.classes.Node, cls.tables.nodes
mapper(Node, nodes, properties={
'children':relationship(Node, lazy='select', join_depth=3,
backref=backref('parent', remote_side=[nodes.c.id])
),
})
@classmethod
def insert_data(cls):
Node = cls.classes.Node
sess = create_session()
n1 = Node(data='n1')
n1.append(Node(data='n11'))
n1.append(Node(data='n12'))
n1.append(Node(data='n13'))
n1.children[1].append(Node(data='n121'))
n1.children[1].append(Node(data='n122'))
n1.children[1].append(Node(data='n123'))
sess.add(n1)
sess.flush()
sess.close()
def test_join_1(self):
Node = self.classes.Node
sess = create_session()
node = sess.query(Node).join('children', aliased=True).filter_by(data='n122').first()
assert node.data=='n12'
def test_join_2(self):
Node = self.classes.Node
sess = create_session()
ret = sess.query(Node.data).join(Node.children, aliased=True).filter_by(data='n122').all()
assert ret == [('n12',)]
def test_join_3(self):
Node = self.classes.Node
sess = create_session()
node = sess.query(Node).join('children', 'children', aliased=True).filter_by(data='n122').first()
assert node.data=='n1'
def test_join_4(self):
Node = self.classes.Node
sess = create_session()
node = sess.query(Node).filter_by(data='n122').join('parent', aliased=True).filter_by(data='n12').\
join('parent', aliased=True, from_joinpoint=True).filter_by(data='n1').first()
assert node.data == 'n122'
def test_string_or_prop_aliased(self):
"""test that join('foo') behaves the same as join(Cls.foo) in a self
referential scenario.
"""
Node = self.classes.Node
sess = create_session()
nalias = aliased(Node, sess.query(Node).filter_by(data='n1').subquery())
q1 = sess.query(nalias).join(nalias.children, aliased=True).\
join(Node.children, from_joinpoint=True)
q2 = sess.query(nalias).join(nalias.children, aliased=True).\
join("children", from_joinpoint=True)
for q in (q1, q2):
self.assert_compile(
q,
"SELECT anon_1.id AS anon_1_id, anon_1.parent_id AS "
"anon_1_parent_id, anon_1.data AS anon_1_data FROM "
"(SELECT nodes.id AS id, nodes.parent_id AS parent_id, "
"nodes.data AS data FROM nodes WHERE nodes.data = :data_1) "
"AS anon_1 JOIN nodes AS nodes_1 ON anon_1.id = "
"nodes_1.parent_id JOIN nodes ON nodes_1.id = nodes.parent_id",
use_default_dialect=True
)
q1 = sess.query(Node).join(nalias.children, aliased=True).\
join(Node.children, aliased=True, from_joinpoint=True).\
join(Node.children, from_joinpoint=True)
q2 = sess.query(Node).join(nalias.children, aliased=True).\
join("children", aliased=True, from_joinpoint=True).\
join("children", from_joinpoint=True)
for q in (q1, q2):
self.assert_compile(
q,
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, nodes.data AS nodes_data FROM (SELECT "
"nodes.id AS id, nodes.parent_id AS parent_id, nodes.data "
"AS data FROM nodes WHERE nodes.data = :data_1) AS anon_1 "
"JOIN nodes AS nodes_1 ON anon_1.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id "
"JOIN nodes ON nodes_2.id = nodes.parent_id",
use_default_dialect=True
)
def test_from_self_inside_excludes_outside(self):
"""test the propagation of aliased() from inside to outside
on a from_self()..
"""
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
# n1 is not inside the from_self(), so all cols must be maintained
# on the outside
self.assert_compile(
sess.query(Node).filter(Node.data=='n122').from_self(n1, Node.id),
"SELECT nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, "
"nodes_1.data AS nodes_1_data, anon_1.nodes_id AS anon_1_nodes_id "
"FROM nodes AS nodes_1, (SELECT nodes.id AS nodes_id, "
"nodes.parent_id AS nodes_parent_id, nodes.data AS nodes_data FROM "
"nodes WHERE nodes.data = :data_1) AS anon_1",
use_default_dialect=True
)
parent = aliased(Node)
grandparent = aliased(Node)
q = sess.query(Node, parent, grandparent).\
join(parent, Node.parent).\
join(grandparent, parent.parent).\
filter(Node.data=='n122').filter(parent.data=='n12').\
filter(grandparent.data=='n1').from_self().limit(1)
# parent, grandparent *are* inside the from_self(), so they
# should get aliased to the outside.
self.assert_compile(
q,
"SELECT anon_1.nodes_id AS anon_1_nodes_id, "
"anon_1.nodes_parent_id AS anon_1_nodes_parent_id, "
"anon_1.nodes_data AS anon_1_nodes_data, "
"anon_1.nodes_1_id AS anon_1_nodes_1_id, "
"anon_1.nodes_1_parent_id AS anon_1_nodes_1_parent_id, "
"anon_1.nodes_1_data AS anon_1_nodes_1_data, "
"anon_1.nodes_2_id AS anon_1_nodes_2_id, "
"anon_1.nodes_2_parent_id AS anon_1_nodes_2_parent_id, "
"anon_1.nodes_2_data AS anon_1_nodes_2_data "
"FROM (SELECT nodes.id AS nodes_id, nodes.parent_id "
"AS nodes_parent_id, nodes.data AS nodes_data, "
"nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, "
"nodes_1.data AS nodes_1_data, nodes_2.id AS nodes_2_id, "
"nodes_2.parent_id AS nodes_2_parent_id, nodes_2.data AS "
"nodes_2_data FROM nodes JOIN nodes AS nodes_1 ON "
"nodes_1.id = nodes.parent_id JOIN nodes AS nodes_2 "
"ON nodes_2.id = nodes_1.parent_id "
"WHERE nodes.data = :data_1 AND nodes_1.data = :data_2 AND "
"nodes_2.data = :data_3) AS anon_1 LIMIT :param_1",
{'param_1':1},
use_default_dialect=True
)
def test_explicit_join_1(self):
Node = self.classes.Node
n1 = aliased(Node)
n2 = aliased(Node)
self.assert_compile(
join(Node, n1, 'children').join(n2, 'children'),
"nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id",
use_default_dialect=True
)
def test_explicit_join_2(self):
Node = self.classes.Node
n1 = aliased(Node)
n2 = aliased(Node)
self.assert_compile(
join(Node, n1, Node.children).join(n2, n1.children),
"nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id",
use_default_dialect=True
)
def test_explicit_join_3(self):
Node = self.classes.Node
n1 = aliased(Node)
n2 = aliased(Node)
# the join_to_left=False here is unfortunate. the default on this flag should
# be False.
self.assert_compile(
join(Node, n1, Node.children).join(n2, Node.children, join_to_left=False),
"nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes.id = nodes_2.parent_id",
use_default_dialect=True
)
def test_explicit_join_4(self):
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
n2 = aliased(Node)
self.assert_compile(
sess.query(Node).join(n1, Node.children).join(n2, n1.children),
"SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS "
"nodes_data FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id",
use_default_dialect=True
)
def test_explicit_join_5(self):
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
n2 = aliased(Node)
self.assert_compile(
sess.query(Node).join(n1, Node.children).join(n2, Node.children),
"SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS "
"nodes_data FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id "
"JOIN nodes AS nodes_2 ON nodes.id = nodes_2.parent_id",
use_default_dialect=True
)
def test_explicit_join_6(self):
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
node = sess.query(Node).select_from(join(Node, n1, 'children')).\
filter(n1.data == 'n122').first()
assert node.data == 'n12'
def test_explicit_join_7(self):
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
n2 = aliased(Node)
node = sess.query(Node).select_from(
join(Node, n1, 'children').join(n2, 'children')).\
filter(n2.data == 'n122').first()
assert node.data == 'n1'
def test_explicit_join_8(self):
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
n2 = aliased(Node)
# mix explicit and named onclauses
node = sess.query(Node).select_from(
join(Node, n1, Node.id == n1.parent_id).join(n2, 'children')).\
filter(n2.data == 'n122').first()
assert node.data == 'n1'
def test_explicit_join_9(self):
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
n2 = aliased(Node)
node = sess.query(Node).select_from(join(Node, n1, 'parent').join(n2, 'parent')).\
filter(and_(Node.data == 'n122', n1.data == 'n12', n2.data == 'n1')).first()
assert node.data == 'n122'
def test_explicit_join_10(self):
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
n2 = aliased(Node)
eq_(
list(sess.query(Node).select_from(join(Node, n1, 'parent').join(n2, 'parent')).\
filter(and_(Node.data == 'n122',
n1.data == 'n12',
n2.data == 'n1')).values(Node.data, n1.data, n2.data)),
[('n122', 'n12', 'n1')])
def test_join_to_nonaliased(self):
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
# using 'n1.parent' implicitly joins to unaliased Node
eq_(
sess.query(n1).join(n1.parent).filter(Node.data=='n1').all(),
[Node(parent_id=1,data='n11',id=2), Node(parent_id=1,data='n12',id=3), Node(parent_id=1,data='n13',id=4)]
)
# explicit (new syntax)
eq_(
sess.query(n1).join(Node, n1.parent).filter(Node.data=='n1').all(),
[Node(parent_id=1,data='n11',id=2), Node(parent_id=1,data='n12',id=3), Node(parent_id=1,data='n13',id=4)]
)
def test_multiple_explicit_entities_one(self):
Node = self.classes.Node
sess = create_session()
parent = aliased(Node)
grandparent = aliased(Node)
eq_(
sess.query(Node, parent, grandparent).\
join(parent, Node.parent).\
join(grandparent, parent.parent).\
filter(Node.data=='n122').filter(parent.data=='n12').\
filter(grandparent.data=='n1').first(),
(Node(data='n122'), Node(data='n12'), Node(data='n1'))
)
def test_multiple_explicit_entities_two(self):
Node = self.classes.Node
sess = create_session()
parent = aliased(Node)
grandparent = aliased(Node)
eq_(
sess.query(Node, parent, grandparent).\
join(parent, Node.parent).\
join(grandparent, parent.parent).\
filter(Node.data == 'n122').filter(parent.data == 'n12').\
filter(grandparent.data == 'n1').from_self().first(),
(Node(data='n122'), Node(data='n12'), Node(data='n1'))
)
def test_multiple_explicit_entities_three(self):
Node = self.classes.Node
sess = create_session()
parent = aliased(Node)
grandparent = aliased(Node)
# same, change order around
eq_(
sess.query(parent, grandparent, Node).\
join(parent, Node.parent).\
join(grandparent, parent.parent).\
filter(Node.data == 'n122').filter(parent.data == 'n12').\
filter(grandparent.data == 'n1').from_self().first(),
(Node(data='n12'), Node(data='n1'), Node(data='n122'))
)
def test_multiple_explicit_entities_four(self):
Node = self.classes.Node
sess = create_session()
parent = aliased(Node)
grandparent = aliased(Node)
eq_(
sess.query(Node, parent, grandparent).\
join(parent, Node.parent).\
join(grandparent, parent.parent).\
filter(Node.data=='n122').filter(parent.data=='n12').\
filter(grandparent.data=='n1').\
options(joinedload(Node.children)).first(),
(Node(data='n122'), Node(data='n12'), Node(data='n1'))
)
def test_multiple_explicit_entities_five(self):
Node = self.classes.Node
sess = create_session()
parent = aliased(Node)
grandparent = aliased(Node)
eq_(
sess.query(Node, parent, grandparent).\
join(parent, Node.parent).\
join(grandparent, parent.parent).\
filter(Node.data=='n122').filter(parent.data=='n12').\
filter(grandparent.data=='n1').from_self().\
options(joinedload(Node.children)).first(),
(Node(data='n122'), Node(data='n12'), Node(data='n1'))
)
def test_any(self):
Node = self.classes.Node
sess = create_session()
eq_(sess.query(Node).filter(Node.children.any(Node.data=='n1')).all(), [])
eq_(sess.query(Node).filter(Node.children.any(Node.data=='n12')).all(), [Node(data='n1')])
eq_(sess.query(Node).filter(~Node.children.any()).order_by(Node.id).all(),
[Node(data='n11'), Node(data='n13'),Node(data='n121'),Node(data='n122'),Node(data='n123'),])
def test_has(self):
Node = self.classes.Node
sess = create_session()
eq_(sess.query(Node).filter(Node.parent.has(Node.data=='n12')).order_by(Node.id).all(),
[Node(data='n121'),Node(data='n122'),Node(data='n123')])
eq_(sess.query(Node).filter(Node.parent.has(Node.data=='n122')).all(), [])
eq_(sess.query(Node).filter(~Node.parent.has()).all(), [Node(data='n1')])
def test_contains(self):
Node = self.classes.Node
sess = create_session()
n122 = sess.query(Node).filter(Node.data=='n122').one()
eq_(sess.query(Node).filter(Node.children.contains(n122)).all(), [Node(data='n12')])
n13 = sess.query(Node).filter(Node.data=='n13').one()
eq_(sess.query(Node).filter(Node.children.contains(n13)).all(), [Node(data='n1')])
def test_eq_ne(self):
Node = self.classes.Node
sess = create_session()
n12 = sess.query(Node).filter(Node.data=='n12').one()
eq_(sess.query(Node).filter(Node.parent==n12).all(), [Node(data='n121'),Node(data='n122'),Node(data='n123')])
eq_(sess.query(Node).filter(Node.parent != n12).all(), [Node(data='n1'), Node(data='n11'), Node(data='n12'), Node(data='n13')])
class SelfReferentialM2MTest(fixtures.MappedTest):
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
nodes = Table('nodes', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)))
node_to_nodes =Table('node_to_nodes', metadata,
Column('left_node_id', Integer, ForeignKey('nodes.id'),primary_key=True),
Column('right_node_id', Integer, ForeignKey('nodes.id'),primary_key=True),
)
@classmethod
def setup_classes(cls):
class Node(cls.Comparable):
pass
@classmethod
def insert_data(cls):
Node, nodes, node_to_nodes = (cls.classes.Node,
cls.tables.nodes,
cls.tables.node_to_nodes)
mapper(Node, nodes, properties={
'children':relationship(Node, lazy='select', secondary=node_to_nodes,
primaryjoin=nodes.c.id==node_to_nodes.c.left_node_id,
secondaryjoin=nodes.c.id==node_to_nodes.c.right_node_id,
)
})
sess = create_session()
n1 = Node(data='n1')
n2 = Node(data='n2')
n3 = Node(data='n3')
n4 = Node(data='n4')
n5 = Node(data='n5')
n6 = Node(data='n6')
n7 = Node(data='n7')
n1.children = [n2, n3, n4]
n2.children = [n3, n6, n7]
n3.children = [n5, n4]
sess.add(n1)
sess.add(n2)
sess.add(n3)
sess.add(n4)
sess.flush()
sess.close()
def test_any(self):
Node = self.classes.Node
sess = create_session()
eq_(sess.query(Node).filter(Node.children.any(Node.data == 'n3'
)).order_by(Node.data).all(), [Node(data='n1'), Node(data='n2')])
def test_contains(self):
Node = self.classes.Node
sess = create_session()
n4 = sess.query(Node).filter_by(data='n4').one()
eq_(sess.query(Node).filter(Node.children.contains(n4)).order_by(Node.data).all(),
[Node(data='n1'), Node(data='n3')])
eq_(sess.query(Node).filter(not_(Node.children.contains(n4))).order_by(Node.data).all(),
[Node(data='n2'), Node(data='n4'), Node(data='n5'),
Node(data='n6'), Node(data='n7')])
def test_explicit_join(self):
Node = self.classes.Node
sess = create_session()
n1 = aliased(Node)
eq_(
sess.query(Node).select_from(join(Node, n1, 'children'
)).filter(n1.data.in_(['n3', 'n7'
])).order_by(Node.id).all(),
[Node(data='n1'), Node(data='n2')]
)
| mit | -8,141,388,040,386,381,000 | 39.473218 | 216 | 0.553042 | false |
nonemaw/pynet | learnpy_ecourse/class4/ex2_show_version.py | 4 | 3357 | #!/usr/bin/env python
'''
Disclaimer - This is a solution to the below problem given the content we have
discussed in class. It is not necessarily the best solution to the problem.
In other words, I generally only use things we have covered up to this point
in the class (with some exceptions which I will usually note).
Python for Network Engineers
https://pynet.twb-tech.com
Learning Python
Parse the below 'show version' data and obtain the following items (vendor,
model, os_version, uptime, and serial_number). Try to make your string parsing
generic, i.e. it would work for other Cisco IOS devices.
The following are reasonable strings to look for:
'Cisco IOS Software' for vendor and os_version
'bytes of memory' for model
'Processor board ID' for serial_number
' uptime is ' for uptime
Store these variables (vendor, model, os_version, uptime, and serial_number) in
a dictionary. Print the dictionary to standard output when done.
Note, "Cisco IOS Software...Version 15.0(1)M4...(fc1)" is one line.
'''
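# For reference, parsing the sample 'show version' text below should give
# approximately this dictionary (a sketch read off the sample data, not an
# authoritative answer key):
#   {'vendor': 'Cisco',
#    'model': '881',
#    'os_version': '15.0(1)M4',
#    'serial_number': 'FTX1000038X',
#    'uptime': '7 weeks, 5 days, 19 hours, 23 minutes'}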
import pprint
show_version = '''
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
ROM: System Bootstrap, Version 12.4(22r)YB5, RELEASE SOFTWARE (fc1)
twb-sf-881 uptime is 7 weeks, 5 days, 19 hours, 23 minutes
System returned to ROM by reload at 15:33:36 PST Fri Feb 28 2014
System restarted at 15:34:09 PST Fri Feb 28 2014
System image file is "flash:c880data-universalk9-mz.150-1.M4.bin"
Last reload type: Normal Reload
Last reload reason: Reload Command
Cisco 881 (MPC8300) processor (revision 1.0) with 236544K/25600K bytes of memory.
Processor board ID FTX1000038X
5 FastEthernet interfaces
1 Virtual Private Network (VPN) Module
256K bytes of non-volatile configuration memory.
126000K bytes of ATA CompactFlash (Read/Write)
License Info:
License UDI:
-------------------------------------------------
Device# PID SN
-------------------------------------------------
*0 CISCO881-SEC-K9 FTX1000038X
License Information for 'c880-data'
License Level: advipservices Type: Permanent
Next reboot license Level: advipservices
Configuration register is 0x2102
'''
# Convert the show version to a list - one entry per line
show_ver_list = show_version.split("\n")
router_dict = {}
# Iterate over the show version data
for line in show_ver_list:
# Vendor and OS Version processing
if 'Cisco IOS Software' in line:
router_dict['vendor'] = 'Cisco'
os_version = line.split(',')[2]
router_dict['os_version'] = os_version.split('Version ')[1]
# Model processing (note, 'Cisco 881...bytes of memory' is on one line)
if 'bytes of memory' in line:
# The model will be the second word in this line
router_dict['model'] = line.split()[1]
# Serial number processing
if 'Processor board ID' in line:
router_dict['serial_number'] = line.split('Processor board ID ')[1]
# Uptime processing
if ' uptime is ' in line:
uptime = line.split(' uptime is ')[1]
uptime = uptime.strip()
router_dict['uptime'] = uptime
# Print dictionary to standard output
print
pprint.pprint(router_dict)
print
| gpl-2.0 | 7,522,227,299,573,160,000 | 30.083333 | 101 | 0.698242 | false |
rupertnash/subgrid | python/d3q15/fallers/calibrate.py | 2 | 3363 | import d3q15
N = d3q15.N
import scipy
import cPickle
nIters = 100
def calibrateFromInstance(L, fallerClass):
"""Performs a calibration of the fitting parameter.
L = the lattice to calibrate for
fallerClass = the class of faller to calibrate for
"""
latticeSize = (L.nx, L.ny, L.nz)
    return calibrate(L.__class__, latticeSize, L.tau_s / 3.0, fallerClass)
def calibrate(latticeClass, latticeSize, eta, fallerClass, posfile=None):
"""Performs a calibration of the fitting parameter.
latticeClass = the class of lattice to calibrate for
latticeSize = size of lattice
eta = viscosity
fallerClass = the class of faller to calibrate for
"""
rng = N.random.RandomState()
kT = 0.001
(nx, ny, nz) = latticeSize
tau_s = 3.0 * eta
# no. steps to equilibriate the fluid
equiliSteps = int((nx*ny*nz)**(2./3.) / eta)
# gonna run for this many steps ONCE only and save the distributions
L = d3q15.Lattice(nx,ny,nz, tau_s, tau_s)
L.noise.temperature = kT
L.noise.seed = rng.tomaxint()
L.rho = N.ones(L.scalarFieldSize(), N.float)
L.u = N.zeros(L.vectorFieldSize(), N.float)
L.initBoundaryC('periodic')
L.initForceC('none')
L.initFromHydroVars()
L.step(equiliSteps)
fStore = L.f.copy()
# Number of particles we can add if they are, on average, separated
# by 4 lattice units (required for non-overlapping interpolation
# regions
nCells = nx*ny*nz
nPart = max(nCells / 64, 1) # must be at least one!
nRuns = nIters * nPart
# How long to run for? Want to diffuse order 1 lattice unit to sample
# over the grid. Use \Delta r ^2 ~ 2Dt = ____kT______ t
# 3 \pi \eta a
# With a = 1.5 (cos I know that's roughly right!)
nSteps = int(3.*N.pi * eta * 1.5 / kT)
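    # (Sanity check under assumed values, not from the original code: with
    # eta = 1/6, i.e. tau_s = 0.5, and kT = 0.001 this gives nSteps ~ 2356.)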
posns = N.zeros((nSteps, nRuns, 3), N.float)
for i in xrange(nIters):
# construct initial particle positions
initialPos = N.concatenate((rng.uniform(1,nx,size=(nPart,1)),
rng.uniform(1,ny,size=(nPart,1)),
rng.uniform(1,nz,size=(nPart,1))), axis=1)
L = latticeClass(nx, ny, nz,tau_s, tau_s)
L.initBoundaryC('periodic')
L.add(fallerClass,
a=1., F=[0.,0.,0.], r_list=initialPos,
hydroRadius=1.1)
# turn off noise associated with subgridness of particle,
L.things[0].noiseStDev = 0.0
# copy in equilibriated fluid
L.f[:] = fStore
L.noise.temperature = kT
# set a new random seed
L.noise.seed = rng.tomaxint()
L.updateHydroVars()
# Going to integrate velocities ourselves to avoid PBC issues
for j in xrange(nSteps):
posns[j, nPart*i:nPart*(i+1), :] = \
posns[j-1, nPart*i:nPart*(i+1), :] + L.things[0].v
L.step(1)
continue
continue
if posfile is not None:
cPickle.dump(posns, posfile, protocol=2)
pass
rSq = N.mean(N.sum(posns**2, axis=-1), axis=-1)
t = N.arange(nSteps)
coeffs = scipy.polyfit(t, rSq, 1)
hydro = 2 * kT / (6 * N.pi * eta * coeffs[0])
return hydro
| mit | 8,132,917,996,249,948,000 | 31.650485 | 79 | 0.567648 | false |
gsathya/pyptlib | examples/server.py | 4 | 1179 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""This is a server-side example of the pyptlib API."""
import sys
import pyptlib
import pyptlib.server
if __name__ == '__main__':
try:
managed_info = pyptlib.server.init(["blackfish", "bluefish"])
except pyptlib.config.EnvError, err:
print "pyptlib could not bootstrap ('%s')." % str(err)
sys.exit(1)
for transport, transport_bindaddr in managed_info['transports'].items():
# Try to spawn transports and make them listen in the ports
# that Tor wants. Report failure or success appropriately.
# 'transport' is a string with the name of the transport.
# 'transport_bindaddr' is the (<ip>,<port>) where that
# transport should listen for connections.
try:
bind_addrport = your_function_that_launches_transports(transport, transport_bindaddr)
except YourFailException, err:
reportFailure(transport, "Failed to launch ('%s')." % str(err))
continue
pyptlib.server.reportSuccess(transport, bind_addrport, None)
# Report back after we finish spawning transports.
pyptlib.server.reportEnd()
| bsd-3-clause | 2,169,129,348,734,376,400 | 32.685714 | 97 | 0.649703 | false |
MetricsGrimoire/Octopus | octopus/backends/puppet.py | 2 | 7416 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Santiago Dueñas <[email protected]>
#
import urlparse
import dateutil.parser
import requests
from octopus.backends import Backend, ProjectsIterator, ReleasesIterator
from octopus.model import Platform, Project, User, Release
PROJECTS_LIMIT = 20
RELEASES_LIMIT = 20
PUPPET_MODULES_PATH = '/v3/modules'
PUPPET_RELEASES_PATH = '/v3/releases'
class PuppetForge(Backend):
def __init__(self, session, url):
super(PuppetForge, self).__init__('puppet')
self.url = url
self.session = session
@classmethod
def set_arguments_subparser(cls, parser):
subparser = parser.add_parser('puppet', help='Puppet backend')
# Positional arguments
subparser.add_argument('url',
help='Puppet forge url')
def fetch(self):
platform = Platform.as_unique(self.session, url=self.url)
if not platform.id:
platform.type = 'puppet'
for project in self._projects(self.url, platform, self.session):
for release in self._releases(self.url, project, project.users[0], self.session):
project.releases.append(release)
platform.projects.append(project)
return platform
def _projects(self, url, platform, session):
return PuppetForgeProjectsIterator(url, platform, session)
def _releases(self, url, project, user, session):
return PuppetForgeReleasesIterator(url, project, user, session)
class PuppetForgeFetcher(object):
HEADERS = {'User-Agent': 'Octopus/0.0.1'}
def __init__(self, base_url):
self.base_url = base_url
self._last_url = None
@property
def last_url(self):
return self._last_url
def projects(self, offset, limit=PROJECTS_LIMIT):
params = {'offset' : offset,
'limit' : limit}
url = urlparse.urljoin(self.base_url, PUPPET_MODULES_PATH)
r = requests.get(url, params=params,
headers=PuppetForgeFetcher.HEADERS)
self._last_url = r.url
return r.json()
def releases(self, project, username, offset, limit=RELEASES_LIMIT):
module = username + '-' + project
params = {'offset' : offset,
'limit' : limit,
'module' : module}
url = urlparse.urljoin(self.base_url, PUPPET_RELEASES_PATH)
r = requests.get(url, params=params,
headers=PuppetForgeFetcher.HEADERS)
self._last_url = r.url
return r.json()
class PuppetForgeProjectsIterator(ProjectsIterator):
def __init__(self, base_url, platform, session):
super(PuppetForgeProjectsIterator, self).__init__()
self.fetcher = PuppetForgeFetcher(base_url)
self.base_url = base_url
self.session = session
self.platform = platform
self.projects = []
self.users = {}
self.has_next = True
self.offset = 0
def __iter__(self):
return self
def next(self):
# Check if there are parsed projects in the queue
if self.projects:
return self.projects.pop(0)
# Check if there are more projects to fetch
if not self.has_next:
raise StopIteration
# Fetch new set of projects
json = self.fetcher.projects(self.offset, PROJECTS_LIMIT)
if not json['pagination']['next']:
self.has_next = False
else:
self.offset += PROJECTS_LIMIT
for r in json['results']:
url = self.base_url + r['uri']
project = Project().as_unique(self.session, url=url,
platform=self.platform)
project.updated_on = unmarshal_timestamp(r['updated_at'])
if not project.id:
project.name = r['name']
project.created_on = unmarshal_timestamp(r['created_at'])
# Assign owner of the project
username = r['owner']['username']
if not username in self.users:
user = User().as_unique(self.session, username=username)
self.users[username] = user
else:
user = self.users[username]
project.users.append(user)
self.projects.append(project)
return self.projects.pop(0)
class PuppetForgeReleasesIterator(ReleasesIterator):
def __init__(self, base_url, project, user, session):
super(PuppetForgeReleasesIterator, self).__init__()
self.fetcher = PuppetForgeFetcher(base_url)
self.base_url = base_url
self.project = project
self.user = user
self.session = session
self.releases = []
self.has_next = True
self.offset = 0
def __iter__(self):
return self
def next(self):
# Check if there are parsed releases in the queue
if self.releases:
return self.releases.pop(0)
# Check if there are more releases to fetch
if not self.has_next:
raise StopIteration
# Fetch new set of releases
json = self.fetcher.releases(self.project.name, self.user.username,
self.offset, RELEASES_LIMIT)
if 'errors' in json:
print "Warning: " + json['errors'][0]
raise StopIteration
if not json['pagination']['next']:
self.has_next = False
else:
self.offset += RELEASES_LIMIT
for r in json['results']:
version = r['metadata']['version']
# Some json objects might not include the official
# name of the release. For those cases, build a new one.
if not 'name' in r['metadata']:
name = '-'.join((self.user.username, self.project.name, version))
else:
name = r['metadata']['name']
url = self.base_url + r['uri']
release = Release().as_unique(self.session,
url=url)
if not release.id:
release.name = name
release.version = version
release.user = self.user
release.file_url = self.base_url + r['file_uri']
release.created_on = unmarshal_timestamp(r['created_at'])
release.updated_on = unmarshal_timestamp(r['updated_at'])
self.releases.append(release)
return self.releases.pop(0)
def unmarshal_timestamp(ts):
# FIXME: store time zone data
return dateutil.parser.parse(ts).replace(tzinfo=None)
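# A minimal usage sketch (illustrative only; the session object and the forge
# URL below are assumptions, not part of this module):
#
#   backend = PuppetForge(session, 'https://forgeapi.puppetlabs.com')
#   platform = backend.fetch()
#   for project in platform.projects:
#       print project.name, len(project.releases)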
| gpl-3.0 | -8,845,889,792,911,088,000 | 29.767635 | 93 | 0.593931 | false |
csdms/coupling | pymt/mappers/mapper.py | 2 | 4485 | #! /bin/env python
"""
Examples
========
Point-to-point Mapping
----------------------
>>> import numpy as np
>>> from pymt.grids.map import RectilinearMap as Rectilinear
>>> src = Rectilinear([0, 1, 2], [0, 2])
>>> dst = Rectilinear([.5, 1.5, 2.5], [.25, 1.25])
>>> src.get_x()
array([ 0., 2., 0., 2., 0., 2.])
>>> src.get_y()
array([ 0., 0., 1., 1., 2., 2.])
>>> dst.get_x()
array([ 0.25, 1.25, 0.25, 1.25, 0.25, 1.25])
>>> dst.get_y()
array([ 0.5, 0.5, 1.5, 1.5, 2.5, 2.5])
>>> src_vals = np.arange(src.get_point_count(), dtype=np.float64)
Map the source values on the source points to the destination grid
using nearest neighbor.
>>> from pymt.mappers import NearestVal
>>> mapper = NearestVal()
>>> mapper.initialize(dst, src)
>>> mapper.run(src_vals)
array([ 0., 1., 2., 3., 4., 5.])
>>> mappers = find_mapper(dst, src)
>>> len(mappers)
3
>>> mappers[0].name
'PointToPoint'
>>> src_vals[2] = -999
>>> dst_vals = np.full(dst.get_point_count(), -1.)
>>> mapper.run(src_vals, dst_vals=dst_vals)
array([ 0., 1., -1., 3., 4., 5.])
Cell-to-point Mapping
---------------------
The source grid looks like,
::
(0) ------ (1)
| |
| |
(2) ------ (3)
| |
| |
(4) ------ (5)
| |
| |
(7) ------ (7)
>>> from pymt.mappers import CellToPoint
>>> from pymt.grids.map import UniformRectilinearMap as UniformRectilinear
>>> from pymt.grids.map import UnstructuredPointsMap as UnstructuredPoints
>>> (dst_x, dst_y) = (np.array([.45, 1.25, 3.5]), np.array([.75, 2.25, 3.25]))
>>> src = UniformRectilinear((2,4), (2, 1), (0, 0))
>>> dst = UnstructuredPoints(dst_x, dst_y)
>>> src_vals = np.arange(src.get_cell_count(), dtype=np.float64)
>>> mapper = CellToPoint()
>>> mapper.initialize(dst, src)
>>> mapper.run(src_vals, bad_val=-999)
array([ 0., 2., -999.])
>>> src_vals = np.arange(src.get_cell_count(), dtype=np.float64)
>>> src_vals[0] = -9999
>>> dst_vals = np.zeros(dst.get_point_count()) + 100
>>> _ = mapper.run(src_vals, dst_vals=dst_vals)
>>> dst_vals
array([ 100., 2., -999.])
Point-to-cell Mapping
---------------------
>>> from pymt.mappers.pointtocell import PointToCell
>>> (src_x, src_y) = (np.array ([.45, 1.25, 3.5, .0, 1.]),
... np.array ([.75, 2.25, 3.25, .9, 1.1]))
>>> src = UnstructuredPoints(src_x, src_y)
>>> dst = UniformRectilinear((2,4), (2, 1), (0, 0))
>>> src_vals = np.arange(src.get_point_count(), dtype=np.float64)
>>> mapper = PointToCell()
>>> mapper.initialize(dst, src)
>>> mapper.run(src_vals, bad_val=-999)
array([ 1.5, 4. , 1. ])
>>> mapper.run(src_vals, bad_val=-999, method=np.sum)
array([ 3., 4., 1.])
>>> src_vals[0] = -9999
>>> dst_vals = np.zeros(dst.get_cell_count ()) - 1
>>> _ = mapper.run(src_vals, dst_vals=dst_vals)
>>> dst_vals
array([-1., 4., 1.])
Point on cell edges
-------------------
>>> (src_x, src_y) = (np.array ([0, .5, 1., 2, 3.5]),
... np.array ([1., 1., .0, 3, 3.]))
>>> src = UnstructuredPoints(src_x, src_y)
>>> dst = UniformRectilinear((2,4), (2, 1), (0, 0))
>>> mapper = PointToCell()
>>> mapper.initialize(dst, src)
>>> src_vals = np.arange(src.get_point_count(), dtype=np.float64)
>>> dst_vals = np.zeros(dst.get_cell_count()) - 1
>>> _ = mapper.run(src_vals, dst_vals=dst_vals)
>>> dst_vals
array([ 1. , 0.5, 3. ])
A big mapper
============
>>> (m, n) = (20, 40)
>>> (src_x, src_y) = np.meshgrid(range(m), range(n))
>>> src = UnstructuredPoints(src_y, src_x)
>>> dst = UniformRectilinear((n + 1, m + 1), (1, 1), (-.5, -.5))
>>> mapper = PointToCell()
>>> mapper.initialize(dst, src)
>>> src_vals = np.arange(src.get_point_count(), dtype=np.float64)
>>> dst_vals = np.zeros(dst.get_cell_count(), dtype=np.float64) - 1
>>> _ = mapper.run(src_vals, dst_vals=dst_vals)
>>> from numpy.testing import assert_array_equal
>>> assert_array_equal(dst_vals, src_vals)
"""
from .celltopoint import CellToPoint
from .imapper import IncompatibleGridError
from .pointtocell import PointToCell
from .pointtopoint import NearestVal
_MAPPERS = [NearestVal, CellToPoint, PointToCell]
def find_mapper(dst_grid, src_grid):
"""Find appropriate mappers to map bewteen two grid-like objects"""
choices = []
for cls in _MAPPERS:
if cls.test(dst_grid, src_grid):
choices.append(cls())
if len(choices) == 0:
raise IncompatibleGridError(dst_grid.name, src_grid.name)
return choices
| mit | -6,161,093,386,491,784,000 | 25.075581 | 78 | 0.565663 | false |
shive/try_rsa | keygen.py | 1 | 2235 | #!/bin/env python
# -*- mode: python; coding: utf-8-sig -*-
#======================================================================================================================
__author__ = 'hshibuya <[email protected]>'
import sys
import random
from math import log2
def prime(base):
n = base
while True:
try:
for s in range(2, n - 1):
if 0 == n % s:
raise Exception()
return n
except Exception:
n += 1
continue
def main():
P = prime(0xfe00)
Q = prime(0xfc80)
N = P * Q
K = 0x10001
O = (P - 1) * (Q - 1)
# K * U - O * V = 1
# U = (O * V + 1) / K
U = []
for V in range(5000000):
if 0 == (O * V + 1) % K:
U.append(((O * V + 1) // K, V))
# U, V = random.choice(U)
U, V = sorted(U, key=lambda e: e[1])[0]
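    # Alternative sketch (not used above): U is just the modular inverse of K
    # mod O, so the brute-force search over V could be replaced by the
    # extended Euclidean algorithm (assumes gcd(K, O) == 1):
    #   def egcd(a, b):
    #       if b == 0:
    #           return a, 1, 0
    #       g, x, y = egcd(b, a % b)
    #       return g, y, x - (a // b) * y
    #   U = egcd(K, O)[1] % O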
print('P = %x' % P)
print('Q = %x' % Q)
print('N = %x / log2(N): %g' % (N, log2(N)))
print('K = %x' % K)
print('U = %x (V = %x)' % (U, V))
    # ### Key verification
    # for d in range(N):
    #     print('\rCHECK:%08d:%08d:' % (N - 1, d), end='')
    #     dd = pow(d, U, N)    ### encrypt with the private key
    #     ddd = pow(dd, K, N)  ### decrypt with the public key
    #     assert d == ddd, 'Key verification failed (%d,%d,%d)' % (d,dd,ddd)
    ### Encryption
ORIGIN = '古池や蛙とびこむ水の音'.encode('cp932')
print('len(ORIGIN) =', len(ORIGIN), ORIGIN)
origin_number = 0
for cc in reversed(ORIGIN):
origin_number = origin_number * 256 + cc
print(' origin_number: %.3f %x' % (log2(origin_number) / 32, origin_number))
encrypt_number = 0
num = origin_number
while num > 0:
d = num % N
dd = pow(d, U, N)
num //= N
encrypt_number = encrypt_number * N + dd
print('encrypt_number: %.3f %x' % (log2(encrypt_number) / 32, encrypt_number))
decrypt_number = 0
num = encrypt_number
while num > 0:
d = num % N
dd = pow(d, K, N)
num //= N
decrypt_number = decrypt_number * N + dd
print('decrypt_number: %.3f %x' % (log2(decrypt_number) / 32, decrypt_number))
if __name__ == '__main__':
main()
| unlicense | 2,182,299,872,550,145,300 | 25.9375 | 119 | 0.435267 | false |
hovo1990/deviser | generator/code_files/cpp_functions/ConcreteClassFunctions.py | 1 | 6149 | #!/usr/bin/env python
#
# @file ConcreteClassFunctions.py
# @brief class to create functions for concrete classes
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2015 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from util import strFunctions
class ConcreteClassFunctions():
"""Class for all protected functions"""
def __init__(self, language, is_cpp_api, is_list_of, class_object):
self.language = language
self.class_name = class_object['name']
self.is_cpp_api = is_cpp_api
self.is_list_of = is_list_of
if is_list_of:
self.child_name = class_object['lo_child']
else:
self.child_name = ''
if is_cpp_api:
self.object_name = self.class_name
self.object_child_name = self.child_name
else:
if is_list_of:
self.object_name = 'ListOf_t'
else:
self.object_name = self.class_name + '_t'
self.object_child_name = self.child_name + '_t'
self.concretes = class_object['concretes']
# useful variables
if not self.is_cpp_api and self.is_list_of:
self.struct_name = self.object_child_name
else:
self.struct_name = self.object_name
self.plural = strFunctions.plural(self.child_name)
self.indef_name = strFunctions.get_indefinite(self.object_child_name)
self.abbrev_parent = strFunctions.abbrev_name(self.object_name)
self.abbrev_child = strFunctions.abbrev_name(self.child_name)
if self.is_cpp_api is False:
self.true = '@c 1'
self.false = '@c 0'
else:
self.true = '@c true'
self.false = '@c false'
########################################################################
# Function for writing isFoo functions
def write_is_foo(self, index):
# not applicable to list_of
if self.is_list_of:
return
if self.is_cpp_api:
name = 'abstract \"{0}\"'.format(self.object_name)
conc_type = self.concretes[index]['element']
conc_name = self.concretes[index]['element']
else:
name = '{0}'.format(self.object_name)
conc_type = self.concretes[index]['element'] + '_t'
conc_name = self.concretes[index]['element']
# create comment parts
title_line = 'Predicate returning {0} if this {1} is of type {2}'\
.format(self.true, name, conc_type)
params = []
if not self.is_cpp_api:
params.append('@param {0} the {1} structure.'
.format(self.abbrev_parent, self.object_name))
return_lines = ['@return {0} if this {1} is of type {2}, {3} '
'otherwise'.format(self.true, name, conc_type,
self.false)]
additional = []
# create the function declaration
if self.is_cpp_api:
function = 'is{0}'.format(conc_name)
return_type = 'bool'
arguments = []
else:
function = '{0}_is{1}'.format(self.class_name, conc_name)
return_type = 'int'
arguments = ['const {0} * '
'{1}'.format(self.object_name, self.abbrev_parent)]
if self.is_cpp_api:
line = ['return dynamic_cast<const {0}*>(this) != '
'NULL'.format(conc_name)]
else:
line = ['return ({0} != NULL) ? static_cast<int>({0}'
'->is{1}()) : 0'.format(self.abbrev_parent, conc_name)]
code = [self.create_code_block('line', line)]
# return the parts
return dict({'title_line': title_line,
'params': params,
'return_lines': return_lines,
'additional': additional,
'function': function,
'return_type': return_type,
'arguments': arguments,
'constant': True,
'virtual': True,
'object_name': self.struct_name,
'implementation': code})
@staticmethod
def create_code_block(code_type, lines):
code = dict({'code_type': code_type, 'code': lines})
return code
| lgpl-2.1 | 2,138,370,834,405,310,000 | 41.116438 | 78 | 0.574077 | false |
lingdb/CoBL-public | ielex/lexicon/migrations/0080_fix_cognateClassCitations.py | 1 | 2671 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.db import migrations
import datetime
data = [{u'comment': u'From *sw\u012b(s) '
'which reflects the PIE acc. 2 pl. *us-we.',
u'source_id': 273,
u'reliability': u'A',
u'pages': u'48-49',
u'modified': datetime.datetime(2015, 11, 2, 11, 9, 2, 72904)},
{u'comment': u"1. PIE *i\u032fu\u0301- 'you', "
"pronominal stem, 2nd person non-singular, "
"only nominative, suppletive oblique stem "
"*u\u032fo\u0301-.\r\n2. PIE *u\u032fo\u0301- "
"'you', pronominal stem, 2nd person non-singular, "
"oblique. Suppletive nominative PIE "
"*i\u032fu\u0301- 'you'. ",
u'source_id': 294,
u'reliability': u'A',
u'pages': u'388-90, 855-860',
u'modified': datetime.datetime(2015, 12, 9, 22, 4, 20, 365304)},
{u'comment': u'For the Slavic forms: "The anlaut of the pronoun '
'was apparently remodelled after the oblique cases. '
'This must have occurred before the delabialization '
'of \xfc, which was an allophone of /u/ '
'after a preceding *j."',
u'source_id': 81,
u'reliability': u'A',
u'pages': u'533',
u'modified': datetime.datetime(2016, 7, 1, 13, 23, 49, 867057)}]
def forwards_func(apps, schema_editor):
'''
This migration was added as a reaction to problems
with merging cognate classes described by @CasFre in [1].
https://github.com/lingdb/CoBL/issues/197
'''
CognateClass = apps.get_model('lexicon', 'CognateClass')
CognateClassCitation = apps.get_model('lexicon', 'CognateClassCitation')
# Id that needs to get CognateClassCitations attached:
target = 5822
try:
cognateClass = CognateClass.objects.get(id=target)
sourceIds = set([c.source_id for c in
cognateClass.cognateclasscitation_set.all()])
for d in data:
if d['source_id'] not in sourceIds:
CognateClassCitation.objects.create(
cognate_class_id=target, **d)
except CognateClass.DoesNotExist:
pass # Nothing to do
def reverse_func(apps, schema_editor):
print('Nothing to do for reverse_func of 0080_fix_cognateClassCitations')
class Migration(migrations.Migration):
dependencies = [('lexicon', '0079_auto_20160629_1150')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
| bsd-2-clause | -7,152,170,905,480,466,000 | 38.865672 | 77 | 0.578435 | false |
larsbergstrom/servo | tests/wpt/web-platform-tests/html/cross-origin-opener-policy/resources/coop-coep.py | 5 | 1224 | def main(request, response):
coop = request.GET.first("coop")
coep = request.GET.first("coep")
redirect = request.GET.first("redirect", None)
if coop != "":
response.headers.set("Cross-Origin-Opener-Policy", coop)
if coep != "":
response.headers.set("Cross-Origin-Embedder-Policy", coep)
if redirect != None:
response.status = 302
response.headers.set("Location", redirect)
return
# This uses an <iframe> as BroadcastChannel is same-origin bound.
response.content = """
<!doctype html>
<meta charset=utf-8>
<script src="/common/get-host-info.sub.js"></script>
<iframe></iframe>
<script>
const navigate = new URL(location).searchParams.get("navigate");
if (navigate !== null) {
self.location = navigate;
} else {
const iframe = document.querySelector("iframe");
iframe.onload = () => {
const payload = { name: self.name, opener: !!self.opener };
iframe.contentWindow.postMessage(payload, "*");
};
const channelName = new URL(location).searchParams.get("channel");
iframe.src = `${get_host_info().HTTPS_ORIGIN}/html/cross-origin-opener-policy/resources/postback.html?channel=${channelName}`;
}
</script>
"""
| mpl-2.0 | 6,312,795,498,351,184,000 | 33.971429 | 130 | 0.64951 | false |
hackndev/dalboot | dalmod/pdb.py | 1 | 4891 | # Simple PDB file format parser
# Author: Alex Osborne <[email protected]>
# Created: 27 Nov 2006
#
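# Layout summary (as implemented below): a fixed-size database header
# (see Pdb.header_fmt), then a record/resource list header with an entry
# count, the per-entry headers, two bytes of padding, and finally the raw
# record/resource data located via the stored offsets.
#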
import struct
import sys
def main():
if len(sys.argv) > 1:
fn = sys.argv[1]
else:
fn = 'brahma-spl.pdb'
out = file('test.pdb','wb')
pdb = Pdb()
pdb.read(file(fn,'rb'))
print str(pdb)
pdb.write(out)
def trunc(x):
return x[:x.index('\0')]
class Pdb:
def __init__(self):
pass
attr_resdb = 1
def isprc(self):
return self.attr & Pdb.attr_resdb == Pdb.attr_resdb
def read(self, f):
self.read_header(f)
self.read_rlheader(f)
if self.isprc():
self.read_resourcelist(f)
else:
self.read_recordlist(f)
self.read_resources(f)
def write(self, f):
self.write_header(f)
self.write_rlheader(f)
if self.isprc():
self.write_resourcelist(f)
self.write_resources(f)
else:
print 'TODO: pdb record list'
header_fmt = ('>' ## Database header
+ '32s' # name
+ 'H' # attributes
+ 'H' # version
+ 'I' # creation date
+ 'I' # modification date
+ 'I' # backup date
+ 'I' # modification number
+ 'I' # app info id
+ 'I' # sort info id
+ '4s' # type
+ '4s' # creator
+ 'I') # unique id seed
header_sz = struct.calcsize(header_fmt)
def read_header(self, f):
( self.name, self.attr, self.ver, self.created, self.modified,
self.backupdate, self.modnumber,
self.appinfoid, self.sortinfoid, self.type, self.creator,
self.seed ) = struct.unpack(Pdb.header_fmt,
f.read(Pdb.header_sz))
self.name = trunc(self.name)
def write_header(self, f):
f.write(struct.pack(Pdb.header_fmt,
self.name, self.attr, self.ver, self.created, self.modified,
self.backupdate, self.modnumber,
self.appinfoid, self.sortinfoid, self.type, self.creator,
self.seed))
rlheader_fmt = ('>' ## Record list header
+ 'I' # next record list pointer (deprecated)
+ 'H') # number of records
rlheader_sz = struct.calcsize(rlheader_fmt)
def read_rlheader(self, f):
(self.nextrl, self.numrecs) = struct.unpack(Pdb.rlheader_fmt,
f.read(Pdb.rlheader_sz))
if self.nextrl != 0:
print 'Warning: Chained record lists found.'
print 'Ignoring secondary lists.'
def write_rlheader(self, f):
f.write(struct.pack(Pdb.rlheader_fmt, 0, len(self.resources)))
def read_recordlist(self, f):
def read_record(i):
rec = PdbRecord()
rec.read_header(f)
return rec
self.resources = map(read_record, range(self.numrecs))
if f.read(2) != '\0\0':
print 'Warning: non-zero record list padding'
def read_resourcelist(self, f):
def read_resource(i):
rsrc = PrcResource()
rsrc.read_header(f)
return rsrc
self.resources = map(read_resource, range(self.numrecs))
if f.read(2) != '\0\0':
print 'Warning: non-zero resource list padding'
def write_resourcelist(self, f):
offset = 2+f.tell()+PrcResource.header_sz * len(self.resources)
for rsrc in self.resources:
if rsrc.offset != offset:
print rsrc.offset, offset
rsrc.offset = offset
rsrc.write_header(f)
offset += len(rsrc.data)
f.write('\0\0') # padding
def read_resources(self, f):
pos = f.tell()
for i, rsrc in enumerate(self.resources):
if pos != rsrc.offset:
print 'Warning: out of sync', pos, rsrc.offset
pos = rsrc.offset
f.seek(pos)
if i+1 < len(self.resources):
rsrc.data = f.read(self.resources[i+1].offset
- pos)
else:
rsrc.data = f.read()
pos += len(rsrc.data)
def write_resources(self, f):
for rsrc in self.resources:
f.write(rsrc.data)
def __str__(self):
return (str((self.name, self.attr, self.ver, self.created,
self.modified, self.backupdate, self.modnumber, self.appinfoid,
self.sortinfoid, self.type,
self.creator, self.seed )) +
'\n\n%d records:\n' % self.numrecs +
'\n'.join([repr(x) for x in self.resources]))
class PrcResource:
header_fmt = ('>' ## Record header
+ '4s' # resource type
+ 'H' # id
+ 'I') # data offset
header_sz = struct.calcsize(header_fmt)
def read_header(self, f):
(self.type, self.id, self.offset) = struct.unpack(
PrcResource.header_fmt,
f.read(PrcResource.header_sz))
def write_header(self, f):
f.write(struct.pack(PrcResource.header_fmt, self.type, self.id,
self.offset))
def __repr__(self):
return '<PrcResource %s[%d] %d bytes>' % (self.type, self.id,
len(self.data))
class PdbRecord:
header_fmt = ('>' ## Record header
+ 'I' # data offset
+ 'B' # attributes
+ '3s') # uniqueID
header_sz = struct.calcsize(header_fmt)
def read_header(self, f):
(self.offset, self.attrib, self.id) = struct.unpack(
PdbRecord.header_fmt,
f.read(PdbRecord.header_sz))
def write_header(self, f):
f.write(struct.pack(PdbRecord.header_fmt, self.offset, self.attrib,
self.id))
def __repr__(self):
return '<PdbRecord %s[%d] %d bytes>' % (repr(self.id), self.attrib,
len(self.data))
if __name__ == '__main__': main()
| gpl-2.0 | -230,827,279,061,851,680 | 25.437838 | 69 | 0.643018 | false |
unor/schemaorg | lib/rdflib/plugins/sparql/evalutils.py | 23 | 2644 | import collections
from rdflib.term import Variable, Literal, BNode, URIRef
from rdflib.plugins.sparql.operators import EBV
from rdflib.plugins.sparql.parserutils import Expr, CompValue
from rdflib.plugins.sparql.sparql import SPARQLError, NotBoundError
def _diff(a, b, expr):
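    # SPARQL algebra Diff (used for OPTIONAL/filter scoping): keep each
    # solution of a that is incompatible with, or fails expr against, every
    # solution of b.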
res = set()
for x in a:
if all(not x.compatible(y) or not _ebv(expr, x.merge(y)) for y in b):
res.add(x)
return res
def _minus(a, b):
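    # SPARQL MINUS: drop solutions of a that are compatible with (and share
    # at least one variable with) some solution of b.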
for x in a:
if all((not x.compatible(y)) or x.disjointDomain(y) for y in b):
yield x
def _join(a, b):
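    # SPARQL Join: yield the merge of every compatible pair of solutions.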
for x in a:
for y in b:
if x.compatible(y):
yield x.merge(y)
def _ebv(expr, ctx):
"""
Return true/false for the given expr
Either the expr is itself true/false
or evaluates to something, with the given ctx
an error is false
"""
try:
return EBV(expr)
except SPARQLError:
pass
if isinstance(expr, Expr):
try:
return EBV(expr.eval(ctx))
except SPARQLError:
return False # filter error == False
elif isinstance(expr, CompValue):
raise Exception(
"Weird - filter got a CompValue without evalfn! %r" % expr)
elif isinstance(expr, Variable):
try:
return EBV(ctx[expr])
except:
return False
return False
def _eval(expr, ctx):
if isinstance(expr, (Literal, URIRef)):
return expr
if isinstance(expr, Expr):
return expr.eval(ctx)
elif isinstance(expr, Variable):
try:
return ctx[expr]
except KeyError:
return NotBoundError("Variable %s is not bound" % expr)
elif isinstance(expr, CompValue):
raise Exception(
"Weird - _eval got a CompValue without evalfn! %r" % expr)
else:
raise Exception("Cannot eval thing: %s (%s)" % (expr, type(expr)))
def _filter(a, expr):
for c in a:
if _ebv(expr, c):
yield c
def _fillTemplate(template, solution):
"""
For construct/deleteWhere and friends
Fill a triple template with instantiated variables
"""
bnodeMap = collections.defaultdict(BNode)
for t in template:
s, p, o = t
_s = solution.get(s)
_p = solution.get(p)
_o = solution.get(o)
# instantiate new bnodes for each solution
_s, _p, _o = [bnodeMap[x] if isinstance(
x, BNode) else y for x, y in zip(t, (_s, _p, _o))]
if _s is not None and \
_p is not None and \
_o is not None:
yield (_s, _p, _o)
| apache-2.0 | 4,553,954,542,530,082,000 | 22.81982 | 77 | 0.570348 | false |
nysan/yocto-autobuilder | lib/python2.6/site-packages/SQLAlchemy-0.7.1-py2.6-linux-x86_64.egg/sqlalchemy/sql/__init__.py | 8 | 1116 | # sql/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.sql.expression import (
Alias,
ClauseElement,
ColumnCollection,
ColumnElement,
CompoundSelect,
Delete,
FromClause,
Insert,
Join,
Select,
Selectable,
TableClause,
Update,
alias,
and_,
asc,
between,
bindparam,
case,
cast,
collate,
column,
delete,
desc,
distinct,
except_,
except_all,
exists,
extract,
func,
insert,
intersect,
intersect_all,
join,
label,
literal,
literal_column,
modifier,
not_,
null,
or_,
outerjoin,
outparam,
over,
select,
subquery,
table,
text,
tuple_,
type_coerce,
union,
union_all,
update,
)
from sqlalchemy.sql.visitors import ClauseVisitor
__tmp = locals().keys()
__all__ = sorted([i for i in __tmp if not i.startswith('__')])
| gpl-2.0 | -9,017,723,915,355,155,000 | 15.909091 | 84 | 0.600358 | false |
tavisrudd/eventlet | tests/test__socket_errors.py | 8 | 1872 | import unittest
import socket as _original_sock
from eventlet import api
from eventlet.green import socket
class TestSocketErrors(unittest.TestCase):
def test_connection_refused(self):
# open and close a dummy server to find an unused port
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('127.0.0.1', 0))
server.listen(1)
port = server.getsockname()[1]
server.close()
del server
s = socket.socket()
try:
s.connect(('127.0.0.1', port))
self.fail("Shouldn't have connected")
except socket.error, ex:
code, text = ex.args
assert code in [111, 61, 10061], (code, text)
assert 'refused' in text.lower(), (code, text)
def test_timeout_real_socket(self):
""" Test underlying socket behavior to ensure correspondence
between green sockets and the underlying socket module. """
return self.test_timeout(socket=_original_sock)
def test_timeout(self, socket=socket):
""" Test that the socket timeout exception works correctly. """
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('127.0.0.1', 0))
server.listen(1)
port = server.getsockname()[1]
s = socket.socket()
s.connect(('127.0.0.1', port))
cs, addr = server.accept()
cs.settimeout(1)
try:
try:
cs.recv(1024)
self.fail("Should have timed out")
except socket.timeout, ex:
assert hasattr(ex, 'args')
assert len(ex.args) == 1
assert ex.args[0] == 'timed out'
finally:
s.close()
cs.close()
server.close()
if __name__=='__main__':
unittest.main()
| mit | -8,021,932,500,938,966,000 | 32.428571 | 71 | 0.558761 | false |
molgun/ocl_web | ocl_web/libs/ocl/api_resource.py | 7 | 1588 | # # TODO: I believe that this file is not used -- retire?
# import simplejson as json
# class ApiResource(object):
# def __init__(self):
# self.uuid = ""
# self.url = ""
# self.display = ""
# self.display_locale = ""
# self.retired = ""
# self.properties = {}
# self.auditInfo = {}
# self.resourceVersion = ""
# def set_values(self, dct):
# # validate values??
# for key, value in dct.iteritems():
# # print key, value
# # raw_input()
# self.__setattr__(key, value)
# def json(self):
# return json.dumps(
# dict(self.__dict__.items() + {'__type__': self.__class__.__name__}.items()))
# def __repr__(self):
# return '(' + self.uuid + ') ' + self.display + ' [' + self.display_locale + ']'
# def object_hooker(dct):
# class_names = {
# 'OclMapType': MapType,
# 'OclConcept': Concept,
# 'OclConceptClass': ConceptClass,
# 'OclConceptDataType': ConceptDataType,
# 'OclCollection': Collection,
# 'OclMapping': Mapping,
# 'OclSource': Source,
# 'OclStar': Star,
# 'OclUser': User
# }
# if '__type__' in dct:
# class_name = dct['__type__']
# try:
# # Instantiate class based on value in the variable
# x = class_names[class_name]()
# x.set_values(dct)
# return x
# except KeyError:
# # handle error - Class is not defined
# pass
# return dct
| mpl-2.0 | -3,989,711,067,591,392,000 | 27.872727 | 90 | 0.488035 | false |
ppasq/geonode | geonode/favorite/urls.py | 8 | 1627 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'^document/(?P<id>\d+)$',
views.favorite, {'subject': 'document'},
name='add_favorite_document'
),
url(
r'^map/(?P<id>\d+)$',
views.favorite, {'subject': 'map'},
name='add_favorite_map'
),
url(
r'^layer/(?P<id>\d+)$',
views.favorite, {'subject': 'layer'},
name='add_favorite_layer'
),
url(
r'^user/(?P<id>\d+)$',
views.favorite, {'subject': 'user'},
name='add_favorite_user'
),
url(
r'^(?P<id>\d+)/delete$',
views.delete_favorite,
name='delete_favorite'
),
url(
r'^list/$',
views.get_favorites,
name='favorite_list'
),
]
| gpl-3.0 | 2,795,132,803,671,436,300 | 28.581818 | 73 | 0.548248 | false |
hujiajie/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_multipart.py | 13 | 6934 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cgi
import logging
import threading
import Queue
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.breakpad.dump_reader import DumpReader
_log = logging.getLogger(__name__)
class DumpReaderMultipart(DumpReader):
"""Base class for Linux and Android breakpad dump reader."""
def __init__(self, host, build_dir):
super(DumpReaderMultipart, self).__init__(host, build_dir)
self._webkit_finder = WebKitFinder(host.filesystem)
self._breakpad_tools_available = None
self._generated_symbols = False
def check_is_functional(self):
return self._check_breakpad_tools_available()
def _get_pid_from_dump(self, dump_file):
dump = self._read_dump(dump_file)
if not dump:
return None
if 'pid' in dump:
return dump['pid'][0]
return None
def _get_stack_from_dump(self, dump_file):
dump = self._read_dump(dump_file)
if not dump:
return None
if not 'upload_file_minidump' in dump:
return None
self._generate_breakpad_symbols_if_necessary()
f, temp_name = self._host.filesystem.open_binary_tempfile('dmp')
f.write("\r\n".join(dump['upload_file_minidump']))
f.close()
cmd = [self._path_to_minidump_stackwalk(), temp_name, self._symbols_dir()]
try:
stack = self._host.executive.run_command(cmd, return_stderr=False)
except:
_log.warning('Failed to execute "%s"' % ' '.join(cmd))
stack = None
finally:
self._host.filesystem.remove(temp_name)
return stack
def _read_dump(self, dump_file):
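        # Breakpad uploads are MIME multipart; recover the boundary from the
        # first line and let cgi.parse_multipart() split the dump into fields.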
with self._host.filesystem.open_binary_file_for_reading(dump_file) as f:
boundary = f.readline().strip()[2:]
f.seek(0)
try:
data = cgi.parse_multipart(f, {'boundary': boundary})
return data
except:
pass
return None
def _check_breakpad_tools_available(self):
if self._breakpad_tools_available != None:
return self._breakpad_tools_available
REQUIRED_BREAKPAD_TOOLS = [
'dump_syms',
'minidump_stackwalk',
]
result = True
for binary in REQUIRED_BREAKPAD_TOOLS:
full_path = self._host.filesystem.join(self._build_dir, binary)
if not self._host.filesystem.exists(full_path):
result = False
_log.error('Unable to find %s' % binary)
_log.error(' at %s' % full_path)
if not result:
_log.error(" Could not find breakpad tools, unexpected crashes won't be symbolized")
_log.error(' Did you build the target blink_tests?')
_log.error('')
self._breakpad_tools_available = result
return self._breakpad_tools_available
def _path_to_minidump_stackwalk(self):
return self._host.filesystem.join(self._build_dir, "minidump_stackwalk")
def _path_to_generate_breakpad_symbols(self):
return self._webkit_finder.path_from_chromium_base("components", "crash", "content", "tools", "generate_breakpad_symbols.py")
def _symbols_dir(self):
return self._host.filesystem.join(self._build_dir, 'content_shell.syms')
def _generate_breakpad_symbols_if_necessary(self):
if self._generated_symbols:
return
self._generated_symbols = True
_log.debug("Generating breakpad symbols")
queue = Queue.Queue()
thread = threading.Thread(target=_symbolize_keepalive, args=(queue,))
thread.start()
try:
for binary in self._binaries_to_symbolize():
_log.debug(' Symbolizing %s' % binary)
full_path = self._host.filesystem.join(self._build_dir, binary)
cmd = [
self._path_to_generate_breakpad_symbols(),
'--binary=%s' % full_path,
'--symbols-dir=%s' % self._symbols_dir(),
'--build-dir=%s' % self._build_dir,
]
try:
self._host.executive.run_command(cmd)
except:
_log.error('Failed to execute "%s"' % ' '.join(cmd))
finally:
queue.put(None)
thread.join()
_log.debug("Done generating breakpad symbols")
def _binaries_to_symbolize(self):
"""This routine must be implemented by subclasses.
Returns an array of binaries that need to be symbolized."""
raise NotImplementedError()
def _symbolize_keepalive(queue):
while True:
_log.debug("waiting for symbolize to complete")
try:
msg = queue.get(block=True, timeout=60)
return
except Queue.Empty:
pass
class DumpReaderLinux(DumpReaderMultipart):
"""Linux breakpad dump reader."""
def _binaries_to_symbolize(self):
return ['content_shell', 'libtest_netscape_plugin.so', 'libosmesa.so']
def _file_extension(self):
return 'dmp'
class DumpReaderAndroid(DumpReaderMultipart):
"""Android breakpad dump reader."""
def _binaries_to_symbolize(self):
return ['lib/libcontent_shell_content_view.so']
def _file_extension(self):
return 'dmp'
| bsd-3-clause | 2,830,638,199,103,254,500 | 35.687831 | 133 | 0.628209 | false |
CamelBackNotation/CarnotKE | jyhton/Lib/test/test_complex_jy.py | 9 | 1910 | """Misc complex tests
Made for Jython.
"""
import unittest
from test import test_support
INF, NINF, NAN = map(float, ("inf", "-inf", "nan"))
class ComplexTest(unittest.TestCase):
def test_dunder_coerce(self):
self.assertEqual(complex.__coerce__(1+1j, None), NotImplemented)
self.assertRaises(TypeError, complex.__coerce__, None, 1+2j)
def test_pow(self):
class Foo(object):
def __rpow__(self, other):
return other ** 2
# regression in 2.5 alphas
self.assertEqual((4+0j) ** Foo(), (16+0j))
def test___nonzero__(self):
self.assertTrue(0.25+0j)
self.assertTrue(25j)
def test_abs_big(self):
# These are close to overflow but don't
close = [ complex( 1.794e+308, 0.000e+00),
complex( 1.119e+308, 1.403e+308),
complex(-3.992e+307, 1.749e+308),
complex(-1.617e+308, 7.785e+307),
complex(-1.617e+308,-7.785e+307),
complex(-3.992e+307,-1.749e+308) ]
# These are a little bigger and do overflow
over = [ complex( 1.130e+308, 1.417e+308),
complex(-4.032e+307, 1.767e+308),
complex(-1.633e+308, 7.863e+307),
complex(-1.633e+308,-7.863e+307),
complex(-4.032e+307,-1.767e+308) ]
# If you start with infinity, the return is infinity, no overflow
infinities = [ complex(INF, 1), complex(NINF, 2), complex(3, INF), complex(4, NINF) ]
for z in close :
self.assertAlmostEquals(abs(z), 1.794e+308, delta=0.01e+308)
for z in over :
self.assertRaises(OverflowError, abs, z)
for z in infinities :
self.assertEqual(abs(z), INF)
def test_main():
test_support.run_unittest(ComplexTest)
if __name__ == "__main__":
test_main()
| apache-2.0 | 7,105,574,746,260,678,000 | 33.727273 | 93 | 0.548691 | false |
manassolanki/frappe | frappe/desk/report/todo/todo.py | 18 | 1298 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import getdate
def execute(filters=None):
priority_map = {"High": 3, "Medium": 2, "Low": 1}
todo_list = frappe.get_list('ToDo', fields=["name", "date", "description",
"priority", "reference_type", "reference_name", "assigned_by", "owner"],
filters={'status': 'Open'})
todo_list.sort(key=lambda todo: (priority_map.get(todo.priority, 0),
todo.date and getdate(todo.date) or getdate("1900-01-01")), reverse=True)
columns = [_("ID")+":Link/ToDo:90", _("Priority")+"::60", _("Date")+ ":Date",
_("Description")+"::150", _("Assigned To/Owner") + ":Data:120",
_("Assigned By")+":Data:120", _("Reference")+"::200"]
result = []
for todo in todo_list:
if todo.owner==frappe.session.user or todo.assigned_by==frappe.session.user:
if todo.reference_type:
todo.reference = """<a href="#Form/%s/%s">%s: %s</a>""" % (todo.reference_type,
todo.reference_name, todo.reference_type, todo.reference_name)
else:
todo.reference = None
result.append([todo.name, todo.priority, todo.date, todo.description,
todo.owner, todo.assigned_by, todo.reference])
return columns, result | mit | 80,248,676,777,061,330 | 37.205882 | 83 | 0.665639 | false |
kevinmel2000/sl4a | python/src/Tools/bgen/bgen/bgenGenerator.py | 43 | 8834 | from bgenOutput import *
from bgenType import *
from bgenVariable import *
Error = "bgenGenerator.Error"
DEBUG=0
# Strings to specify argument transfer modes in generator calls
IN = "in"
OUT = "out"
INOUT = IN_OUT = "in-out"
class BaseFunctionGenerator:
def __init__(self, name, condition=None, callname=None, modifiers=None):
if DEBUG: print "<--", name
self.name = name
if callname:
self.callname = callname
else:
self.callname = name
self.prefix = name
self.objecttype = "PyObject" # Type of _self argument to function
self.condition = condition
self.modifiers = modifiers
def setprefix(self, prefix):
self.prefix = prefix
def checkgenerate(self):
return True
def generate(self):
if not self.checkgenerate():
return
if DEBUG: print "-->", self.name
if self.condition:
Output()
Output(self.condition)
self.functionheader()
self.functionbody()
self.functiontrailer()
if self.condition:
Output("#endif")
def functionheader(self):
Output()
Output("static PyObject *%s_%s(%s *_self, PyObject *_args)",
self.prefix, self.name, self.objecttype)
OutLbrace()
Output("PyObject *_res = NULL;")
def functionbody(self):
Output("/* XXX To be provided */")
def functiontrailer(self):
OutRbrace()
def reference(self, name = None):
if not self.checkgenerate():
return
if name is None:
name = self.name
docstring = self.docstring()
if self.condition:
Output()
Output(self.condition)
Output("{\"%s\", (PyCFunction)%s_%s, 1,", name, self.prefix, self.name)
Output(" PyDoc_STR(%s)},", stringify(docstring))
if self.condition:
Output("#endif")
def docstring(self):
return None
def __cmp__(self, other):
if not hasattr(other, 'name'):
return cmp(id(self), id(other))
return cmp(self.name, other.name)
_stringify_map = {'\n': '\\n', '\t': '\\t', '\r': '\\r', '\b': '\\b',
'\e': '\\e', '\a': '\\a', '\f': '\\f', '"': '\\"'}
def stringify(str):
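    # Render a Python string as a double-quoted C string literal for the
    # generated source (None becomes a bare NULL); e.g. a newline is emitted
    # as \n and other non-printable bytes as octal escapes.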
if str is None: return "NULL"
res = '"'
map = _stringify_map
for c in str:
if map.has_key(c): res = res + map[c]
elif ' ' <= c <= '~': res = res + c
else: res = res + '\\%03o' % ord(c)
res = res + '"'
return res
class ManualGenerator(BaseFunctionGenerator):
def __init__(self, name, body, condition=None):
BaseFunctionGenerator.__init__(self, name, condition=condition)
self.body = body
def functionbody(self):
Output("%s", self.body)
def setselftype(self, selftype, itselftype):
self.objecttype = selftype
self.itselftype = itselftype
class FunctionGenerator(BaseFunctionGenerator):
def __init__(self, returntype, name, *argumentList, **conditionlist):
BaseFunctionGenerator.__init__(self, name, **conditionlist)
self.returntype = returntype
self.argumentList = []
self.setreturnvar()
self.parseArgumentList(argumentList)
self.prefix = "XXX" # Will be changed by setprefix() call
self.itselftype = None # Type of _self->ob_itself, if defined
def setreturnvar(self):
if self.returntype:
self.rv = self.makereturnvar()
self.argumentList.append(self.rv)
else:
self.rv = None
def makereturnvar(self):
return Variable(self.returntype, "_rv", OutMode)
def setselftype(self, selftype, itselftype):
self.objecttype = selftype
self.itselftype = itselftype
def parseArgumentList(self, argumentList):
iarg = 0
for type, name, mode in argumentList:
iarg = iarg + 1
if name is None: name = "_arg%d" % iarg
arg = Variable(type, name, mode)
self.argumentList.append(arg)
def docstring(self):
input = []
output = []
for arg in self.argumentList:
if arg.flags == ErrorMode or arg.flags == SelfMode:
continue
if arg.type is None:
str = 'void'
else:
if hasattr(arg.type, 'typeName'):
typeName = arg.type.typeName
if typeName is None: # Suppressed type
continue
else:
typeName = "?"
print "Nameless type", arg.type
str = typeName + ' ' + arg.name
if arg.mode in (InMode, InOutMode):
input.append(str)
if arg.mode in (InOutMode, OutMode):
output.append(str)
if not input:
instr = "()"
else:
instr = "(%s)" % ", ".join(input)
if not output or output == ["void"]:
outstr = "None"
else:
outstr = "(%s)" % ", ".join(output)
return instr + " -> " + outstr
def functionbody(self):
self.declarations()
self.precheck()
self.getargs()
self.callit()
self.checkit()
self.returnvalue()
def declarations(self):
for arg in self.argumentList:
arg.declare()
def getargs(self):
sep = ",\n" + ' '*len("if (!PyArg_ParseTuple(")
fmt, lst = self.getargsFormatArgs(sep)
Output("if (!PyArg_ParseTuple(_args, \"%s\"%s))", fmt, lst)
IndentLevel()
Output("return NULL;")
DedentLevel()
for arg in self.argumentList:
if arg.flags == SelfMode:
continue
if arg.mode in (InMode, InOutMode):
arg.getargsCheck()
def getargsFormatArgs(self, sep):
fmt = ""
lst = ""
for arg in self.argumentList:
if arg.flags == SelfMode:
continue
if arg.mode in (InMode, InOutMode):
arg.getargsPreCheck()
fmt = fmt + arg.getargsFormat()
args = arg.getargsArgs()
if args:
lst = lst + sep + args
return fmt, lst
def precheck(self):
pass
def beginallowthreads(self):
pass
def endallowthreads(self):
pass
def callit(self):
args = ""
s = "%s%s(" % (self.getrvforcallit(), self.callname)
sep = ",\n" + ' '*len(s)
for arg in self.argumentList:
if arg is self.rv:
continue
s = arg.passArgument()
if args: s = sep + s
args = args + s
self.beginallowthreads()
Output("%s%s(%s);",
self.getrvforcallit(), self.callname, args)
self.endallowthreads()
def getrvforcallit(self):
if self.rv:
return "%s = " % self.rv.name
else:
return ""
def checkit(self):
for arg in self.argumentList:
arg.errorCheck()
def returnvalue(self):
sep = ",\n" + ' '*len("return Py_BuildValue(")
fmt, lst = self.mkvalueFormatArgs(sep)
if fmt == "":
Output("Py_INCREF(Py_None);")
Output("_res = Py_None;");
else:
Output("_res = Py_BuildValue(\"%s\"%s);", fmt, lst)
tmp = self.argumentList[:]
tmp.reverse()
for arg in tmp:
if not arg: continue
arg.cleanup()
Output("return _res;")
def mkvalueFormatArgs(self, sep):
fmt = ""
lst = ""
for arg in self.argumentList:
if not arg: continue
if arg.flags == ErrorMode: continue
if arg.mode in (OutMode, InOutMode):
arg.mkvaluePreCheck()
fmt = fmt + arg.mkvalueFormat()
lst = lst + sep + arg.mkvalueArgs()
return fmt, lst
class MethodGenerator(FunctionGenerator):
def parseArgumentList(self, args):
a0, args = args[0], args[1:]
t0, n0, m0 = a0
if m0 != InMode:
raise ValueError, "method's 'self' must be 'InMode'"
self.itself = Variable(t0, "_self->ob_itself", SelfMode)
self.argumentList.append(self.itself)
FunctionGenerator.parseArgumentList(self, args)
def _test():
void = None
eggs = FunctionGenerator(void, "eggs",
(stringptr, 'cmd', InMode),
(int, 'x', InMode),
(double, 'y', InOutMode),
(int, 'status', ErrorMode),
)
eggs.setprefix("spam")
print "/* START */"
eggs.generate()
if __name__ == "__main__":
_test()
| apache-2.0 | 1,704,962,134,877,571,000 | 28.251656 | 79 | 0.521621 | false |
pyhmsa/pyhmsa | pyhmsa/fileformat/importer/emsa.py | 1 | 12424 | """
Importer from EMSA file format
"""
# Standard library modules.
import datetime
# Third party modules.
import numpy as np
# Local modules.
from pyhmsa.fileformat.importer.importer import _Importer, _ImporterThread
from pyhmsa.fileformat.common.emsa import calculate_checksum
from pyhmsa.datafile import DataFile
from pyhmsa.spec.header import Header
from pyhmsa.spec.condition.probe import ProbeEM, ProbeTEM
from pyhmsa.spec.condition.acquisition import AcquisitionPoint
from pyhmsa.spec.condition.specimenposition import SpecimenPosition
from pyhmsa.spec.condition.detector import \
(DetectorSpectrometer, DetectorSpectrometerXEDS, DetectorSpectrometerCL,
Window)
from pyhmsa.spec.condition.calibration import CalibrationLinear
from pyhmsa.spec.datum.analysis import Analysis1D
from pyhmsa.type.unit import validate_unit
from pyhmsa.util.parsedict import parsedict
# Globals and constants variables.
from pyhmsa.spec.condition.detector import \
(COLLECTION_MODE_PARALLEL, COLLECTION_MODE_SERIAL,
XEDS_TECHNOLOGY_GE, XEDS_TECHNOLOGY_SILI, XEDS_TECHNOLOGY_SDD,
XEDS_TECHNOLOGY_UCAL,
SIGNAL_TYPE_EDS, SIGNAL_TYPE_WDS, SIGNAL_TYPE_CLS)
from pyhmsa.fileformat.common.emsa import \
(EMSA_ELS_DETECTOR_SERIAL, EMSA_ELS_DETECTOR_PARALL,
EMSA_EDS_DETECTOR_SIBEW, EMSA_EDS_DETECTOR_SIUTW, EMSA_EDS_DETECTOR_SIWLS,
EMSA_EDS_DETECTOR_GEBEW, EMSA_EDS_DETECTOR_GEUTW, EMSA_EDS_DETECTOR_GEWLS,
EMSA_EDS_DETECTOR_SDBEW, EMSA_EDS_DETECTOR_SDUTW, EMSA_EDS_DETECTOR_SDWLS,
EMSA_EDS_DETECTOR_UCALUTW)
_ELSDET_TO_COLLECTION_MODE = \
{EMSA_ELS_DETECTOR_PARALL: COLLECTION_MODE_PARALLEL,
EMSA_ELS_DETECTOR_SERIAL: COLLECTION_MODE_SERIAL}
_EDSDET_TO_XEDS_TECHNOLOGY = \
{EMSA_EDS_DETECTOR_SIBEW: XEDS_TECHNOLOGY_SILI,
EMSA_EDS_DETECTOR_SIUTW: XEDS_TECHNOLOGY_SILI,
EMSA_EDS_DETECTOR_SIWLS: XEDS_TECHNOLOGY_SILI,
EMSA_EDS_DETECTOR_GEBEW: XEDS_TECHNOLOGY_GE,
EMSA_EDS_DETECTOR_GEUTW: XEDS_TECHNOLOGY_GE,
EMSA_EDS_DETECTOR_GEWLS: XEDS_TECHNOLOGY_GE,
EMSA_EDS_DETECTOR_SDBEW: XEDS_TECHNOLOGY_SDD,
EMSA_EDS_DETECTOR_SDUTW: XEDS_TECHNOLOGY_SDD,
EMSA_EDS_DETECTOR_SDWLS: XEDS_TECHNOLOGY_SDD,
EMSA_EDS_DETECTOR_UCALUTW: XEDS_TECHNOLOGY_UCAL}
class _ImporterEMSAThread(_ImporterThread):
def _run(self, filepath, *args, **kwargs):
emsa_file = None
try:
# Parse EMSA file
emsa_file = open(filepath, 'rt')
lines = emsa_file.readlines()
self._update_status(0.1, 'Verify checksum')
self._verify_checksum(lines)
self._update_status(0.2, 'Parse keywords')
keywords = self._parse_keywords(lines)
self._update_status(0.3, 'Parse data')
buffer = self._parse_data(lines, keywords)
# Create data file
datafile = DataFile()
self._update_status(0.4, 'Extracting header')
datafile.header.update(self._extract_header(keywords))
self._update_status(0.5, 'Extracting probe')
datafile.conditions.update(self._extract_probe(keywords))
self._update_status(0.6, 'Extracting acquisition')
datafile.conditions.update(self._extract_acquisition(keywords))
self._update_status(0.7, 'Extracting detector')
datafile.conditions.update(self._extract_detector(keywords))
datum = Analysis1D(len(buffer), dtype=buffer.dtype,
buffer=np.ravel(buffer),
conditions=datafile.conditions)
datafile.data['Spectrum'] = datum
finally:
if emsa_file is not None:
emsa_file.close()
return datafile
def _is_line_keyword(self, line):
try:
return line.strip()[0] == '#'
except:
return False
def _verify_checksum(self, lines):
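        # Compare the value stored under the CHECKSUM keyword with a checksum
        # recomputed over the file's lines; a file that reaches ENDOFDATA
        # without a CHECKSUM keyword is accepted as-is.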
for line in lines:
if not self._is_line_keyword(line):
continue
tag, _comment, expected_checksum = self._parse_keyword_line(line)
if tag == 'ENDOFDATA':
return # No checksum
if tag == 'CHECKSUM':
break
actual_checksum = calculate_checksum(lines)
if actual_checksum != expected_checksum:
raise IOError("The checksums don't match: %i != %i " % \
(actual_checksum, expected_checksum))
def _parse_keywords(self, lines):
keywords = parsedict()
# First pass
for line in lines:
if not self._is_line_keyword(line):
break
tag, _comment, value = self._parse_keyword_line(line)
if tag == 'SPECTRUM':
break
keywords.setdefault(tag, []).append(value)
# Second pass (remove list if only one value)
for tag, values in keywords.items():
if len(values) == 1:
keywords[tag] = values[0]
else:
keywords[tag] = tuple(values)
return keywords
def _parse_keyword_line(self, line):
line = line.strip("#") # Strip keyword character
tag, value = line.split(":", 1)
tag = tag.strip()
value = value.strip()
try:
tag, comment = tag.split()
except:
comment = ""
tag = tag.upper()
comment = comment.strip("-")
return tag, comment, value
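    # Illustrative sketch (not from the original source): a typical EMSA keyword
    # line such as "#BEAMKV   -kV: 20.0" is parsed by the method above into
    # ('BEAMKV', 'kV', '20.0') -- the tag upper-cased, the leading '-' stripped
    # from the comment, and the value kept as a string.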
def _parse_data(self, lines, keywords):
# Filter to get only data lines
lines = filter(lambda line: not self._is_line_keyword(line), lines)
# Read based on data type
datatype = keywords.get('DATATYPE')
if datatype is None:
raise ValueError('No DATATYPE specified')
datatype = datatype.upper()
if datatype == 'XY':
data = self._parse_data_xy(lines, keywords)
elif datatype == 'Y':
data = self._parse_data_y(lines, keywords)
else:
raise ValueError('Unknown data type')
# Check number of points
npoints = int(float(keywords.get('NPOINTS', len(data))))
if npoints != len(data):
raise ValueError('Inconsistent number of points. NPOINTS=%i != len(data)=%i' % \
(npoints, len(data)))
return data
def _parse_data_xy(self, lines, keywords):
data = []
for line in lines:
data.append(self._parse_data_line(line))
return np.array(data)[:, 1]
def _parse_data_y(self, lines, keywords):
ydata = []
for line in lines:
ydata.extend(self._parse_data_line(line))
return np.array(ydata)
def _parse_data_line(self, line):
# Split values separated by a comma
tmprow = [value.strip() for value in line.split(',')]
# Split values separated by a space
row = []
for value in tmprow:
row.extend(value.split())
# Convert to float
row = list(map(float, row))
return row
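    # For example (sketch, not in the original file): the data line
    # "1.0, 2.0 3.0, 4.0" is split on commas and then on whitespace, so
    # _parse_data_line returns [1.0, 2.0, 3.0, 4.0].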
def _extract_header(self, keywords):
header = Header()
header.title = keywords['TITLE']
header.date = \
datetime.datetime.strptime(keywords['DATE'], '%d-%b-%Y').date()
header.time = \
datetime.datetime.strptime(keywords['TIME'], '%H:%M').time()
header.author = keywords['OWNER']
return header
def _extract_probe(self, keywords):
if 'BEAMKV' not in keywords:
return {}
kwargs = {}
kwargs['beam_voltage'] = (keywords.getfloat('BEAMKV'), 'kV')
kwargs['beam_current'] = (keywords.getfloat('PROBECUR'), 'nA')
kwargs['emission_current'] = (keywords.getfloat('EMISSION'), 'uA')
kwargs['beam_diameter'] = (keywords.getfloat('BEAMDIAM'), 'nm')
kwargs['scan_magnification'] = keywords.getint('MAGCAM')
if 'OPERMODE' in keywords:
kwargs['lens_mode'] = keywords.get('OPERMODE') # Enums are identical
kwargs['convergence_angle'] = (keywords.getfloat('CONVANGLE'), 'mrad')
c = ProbeTEM(**kwargs)
else:
c = ProbeEM(**kwargs)
return {'Probe0': c}
def _extract_acquisition(self, keywords):
if 'XPOSITION' not in keywords or \
'YPOSITION' not in keywords or \
'ZPOSITION' not in keywords:
return {}
position = SpecimenPosition(x=keywords.getfloat('XPOSITION'),
y=keywords.getfloat('YPOSITION'),
z=keywords.getfloat('ZPOSITION')) #FIXME: Handle XTILTSTGE and YTILTSTGE
dwell_time = (keywords.getfloat('DWELLTIME'), 'ms')
if 'INTEGTIME' in keywords:
total_time = (keywords.getfloat('INTEGTIME'), 'ms')
else:
total_time = (keywords.getfloat('REALTIME'), 's')
dwell_time_live = (keywords.getfloat('LIVETIME'), 's')
c = AcquisitionPoint(position, dwell_time, total_time, dwell_time_live)
return {'Acq0': c}
def _extract_detector(self, keywords):
if 'SIGNALTYPE' not in keywords:
return {}
signal_type = keywords.get('SIGNALTYPE') # Enums is identical
kwargs = {}
kwargs['signal_type'] = signal_type
kwargs['channel_count'] = keywords.getint('NPOINTS')
quantity = keywords.get('XLABEL', 'Energy')
unit = keywords.get('XUNITS')
gain = keywords.getfloat('XPERCHAN')
offset = keywords.getfloat('OFFSET')
try:
unit = validate_unit(unit)
except ValueError as ex: # Attempt quick fix for common unit
if 'angstroms' in unit:
unit = 'nm'
gain /= 10.0
offset /= 10.0
elif 'eV' in unit:
unit = 'eV'
else:
raise ex
kwargs['calibration'] = CalibrationLinear(quantity, unit, gain, offset)
kwargs['measurement_unit'] = keywords.get('yunits')
kwargs['elevation'] = (keywords.getfloat('ELEVANGLE'), 'degrees')
kwargs['azimuth'] = (keywords.getfloat('AZIMANGLE'), 'degrees')
kwargs['solid_angle'] = (keywords.getfloat('SOLIDANGLE'), 'sr')
kwargs['semi_angle'] = (keywords.getfloat('COLLANGLE'), 'mrad')
kwargs['collection_mode'] = \
_ELSDET_TO_COLLECTION_MODE.get(keywords.get('ELSDET'))
if signal_type in [SIGNAL_TYPE_EDS, SIGNAL_TYPE_WDS]:
window = Window()
if 'TDEADLYR' in keywords:
window.append_layer('Dead layer', (keywords.getfloat('TDEADLYR') * 1e4, 'um'))
if 'TACTLYR' in keywords:
window.append_layer('Active Layer', (keywords.getfloat('TACTLYR') * 1e4, 'um'))
if 'TBEWIND' in keywords:
window.append_layer('Be window', (keywords.getfloat('TBEWIND') * 1e4, 'um'))
if 'TAUWIND' in keywords:
window.append_layer('Au window', (keywords.getfloat('TAUWIND') * 1e4, 'um'))
if 'TALWIND' in keywords:
window.append_layer('Al window', (keywords.getfloat('TALWIND') * 1e4, 'um'))
if 'TPYWIND' in keywords:
window.append_layer('Pyrolene window', (keywords.getfloat('TPYWIND') * 1e4, 'um'))
if 'TBNWIND' in keywords:
window.append_layer('Boron-Nitride window', (keywords.getfloat('TBNWIND') * 1e4, 'um'))
if 'TDIWIND' in keywords:
window.append_layer('Diamond window', (keywords.getfloat('TDIWIND') * 1e4, 'um'))
if 'THCWIND' in keywords:
                window.append_layer('HydroCarbon window', (keywords.getfloat('THCWIND') * 1e4, 'um'))
if window.layers:
kwargs['window'] = window
if signal_type == SIGNAL_TYPE_EDS:
kwargs['technology'] = \
_EDSDET_TO_XEDS_TECHNOLOGY.get(keywords.get('EDSDET'))
c = DetectorSpectrometerXEDS(**kwargs)
elif signal_type == SIGNAL_TYPE_CLS:
c = DetectorSpectrometerCL(**kwargs)
else:
c = DetectorSpectrometer(**kwargs)
return {signal_type: c}
class ImporterEMSA(_Importer):
SUPPORTED_EXTENSIONS = ('.emsa',)
def _create_thread(self, filepath, *args, **kwargs):
return _ImporterEMSAThread(filepath)
| mit | -6,221,120,949,332,548,000 | 35.011594 | 108 | 0.59047 | false |
seanli9jan/tensorflow | tensorflow/python/autograph/operators/control_flow.py | 10 | 7759 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow statements: loops, conditionals, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
def for_stmt(iter_, extra_test, body, init_state):
"""Functional form of a for statement.
The loop operates on a state, which includes all symbols that are
variant across loop iterations, excluding the iterate as well as the
variables local to the loop.
For example, given the loop below that calculates the geometric and
arithmetic means or some numbers:
geo_mean = 1
arith_mean = 0
for i in range(n):
a = numbers[i]
geo_mean *= a
arith_mean += a
The state is represented by the variables geo_mean and arith_mean. The
argument for initial_state may contain the tuple (1, 0), the body will
include the arguments geo_mean and arith_mean and will return a tuple
representing the new values for geo_mean and respectively arith_mean.
Args:
iter_: The entity being iterated over.
extra_test: Callable with the state as arguments, and boolean return type.
An additional loop condition.
body: Callable with the iterate and the state as arguments, and
state as return type. The actual loop body.
init_state: Tuple containing the initial state.
Returns:
Tuple containing the final state.
"""
if tensor_util.is_tensor(iter_):
return _known_len_for_stmt(iter_, extra_test, body, init_state)
elif isinstance(iter_, dataset_ops.Dataset):
return _dataset_for_stmt(iter_, extra_test, body, init_state)
else:
return _py_for_stmt(iter_, extra_test, body, init_state)
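# Usage sketch (an illustration, not part of the TF code base): the geometric /
# arithmetic mean loop from the docstring above maps onto for_stmt roughly as
#
#   def body(a, geo_mean, arith_mean):
#     return geo_mean * a, arith_mean + a
#
#   geo_mean, arith_mean = for_stmt(
#       iter_=numbers,
#       extra_test=lambda geo_mean, arith_mean: True,
#       body=body,
#       init_state=(1, 0))
#
# where `numbers` may be a plain Python sequence, a Tensor or a tf.data
# Dataset; the dispatcher above picks the matching implementation.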
def _py_for_stmt(iter_, extra_test, body, init_state):
"""Overload of for_stmt that executes a Python for loop."""
state = init_state
for target in iter_:
if not extra_test(*state):
break
state = body(target, *state)
# TODO(mdan): Remove this special case.
if len(state) == 1:
return state[0]
return state
def _known_len_for_stmt(iter_, extra_test, body, init_state):
"""Overload of for_stmt that iterates over objects that admit a length."""
n = py_builtins.len_(iter_)
def while_body(iterate_index, *state):
iterate = iter_[iterate_index]
new_state = body(iterate, *state)
return (iterate_index + 1,) + new_state
def while_cond(iterate_index, *state):
return gen_math_ops.logical_and(iterate_index < n, extra_test(*state))
results = while_stmt(
while_cond,
while_body,
init_state=(0,) + init_state,
extra_deps=(iter_,),
opts=dict(maximum_iterations=n))
# Dropping the iteration index because it's not syntactically visible.
results = results[1:]
# TODO(mdan): Remove this special case.
if len(results) == 1:
return results[0]
return results
def _dataset_for_stmt(ds, extra_test, body, init_state):
"""Overload of for_stmt that iterates over TF Datasets."""
  # Because Datasets only expose get_next, in the style of Python iterators,
# we are forced to unpack the loop as:
#
# epoch_number, iterate = ds.get_next()
# while epoch_number < 2:
# <body>
# epoch_number, iterate = ds.get_next()
epoch_numbers = dataset_ops.Dataset.range(2)
def tag_with(ds, tag):
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(tag).repeat(), ds))
ds_with_epoch = epoch_numbers.flat_map(lambda i: tag_with(ds, i))
iterator = ds_with_epoch.make_initializable_iterator()
with ops.control_dependencies((iterator.initializer,)):
epoch_number, iterate = iterator.get_next()
def while_body(epoch_number, iterate, *state):
new_state = body(iterate, *state)
epoch_number, iterate = iterator.get_next()
return (epoch_number, iterate) + new_state
def while_cond(epoch_number, iterate, *state):
del iterate
return gen_math_ops.logical_and(epoch_number < 1, extra_test(*state))
results = while_stmt(
while_cond,
while_body,
init_state=(epoch_number, iterate) + init_state,
extra_deps=())
# Dropping the epoch number and iterate because they are not syntactically
# visible.
results = results[2:]
# TODO(mdan): Remove this special case.
if len(results) == 1:
return results[0]
return results
def while_stmt(test, body, init_state, extra_deps, opts=None):
"""Functional form of a while statement.
The loop operates on a so-called state, which includes all symbols that are
variant across loop iterations. In what follows we refer to state as either
a tuple of entities that represent an actual state, or a list of arguments
of the corresponding types.
Args:
test: Callable with the state as arguments, and boolean return type.
The loop condition.
body: Callable with the state as arguments, and state as return type.
The actual loop body.
init_state: Tuple containing the initial state.
extra_deps: Tuple containing additional entities on which the loop may
depend, such as loop invariants referenced by test. Used
exclusively for dispatch control.
opts: Optional dict of extra loop parameters.
Returns:
Tuple containing the final state.
"""
# TODO(mdan): Consider adding a generic mechanism for dynamic dispatch.
# That could be something as simple as a collection of dispatch rules, with
# some prioritization.
if any(tensor_util.is_tensor(v) for v in init_state + extra_deps):
return _tf_while_stmt(test, body, init_state, opts)
else:
return _py_while_stmt(test, body, init_state, opts)
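# Usage sketch (an illustration, not from the original module): summing the
# integers below `n` with while_stmt
#
#   i, total = while_stmt(
#       test=lambda i, total: i < n,
#       body=lambda i, total: (i + 1, total + i),
#       init_state=(0, 0),
#       extra_deps=(n,))
#
# With Python ints this runs eagerly via _py_while_stmt; if any entry of
# init_state or extra_deps is a Tensor it is staged as a tf.while_loop instead.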
def _tf_while_stmt(test, body, init_state, opts):
"""Overload of while_stmt that stages a TF while_stmt."""
if opts is None:
opts = {}
return control_flow_ops.while_loop(test, body, init_state, **opts)
def _py_while_stmt(test, body, init_state, opts):
"""Overload of while_stmt that executes a Python while loop."""
del opts
state = init_state
while test(*state):
state = body(*state)
return state
def if_stmt(cond, body, orelse):
"""Functional form of an if statement.
Args:
cond: Boolean.
body: Callable with no arguments, and outputs of the positive (if) branch
as return type.
orelse: Callable with no arguments, and outputs of the negative (else)
branch as return type.
Returns:
Tuple containing the statement outputs.
"""
if tensor_util.is_tensor(cond):
return tf_if_stmt(cond, body, orelse)
else:
return _py_if_stmt(cond, body, orelse)
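# Sketch (assumption, not in the source): a conditional such as
# `abs_x = x if x > 0 else -x` can be staged as
#
#   abs_x = if_stmt(x > 0, lambda: x, lambda: -x)
#
# which becomes a tf.cond when `x > 0` is a Tensor and a plain Python
# conditional otherwise.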
def tf_if_stmt(cond, body, orelse):
"""Overload of if_stmt that stages a TF cond."""
return control_flow_ops.cond(cond, body, orelse)
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
| apache-2.0 | -5,335,090,495,901,691,000 | 33.180617 | 80 | 0.693259 | false |
jrg365/gpytorch | docs/source/conf.py | 1 | 8284 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import io
import re
import shutil
import sys
# Mock - so RTD doesn't have to import torch
from unittest.mock import MagicMock # noqa
import sphinx_rtd_theme # noqa
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), "..", "..", *names), encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
# Mechanism to mock out modules
class ModuleMock(object):
def __init__(self, *args, **kwargs):
pass
# We need some dirty hackery to fix the distributions mocking
class _Distribution(object):
pass
# More dirty hackery
class _SubDistribution(object):
pass
class _Kernel(object):
pass
# Putting all of our dirty hacks together
class Mock(MagicMock):
__metaclass__ = type
@classmethod
def __getattr__(cls, name):
if "Module" == name:
return ModuleMock
elif "Distribution" in name:
return _Distribution
elif "Normal" in name or "Gamma" in name or "Wishart" in name or "Uniform" in name:
return _SubDistribution
elif "Kernel" in name or "Parallel" in name:
return _Kernel
else:
res = MagicMock()
res.Module = ModuleMock
res.__metaclass__ = type
return res
MOCK_MODULES = [
"pyro",
"pyro.distributions",
"pyro.distributions.torch_distribution",
"torch",
"torch.autograd",
"torch.nn",
"torch.nn.functional",
"torch.nn.parallel",
"torch.optim",
"torch.utils",
"torch.utils.data",
"torch.distributions.kl",
"torch.distributions.multivariate_normal",
"torch.distributions.utils",
"torch.distributions",
"torch.optim.lr_scheduler",
"numpy",
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# - Copy over examples folder to docs/source
# This makes it so that nbsphinx properly loads the notebook images
examples_source = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
examples_dest = os.path.abspath(os.path.join(os.path.dirname(__file__), "examples"))
if os.path.exists(examples_dest):
shutil.rmtree(examples_dest)
os.mkdir(examples_dest)
for root, dirs, files in os.walk(examples_source):
for dr in dirs:
os.mkdir(os.path.join(root.replace(examples_source, examples_dest), dr))
for fil in files:
if os.path.splitext(fil)[1] in [".ipynb", ".md", ".rst"]:
source_filename = os.path.join(root, fil)
dest_filename = source_filename.replace(examples_source, examples_dest)
shutil.copyfile(source_filename, dest_filename)
# -- Project information -----------------------------------------------------
project = "GPyTorch"
copyright = "2019, Cornellius GP"
author = "Cornellius GP"
# The short X.Y version
version = find_version("gpytorch", "__init__.py")
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.autodoc",
"nbsphinx",
"m2r",
]
# Disable docstring inheritance
autodoc_inherit_docstrings = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [
"_build", "**.ipynb_checkpoints", "examples/**/README.rst",
"examples/README.rst", "examples/index.rst"
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
"collapse_navigation": False,
"display_version": True,
# 'logo_only': False,
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "GPyTorchdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "GPyTorch.tex", "GPyTorch Documentation", "Cornellius GP", "manual")]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "gpytorch", "GPyTorch Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"GPyTorch",
"GPyTorch Documentation",
author,
"GPyTorch",
"One line description of project.",
"Miscellaneous",
)
]
# -- Extension configuration -------------------------------------------------
| mit | 8,419,958,730,044,610,000 | 28.691756 | 108 | 0.637615 | false |
mathLab/RBniCS | tests/methodology/tutorials/13_elliptic_optimal_control/tutorial_elliptic_optimal_control_2_pod_exact.py | 1 | 6138 | # Copyright (C) 2015-2021 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from dolfin import *
from rbnics import *
@ExactParametrizedFunctions()
class EllipticOptimalControl(EllipticOptimalControlProblem):
# Default initialization of members
def __init__(self, V, **kwargs):
# Call the standard initialization
EllipticOptimalControlProblem.__init__(self, V, **kwargs)
# ... and also store FEniCS data structures for assembly
assert "subdomains" in kwargs
assert "boundaries" in kwargs
self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"]
yup = TrialFunction(V)
(self.y, self.u, self.p) = split(yup)
zvq = TestFunction(V)
(self.z, self.v, self.q) = split(zvq)
self.dx = Measure("dx")(subdomain_data=subdomains)
self.ds = Measure("ds")(subdomain_data=boundaries)
# Regularization coefficient
self.alpha = 0.01
# Store the velocity expression
self.vel = Expression("x[1]*(1-x[1])", element=self.V.sub(0).ufl_element())
# Customize linear solver parameters
self._linear_solver_parameters.update({
"linear_solver": "mumps"
})
# Return custom problem name
def name(self):
return "EllipticOptimalControl2PODExact"
# Return theta multiplicative terms of the affine expansion of the problem.
def compute_theta(self, term):
mu = self.mu
if term in ("a", "a*"):
theta_a0 = 1.0 / mu[0]
theta_a1 = 1.0
return (theta_a0, theta_a1)
elif term in ("c", "c*"):
theta_c0 = 1.0
return (theta_c0,)
elif term == "m":
theta_m0 = 1.0
return (theta_m0,)
elif term == "n":
theta_n0 = self.alpha
return (theta_n0,)
elif term == "f":
theta_f0 = 1.0
return (theta_f0,)
elif term == "g":
theta_g0 = mu[1]
theta_g1 = mu[2]
return (theta_g0, theta_g1)
elif term == "h":
theta_h0 = 0.24 * mu[1]**2 + 0.52 * mu[2]**2
return (theta_h0,)
elif term == "dirichlet_bc_y":
theta_bc0 = 1.
return (theta_bc0,)
else:
raise ValueError("Invalid term for compute_theta().")
# Return forms resulting from the discretization of the affine expansion of the problem operators.
def assemble_operator(self, term):
dx = self.dx
if term == "a":
y = self.y
q = self.q
vel = self.vel
a0 = inner(grad(y), grad(q)) * dx
a1 = vel * y.dx(0) * q * dx
return (a0, a1)
elif term == "a*":
z = self.z
p = self.p
vel = self.vel
as0 = inner(grad(z), grad(p)) * dx
as1 = - vel * p.dx(0) * z * dx
return (as0, as1)
elif term == "c":
u = self.u
q = self.q
c0 = u * q * dx
return (c0,)
elif term == "c*":
v = self.v
p = self.p
cs0 = v * p * dx
return (cs0,)
elif term == "m":
y = self.y
z = self.z
m0 = y * z * dx(1) + y * z * dx(2)
return (m0,)
elif term == "n":
u = self.u
v = self.v
n0 = u * v * dx
return (n0,)
elif term == "f":
q = self.q
f0 = Constant(0.0) * q * dx
return (f0,)
elif term == "g":
z = self.z
g0 = z * dx(1)
g1 = z * dx(2)
return (g0, g1)
elif term == "h":
h0 = 1.0
return (h0,)
elif term == "dirichlet_bc_y":
bc0 = [DirichletBC(self.V.sub(0), Constant(i), self.boundaries, i) for i in (1, 2)]
return (bc0,)
elif term == "dirichlet_bc_p":
bc0 = [DirichletBC(self.V.sub(2), Constant(0.0), self.boundaries, i) for i in (1, 2)]
return (bc0,)
elif term == "inner_product_y":
y = self.y
z = self.z
x0 = inner(grad(y), grad(z)) * dx
return (x0,)
elif term == "inner_product_u":
u = self.u
v = self.v
x0 = u * v * dx
return (x0,)
elif term == "inner_product_p":
p = self.p
q = self.q
x0 = inner(grad(p), grad(q)) * dx
return (x0,)
else:
raise ValueError("Invalid term for assemble_operator().")
# 1. Read the mesh for this problem
mesh = Mesh("data/mesh2.xml")
subdomains = MeshFunction("size_t", mesh, "data/mesh2_physical_region.xml")
boundaries = MeshFunction("size_t", mesh, "data/mesh2_facet_region.xml")
# 2. Create Finite Element space (Lagrange P1)
scalar_element = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(scalar_element, scalar_element, scalar_element)
V = FunctionSpace(mesh, element, components=["y", "u", "p"])
# 3. Allocate an object of the EllipticOptimalControl class
problem = EllipticOptimalControl(V, subdomains=subdomains, boundaries=boundaries)
mu_range = [(3.0, 20.0), (0.5, 1.5), (1.5, 2.5)]
problem.set_mu_range(mu_range)
# 4. Prepare reduction with a reduced basis method
reduction_method = PODGalerkin(problem)
reduction_method.set_Nmax(20)
# 5. Perform the offline phase
lifting_mu = (3.0, 1.0, 2.0)
problem.set_mu(lifting_mu)
reduction_method.initialize_training_set(100)
reduced_problem = reduction_method.offline()
# 6. Perform an online solve
online_mu = (15.0, 0.6, 1.8)
reduced_problem.set_mu(online_mu)
reduced_problem.solve()
reduced_problem.export_solution(filename="online_solution")
print("Reduced output for mu =", online_mu, "is", reduced_problem.compute_output())
# 7. Perform an error analysis
reduction_method.initialize_testing_set(100)
reduction_method.error_analysis()
# 8. Perform a speedup analysis
reduction_method.speedup_analysis()
| lgpl-3.0 | -4,688,242,645,868,259,000 | 32.358696 | 102 | 0.538938 | false |
sebastien-forestier/explaupoppydiva | scripts/arm/pool.py | 1 | 12345 | from experiment import VrepDivaExperiment
from config import Config
from multiprocessing import Lock
import threading
import pyvrep
import time
import datetime
import os
import sys
#print 'Number of arguments:', len(sys.argv), 'arguments.'
#print 'Argument List:', str(sys.argv)
assert len(sys.argv) > 1, "Usage: 'python pool.py <cube> <im_mode>' with <cube> being cube2, cube3 or cube5"
cube = str(sys.argv[1])
if len(sys.argv) > 2:
im_mode = str(sys.argv[2])
else:
im_mode = 'random'
if len(sys.argv) > 3:
pool_name = str(sys.argv[3])
else:
pool_name = "explaupoppydiva-" + im_mode + '-' + cube
if __name__ == '__main__':
configs = {}
#PARAMS
debug = False
max_proc_par = 7
n_trials = 5
iterations = 10000
#CMA
config = Config("TOP-DOWN-CMA-DISCR-PROGRESS", cube, iterations/20, debug=debug)
config.learning['training_mode'] = 'par'
config.learning['par']['exploring']['exploring_mode'] = 'cma'
config.modules['mod2']['im_name'] = 'discretized_progress'
configs[config.name] = config
config = Config("TOP-DOWN-CMA-tree", cube, iterations/20, debug=debug)
config.learning['training_mode'] = 'par'
config.learning['par']['exploring']['exploring_mode'] = 'cma'
config.modules['mod2']['im_name'] = 'tree'
configs[config.name] = config
config = Config("TOP-DOWN-CMA-GOAL-BABBLING", cube, iterations/20, debug=debug)
config.learning['training_mode'] = 'par'
config.learning['par']['exploring']['exploring_mode'] = 'cma'
config.modules['mod2']['im_name'] = 'random'
configs[config.name] = config
#RANDOM
config = Config("TOP-DOWN-RANDOM-DISCR-PROGRESS", cube, iterations/20, debug=debug)
config.learning['training_mode'] = 'par'
config.learning['par']['exploring']['exploring_mode'] = 'random'
config.modules['mod2']['im_name'] = 'discretized_progress'
configs[config.name] = config
config = Config("TOP-DOWN-RANDOM-GOAL-BABBLING", cube, iterations/20, debug=debug)
config.learning['training_mode'] = 'par'
config.learning['par']['exploring']['exploring_mode'] = 'random'
config.modules['mod2']['im_name'] = 'random'
configs[config.name] = config
config = Config("TOP-DOWN-RANDOM-tree", cube, iterations/20, debug=debug)
config.learning['training_mode'] = 'par'
config.learning['par']['exploring']['exploring_mode'] = 'random'
config.modules['mod2']['im_name'] = 'tree'
configs[config.name] = config
#Seq goal babbling
config = Config("SEQ-GOAL-BABBLING", cube, iterations, debug=debug)
config.learning['training_mode'] = 'seq'
config.modules['mod1']['im_name'] = 'random'
config.modules['mod2']['im_name'] = 'random'
config.modules['mod1']['babbling_iter'] = range(1,iterations/2+1)
config.modules['mod2']['babbling_iter'] = range(iterations/2+1,iterations+1)
configs[config.name] = config
#Seq discretized progress
config = Config("SEQ-DISCR-PROGRESS", cube, iterations, debug=debug)
config.learning['training_mode'] = 'seq'
config.modules['mod1']['im_name'] = 'discretized_progress'
config.modules['mod2']['im_name'] = 'discretized_progress'
config.modules['mod1']['babbling_iter'] = range(1,iterations/2+1)
config.modules['mod2']['babbling_iter'] = range(iterations/2+1,iterations+1)
configs[config.name] = config
#Seq discretized progress
config = Config("SEQ-tree", cube, iterations, debug=debug)
config.learning['training_mode'] = 'seq'
config.modules['mod1']['im_name'] = 'tree'
config.modules['mod2']['im_name'] = 'tree'
config.modules['mod1']['babbling_iter'] = range(1,iterations/2+1)
config.modules['mod2']['babbling_iter'] = range(iterations/2+1,iterations+1)
configs[config.name] = config
#MS2 discretized_progress
config = Config("MS2-DISCR-PROGRESS", cube, iterations, debug=debug)
config.modules.pop('mod2')
config.mids = ['mod1']
config.modules['mod1'] = dict(m = config.motor_dims,
s = config.obj_dims,
babbling_name = 'goal',
sm_name = 'knn1',
im_name = 'discretized_progress',
from_log = None,#['../logs/2015-03-11_20-36-49VrepDivaExperiment-test1000-seq_6000/', 'mod1', True],
children = [],
babbling_iter = range(1,iterations+1)
)
configs[config.name] = config
#MS2 discretized_progress
config = Config("MS2-tree", cube, iterations, debug=debug)
config.modules.pop('mod2')
config.mids = ['mod1']
config.modules['mod1'] = dict(m = config.motor_dims,
s = config.obj_dims,
babbling_name = 'goal',
sm_name = 'knn1',
im_name = 'tree',
from_log = None,#['../logs/2015-03-11_20-36-49VrepDivaExperiment-test1000-seq_6000/', 'mod1', True],
children = [],
babbling_iter = range(1,iterations+1)
)
configs[config.name] = config
#MS2 rand goal
config = Config("MS2-GOAL-BABBLING", cube, iterations, debug=debug)
config.modules.pop('mod2')
config.mids = ['mod1']
config.modules['mod1'] = dict(m = config.motor_dims,
s = config.obj_dims,
babbling_name = 'goal',
sm_name = 'knn1',
im_name = 'random',
from_log = None,#['../logs/2015-03-11_20-36-49VrepDivaExperiment-test1000-seq_6000/', 'mod1', True],
children = [],
babbling_iter = range(1,iterations+1)
)
configs[config.name] = config
#MOTOR BABBLING ONLY
config = Config("MOTOR_BABBLING", cube, iterations, debug=debug)
config.modules.pop('mod2')
config.mids = ['mod1']
config.modules['mod1'] = dict(m = config.motor_dims,
s = config.obj_dims,
babbling_name = 'motor',
sm_name = 'knn1',
im_name = 'random',
from_log = None,#['../logs/2015-03-11_20-36-49VrepDivaExperiment-test1000-seq_6000/', 'mod1', True],
children = [],
babbling_iter = range(1,iterations+1)
)
configs[config.name] = config
#MS1 goal babbling
config = Config("MS1-GOAL-BABBLING", cube, iterations, debug=debug)
config.learning['training_mode'] = 'seq'
config.modules['mod1']['im_name'] = 'random'
config.modules['mod2']['im_name'] = 'random'
config.modules['mod1']['babbling_iter'] = range(1,iterations+1)
config.modules['mod2']['babbling_iter'] = []
configs[config.name] = config
#MS1 discretized progress
config = Config("MS1-DISCR-PROGRESS", cube, iterations, debug=debug)
config.learning['training_mode'] = 'seq'
config.modules['mod1']['im_name'] = 'discretized_progress'
config.modules['mod2']['im_name'] = 'discretized_progress'
config.modules['mod1']['babbling_iter'] = range(1,iterations+1)
config.modules['mod2']['babbling_iter'] = []
configs[config.name] = config
#MS1 discretized progress
config = Config("MS1-tree", cube, iterations, debug=debug)
config.learning['training_mode'] = 'seq'
config.modules['mod1']['im_name'] = 'tree'
config.modules['mod2']['im_name'] = 'tree'
config.modules['mod1']['babbling_iter'] = range(1,iterations+1)
config.modules['mod2']['babbling_iter'] = []
configs[config.name] = config
#S1S2 goal babbling
config = Config("S1S2-GOAL-BABBLING", cube, iterations, debug=debug)
config.learning['training_mode'] = 'seq'
config.modules['mod1']['im_name'] = 'random'
config.modules['mod2']['im_name'] = 'random'
config.modules['mod1']['babbling_iter'] = []
config.modules['mod2']['babbling_iter'] = range(1,iterations+1)
configs[config.name] = config
#S1S2 discretized progress
config = Config("S1S2-DISCR-PROGRESS", cube, iterations, debug=debug)
config.learning['training_mode'] = 'seq'
config.modules['mod1']['im_name'] = 'discretized_progress'
config.modules['mod2']['im_name'] = 'discretized_progress'
config.modules['mod1']['babbling_iter'] = []
config.modules['mod2']['babbling_iter'] = range(1,iterations+1)
configs[config.name] = config
#S1S2 discretized progress
config = Config("S1S2-tree", cube, iterations, debug=debug)
config.learning['training_mode'] = 'seq'
config.modules['mod1']['im_name'] = 'tree'
config.modules['mod2']['im_name'] = 'tree'
config.modules['mod1']['babbling_iter'] = []
config.modules['mod2']['babbling_iter'] = range(1,iterations+1)
configs[config.name] = config
if im_mode == 'random':
configs_used = [
"MOTOR_BABBLING",
"TOP-DOWN-CMA-GOAL-BABBLING",
"TOP-DOWN-RANDOM-GOAL-BABBLING",
"SEQ-GOAL-BABBLING",
"MS2-GOAL-BABBLING",
"MS1-GOAL-BABBLING",
"S1S2-GOAL-BABBLING",]
elif im_mode == 'discretized_progress':
configs_used = [
"MOTOR_BABBLING",
"MS1-DISCR-PROGRESS",
"TOP-DOWN-CMA-DISCR-PROGRESS",
"TOP-DOWN-RANDOM-DISCR-PROGRESS",
"SEQ-DISCR-PROGRESS",
"MS2-DISCR-PROGRESS",
"S1S2-DISCR-PROGRESS",
]
elif im_mode == 'tree':
configs_used = [
"MOTOR_BABBLING",
"MS1-tree",
"TOP-DOWN-CMA-tree",
"TOP-DOWN-RANDOM-tree",
"SEQ-tree",
"MS2-tree",
"S1S2-tree",
]
_spawn_lock = Lock()
started = 0
start_date = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
log_dir = '../logs/' + start_date + '-' + pool_name + '/'
os.mkdir(log_dir)
def start_experiment():
global started
with _spawn_lock:
conf_num = started
started += 1
xp = VrepDivaExperiment(configs[configs_used[conf_num]], log_dir=log_dir, n_trials=n_trials)
print "Start experiment with conf=", conf_num, "on vrep port=", xp.env.vrepEnv.simulation.port
xp.start()
#xp.draw(False)
print "Finished experiment with conf=", conf_num, "on vrep port=", xp.env.vrepEnv.simulation.port
#xp.close_connections()
processes = []
for _ in configs_used:
processes.append(threading.Thread(target = start_experiment))
t_start = time.time()
n_proc = len(processes)
for processes_i in [processes[i:i+max_proc_par] for i in range(0,n_proc,max_proc_par)]:
# Starts experiments
for p in processes_i:
p.start()
# Waits end of experiments
for p in processes_i:
p.join()
print "Experiment total time:", str(datetime.timedelta(seconds=int(time.time() - t_start)))
print "KILL VREPs"
pyvrep.spawn.killall_vrep()
#
#
# pref = '../logs/'
#
# log_dirs = [config.log_dir for config in configs]
#
# for log_dir in log_dirs:
# with open(pref + log_dir + '/{}'.format('log.pickle'), 'r') as f:
# log = cPickle.load(f)
# f.close()
#
# drawer = Drawer(log)
# drawer.plot_learning_curve()
# drawer.plot_scatter2D()
#
#
# plt.show()
#
#
#
#
# log_dir = experiment.log_dir
# import cPickle
# with open(log_dir + '/{}'.format('log.pickle'), 'r') as f:
# log = cPickle.load(f)
# f.close()
| gpl-3.0 | -1,628,991,086,286,354,700 | 36.409091 | 134 | 0.55245 | false |
VisheshHanda/production_backup | erpnext/shopping_cart/doctype/shopping_cart_settings/shopping_cart_settings.py | 23 | 3878 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import comma_and
from frappe.model.document import Document
from frappe.utils import get_datetime, get_datetime_str, now_datetime
class ShoppingCartSetupError(frappe.ValidationError): pass
class ShoppingCartSettings(Document):
def onload(self):
self.get("__onload").quotation_series = frappe.get_meta("Quotation").get_options("naming_series")
def validate(self):
if self.enabled:
self.validate_exchange_rates_exist()
def validate_exchange_rates_exist(self):
"""check if exchange rates exist for all Price List currencies (to company's currency)"""
company_currency = frappe.db.get_value("Company", self.company, "default_currency")
if not company_currency:
msgprint(_("Please specify currency in Company") + ": " + self.company,
raise_exception=ShoppingCartSetupError)
price_list_currency_map = frappe.db.get_values("Price List",
[self.price_list],
"currency")
# check if all price lists have a currency
for price_list, currency in price_list_currency_map.items():
if not currency:
frappe.throw(_("Currency is required for Price List {0}").format(price_list))
expected_to_exist = [currency + "-" + company_currency
for currency in price_list_currency_map.values()
if currency != company_currency]
# manqala 20/09/2016: set up selection parameters for query from tabCurrency Exchange
from_currency = [currency for currency in price_list_currency_map.values() if currency != company_currency]
to_currency = company_currency
# manqala end
if expected_to_exist:
# manqala 20/09/2016: modify query so that it uses date in the selection from Currency Exchange.
# exchange rates defined with date less than the date on which this document is being saved will be selected
exists = frappe.db.sql_list("""select CONCAT(from_currency,'-',to_currency) from `tabCurrency Exchange`
where from_currency in (%s) and to_currency = "%s" and date <= curdate()""" % (", ".join(["%s"]*len(from_currency)), to_currency), tuple(from_currency))
# manqala end
missing = list(set(expected_to_exist).difference(exists))
if missing:
msgprint(_("Missing Currency Exchange Rates for {0}").format(comma_and(missing)),
raise_exception=ShoppingCartSetupError)
def validate_tax_rule(self):
if not frappe.db.get_value("Tax Rule", {"use_for_shopping_cart" : 1}, "name"):
frappe.throw(frappe._("Set Tax Rule for shopping cart"), ShoppingCartSetupError)
def get_tax_master(self, billing_territory):
tax_master = self.get_name_from_territory(billing_territory, "sales_taxes_and_charges_masters",
"sales_taxes_and_charges_master")
return tax_master and tax_master[0] or None
def get_shipping_rules(self, shipping_territory):
return self.get_name_from_territory(shipping_territory, "shipping_rules", "shipping_rule")
def validate_cart_settings(doc, method):
frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings").run_method("validate")
def get_shopping_cart_settings():
if not getattr(frappe.local, "shopping_cart_settings", None):
frappe.local.shopping_cart_settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
return frappe.local.shopping_cart_settings
def is_cart_enabled():
return get_shopping_cart_settings().enabled
def show_quantity_in_website():
return get_shopping_cart_settings().show_quantity_in_website
def check_shopping_cart_enabled():
if not get_shopping_cart_settings().enabled:
frappe.throw(_("You need to enable Shopping Cart"), ShoppingCartSetupError)
def show_attachments():
return get_shopping_cart_settings().show_attachments
| gpl-3.0 | -3,614,599,071,228,152,000 | 39.821053 | 156 | 0.740846 | false |
ddboline/pylearn2 | setup.py | 1 | 3987 | from __future__ import print_function
import warnings
from setuptools import setup, find_packages, Extension
from setuptools.command.install import install
import numpy
from theano.compat.six.moves import input
# Because many people neglected to run the pylearn2/utils/setup.py script
# separately, we compile the necessary Cython extensions here but because
# Cython is not a strict dependency, we issue a warning when it is not
# available.
try:
from Cython.Distutils import build_ext
cython_available = True
except ImportError:
warnings.warn("Cython was not found and hence pylearn2.utils._window_flip "
"and pylearn2.utils._video and classes that depend on them "
"(e.g. pylearn2.train_extensions.window_flip) will not be "
"available")
cython_available = False
if cython_available:
cmdclass = {'build_ext': build_ext}
ext_modules = [Extension("pylearn2.utils._window_flip",
["pylearn2/utils/_window_flip.pyx"],
include_dirs=[numpy.get_include()]),
Extension("pylearn2.utils._video",
["pylearn2/utils/_video.pyx"],
include_dirs=[numpy.get_include()])]
else:
cmdclass = {}
ext_modules = []
# Inform user of setup.py develop preference
class pylearn2_install(install):
def run(self):
print("Because Pylearn2 is under heavy development, we generally do "
"not advice using the `setup.py install` command. Please "
"consider using the `setup.py develop` command instead for the "
"following reasons:\n\n1. Using `setup.py install` creates a "
"copy of the Pylearn2 source code in your Python installation "
"path. In order to update Pylearn2 afterwards you will need to "
"rerun `setup.py install` (!). Simply using `git pull` to "
"update your local copy of Pylearn2 code will not suffice. \n\n"
"2. When using `sudo` to install Pylearn2, all files, "
"including the tutorials, will be copied to a directory owned "
"by root. Not only is running tutorials as root unsafe, it "
"also means that all Pylearn2-related environment variables "
"which were defined for the user will be unavailable.\n\n"
"Pressing enter will continue the installation of Pylearn2 in "
"`develop` mode instead. Note that this means that you need to "
"keep this folder with the Pylearn2 code in its current "
"location. If you know what you are doing, and are very sure "
"that you want to install Pylearn2 using the `install` "
"command instead, please type `install`.\n")
mode = 'install'
#while mode not in ['', 'install', 'develop', 'cancel']:
#if mode is not None:
#print("Please try again")
#mode = input("Installation mode: [develop]/install/cancel: ")
if mode in ['', 'develop']:
self.distribution.run_command('develop')
if mode == 'install':
return install.run(self)
cmdclass.update({'install': pylearn2_install})
setup(
cmdclass=cmdclass,
ext_modules=ext_modules,
name='pylearn2',
version='0.1dev',
packages=find_packages(),
description='A machine learning library built on top of Theano.',
license='BSD 3-clause license',
long_description=open('README.rst', 'rb').read().decode('utf8'),
dependency_links=['git+http://github.com/Theano/Theano.git#egg=Theano'],
install_requires=['numpy>=1.5', 'pyyaml', 'argparse', "Theano"],
scripts=['bin/pylearn2-plot-monitor', 'bin/pylearn2-print-monitor',
'bin/pylearn2-show-examples', 'bin/pylearn2-show-weights',
'bin/pylearn2-train'],
package_data={
'': ['*.cu', '*.cuh', '*.h'],
},
)
| bsd-3-clause | -7,392,977,645,857,930,000 | 44.827586 | 79 | 0.619513 | false |
lcy-seso/Paddle | python/paddle/fluid/tests/unittests/test_mean_op.py | 5 | 1070 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestMeanOp(OpTest):
def setUp(self):
self.op_type = "mean"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.outputs = {'Out': np.mean(self.inputs["X"])}
def test_check_output(self):
self.check_output()
def test_checkout_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -4,405,959,967,995,606,500 | 30.470588 | 74 | 0.68785 | false |
iwebhub/dotfiles | setup.py | 1 | 4004 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import subprocess
import operator
print 'Postinstall wizard'
print 'for Ubuntu/Debian'
print 'Enter Y to continue, or X to exit'
choice = raw_input()
if choice == "X":
    print 'exiting'
    exit()
elif choice == "Y":
    print 'ok, continuing'
else:
    print 'invalid choice, exiting'
    exit()
print 'updating package repositories'
proc = subprocess.Popen('sudo apt-get update -y', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if err == '':
    print out
    print 'repositories updated'
else:
    print out
    print err
    print 'error'
print 'upgrading the distribution'
proc = subprocess.Popen('sudo apt-get upgrade -y', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if err == '':
    print out
    print 'distribution upgraded'
else:
    print out
    print err
    print 'error'
# install the desktop applications one by one (same packages and order as the
# original copy-pasted blocks, folded into a single loop)
apps = ['qbittorrent', 'vlc', 'firefox', 'chromium', 'libreoffice',
        'pidgin', 'gimp', 'geogebra', 'smplayer', 'gparted']
for app in apps:
    print 'installing %s' % app
    proc = subprocess.Popen('sudo apt-get install %s -y' % app, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out, err = proc.communicate()
    if err == '':
        print out
        print 'application installed'
    else:
        print out
        print err
        print 'error'
print 'POSTINSTALL FINISHED'
print 'end of program'
| mit | 5,720,968,160,988,043,000 | 23.036145 | 145 | 0.745363 | false |
nicholasmalaya/paleologos | disputatio/routines/vanes/combo.py | 2 | 8664 | #!/bin/py
#
# interpolate over data field with 2d polynomial fit
#
# fit a 2D, 3rd order polynomial to data
# estimate the 16 coefficients using all of your data points.
#
# http://stackoverflow.com/questions/18832763/drawing-directions-fields
#
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import itertools
import matplotlib.pyplot as plt
from scipy import integrate
from scipy.integrate import ode
#
# adding functions
#
import top
hprime = -12
Rprime = 3.0
def load_ell():
#
# Generate Data from ellipses
#
h = hprime
thetaf = 20*np.pi/180.
a = -h*1.0
miny = -1
#
# create data
#
space = 0.02
R = Rprime
y0 = np.arange(Rprime,miny,-space)
x0 = -np.sqrt(R*R-y0*y0)
theta0 = np.arctan2(y0,x0)
thetafy = thetaf*(R-y0)/R
#thetafy = thetaf*np.arccos(y0/R)/2.
thetam = theta0-np.pi/2-thetafy
m = np.tan(thetam)
k = (y0 + a*a*m/(x0-h) - m*(x0-h))
bs = -a*a*m*(y0-k)/(x0-h)
b = np.sqrt(bs)
xl = []
yl = []
zl = []
print 'y0 ', y0
print 'b/a: ',b/a
fudge = 0.05
dx_space=0.1
for i in xrange(len(k)):
dx = np.arange(h,x0[i]+fudge,dx_space)
xl = xl + dx.tolist()
dy = -(b[i]*np.sqrt(1-((dx-h)/(a))**2))+k[i]
#yl.append(-(b[i]*np.sqrt(1-((dx-h)/(a))**2))+k[i])
yl = yl + dy.tolist()
#zl.append(np.arctan(dy/dx))
if(i == 0):
m = np.zeros(len(dy))
else:
m = -b[i]*b[i]*(dx-h)/((dy-k[i])*(a*a))
zl = zl + m.tolist()
#
# convert to numpy array
#
x = np.asarray(xl)
y = np.asarray(yl)
z = np.asarray(zl)
#
# steady as she goes
#
return x,y,z
def vf(t,x,m):
#
# Vector field function
#
dx=np.zeros(2)
zz = polyval2d(x[0], x[1], m)
theta = np.arctan(zz)
dx[0]=np.cos(theta)
dx[1]=np.sin(theta)
#dx[1]=x[0]**2-x[0]-2
#polyval2d(xx, yy, m)
#dx[1]=polyval2d(xx, yy, m)
return dx
def arr(m):
#
# Solution curves
#
h = hprime
#ic=[[h,-4],[h,-1],[h,1],[h,-8]]
ic=[[h,-3.4],[h,-0.1],[h,-8]]
end = [2,2,2,2]
t0=0; dt=0.1;
r = ode(vf).set_integrator('vode', method='bdf',max_step=dt)
for k in range(len(ic)):
tEnd=np.sqrt(ic[k][0]**2 + ic[k][1]**2)-end[k]
Y=[];T=[];S=[];
r.set_initial_value(ic[k], t0).set_f_params(m)
while r.successful() and r.t +dt < tEnd:
r.integrate(r.t+dt)
Y.append(r.y)
S=np.array(np.real(Y))
plt.plot(S[:,0],S[:,1], color = 'red', lw = 4.25)
plt.hlines(Rprime, hprime, 0, color='red',lw = 4.25)
def polyfit2d(x, y, z, order=5):
ncols = (order + 1)**2
G = np.zeros((x.size, ncols))
ij = itertools.product(range(order+1), range(order+1))
for k, (i,j) in enumerate(ij):
G[:,k] = x**i * y**j
#
cnd=1e-5
#m, _, _, _ = np.linalg.lstsq(G, z,rcond=cnd)
m, _, _, _ = np.linalg.lstsq(G, z)
return m
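#
# usage sketch (illustrative only): fit the surface once, then evaluate the
# interpolant at arbitrary points, e.g.
#
#   m = polyfit2d(x, y, z)        # least-squares fit of the 2D polynomial
#   z_hat = polyval2d(x, y, m)    # evaluate the fit back on the sample points
#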
def polyval2d(x, y, m):
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
z = np.zeros_like(x)
for a, (i,j) in zip(m, ij):
tmp = a * x**i * y**j
z += tmp
#print a,i,j,tmp,z
return z
def polyval2d_disp(x, y, m):
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
z = np.zeros_like(x)
for a, (i,j) in zip(m, ij):
tmp = a * x**i * y**j
z += tmp
print a,i,j,tmp,z
return z
#
#
#
def poly_disp_fparse(m):
print "#"
print "# Polynomial Interpolation Function"
print "#"
print "slope_func = '"
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
for a, (i,j) in zip(m, ij):
if( (i+1)*(j+1) != len(m)):
print ' %.15f * x^%i * y^%i +' % (a,i,j )
else:
print " %.15f * x^%i * y^%i'" % (a,i,j )
print
return 0
#
#
#
def poly_disp_py(m):
print "#"
print "# Polynomial Interpolation Function"
print "# For python"
print "return ",
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
for a, (i,j) in zip(m, ij):
if( (i+1)*(j+1) != len(m)):
print '%.15f * x**%i * y**%i +' % (a,i,j ),
else:
print "%.15f * x**%i * y**%i" % (a,i,j ),
print
return 0
#
#
#
def poly_disp_py_line(m):
print "#"
print "# Polynomial Interpolation Function"
print "# For python"
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
for a, (i,j) in zip(m, ij):
if( (i+1)*(j+1) != len(m)):
print ' tmp += %.15f * x**%i * y**%i' % (a,i,j )
print ' print tmp'
else:
print " tmp += %.15f * x**%i * y**%i" % (a,i,j )
print ' print tmp'
print
return 0
def load_ex():
#
# Generate Example Data
#
numdata = 100
x = np.random.random(numdata)
y = np.random.random(numdata)
#
# silly fake function for z
#
z = x**2 + y**2 + 3*x**3 + y + np.random.random(numdata)
return x,y,z
#
# main function: execute
#
def main():
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# ----------------------------------------
#
# load data in
#
x,y,z=load_ell()
#x,y,z=load_ex()
#
# Fit polynomial
#
m = polyfit2d(x,y,z)
#
# Evaluate it on a grid...
#
nx, ny = 200, 200
xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), nx),
np.linspace(y.min(), y.max(), ny))
zz = polyval2d(xx, yy, m)
#
# m is a matrix of polynomial values...
# e.g.
#
# Plot!
#
arr(m)
#
# ----------------------------------------
xt,yt,zt = top.load_ell()
mt = polyfit2d(xt,yt,zt)
nxt, nyt = 200, 200
xxt, yyt = np.meshgrid(np.linspace(x.min(), x.max(), nx),
np.linspace(y.min(), y.max(), ny))
zzt = top.polyval2d(xxt, yyt, mt)
top.arr(mt)
top.poly_disp_fparse(m)
#
# ----------------------------------------
plt.suptitle("SoV Configuration: Top Tier")
plt.title("Seven Vane")
xmin = -15
xmax = 6
ymin = -10
ymax = 14
major_ticksx = np.arange(xmin, xmax, 5)
minor_ticksx = np.arange(xmin, xmax, 1)
major_ticksy = np.arange(ymin, ymax, 5)
minor_ticksy = np.arange(ymin, ymax, 1)
ax.set_xticks(major_ticksx)
ax.set_xticks(minor_ticksx, minor=True)
ax.set_yticks(major_ticksy)
ax.set_yticks(minor_ticksy, minor=True)
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
plt.xlabel('Streamwise (X) [Meters]')
plt.ylabel('Spanwise (Y) [Meters]')
plt.grid()
# add circle
R = Rprime
circle=plt.Circle((0,0),R,color='black',linestyle='dotted',fill=False,linewidth=4)
from matplotlib.patches import Ellipse, Arc
ellipse = Arc([0.0,0.0],2*Rprime,2*Rprime,0,180,0,color='black', linewidth='5.0')
ax.add_patch(ellipse)
# adding text
#
ax.text(-20, 15, r'Upstream', fontsize=15)
ax.text(5, 15, r'Downstream', fontsize=15)
# angles
ax.text(-2, 1, r'$\theta^{t,r}$', fontsize=15,color='blue')
ax.text(3, 2, r'$\theta^{t,l}$', fontsize=15,color='blue')
ax.text(-12, -3, r'$\phi^{t,r}$', fontsize=15,color='blue')
ax.text(-12, 10, r'$\phi^{t,l}$', fontsize=15,color='blue')
# outer and inner radius
ax.annotate(r'$L_{x}$', xy=(-12,0), xytext=(0, 0),
arrowprops=dict(facecolor='black', shrink=0.05),color='blue',fontsize=15)
ax.annotate(r'cylinder', xy=(2,-3), xytext=(6,-7),
arrowprops=dict(facecolor='black', shrink=0.05),color='blue',fontsize=15)
ax.annotate(r'$L^{t,r}$', xy=(-12.5,-8.5), xytext=(-12.5, 4.0),
arrowprops=dict(facecolor='black', shrink=0.05),color='blue',fontsize=15)
ax.annotate(r'$L^{t,l}$', xy=(-13, 3), xytext=(-13,13),
arrowprops=dict(facecolor='black', shrink=0.05),color='blue',fontsize=15)
fig = plt.gcf()
fig.gca().add_artist(circle)
plt.axes().set_aspect('equal', 'datalim')
plt.savefig('interp_entire_top.png')
plt.savefig('interp_entire_top.pdf', format='pdf', dpi=1000)
#
# output polynomial for input
#
poly_disp_fparse(m)
#
# EXECUTE
#
main()
#
# nick
# 4/28/16
#
| mit | -839,450,253,255,232,900 | 22.543478 | 89 | 0.498384 | false |
shootstar/novatest | nova/api/openstack/compute/contrib/server_diagnostics.py | 13 | 2512 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
authorize = extensions.extension_authorizer('compute', 'server_diagnostics')
sd_nsmap = {None: wsgi.XMLNS_V11}
class ServerDiagnosticsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('diagnostics')
elem = xmlutil.SubTemplateElement(root, xmlutil.Selector(0),
selector=xmlutil.get_items)
elem.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=sd_nsmap)
class ServerDiagnosticsController(object):
@wsgi.serializers(xml=ServerDiagnosticsTemplate)
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
compute_api = compute.API()
try:
instance = compute_api.get(context, server_id)
        except exception.NotFound:
raise webob.exc.HTTPNotFound(_("Instance not found"))
return compute_api.get_diagnostics(context, instance)
class Server_diagnostics(extensions.ExtensionDescriptor):
"""Allow Admins to view server diagnostics through server action."""
name = "ServerDiagnostics"
alias = "os-server-diagnostics"
namespace = ("http://docs.openstack.org/compute/ext/"
"server-diagnostics/api/v1.1")
updated = "2011-12-21T00:00:00+00:00"
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
#NOTE(bcwaldon): This should be prefixed with 'os-'
ext = extensions.ResourceExtension('diagnostics',
ServerDiagnosticsController(),
parent=parent_def)
return [ext]
| apache-2.0 | -4,403,356,443,584,771,000 | 36.492537 | 78 | 0.67078 | false |
syphar/django | tests/template_tests/syntax_tests/test_for.py | 11 | 8082 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class ForTagTests(SimpleTestCase):
libraries = {'custom': 'template_tests.templatetags.custom'}
@setup({'for-tag01': '{% for val in values %}{{ val }}{% endfor %}'})
def test_for_tag01(self):
output = self.engine.render_to_string('for-tag01', {'values': [1, 2, 3]})
self.assertEqual(output, '123')
@setup({'for-tag02': '{% for val in values reversed %}{{ val }}{% endfor %}'})
def test_for_tag02(self):
output = self.engine.render_to_string('for-tag02', {'values': [1, 2, 3]})
self.assertEqual(output, '321')
@setup({'for-tag-vars01': '{% for val in values %}{{ forloop.counter }}{% endfor %}'})
def test_for_tag_vars01(self):
output = self.engine.render_to_string('for-tag-vars01', {'values': [6, 6, 6]})
self.assertEqual(output, '123')
@setup({'for-tag-vars02': '{% for val in values %}{{ forloop.counter0 }}{% endfor %}'})
def test_for_tag_vars02(self):
output = self.engine.render_to_string('for-tag-vars02', {'values': [6, 6, 6]})
self.assertEqual(output, '012')
@setup({'for-tag-vars03': '{% for val in values %}{{ forloop.revcounter }}{% endfor %}'})
def test_for_tag_vars03(self):
output = self.engine.render_to_string('for-tag-vars03', {'values': [6, 6, 6]})
self.assertEqual(output, '321')
@setup({'for-tag-vars04': '{% for val in values %}{{ forloop.revcounter0 }}{% endfor %}'})
def test_for_tag_vars04(self):
output = self.engine.render_to_string('for-tag-vars04', {'values': [6, 6, 6]})
self.assertEqual(output, '210')
@setup({'for-tag-vars05': '{% for val in values %}'
'{% if forloop.first %}f{% else %}x{% endif %}{% endfor %}'})
def test_for_tag_vars05(self):
output = self.engine.render_to_string('for-tag-vars05', {'values': [6, 6, 6]})
self.assertEqual(output, 'fxx')
@setup({'for-tag-vars06': '{% for val in values %}'
'{% if forloop.last %}l{% else %}x{% endif %}{% endfor %}'})
def test_for_tag_vars06(self):
output = self.engine.render_to_string('for-tag-vars06', {'values': [6, 6, 6]})
self.assertEqual(output, 'xxl')
@setup({'for-tag-unpack01': '{% for key,value in items %}{{ key }}:{{ value }}/{% endfor %}'})
def test_for_tag_unpack01(self):
output = self.engine.render_to_string('for-tag-unpack01', {'items': (('one', 1), ('two', 2))})
self.assertEqual(output, 'one:1/two:2/')
@setup({'for-tag-unpack03': '{% for key, value in items %}{{ key }}:{{ value }}/{% endfor %}'})
def test_for_tag_unpack03(self):
output = self.engine.render_to_string('for-tag-unpack03', {'items': (('one', 1), ('two', 2))})
self.assertEqual(output, 'one:1/two:2/')
@setup({'for-tag-unpack04': '{% for key , value in items %}{{ key }}:{{ value }}/{% endfor %}'})
def test_for_tag_unpack04(self):
output = self.engine.render_to_string('for-tag-unpack04', {'items': (('one', 1), ('two', 2))})
self.assertEqual(output, 'one:1/two:2/')
@setup({'for-tag-unpack05': '{% for key ,value in items %}{{ key }}:{{ value }}/{% endfor %}'})
def test_for_tag_unpack05(self):
output = self.engine.render_to_string('for-tag-unpack05', {'items': (('one', 1), ('two', 2))})
self.assertEqual(output, 'one:1/two:2/')
@setup({'for-tag-unpack06': '{% for key value in items %}{{ key }}:{{ value }}/{% endfor %}'})
def test_for_tag_unpack06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('for-tag-unpack06', {'items': (('one', 1), ('two', 2))})
@setup({'for-tag-unpack07': '{% for key,,value in items %}{{ key }}:{{ value }}/{% endfor %}'})
def test_for_tag_unpack07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('for-tag-unpack07', {'items': (('one', 1), ('two', 2))})
@setup({'for-tag-unpack08': '{% for key,value, in items %}{{ key }}:{{ value }}/{% endfor %}'})
def test_for_tag_unpack08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('for-tag-unpack08', {'items': (('one', 1), ('two', 2))})
@setup({'for-tag-unpack09': '{% for val in items %}{{ val.0 }}:{{ val.1 }}/{% endfor %}'})
def test_for_tag_unpack09(self):
"""
Ensure that a single loopvar doesn't truncate the list in val.
"""
output = self.engine.render_to_string('for-tag-unpack09', {'items': (('one', 1), ('two', 2))})
self.assertEqual(output, 'one:1/two:2/')
@setup({'for-tag-unpack13': '{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}'})
def test_for_tag_unpack13(self):
output = self.engine.render_to_string(
'for-tag-unpack13', {'items': (('one', 1, 'carrot'), ('two', 2, 'cheese'))}
)
if self.engine.string_if_invalid:
self.assertEqual(output, 'one:1,carrot/two:2,cheese/')
else:
self.assertEqual(output, 'one:1,carrot/two:2,cheese/')
@setup({'for-tag-empty01': '{% for val in values %}{{ val }}{% empty %}empty text{% endfor %}'})
def test_for_tag_empty01(self):
output = self.engine.render_to_string('for-tag-empty01', {'values': [1, 2, 3]})
self.assertEqual(output, '123')
@setup({'for-tag-empty02': '{% for val in values %}{{ val }}{% empty %}values array empty{% endfor %}'})
def test_for_tag_empty02(self):
output = self.engine.render_to_string('for-tag-empty02', {'values': []})
self.assertEqual(output, 'values array empty')
@setup({'for-tag-empty03': '{% for val in values %}'
'{{ val }}{% empty %}values array not found{% endfor %}'})
def test_for_tag_empty03(self):
output = self.engine.render_to_string('for-tag-empty03')
self.assertEqual(output, 'values array not found')
@setup({'for-tag-filter-ws': "{% load custom %}{% for x in s|noop:'x y' %}{{ x }}{% endfor %}"})
def test_for_tag_filter_ws(self):
"""
#19882
"""
output = self.engine.render_to_string('for-tag-filter-ws', {'s': 'abc'})
self.assertEqual(output, 'abc')
@setup({'for-tag-unpack-strs': '{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}'})
def test_for_tag_unpack_strs(self):
output = self.engine.render_to_string('for-tag-unpack-strs', {'items': ('ab', 'ac')})
self.assertEqual(output, 'a:b/a:c/')
@setup({'for-tag-unpack10': '{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}'})
def test_for_tag_unpack10(self):
with self.assertRaisesMessage(ValueError, 'Need 2 values to unpack in for loop; got 3.'):
self.engine.render_to_string(
'for-tag-unpack10',
{'items': (('one', 1, 'carrot'), ('two', 2, 'orange'))},
)
@setup({'for-tag-unpack11': '{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}'})
def test_for_tag_unpack11(self):
with self.assertRaisesMessage(ValueError, 'Need 3 values to unpack in for loop; got 2.'):
self.engine.render_to_string(
'for-tag-unpack11',
{'items': (('one', 1), ('two', 2))},
)
@setup({'for-tag-unpack12': '{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}'})
def test_for_tag_unpack12(self):
with self.assertRaisesMessage(ValueError, 'Need 3 values to unpack in for loop; got 2.'):
self.engine.render_to_string(
'for-tag-unpack12',
{'items': (('one', 1, 'carrot'), ('two', 2))}
)
@setup({'for-tag-unpack14': '{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}'})
def test_for_tag_unpack14(self):
with self.assertRaisesMessage(ValueError, 'Need 2 values to unpack in for loop; got 1.'):
self.engine.render_to_string('for-tag-unpack14', {'items': (1, 2)})
| bsd-3-clause | -2,457,779,798,460,977,000 | 49.198758 | 108 | 0.557659 | false |
intel/intel-iot-refkit | meta-iotqa/lib/oeqa/runtime/programming/python/apprt_python_runtime.py | 6 | 5973 | # -*- coding: utf8 -*-
import os
import time
from oeqa.oetest import oeRuntimeTest
class PythonRuntimeTest(oeRuntimeTest):
'''
This test suite tests whether some basic and key Python modules work well.
Notice:
    The Python upstream tests are already present in the image.
    Their path is generally: /usr/lib/python2.7/test
@class PythonRuntimeTest
'''
python_modules = {
'os': 'test_os.py',
'sys': 'test_sys.py',
'string': 'test_string.py',
'time': 'test_time.py',
're': 'test_re.py',
'shutil': 'test_shutil.py',
'inspect': 'test_inspect.py',
'subprocess': 'test_subprocess.py',
'unittest': 'test_unittest.py',
'logging': 'test_logging.py',
'ConfigParser': 'test_cfgparser.py',
'OptionParser': 'test_optparse.py',
'csv': 'test_csv.py',
'StringIO': 'test_StringIO.py',
'json': 'test_json.py',
'traceback': 'test_traceback.py'
}
test_mod_log = {}
results_python_runtime = 'results-python-runtime.log'
@classmethod
def setUpClass(cls):
'''
Clean the workspace before run all the test cases.
:return:
@fn setUpClass
@param cls
@return
'''
if os.path.exists(cls.results_python_runtime):
os.remove(cls.results_python_runtime)
def test_python_runtime(self):
'''
Test the Python key standard modules.
:return:
@fn test_python_runtime
@param self
@return
'''
for mod_name, test_mode_file in self.python_modules.items():
(status, output) = self.target.run(
'cd /usr/lib/python2.7/test;python %s' % test_mode_file
)
self.test_mod_log[mod_name] = output.strip().splitlines()
@classmethod
def tearDownClass(cls):
'''
Generate the final result output with specified format.
:return:
@fn tearDownClass
@param cls
@return
'''
parse_all_tc(cls.test_mod_log, cls.results_python_runtime)
def line_contains_result(line):
'''
Check whether a line contains the results of 'ok', 'FAIL', 'skipped', 'ERROR'
:param line: A list of one line in the test case log files
:return: True if the list line contains any test cases running information.
@fn line_contains_result
@return
'''
return ('ok' in line) or \
('FAIL' in line) or \
('skipped' in line) or \
('ERROR' in line)
def write_tc_name(tc_result, tc, mod_name):
'''
Get the test case result of tc_result list and append 'mod_name: tc_name'
to tc list. The tc_name should be the first element of tc_result.
:param tc_result:A list of one line in the test case log file.
:param tc:A list of two elements: [test case name, result]
:param mod_name:The module name of a test case
:return:
@fn write_tc_name
@return
'''
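    # Illustrative example (hypothetical values): with mod_name 'os' and
    # tc_result == ['test_getcwd', '(test_os.TestCase)', '...', 'ok'], the
    # code below appends 'os.TestCase.test_getcwd' to tc.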
if tc_result:
tc_name = tc_result[0]
tc_detailed_name = tc_result[1].strip('()').split('.')[-1]
tc.append('%s.%s.%s' % (mod_name, tc_detailed_name, tc_name))
def write_tc_result(tc_result, tc):
'''
Get the test case result of tc_result list and append it to tc list.
:param tc_result: A list of one line in the test case log file.
:param tc: A list of two elements: [test case name, result]
:return:
@fn write_tc_result
@return
'''
if 'ok' in tc_result or 'OK' in tc_result:
tc.append('PASSED')
elif 'FAIL' in tc_result or 'fail' in tc_result:
tc.append('FAILED')
elif 'ERROR' in tc_result or 'error' in tc_result:
tc.append('ERROR')
else:
tc.append(None)
def parse_all_tc(mod_log, result_file):
'''
    Read all the test case results. It supports both the regular
    and the irregular log format.
:param mod_log: The mod:log dictionary
:param result_file: The final python runtime test case report file.
:return:
@fn parse_all_tc
@return
'''
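    # The two log layouts handled below look roughly like this (illustrative,
    # based on typical verbose unittest output):
    #   test_getcwd (test_os.TestCase) ... ok     <- result on the same line
    #   test_getcwd (test_os.TestCase)
    #   ... ok                                    <- result on the following line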
tc_results = []
for mod, lines in mod_log.items():
for line_no, line in enumerate(lines):
tc = []
if line.startswith('test'):
tc_result = line.strip().split()
if ('...' in tc_result) :
if line_contains_result(tc_result):
write_tc_name(tc_result, tc, mod)
write_tc_result(tc_result, tc)
else:
if line_no < len(lines) - 1:
next_tc_result = lines[line_no + 1].strip().split()
write_tc_name(tc_result, tc, mod)
write_tc_result(next_tc_result, tc)
elif line_no < len(lines) - 1 and \
'...' in lines[line_no + 1] and \
(not lines[line_no + 1].startswith('test')):
tc_result = line.strip().split()
next_tc_result = lines[line_no + 1].strip().split()
write_tc_name(tc_result, tc, mod)
write_tc_result(next_tc_result, tc)
elif line_no < len(lines) - 1 and \
'...' in lines[line_no + 1] and \
(not lines[line_no + 1].startswith('test')):
tc_result = line.strip().split()
next_tc_result = lines[line_no + 1].strip().split()
write_tc_name(tc_result, tc, mod)
write_tc_result(next_tc_result, tc)
if None not in tc and tc != []:
tc_results.append(tc)
with open(result_file, 'w') as results_f:
for t in tc_results:
if None not in t and t != []:
results_f.write('%s - runtest.py - RESULTS - Testcase %s: %s\n' %
(time.strftime('%H:%M:%S'), t[0], t[1])
)
| mit | 9,182,438,659,550,893,000 | 31.286486 | 82 | 0.537753 | false |
hirose31/ganglia-pymodule_skeleton | skel_simple.py | 1 | 2607 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
descriptors = list()
Desc_Skel = {}
Debug = False
def dprint(f, *v):
if Debug:
print >>sys.stderr, "DEBUG: "+f % v
# build and return the metric value here
def metric_of(name):
dprint("%s", name)
if name == "foo":
return 1
elif name == "bar":
return 2
else:
return 9
def metric_init(params):
global descriptors, Desc_Skel, Debug
print '[skel_simple] fixme'
print params
# initialize skeleton of descriptors
    # uint is cast to unsigned int, so 4294967295 (4G) is presumably the upper limit?
# gmond/modules/python/mod_python.c
Desc_Skel = {
'name' : 'fixme TBD',
'call_back' : metric_of,
'time_max' : 60,
        # value_type and format must use matching types
'value_type' : 'uint', # string | uint | float | double
'format' : '%d', # %s | %d | %f | %f
'units' : 'fixme',
'slope' : 'fixme zero|positive|negative|both',
'description' : 'fixme TBD',
'groups' : 'fixme network',
}
if "refresh_rate" not in params:
params["refresh_rate"] = 10
if "debug" in params:
Debug = params["debug"]
dprint("%s", "Debug mode on")
# IP:HOSTNAME
if "spoof_host" in params:
Desc_Skel["spoof_host"] = params["spoof_host"]
descriptors.append(create_desc(Desc_Skel, {
"name" : "foo",
"value_type" : "float",
"format" : "%.3f",
"units" : "req/sec",
"description": "request per second",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : "bar",
"value_type" : "uint",
"format" : "%d",
"units" : "bytes/sec",
"description": "byte per sec",
}))
return descriptors
def create_desc(skel, prop):
d = skel.copy()
for k,v in prop.iteritems():
d[k] = v
return d
def metric_cleanup():
pass
if __name__ == '__main__':
params = {
"device": "eth0",
"host" : "localhost",
"debug" : True,
}
metric_init(params)
# for d in descriptors:
# print ''' metric {
# name = "%s"
# title = "%s"
# value_threshold = 0
# }''' % (d["name"], d["description"])
for d in descriptors:
v = d['call_back'](d['name'])
print ('value for %s is '+d['format']) % (d['name'], v)
| apache-2.0 | 7,990,513,699,839,484,000 | 24.31 | 64 | 0.478072 | false |
bakatrouble/elevators | ui/applications/application_wizard.py | 1 | 5674 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'application_wizard.ui'
#
# Created by: PyQt5 UI code generator 5.8.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ApplicationWizard(object):
def setupUi(self, ApplicationWizard):
ApplicationWizard.setObjectName("ApplicationWizard")
ApplicationWizard.resize(400, 300)
self.wizardPage1 = QtWidgets.QWizardPage()
self.wizardPage1.setObjectName("wizardPage1")
self.verticalLayout = QtWidgets.QVBoxLayout(self.wizardPage1)
self.verticalLayout.setObjectName("verticalLayout")
self.label_3 = QtWidgets.QLabel(self.wizardPage1)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.edtDate = QtWidgets.QDateEdit(self.wizardPage1)
self.edtDate.setCalendarPopup(True)
self.edtDate.setObjectName("edtDate")
self.verticalLayout.addWidget(self.edtDate)
self.label = QtWidgets.QLabel(self.wizardPage1)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.cmbApplicationType = QtWidgets.QComboBox(self.wizardPage1)
self.cmbApplicationType.setObjectName("cmbApplicationType")
self.verticalLayout.addWidget(self.cmbApplicationType)
self.label_2 = QtWidgets.QLabel(self.wizardPage1)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.lblClient = QtWidgets.QLabel(self.wizardPage1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lblClient.sizePolicy().hasHeightForWidth())
self.lblClient.setSizePolicy(sizePolicy)
self.lblClient.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.lblClient.setFrameShadow(QtWidgets.QFrame.Sunken)
self.lblClient.setObjectName("lblClient")
self.horizontalLayout_2.addWidget(self.lblClient)
self.btnSelectClient = QtWidgets.QToolButton(self.wizardPage1)
self.btnSelectClient.setObjectName("btnSelectClient")
self.horizontalLayout_2.addWidget(self.btnSelectClient)
self.verticalLayout.addLayout(self.horizontalLayout_2)
ApplicationWizard.addPage(self.wizardPage1)
self.wizardPage2 = QtWidgets.QWizardPage()
self.wizardPage2.setObjectName("wizardPage2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.wizardPage2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.lblTableTitle = QtWidgets.QLabel(self.wizardPage2)
self.lblTableTitle.setObjectName("lblTableTitle")
self.verticalLayout_2.addWidget(self.lblTableTitle)
self.tblElevatorsData = QtWidgets.QTableView(self.wizardPage2)
self.tblElevatorsData.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tblElevatorsData.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.tblElevatorsData.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.tblElevatorsData.setCornerButtonEnabled(False)
self.tblElevatorsData.setObjectName("tblElevatorsData")
self.tblElevatorsData.verticalHeader().setVisible(False)
self.verticalLayout_2.addWidget(self.tblElevatorsData)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btnTableAdd = QtWidgets.QPushButton(self.wizardPage2)
icon = QtGui.QIcon.fromTheme("document-new")
self.btnTableAdd.setIcon(icon)
self.btnTableAdd.setObjectName("btnTableAdd")
self.horizontalLayout.addWidget(self.btnTableAdd)
self.btnTableRemove = QtWidgets.QPushButton(self.wizardPage2)
icon = QtGui.QIcon.fromTheme("edit-delete")
self.btnTableRemove.setIcon(icon)
self.btnTableRemove.setObjectName("btnTableRemove")
self.horizontalLayout.addWidget(self.btnTableRemove)
self.verticalLayout_2.addLayout(self.horizontalLayout)
ApplicationWizard.addPage(self.wizardPage2)
self.retranslateUi(ApplicationWizard)
QtCore.QMetaObject.connectSlotsByName(ApplicationWizard)
def retranslateUi(self, ApplicationWizard):
_translate = QtCore.QCoreApplication.translate
ApplicationWizard.setWindowTitle(_translate("ApplicationWizard", "Заявка"))
self.label_3.setText(_translate("ApplicationWizard", "Дата поступления заявки"))
self.label.setText(_translate("ApplicationWizard", "Тип заявки"))
self.label_2.setText(_translate("ApplicationWizard", "Заказчик"))
self.lblClient.setText(_translate("ApplicationWizard", "<не выбрано>"))
self.btnSelectClient.setText(_translate("ApplicationWizard", "..."))
self.lblTableTitle.setText(_translate("ApplicationWizard", "Адресный список и технические характеристики лифтов"))
self.btnTableAdd.setText(_translate("ApplicationWizard", "Добавить"))
self.btnTableRemove.setText(_translate("ApplicationWizard", "Удалить"))
| gpl-3.0 | -6,174,398,591,411,151,000 | 55.734694 | 122 | 0.740108 | false |
teozkr/Flask-Pushrod | flask_pushrod/renderers/base.py | 1 | 2987 | from werkzeug.exceptions import NotAcceptable
from flask import current_app, Response
from functools import wraps
class UnrenderedResponse(object):
"""
Holds basic response data from the view function until it is processed by the renderer.
"""
#: The class to construct with the rendered response, defaults to :class:`flask.Response`.
rendered_class = Response
def __init__(self, response=None, status=None, headers=None):
self.response = response
self.status = status
self.headers = headers
def rendered(self, rendered_response, mime_type):
"""
Constructs a :attr:`rendered_class` (:class:`flask.Response` by default) based on the response parameters.
"""
return self.rendered_class(rendered_response,
self.status, self.headers,
mime_type)
def renderer(name=None, mime_type=None, normalize=True):
"""
Flags a function as a Pushrod renderer.
.. note::
Before it is recognized by :meth:`flask.ext.pushrod.Pushrod.get_renderers_for_request` (and, by extension, :meth:`~flask.ext.pushrod.Pushrod.render_response`) it must be registered to the app's :class:`~flask.ext.pushrod.Pushrod` instance (using :meth:`~flask.ext.pushrod.Pushrod.register_renderer`, or passed as part of the ``renderers`` argument to the :class:`~flask.ext.pushrod.Pushrod` constructor).
:param name: A :obj:`basestring` or a tuple of basestrings to match against when explicitly requested in the query string
:param mime_type: A :obj:`basestring` or a tuple of basestrings to match against against when using HTTP content negotiation
:param normalize: If True then the unrendered response will be passed through :meth:`flask.ext.pushrod.Pushrod.normalize`
"""
if not name: # pragma: no cover
name = ()
if isinstance(name, basestring):
name = (name,)
if not mime_type: # pragma: no cover
mime_type = ()
if isinstance(mime_type, basestring):
mime_type = (mime_type,)
def decorator(f):
f._is_pushrod_renderer = True
f.renderer_names = name
f.renderer_mime_types = mime_type
@wraps(f)
def wrapper(unrendered, **kwargs):
if normalize:
unrendered.response = current_app.extensions['pushrod'].normalize(unrendered.response)
return f(unrendered, **kwargs)
return wrapper
return decorator
class RendererNotFound(NotAcceptable):
"""
Thrown when no acceptable renderer can be found, see :meth:`flask.ext.pushrod.Pushrod.get_renderers_for_request`.
.. note::
This class inherits from :exc:`werkzeug.exceptions.NotAcceptable`, so it's automatically converted to ``406 Not Acceptable`` by Werkzeug if not explicitly handled (which is usually the intended behaviour).
"""
def __init__(self):
super(RendererNotFound, self).__init__(
u"The requested renderer does not exist")
| mit | 7,854,506,730,519,434,000 | 36.810127 | 411 | 0.675929 | false |
bingopodcast/bingos | bingo_emulator/graphics/orient.py | 1 | 58676 |
import pygame
import random
pygame.display.set_caption("Multi Bingo")
screen = pygame.display.set_mode((0,0))
screen.fill([0,0,0])
pygame.mouse.set_visible(False)
meter = pygame.image.load('graphics/assets/black_register_cover.png').convert()
odds = pygame.image.load('orient/assets/odds.png').convert_alpha()
eb = pygame.image.load('orient/assets/eb.png').convert_alpha()
eb_number = pygame.image.load('orient/assets/eb_number.png').convert_alpha()
extra_balls = pygame.image.load('orient/assets/extra_balls.png').convert_alpha()
time = pygame.image.load('orient/assets/time.png').convert_alpha()
ml_letter = pygame.image.load('orient/assets/ml_letter.png').convert_alpha()
ml_arrow = pygame.image.load('orient/assets/ml_arrow.png').convert_alpha()
ml_a = pygame.image.load('orient/assets/ml_a.png').convert_alpha()
select_now = pygame.image.load('orient/assets/select_now.png').convert_alpha()
tilt = pygame.image.load('orient/assets/tilt.png').convert_alpha()
button = pygame.image.load('orient/assets/pap.png').convert_alpha()
red_double = pygame.image.load('orient/assets/red_double.png').convert_alpha()
green_double = pygame.image.load('orient/assets/green_double.png').convert_alpha()
yellow_double = pygame.image.load('orient/assets/yellow_double.png').convert_alpha()
blue_double = pygame.image.load('orient/assets/blue_double.png').convert_alpha()
four_stars = pygame.image.load('orient/assets/four_stars.png').convert_alpha()
six_stars = pygame.image.load('orient/assets/six_stars.png').convert_alpha()
three_stars = pygame.image.load('orient/assets/three_stars.png').convert_alpha()
three_red = pygame.image.load('orient/assets/three_red.png').convert_alpha()
two_red = pygame.image.load('orient/assets/two_red.png').convert_alpha()
red_letter = pygame.image.load('orient/assets/red_letter.png').convert_alpha()
letter1 = pygame.image.load('orient/assets/letter1.png').convert_alpha()
letter2 = pygame.image.load('orient/assets/letter2.png').convert_alpha()
letter3 = pygame.image.load('orient/assets/letter3.png').convert_alpha()
letter4 = pygame.image.load('orient/assets/letter4.png').convert_alpha()
letter5 = pygame.image.load('orient/assets/letter5.png').convert_alpha()
letter6 = pygame.image.load('orient/assets/letter6.png').convert_alpha()
red_letter1 = pygame.image.load('orient/assets/red_letter1.png').convert_alpha()
red_letter2 = pygame.image.load('orient/assets/red_letter2.png').convert_alpha()
red_letter3 = pygame.image.load('orient/assets/red_letter3.png').convert_alpha()
red_letter4 = pygame.image.load('orient/assets/red_letter4.png').convert_alpha()
red_letter5 = pygame.image.load('orient/assets/red_letter5.png').convert_alpha()
red_letter6 = pygame.image.load('orient/assets/red_letter6.png').convert_alpha()
number_card = pygame.image.load('orient/assets/number_card.png').convert_alpha()
number = pygame.image.load('orient/assets/number.png').convert_alpha()
columnb1 = pygame.image.load('orient/assets/columnb1.png').convert_alpha()
columnb2 = pygame.image.load('orient/assets/columnb2.png').convert_alpha()
columna = pygame.image.load('orient/assets/columna.png').convert_alpha()
columnc1 = pygame.image.load('orient/assets/columnc1.png').convert_alpha()
columnc2 = pygame.image.load('orient/assets/columnc2.png').convert_alpha()
double_triple = pygame.image.load('orient/assets/double_triple.png').convert_alpha()
top = pygame.image.load('orient/assets/top.png').convert_alpha()
bottom = pygame.image.load('orient/assets/bottom.png').convert_alpha()
ball = pygame.image.load('orient/assets/ball.png').convert_alpha()
eo = pygame.image.load('orient/assets/eo.png').convert_alpha()
dn = pygame.image.load('orient/assets/dn.png').convert_alpha()
collected = pygame.image.load('orient/assets/collected.png').convert_alpha()
missed = pygame.image.load('orient/assets/missed.png').convert_alpha()
special_odds = pygame.image.load('orient/assets/special_odds.png').convert_alpha()
golden = pygame.image.load('orient/assets/golden.png').convert_alpha()
bg_menu = pygame.image.load('orient/assets/orient_menu.png').convert_alpha()
bg_gi = pygame.image.load('orient/assets/orient_gi.png').convert_alpha()
bg_off = pygame.image.load('orient/assets/orient_off.png').convert_alpha()
class scorereel():
""" Score Reels are used to count replays """
def __init__(self, pos, image):
self.position = pos
self.default_y = self.position[1]
self.image = pygame.image.load(image).convert()
reel1 = scorereel([110,800], "graphics/assets/white_reel.png")
reel10 = scorereel([91,800], "graphics/assets/white_reel.png")
reel100 = scorereel([72,800], "graphics/assets/white_reel.png")
reel1000 = scorereel([53,800], "graphics/assets/white_reel.png")
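# together, reel1..reel1000 display the replay count
# (ones, tens, hundreds and thousands digits)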
def display(s, replays=0, menu=False):
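    # redraws the complete backglass: replay reels, card columns, lit
    # features, odds positions and the special-game section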
meter.set_colorkey((255,0,252))
meter_position = [44,800]
screen.blit(reel1.image, reel1.position)
screen.blit(reel10.image, reel10.position)
screen.blit(reel100.image, reel100.position)
screen.blit(reel1000.image, reel1000.position)
screen.blit(meter, meter_position)
if s.game.line2.position == 0:
p = [233,368]
screen.blit(columnb1, p)
p = [286,369]
screen.blit(columnb2, p)
else:
p = [233,368]
screen.blit(columnb2, p)
p = [286,369]
screen.blit(columnb1, p)
if s.game.line1.position == 0 or s.game.line1.position == 2:
p = [337,318]
screen.blit(columna, p)
elif s.game.line1.position == 1:
p = [337,368]
screen.blit(columna, p)
else:
p = [337,269]
screen.blit(columna, p)
if s.game.line3.position == 0:
p = [389,368]
screen.blit(columnc1, p)
p = [440,369]
screen.blit(columnc2, p)
else:
p = [389,368]
screen.blit(columnc2, p)
p = [440,369]
screen.blit(columnc1, p)
nc_p = [228,368]
screen.blit(number_card, nc_p)
backglass_position = [0, 0]
backglass = pygame.Surface(screen.get_size(), flags=pygame.SRCALPHA)
backglass.fill((0, 0, 0))
if menu == True:
screen.blit(bg_menu, backglass_position)
else:
if (s.game.anti_cheat.status == True):
screen.blit(bg_gi, backglass_position)
else:
screen.blit(bg_off, backglass_position)
if s.game.eb_play.status == True:
eb_position = [38,1041]
screen.blit(extra_balls, eb_position)
if s.game.extra_ball.position >= 1:
eb_position = [147,1043]
screen.blit(eb_number, eb_position)
if s.game.extra_ball.position >= 2:
eb_position = [197,1043]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 3:
eb_position = [261,1043]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 4:
eb_position = [322,1043]
screen.blit(eb_number, eb_position)
if s.game.extra_ball.position >= 5:
eb_position = [373,1044]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 6:
eb_position = [434,1043]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 7:
eb_position = [498,1044]
screen.blit(eb_number, eb_position)
if s.game.extra_ball.position >= 8:
eb_position = [548,1045]
screen.blit(eb, eb_position)
if s.game.extra_ball.position >= 9:
eb_position = [612,1044]
screen.blit(eb, eb_position)
if s.game.red_star.status == True:
rs_position = [19,463]
screen.blit(button, rs_position)
if s.game.yellow_star.status == True:
rs_position = [19,507]
screen.blit(button, rs_position)
if s.game.mystic_lines.position >= 4 or s.game.two_red_letter.status == True or s.game.three_red_letter.status == True:
if s.game.selection_feature.position < 7:
bfp = [19,552]
screen.blit(bottom, bfp)
elif s.game.selection_feature.position in [7,8]:
bfp = [19,417]
screen.blit(button, bfp)
elif s.game.selection_feature.position == 9:
bfp = [17,372]
screen.blit(top, bfp)
if s.game.ball_count.position < 1:
if s.game.odds_only.status == True:
b = [20,877]
screen.blit(top, b)
elif s.game.features.status == True:
b = [21,916]
screen.blit(button, b)
elif s.game.special.status == True:
b = [21,1000]
screen.blit(bottom, b)
else:
b = [21,956]
screen.blit(button, b)
if s.game.mystic_lines.position == 1:
p = [204,687]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 2:
p = [236,687]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 3:
p = [269,687]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position >= 4:
p = [295,681]
screen.blit(ml_a, p)
p = [341,595]
screen.blit(ml_letter, p)
if s.game.mystic_lines.position == 5:
p = [334,687]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 6:
p = [366,687]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position >= 7:
p = [392,681]
screen.blit(ml_a, p)
p = [265,595]
screen.blit(ml_letter, p)
if s.game.mystic_lines.position == 8:
p = [433,687]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 9:
p = [463,687]
screen.blit(ml_arrow, p)
if s.game.mystic_lines.position == 10:
p = [489,681]
screen.blit(ml_a, p)
p = [416,595]
screen.blit(ml_letter, p)
if s.game.mystic_lines.position >= 4:
t = 3
if s.game.selection_feature.position in [7,8]:
t = 4
if s.game.selection_feature.position == 9:
t = 5
if s.game.ball_count.position == t:
s.cancel_delayed(name="blink")
blink([s,1,1])
else:
s.cancel_delayed(name="blink")
if s.game.tilt.status == False:
if s.holes:
if 1 in s.holes:
if s.game.line2.position == 0:
p = [287,471]
screen.blit(number, p)
else:
p = [235,471]
screen.blit(number, p)
if 2 in s.holes:
if s.game.line3.position == 0:
p = [389,471]
screen.blit(number, p)
else:
p = [440,471]
screen.blit(number, p)
if 3 in s.holes:
if s.game.line3.position == 0:
p = [389,522]
screen.blit(number, p)
else:
p = [441,521]
screen.blit(number, p)
if 4 in s.holes:
if s.game.line2.position == 0:
p = [287,371]
screen.blit(number, p)
else:
p = [236,371]
screen.blit(number, p)
if 5 in s.holes:
if s.game.line1.position == 0 or s.game.line1.position == 2:
p = [336,521]
screen.blit(number, p)
elif s.game.line1.position == 1:
p = [337,371]
screen.blit(number, p)
else:
p = [337,471]
screen.blit(number, p)
if 6 in s.holes:
if s.game.line3.position == 0:
p = [389,421]
screen.blit(number, p)
else:
p = [441,419]
screen.blit(number, p)
if 7 in s.holes:
if s.game.line1.position == 0 or s.game.line1.position == 2:
p = [337,370]
screen.blit(number, p)
elif s.game.line1.position == 1:
p = [337,420]
screen.blit(number, p)
else:
p = [335,521]
screen.blit(number, p)
if 8 in s.holes:
if s.game.line2.position == 0:
p = [286,421]
screen.blit(number, p)
else:
p = [233,420]
screen.blit(number, p)
if 9 in s.holes:
if s.game.line3.position == 0:
p = [389,369]
screen.blit(number, p)
else:
p = [440,371]
screen.blit(number, p)
if 10 in s.holes:
if s.game.line3.position == 0:
p = [440,521]
screen.blit(number, p)
else:
p = [389,521]
screen.blit(number, p)
if 11 in s.holes:
if s.game.line2.position == 0:
p = [233,420]
screen.blit(number, p)
else:
p = [286,420]
screen.blit(number, p)
if 12 in s.holes:
if s.game.line1.position in [0,2]:
p = [336,419]
screen.blit(number, p)
elif s.game.line1.position == 1:
p = [336,471]
screen.blit(number, p)
else:
p = [337,370]
screen.blit(number, p)
if 13 in s.holes:
if s.game.line3.position == 0:
p = [440,420]
screen.blit(number, p)
else:
p = [388,420]
screen.blit(number, p)
if 14 in s.holes:
if s.game.line2.position == 0:
p = [285,521]
screen.blit(number, p)
else:
p = [233,521]
screen.blit(number, p)
if 15 in s.holes:
if s.game.line2.position == 0:
p = [234,470]
screen.blit(number, p)
else:
p = [286,471]
screen.blit(number, p)
if 16 in s.holes:
if s.game.line2.position == 0:
p = [234,521]
screen.blit(number, p)
else:
p = [285,521]
screen.blit(number, p)
if 17 in s.holes:
if s.game.line3.position == 0:
p = [440,370]
screen.blit(number, p)
else:
p = [389,370]
screen.blit(number, p)
if 18 in s.holes:
if s.game.line2.position == 0:
p = [235,370]
screen.blit(number, p)
else:
p = [286,370]
screen.blit(number, p)
if 19 in s.holes:
if s.game.line3.position == 0:
p = [441,470]
screen.blit(number, p)
else:
p = [389,470]
screen.blit(number, p)
if 20 in s.holes:
if s.game.line1.position in [0,2]:
p = [337,471]
screen.blit(number, p)
elif s.game.line1.position == 1:
p = [336,521]
screen.blit(number, p)
else:
p = [336,419]
screen.blit(number, p)
if s.game.red_odds.position == 1:
o = [175,773]
screen.blit(odds, o)
elif s.game.red_odds.position == 2:
o = [223,773]
screen.blit(odds, o)
elif s.game.red_odds.position == 3:
o = [275,773]
screen.blit(odds, o)
elif s.game.red_odds.position == 4:
o = [324,773]
screen.blit(odds, o)
elif s.game.red_odds.position == 5:
o = [375,773]
screen.blit(odds, o)
elif s.game.red_odds.position == 6:
o = [425,773]
screen.blit(odds, o)
elif s.game.red_odds.position == 7:
o = [473,773]
screen.blit(odds, o)
elif s.game.red_odds.position == 8:
o = [525,773]
screen.blit(odds, o)
elif s.game.red_odds.position == 9:
o = [576,773]
screen.blit(odds, o)
elif s.game.red_odds.position == 10:
o = [625,773]
screen.blit(odds, o)
if s.game.yellow_odds.position == 1:
o = [175,905]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 2:
o = [223,905]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 3:
o = [275,905]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 4:
o = [324,905]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 5:
o = [375,905]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 6:
o = [424,905]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 7:
o = [473,905]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 8:
o = [525,905]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 9:
o = [576,905]
screen.blit(odds, o)
elif s.game.yellow_odds.position == 10:
o = [625,905]
screen.blit(odds, o)
if s.game.green_odds.position == 1:
o = [175,842]
screen.blit(odds, o)
elif s.game.green_odds.position == 2:
o = [223,842]
screen.blit(odds, o)
elif s.game.green_odds.position == 3:
o = [275,842]
screen.blit(odds, o)
elif s.game.green_odds.position == 4:
o = [324,842]
screen.blit(odds, o)
elif s.game.green_odds.position == 5:
o = [375,842]
screen.blit(odds, o)
elif s.game.green_odds.position == 6:
o = [424,842]
screen.blit(odds, o)
elif s.game.green_odds.position == 7:
o = [473,842]
screen.blit(odds, o)
elif s.game.green_odds.position == 8:
o = [525,842]
screen.blit(odds, o)
elif s.game.green_odds.position == 9:
o = [576,842]
screen.blit(odds, o)
elif s.game.green_odds.position == 10:
o = [625,840]
screen.blit(odds, o)
if s.game.blue_odds.position == 1:
o = [175,970]
screen.blit(odds, o)
elif s.game.blue_odds.position == 2:
o = [223,970]
screen.blit(odds, o)
elif s.game.blue_odds.position == 3:
o = [275,970]
screen.blit(odds, o)
elif s.game.blue_odds.position == 4:
o = [324,970]
screen.blit(odds, o)
elif s.game.blue_odds.position == 5:
o = [375,970]
screen.blit(odds, o)
elif s.game.blue_odds.position == 6:
o = [424,970]
screen.blit(odds, o)
elif s.game.blue_odds.position == 7:
o = [473,970]
screen.blit(odds, o)
elif s.game.blue_odds.position == 8:
o = [525,970]
screen.blit(odds, o)
elif s.game.blue_odds.position == 9:
o = [574,970]
screen.blit(odds, o)
elif s.game.blue_odds.position == 10:
o = [622,970]
screen.blit(odds, o)
p = [381,207]
screen.blit(letter1, p)
p = [425,197]
screen.blit(letter2, p)
p = [489,206]
screen.blit(letter3, p)
p = [520,209]
screen.blit(letter4, p)
p = [568,211]
screen.blit(letter5, p)
p = [637,203]
screen.blit(letter6, p)
if s.game.red_odds.position < 5:
p = [381,207]
screen.blit(red_letter1, p)
if s.game.red_odds.position in [5,6]:
p = [425,197]
screen.blit(red_letter2, p)
if s.game.red_odds.position == 7:
p = [489,206]
screen.blit(red_letter3, p)
if s.game.red_odds.position == 8:
p = [520,209]
screen.blit(red_letter4, p)
if s.game.red_odds.position == 9:
p = [568,211]
screen.blit(red_letter5, p)
if s.game.red_odds.position == 10:
p = [637,203]
screen.blit(red_letter6, p)
if s.game.two_red_letter.status == True:
p = [17,255]
screen.blit(red_letter, p)
p = [91,217]
screen.blit(two_red, p)
if s.game.three_red_letter.status == True:
p = [17,255]
screen.blit(red_letter, p)
p = [17,216]
screen.blit(three_red, p)
if s.game.three_stars.status == True:
p = [17,294]
screen.blit(four_stars, p)
p = [17,333]
screen.blit(three_stars, p)
if s.game.six_stars.status == True:
p = [17,294]
screen.blit(four_stars, p)
p = [91,334]
screen.blit(six_stars, p)
if s.game.double_red.status == True:
p = [20,613]
screen.blit(red_double, p)
if s.game.double_yellow.status == True:
p = [94,611]
screen.blit(yellow_double, p)
if s.game.double_green.status == True:
p = [21,685]
screen.blit(green_double, p)
if s.game.double_blue.status == True:
p = [95,685]
screen.blit(blue_double, p)
if s.game.triple.status == False and (s.game.double_red.status == True or s.game.double_yellow.status == True or s.game.double_green.status == True or s.game.double_blue.status == True):
p = [56,685]
screen.blit(double_triple, p)
if s.game.triple.status == True and (s.game.double_red.status == True or s.game.double_yellow.status == True or s.game.double_green.status == True or s.game.double_blue.status == True):
p = [56,651]
screen.blit(double_triple, p)
if s.game.tilt.status == True:
tilt_position = [327,325]
screen.blit(tilt, tilt_position)
# Special Game
if s.game.missed.status == False:
if s.game.special_odds.position > 0:
if s.game.special_game.position == 0:
p = [527,697]
screen.blit(eo, p)
if s.game.special_game.position == 1:
p = [527,608]
screen.blit(ball, p)
p = [617,700]
screen.blit(eo, p)
if s.game.special_game.position == 2:
p = [562,609]
screen.blit(ball, p)
p = [527,697]
screen.blit(eo, p)
if s.game.special_game.position == 3:
p = [598,611]
screen.blit(ball, p)
p = [617,700]
screen.blit(eo, p)
if s.game.special_game.position == 4:
p = [634,612]
screen.blit(ball, p)
p = [527,697]
screen.blit(eo, p)
if s.game.special_game.position == 5:
p = [668,613]
screen.blit(ball, p)
p = [536,398]
screen.blit(golden, p)
if s.game.special_game.position in [3,4]:
p = [535,455]
screen.blit(dn, p)
if s.game.special_replay_counter.position > 0:
p = [535,347]
screen.blit(collected, p)
if s.game.missed.status == True:
p = [533,563]
screen.blit(missed, p)
if s.game.special_odds.position == 1:
p = [599,577]
screen.blit(special_odds, p)
elif s.game.special_odds.position == 2:
p = [599,544]
screen.blit(special_odds, p)
elif s.game.special_odds.position == 3:
p = [599,511]
screen.blit(special_odds, p)
elif s.game.special_odds.position == 4:
p = [599,478]
screen.blit(special_odds, p)
elif s.game.special_odds.position == 5:
p = [599,443]
screen.blit(special_odds, p)
elif s.game.special_odds.position == 6:
p = [599,411]
screen.blit(special_odds, p)
elif s.game.special_odds.position == 7:
p = [599,377]
screen.blit(special_odds, p)
elif s.game.special_odds.position == 8:
p = [600,344]
screen.blit(special_odds, p)
pygame.display.update()
def blink(args):
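    # alternately draws and erases the "select now" insert every 0.1 s to
    # make the backglass lamp appear to flash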
dirty_rects = []
s = args[0]
b = args[1]
sn = args[2]
if b == 0:
if sn == 1:
p = [390,724]
dirty_rects.append(screen.blit(select_now, p))
pygame.display.update(dirty_rects)
else:
dirty_rects.append(screen.blit(bg_gi, (390,724), pygame.Rect(390,724,128,36)))
pygame.display.update(dirty_rects)
b = not b
args = [s,b,sn]
s.delay(name="blink", delay=0.1, handler=blink, param=args)
def line1_animation(args):
dirty_rects = []
s = args[0]
num = args[1]
line = args[2]
if line == 1:
if s.game.line1.position == 0:
dirty_rects.append(screen.blit(columna, (337, 269 - num)))
elif s.game.line1.position == 1:
dirty_rects.append(screen.blit(columna, (337, 318 - num)))
elif s.game.line1.position == 2:
dirty_rects.append(screen.blit(columna, (337, 368 + num)))
elif s.game.line1.position == 3:
dirty_rects.append(screen.blit(columna, (337, 318 + num)))
nc_p = [228,368]
dirty_rects.append(screen.blit(number_card, nc_p))
if (s.game.anti_cheat.status == True):
dirty_rects.append(screen.blit(bg_gi, (224,264), pygame.Rect(224,264,270,408)))
else:
dirty_rects.append(screen.blit(bg_off, (224,264), pygame.Rect(224,264,270,408)))
dirty_rects.append(screen.blit(bg_gi, (376,196), pygame.Rect(376,196,330,106)))
p = [381,207]
dirty_rects.append(screen.blit(letter1, p))
p = [425,197]
dirty_rects.append(screen.blit(letter2, p))
p = [489,206]
dirty_rects.append(screen.blit(letter3, p))
p = [520,209]
dirty_rects.append(screen.blit(letter4, p))
p = [568,211]
dirty_rects.append(screen.blit(letter5, p))
p = [637,203]
dirty_rects.append(screen.blit(letter6, p))
if s.game.red_odds.position < 5:
p = [381,207]
dirty_rects.append(screen.blit(red_letter1, p))
if s.game.red_odds.position in [5,6]:
p = [425,197]
dirty_rects.append(screen.blit(red_letter2, p))
if s.game.red_odds.position == 7:
p = [489,206]
dirty_rects.append(screen.blit(red_letter3, p))
if s.game.red_odds.position == 8:
p = [520,209]
dirty_rects.append(screen.blit(red_letter4, p))
if s.game.red_odds.position == 9:
p = [568,211]
dirty_rects.append(screen.blit(red_letter5, p))
if s.game.red_odds.position == 10:
p = [637,203]
dirty_rects.append(screen.blit(red_letter6, p))
if s.game.mystic_lines.position == 1:
p = [204,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 2:
p = [236,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 3:
p = [269,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position >= 4:
p = [295,681]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],38,46)))
dirty_rects.append(screen.blit(ml_a, p))
p = [341,595]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],44,50)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position == 5:
p = [334,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 6:
p = [366,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position >= 7:
p = [392,681]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],38,46)))
dirty_rects.append(screen.blit(ml_a, p))
p = [265,595]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],44,50)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position == 8:
p = [433,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 9:
p = [463,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 10:
p = [489,681]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],38,46)))
dirty_rects.append(screen.blit(ml_a, p))
p = [416,595]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],44,50)))
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
def line2_animation(args):
dirty_rects = []
s = args[0]
num = args[1]
line = args[2]
if line == 2:
if s.game.line2.position == 0:
dirty_rects.append(screen.blit(columnb2, (233 - num, 369)))
dirty_rects.append(screen.blit(columnb1, (286 + num, 369)))
elif s.game.line2.position == 1:
dirty_rects.append(screen.blit(columnb1, (233 - num, 369)))
dirty_rects.append(screen.blit(columnb2, (286 + num, 369)))
nc_p = [228,368]
dirty_rects.append(screen.blit(number_card, nc_p))
if (s.game.anti_cheat.status == True):
dirty_rects.append(screen.blit(bg_gi, (233,369), pygame.Rect(233,369,270,212)))
else:
dirty_rects.append(screen.blit(bg_off, (233,369), pygame.Rect(233,369,270,212)))
if s.game.mystic_lines.position == 1:
p = [204,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 2:
p = [236,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 3:
p = [269,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position >= 4:
p = [295,681]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],38,46)))
dirty_rects.append(screen.blit(ml_a, p))
p = [341,595]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],44,50)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position == 5:
p = [334,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 6:
p = [366,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position >= 7:
p = [392,681]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],38,46)))
dirty_rects.append(screen.blit(ml_a, p))
p = [265,595]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],44,50)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position == 8:
p = [433,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 9:
p = [463,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 10:
p = [489,681]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],38,46)))
dirty_rects.append(screen.blit(ml_a, p))
p = [416,595]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],44,50)))
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
def line3_animation(args):
dirty_rects = []
s = args[0]
num = args[1]
line = args[2]
if line == 3:
if s.game.line3.position == 0:
dirty_rects.append(screen.blit(columnc2, (389 - num, 369)))
dirty_rects.append(screen.blit(columnc1, (440 + num, 369)))
elif s.game.line3.position == 1:
dirty_rects.append(screen.blit(columnc1, (389 - num, 369)))
dirty_rects.append(screen.blit(columnc2, (440 + num, 369)))
nc_p = [228,368]
dirty_rects.append(screen.blit(number_card, nc_p))
if (s.game.anti_cheat.status == True):
dirty_rects.append(screen.blit(bg_gi, (389,369), pygame.Rect(389,369,100,212)))
else:
dirty_rects.append(screen.blit(bg_off, (389,369), pygame.Rect(389,369,100,212)))
if s.game.mystic_lines.position == 1:
p = [204,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 2:
p = [236,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 3:
p = [269,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position >= 4:
p = [295,681]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],38,46)))
dirty_rects.append(screen.blit(ml_a, p))
p = [341,595]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],44,50)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position == 5:
p = [334,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 6:
p = [366,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position >= 7:
p = [392,681]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],38,46)))
dirty_rects.append(screen.blit(ml_a, p))
p = [265,595]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],44,50)))
dirty_rects.append(screen.blit(ml_letter, p))
if s.game.mystic_lines.position == 8:
p = [433,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 9:
p = [463,687]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],25,29)))
dirty_rects.append(screen.blit(ml_arrow, p))
if s.game.mystic_lines.position == 10:
p = [489,681]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],38,46)))
dirty_rects.append(screen.blit(ml_a, p))
p = [416,595]
dirty_rects.append(screen.blit(bg_gi, p, pygame.Rect(p[0],p[1],44,50)))
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
def eb_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
if s.game.extra_ball.position < 1:
dirty_rects.append(screen.blit(bg_gi, (147,1043), pygame.Rect(147,1043,47,31)))
if s.game.extra_ball.position < 2:
dirty_rects.append(screen.blit(bg_gi, (197,1043), pygame.Rect(197,1043,59,34)))
if s.game.extra_ball.position < 3:
dirty_rects.append(screen.blit(bg_gi, (261,1043), pygame.Rect(261,1043,59,34)))
if s.game.extra_ball.position < 4:
dirty_rects.append(screen.blit(bg_gi, (322,1043), pygame.Rect(322,1043,47,31)))
if s.game.extra_ball.position < 5:
dirty_rects.append(screen.blit(bg_gi, (373,1044), pygame.Rect(373,1044,59,34)))
if s.game.extra_ball.position < 6:
dirty_rects.append(screen.blit(bg_gi, (434,1043), pygame.Rect(434,1043,59,34)))
if s.game.extra_ball.position < 7:
dirty_rects.append(screen.blit(bg_gi, (498,1044), pygame.Rect(498,1044,47,31)))
if s.game.extra_ball.position < 8:
dirty_rects.append(screen.blit(bg_gi, (548,1045), pygame.Rect(548,1045,59,34)))
if s.game.extra_ball.position < 9:
dirty_rects.append(screen.blit(bg_gi, (612,1044), pygame.Rect(612,1044,59,34)))
pygame.display.update(dirty_rects)
if num in [0,25,14,49]:
if s.game.extra_ball.position < 1:
p = [147,1043]
dirty_rects.append(screen.blit(eb_number, p))
pygame.display.update(dirty_rects)
return
elif num in [39,1,26,15]:
if s.game.extra_ball.position < 2:
p = [197,1043]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [3,4,17,28,29,40]:
if s.game.extra_ball.position < 3:
p = [261,1043]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [5,18,30,43]:
if s.game.extra_ball.position < 4:
p = [322,1043]
dirty_rects.append(screen.blit(eb_number, p))
pygame.display.update(dirty_rects)
return
elif num in [7,8,19,32,33,44]:
if s.game.extra_ball.position < 5:
p = [373,1044]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [9,10,20,34,35,45]:
if s.game.extra_ball.position < 6:
p = [434,1043]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [11,21,36,46]:
if s.game.extra_ball.position < 7:
p = [498,1044]
dirty_rects.append(screen.blit(eb_number, p))
pygame.display.update(dirty_rects)
return
elif num in [12,22,37,47]:
if s.game.extra_ball.position < 8:
p = [548,1045]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
elif num in [2,6,13,16,23,27,31,38,41,48]:
if s.game.extra_ball.position < 9:
p = [612,1044]
dirty_rects.append(screen.blit(eb, p))
pygame.display.update(dirty_rects)
return
def clear_odds(s, num):
global screen
dirty_rects = []
if s.game.double_red.status == False:
dirty_rects.append(screen.blit(bg_gi, (20,613), pygame.Rect(20,613,75,72)))
if s.game.double_yellow.status == False:
dirty_rects.append(screen.blit(bg_gi, (94,611), pygame.Rect(94,611,75,73)))
if s.game.double_green.status == False:
dirty_rects.append(screen.blit(bg_gi, (21,685), pygame.Rect(21,685,75,73)))
if s.game.double_blue.status == False:
dirty_rects.append(screen.blit(bg_gi, (95,685), pygame.Rect(95,685,75,73)))
if s.game.yellow_odds.position != 2:
dirty_rects.append(screen.blit(bg_gi, (223,905), pygame.Rect(223,905,46,61)))
if s.game.yellow_odds.position != 5:
dirty_rects.append(screen.blit(bg_gi, (375,905), pygame.Rect(375,905,46,61)))
if s.game.yellow_odds.position != 7:
dirty_rects.append(screen.blit(bg_gi, (473,905), pygame.Rect(473,905,46,61)))
if s.game.yellow_odds.position != 9:
dirty_rects.append(screen.blit(bg_gi, (576,905), pygame.Rect(576,905,46,61)))
if s.game.yellow_odds.position != 10:
dirty_rects.append(screen.blit(bg_gi, (625,905), pygame.Rect(625,905,46,61)))
if s.game.red_odds.position != 3:
dirty_rects.append(screen.blit(bg_gi, (275,773), pygame.Rect(275,773,46,61)))
if s.game.red_odds.position != 7:
dirty_rects.append(screen.blit(bg_gi, (473,773), pygame.Rect(473,773,46,61)))
if s.game.red_odds.position != 8:
dirty_rects.append(screen.blit(bg_gi, (525,773), pygame.Rect(525,773,46,61)))
if s.game.red_odds.position != 9:
dirty_rects.append(screen.blit(bg_gi, (576,773), pygame.Rect(576,773,46,61)))
if s.game.red_odds.position != 10:
dirty_rects.append(screen.blit(bg_gi, (625,773), pygame.Rect(625,773,46,61)))
if s.game.blue_odds.position != 2:
dirty_rects.append(screen.blit(bg_gi, (223,970), pygame.Rect(223,970,46,61)))
if s.game.blue_odds.position != 5:
dirty_rects.append(screen.blit(bg_gi, (375,970), pygame.Rect(375,970,46,61)))
if s.game.blue_odds.position != 8:
dirty_rects.append(screen.blit(bg_gi, (525,970), pygame.Rect(525,970,46,61)))
if s.game.blue_odds.position != 9:
dirty_rects.append(screen.blit(bg_gi, (574,970), pygame.Rect(574,970,46,61)))
if s.game.blue_odds.position != 10:
dirty_rects.append(screen.blit(bg_gi, (622,970), pygame.Rect(622,970,46,61)))
if s.game.green_odds.position != 3:
dirty_rects.append(screen.blit(bg_gi, (275,842), pygame.Rect(275,842,46,61)))
if s.game.green_odds.position != 5:
dirty_rects.append(screen.blit(bg_gi, (375,842), pygame.Rect(375,842,46,61)))
if s.game.green_odds.position != 7:
dirty_rects.append(screen.blit(bg_gi, (473,842), pygame.Rect(473,842,46,61)))
if s.game.green_odds.position != 8:
dirty_rects.append(screen.blit(bg_gi, (525,842), pygame.Rect(525,842,46,61)))
if s.game.green_odds.position != 10:
dirty_rects.append(screen.blit(bg_gi, (625,840), pygame.Rect(625,840,46,61)))
pygame.display.update(dirty_rects)
def draw_odds_animation(s, num):
global screen
dirty_rects = []
if num in [7,32]:
if s.game.double_red.status == False:
p = [20,613]
dirty_rects.append(screen.blit(red_double, p))
pygame.display.update(dirty_rects)
return
if num in [11,36]:
if s.game.double_yellow.status == False:
p = [94,611]
dirty_rects.append(screen.blit(yellow_double, p))
pygame.display.update(dirty_rects)
return
if num in [0,25]:
if s.game.double_green.status == False:
p = [21,685]
dirty_rects.append(screen.blit(green_double, p))
pygame.display.update(dirty_rects)
return
if num in [17,42]:
if s.game.double_blue.status == False:
p = [95,685]
dirty_rects.append(screen.blit(blue_double, p))
pygame.display.update(dirty_rects)
return
if num in [4,29]:
if s.game.yellow_odds.position != 2:
p = [223,905]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [13,38]:
if s.game.yellow_odds.position != 5:
p = [375,905]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [22,47]:
if s.game.yellow_odds.position != 7:
p = [473,905]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [19,44]:
if s.game.yellow_odds.position != 9:
p = [576,905]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [5,30]:
if s.game.yellow_odds.position != 10:
p = [625,905]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [23,48]:
if s.game.red_odds.position != 3:
p = [275,773]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [20,45]:
if s.game.red_odds.position != 7:
p = [473,773]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [12,37]:
if s.game.red_odds.position != 8:
p = [525,773]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [25,0]:
if s.game.red_odds.position != 9:
p = [576,773]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [6,31]:
if s.game.red_odds.position != 10:
p = [625,773]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [18,43]:
if s.game.blue_odds.position != 2:
p = [223,970]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [3,28]:
if s.game.blue_odds.position != 5:
p = [375,970]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [8,33]:
if s.game.blue_odds.position != 8:
p = [525,970]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [21,46]:
if s.game.blue_odds.position != 9:
p = [574,970]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [10,35]:
if s.game.blue_odds.position != 10:
p = [622,970]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [15,40]:
if s.game.green_odds.position != 3:
p = [275,842]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [9,34]:
if s.game.green_odds.position != 5:
p = [375,842]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [16,41]:
if s.game.green_odds.position != 7:
p = [473,842]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [14,39]:
if s.game.green_odds.position != 8:
p = [525,842]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
if num in [23,48]:
if s.game.green_odds.position != 10:
p = [625,840]
dirty_rects.append(screen.blit(odds, p))
pygame.display.update(dirty_rects)
return
def odds_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
clear_odds(s, num)
draw_odds_animation(s, num)
def clear_features(s, num):
global screen
dirty_rects = []
if s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False and s.game.selection_feature.position < 7:
dirty_rects.append(screen.blit(bg_gi, (19,552), pygame.Rect(19,552,149,48)))
if s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False and s.game.selection_feature.position not in [7,8]:
dirty_rects.append(screen.blit(bg_gi, (19,417), pygame.Rect(19,417,149,48)))
if s.game.selection_feature.position < 9:
dirty_rects.append(screen.blit(bg_gi, (17,372), pygame.Rect(17,372,150,45)))
if s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False:
dirty_rects.append(screen.blit(bg_gi, (17,372), pygame.Rect(17,372,150,45)))
if s.game.yellow_star.status == False:
dirty_rects.append(screen.blit(bg_gi, (19,507), pygame.Rect(19,507,151,48)))
if s.game.red_star.status == False:
dirty_rects.append(screen.blit(bg_gi, (19,463), pygame.Rect(19,463,151,48)))
if s.game.three_red_letter.status == False:
dirty_rects.append(screen.blit(bg_gi, (17,216), pygame.Rect(17,216,76,41)))
if s.game.two_red_letter.status == False:
dirty_rects.append(screen.blit(bg_gi, (91,217), pygame.Rect(91,217,76,41)))
if s.game.three_stars.status == False:
dirty_rects.append(screen.blit(bg_gi, (17,333), pygame.Rect(17,333,74,26)))
if s.game.six_stars.status == False:
dirty_rects.append(screen.blit(bg_gi, (91,334), pygame.Rect(91,334,74,26)))
if s.game.mystic_lines.position != 2:
dirty_rects.append(screen.blit(bg_gi, (236,687), pygame.Rect(236,687,18,22)))
if s.game.mystic_lines.position < 4:
dirty_rects.append(screen.blit(bg_gi, (295,681), pygame.Rect(295,681,31,36)))
dirty_rects.append(screen.blit(bg_gi, (341,595), pygame.Rect(341,595,37,42)))
if s.game.mystic_lines.position != 5:
dirty_rects.append(screen.blit(bg_gi, (334,687), pygame.Rect(334,687,18,22)))
if s.game.mystic_lines.position < 7:
dirty_rects.append(screen.blit(bg_gi, (392,681), pygame.Rect(392,681,31,36)))
dirty_rects.append(screen.blit(bg_gi, (265,595), pygame.Rect(265,595,37,42)))
if s.game.mystic_lines.position != 9:
dirty_rects.append(screen.blit(bg_gi, (463,687), pygame.Rect(463,687,18,22)))
if s.game.mystic_lines.position < 10:
dirty_rects.append(screen.blit(bg_gi, (489,681), pygame.Rect(489,681,31,36)))
dirty_rects.append(screen.blit(bg_gi, (416,595), pygame.Rect(416,595,37,42)))
pygame.display.update(dirty_rects)
def draw_feature_animation(s, num):
global screen
dirty_rects = []
if num in [10,35]:
if s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False and s.game.selection_feature.position < 7:
p = [19,552]
dirty_rects.append(screen.blit(bottom, p))
pygame.display.update(dirty_rects)
return
if num in [9,34]:
if s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False and s.game.selection_feature.position not in [7,8]:
p = [19,417]
dirty_rects.append(screen.blit(button, p))
pygame.display.update(dirty_rects)
return
if num in [6,31,17,42,21,46]:
if s.game.selection_feature.position not in [9] or (s.game.mystic_lines.position < 4 and s.game.two_red_letter.status == False and s.game.three_red_letter.status == False):
p = [17,372]
dirty_rects.append(screen.blit(top, p))
pygame.display.update(dirty_rects)
return
if num in [4,29]:
if s.game.red_star.status == False:
p = [19,463]
dirty_rects.append(screen.blit(button, p))
pygame.display.update(dirty_rects)
s.game.coils.redROLamp.pulse(85)
return
if num in [11,36]:
if s.game.yellow_star.status == False:
p = [19,507]
dirty_rects.append(screen.blit(button, p))
pygame.display.update(dirty_rects)
s.game.coils.yellowROLamp.pulse(85)
return
if num in [16,41]:
if s.game.three_red_letter.status == False:
p = [17,216]
dirty_rects.append(screen.blit(three_red, p))
pygame.display.update(dirty_rects)
return
if num in [7,32]:
if s.game.two_red_letter.status == False:
p = [91,217]
dirty_rects.append(screen.blit(two_red, p))
pygame.display.update(dirty_rects)
return
if num in [8,33]:
if s.game.three_stars.status == False:
p = [17,333]
dirty_rects.append(screen.blit(three_stars, p))
pygame.display.update(dirty_rects)
return
if num in [20,45]:
if s.game.six_stars.status == False:
p = [91,334]
dirty_rects.append(screen.blit(six_stars, p))
pygame.display.update(dirty_rects)
return
if num in [13,19,38,44]:
if s.game.mystic_lines.position != 2:
p = [236,687]
dirty_rects.append(screen.blit(ml_arrow, p))
pygame.display.update(dirty_rects)
return
if num in [3,22,28,47]:
if s.game.mystic_lines.position < 4:
p = [295,681]
dirty_rects.append(screen.blit(ml_a, p))
p = [341,595]
dirty_rects.append(screen.blit(ml_letter, p))
pygame.display.update(dirty_rects)
return
if num in [18,25,43,0]:
if s.game.mystic_lines.position != 5:
p = [334,687]
dirty_rects.append(screen.blit(ml_arrow, p))
pygame.display.update(dirty_rects)
return
if num in [2,12,27,37]:
if s.game.mystic_lines.position < 7:
p = [265,595]
dirty_rects.append(screen.blit(ml_letter, p))
p = [392,681]
dirty_rects.append(screen.blit(ml_a, p))
pygame.display.update(dirty_rects)
return
if num in [15,40]:
if s.game.mystic_lines.position != 9:
p = [463,687]
dirty_rects.append(screen.blit(ml_arrow, p))
pygame.display.update(dirty_rects)
return
if num in [1,14,26,39]:
if s.game.mystic_lines.position < 10:
p = [416,595]
dirty_rects.append(screen.blit(ml_letter, p))
p = [489,681]
dirty_rects.append(screen.blit(ml_a, p))
pygame.display.update(dirty_rects)
return
def feature_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
clear_features(s, num)
draw_feature_animation(s, num)
def both_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
clear_features(s, num)
clear_odds(s, num)
draw_odds_animation(s, num)
draw_feature_animation(s, num)
def special_animation(args):
global screen
dirty_rects = []
s = args[0]
num = args[1]
if s.game.special_odds.position != 1:
dirty_rects.append(screen.blit(bg_gi, (599,577), pygame.Rect(599,577,103,34)))
if s.game.special_odds.position != 2:
dirty_rects.append(screen.blit(bg_gi, (599,544), pygame.Rect(599,544,103,34)))
if s.game.special_odds.position != 3:
dirty_rects.append(screen.blit(bg_gi, (599,511), pygame.Rect(599,511,103,34)))
if s.game.special_odds.position != 4:
dirty_rects.append(screen.blit(bg_gi, (599,478), pygame.Rect(599,478,103,34)))
if s.game.special_odds.position != 5:
dirty_rects.append(screen.blit(bg_gi, (599,443), pygame.Rect(599,443,103,34)))
if s.game.special_odds.position != 6:
dirty_rects.append(screen.blit(bg_gi, (599,411), pygame.Rect(599,411,103,34)))
if s.game.special_odds.position != 7:
dirty_rects.append(screen.blit(bg_gi, (599,377), pygame.Rect(599,377,103,34)))
if s.game.special_odds.position != 8:
dirty_rects.append(screen.blit(bg_gi, (600,344), pygame.Rect(600,344,103,34)))
pygame.display.update(dirty_rects)
if num in [3,28]:
if s.game.special_odds.position != 1:
p = [599,577]
dirty_rects.append(screen.blit(special_odds, p))
pygame.display.update(dirty_rects)
return
if num in [6,7,31,32]:
if s.game.special_odds.position < 2:
p = [599,544]
dirty_rects.append(screen.blit(special_odds, p))
pygame.display.update(dirty_rects)
return
if num in [3,4,28,29]:
if s.game.special_odds.position < 3:
p = [599,511]
dirty_rects.append(screen.blit(special_odds, p))
pygame.display.update(dirty_rects)
return
if num in [10,11,35,36]:
if s.game.special_odds.position < 4:
p = [599,478]
dirty_rects.append(screen.blit(special_odds, p))
pygame.display.update(dirty_rects)
return
if num in [8,9,33,34]:
if s.game.special_odds.position < 5:
p = [599,443]
dirty_rects.append(screen.blit(special_odds, p))
pygame.display.update(dirty_rects)
return
if num in [14,15,39,40]:
if s.game.special_odds.position < 6:
p = [599,411]
dirty_rects.append(screen.blit(special_odds, p))
pygame.display.update(dirty_rects)
return
if num in [12,13,21,22,37,38,46,47]:
if s.game.special_odds.position < 7:
p = [599,377]
dirty_rects.append(screen.blit(special_odds, p))
pygame.display.update(dirty_rects)
return
if num in [16,17,23,24,25,41,42,48,49]:
if s.game.special_odds.position < 8:
p = [600,344]
dirty_rects.append(screen.blit(special_odds, p))
pygame.display.update(dirty_rects)
return
| gpl-3.0 | 7,389,662,866,704,500,000 | 37.961487 | 190 | 0.551827 | false |
tobymccann/flask-base | app/models/tools.py | 1 | 15824 | """
SQLAlchemy data model for the web service
"""
from slugify import slugify
from .. import db
from ..exception import TemplateVariableNotFoundException, TemplateValueNotFoundException
from ..utils import MakoConfigGenerator
class TemplateValue(db.Model):
"""
TemplateValue
=============
The template value definition is used to associate a value to a variable within a Template Value Set.
"""
__table_args__ = (db.UniqueConstraint('var_name_slug', 'template_value_set_id'),)
id = db.Column(db.Integer, primary_key=True)
var_name_slug = db.Column(
db.String(256),
index=True,
nullable=False
)
value = db.Column(db.String(4096), index=True)
template_value_set_id = db.Column(db.Integer, db.ForeignKey('template_value_set.id'), nullable=False)
template_value_set = db.relationship('TemplateValueSet', backref=db.backref('values',
cascade="all, delete-orphan",
lazy='dynamic'))
@staticmethod
def convert_variable_name(string):
"""convert the given string to a valid variable name (creates a slug with "_"-spaces)
:param string:
:return:
"""
return slugify(separator="_", to_lower=False)(string)
@property
def var_name(self):
return self.var_name_slug
@var_name.setter
def var_name(self, value):
self.var_name_slug = self.convert_variable_name(value)
def __init__(self, template_value_set, var_name, value=""):
self.var_name = var_name
self.value = value
self.template_value_set = template_value_set
def __repr__(self):
return '<TemplateValue %r>' % self.var_name
class TemplateValueSet(db.Model):
"""
TemplateValueSet
================
The Template Value Set is used to store a set of variables for a Config Template.
"""
__table_args__ = (db.UniqueConstraint('hostname', 'config_template_id'),)
id = db.Column(db.Integer, primary_key=True)
hostname = db.Column(
db.String(256),
index=True,
nullable=False
)
config_template_id = db.Column(db.Integer, db.ForeignKey('config_template.id'), nullable=False)
config_template = db.relationship('ConfigTemplate', backref=db.backref('template_value_sets',
cascade="all, delete-orphan",
lazy='dynamic'))
def __init__(self, hostname, config_template=None):
self.hostname = hostname
self.config_template = config_template
# if a config template is specified during the initial creation of the object, all defined variables are copied
# to this value set
if config_template:
self.copy_variables_from_config_template()
def __repr__(self):
if not self.config_template:
config_template_name = "None"
else:
config_template_name = self.config_template.name
return '<TemplateValueSet %r (%s) in %s>' % (self.hostname, self.id, config_template_name)
@staticmethod
def convert_variable_name(string):
"""convert the given string to a valid variable name
:param string:
:return:
"""
return slugify(separator="_", to_lower=False)(string)
def copy_variables_from_config_template(self):
"""this function copies all variables from the associated configuration template object
:return:
"""
if not self.config_template:
raise ValueError("Config Template not set within the template value set, copy variable names not possible")
parent_vars = self.config_template.variables.all()
# add hostname variable
self.update_variable_value("hostname", value=self.hostname)
for tpl_var in parent_vars:
if self.is_value_defined(tpl_var.var_name):
old_value = self.get_template_value_by_name_as_string(tpl_var.var_name)
else:
old_value = ""
self.update_variable_value(tpl_var.var_name, value=old_value)
def get_template_value_names(self):
"""get all template variable names of the Template Value Set
:return: a list of strings that contains all variable names
"""
result = []
for obj in self.values.all():
result.append(obj.var_name)
return result
def get_template_value_by_name(self, var_name):
"""get the Template Value by name within the Config Template, otherwise an TemplateValueNotFoundException is
thrown
The given var_name is unconditionally converted to a slug string representation, before the query occurs.
:param var_name: a variable name (always converted to a valid variable name)
:return: the TemplateValue instance of the variable
"""
result = TemplateValue.query.filter_by(var_name_slug=var_name, template_value_set=self).first()
if not result:
raise TemplateValueNotFoundException("Value for '%s' not found in "
"Template Value Set '%s'" % (var_name, self.hostname))
return result
def get_template_value_by_name_as_string(self, var_name):
"""get the variable value as string for the given variable name.
If the variable_name was not found within the values, a TemplateValueNotFoundException is thrown
:param var_name:
:return: string representation of the template value
"""
return str(self.get_template_value_by_name(var_name).value)
def update_variable_value(self, var_name, value="", auto_convert_var_name=True):
"""add or update a Template Variable for the Template Value set. The var_name parameter is automatically
converted to a slug string.
:param var_name:
:param value:
:param auto_convert_var_name: enables or disables the automatic conversion of the variable names
:return:
"""
# convert string
if auto_convert_var_name:
var_name = self.convert_variable_name(var_name)
if var_name not in self.get_template_value_names():
# variable not found, create new one (automatic conversion is then enforced)
var_name = self.convert_variable_name(var_name)
new_var = TemplateValue(self, var_name, value)
db.session.add(new_var)
db.session.commit()
else:
# update existing variable
tpl_var = TemplateValue.query.filter_by(var_name_slug=var_name, template_value_set=self).first()
tpl_var.value = value
db.session.commit()
return var_name
def is_value_defined(self, val_name):
"""checks if the given template value is defined on the Template Value Set
:param val_name:
:return:
"""
return val_name in self.get_template_value_names()
def get_template_variables(self):
"""create a sorted list of the Template Values within this Template Value Set
:return:
"""
return self.values.order_by(TemplateValue.var_name_slug).all()
def get_configuration_result(self):
"""generates the configuration based on the Config Template and the associated Template Value Set
:return:
"""
dcg = MakoConfigGenerator(template_string=self.config_template.template_content)
for val in self.values:
dcg.set_variable_value(val.var_name, val.value)
return dcg.get_rendered_result()
class TemplateVariable(db.Model):
"""
TemplateVariable
================
The template variable is used to annotate variables that are used within a Config Template. The major actions are
triggered by the ConfigTemplate class.
"""
__table_args__ = (db.UniqueConstraint('var_name_slug', 'config_template_id'),)
id = db.Column(db.Integer, primary_key=True)
var_name_slug = db.Column(
db.String(256),
index=True,
nullable=False
)
description = db.Column(db.String(4096), index=True)
config_template_id = db.Column(db.Integer, db.ForeignKey('config_template.id'), nullable=False)
config_template = db.relationship('ConfigTemplate', backref=db.backref('variables',
cascade="all, delete-orphan",
lazy='dynamic'))
@property
def var_name(self):
return self.var_name_slug
@var_name.setter
def var_name(self, value):
self.var_name_slug = slugify(separator="_", to_lower=False)(value)
def __init__(self, config_template, var_name, description=""):
self.var_name = var_name
self.description = description
self.config_template = config_template
def __repr__(self):
return '<TemplateVariable %r>' % self.var_name
class ConfigTemplate(db.Model):
"""
ConfigTemplate
==============
The configuration template, that defines the content for the configuration generation process. It works with
TemplateVariable objects, that stores and annotates the variables used within this template.
"""
__table_args__ = (db.UniqueConstraint('name', 'project_id'),)
id = db.Column(db.Integer, primary_key=True)
name = db.Column(
db.String(128),
index=True,
nullable=False
)
_template_content = db.Column(db.UnicodeText(), index=True)
project_id = db.Column(db.Integer, db.ForeignKey('project.id'), nullable=False)
project = db.relationship('Project', backref=db.backref('configtemplates',
cascade="all, delete-orphan",
lazy='dynamic'))
last_successful_ftp_export = db.Column(db.DateTime)
last_successful_tftp_export = db.Column(db.DateTime)
@property
def name_slug(self):
return slugify(to_lower=False)(self.name)
@property
def template_content(self):
return self._template_content
@template_content.setter
def template_content(self, value):
# if the template content is changed, drop all associated Template Value Sets
if self._template_content != value:
for obj in self.template_value_sets.all():
TemplateValueSet.query.filter(TemplateValueSet.id == obj.id).delete()
self._template_content = value
self._create_variables_from_template_content()
def __init__(self, name, project=None, template_content=""):
self.name = name
self.project = project
self.template_content = template_content
def __repr__(self):
if not self.project:
project_name = "None"
else:
project_name = self.project.name
return '<ConfigTemplate %r (%s) in %s>' % (self.name, self.id, project_name)
@staticmethod
def convert_variable_name(string):
"""convert the given string to a valid variable name
:param string:
:return:
"""
return slugify(separator="_", to_lower=False)(string)
def _create_variables_from_template_content(self):
dcg = MakoConfigGenerator(template_string=self.template_content)
# the hostname is always defined within a TemplateValueSet, add it with a default description
self.update_template_variable(
"hostname",
"the hostname of the device (also used as name for the template value set)"
)
# create new template variables on the Config Template
for var_name in dcg.template_variables:
if var_name != "hostname":
self.update_template_variable(var_name)
def rename_variable(self, old_name, new_name):
"""rename the Template Variables within the Config Template and all associated Template Value Sets
:param old_name:
:param new_name:
:return:
"""
if old_name not in self.get_template_variable_names():
raise TemplateVariableNotFoundException("Variable %s not found in config template" % old_name)
var_obj = self.get_template_variable_by_name(old_name)
var_obj.var_name = new_name
# variable renamed, change associated value sets
for valueset in self.template_value_sets.all():
old_val = valueset.get_template_value_by_name_as_string(old_name)
# drop old variable from value set
db.session.delete(valueset.get_template_value_by_name(old_name))
# create new variables with the new name and the old value
valueset.update_variable_value(var_name=new_name, value=old_val)
def valid_template_value_set_name(self, template_value_set_name):
"""test if the given Template Value Set name is valid within the Config Template
:param template_value_set_name:
:return: True if valid, otherwise false
"""
query_result = self.template_value_sets.all()
valid = True
if not template_value_set_name:
valid = False
elif template_value_set_name == "":
valid = False
for obj in query_result:
if obj.hostname == template_value_set_name:
valid = False
break
return valid
def get_template_variable_names(self):
"""get all Template Variable Names of the Config Template
:return:
"""
result = []
for obj in self.variables.all():
result.append(obj.var_name)
return result
def get_template_variable_by_name(self, var_name):
"""get a Template Variable by the var_name_slug attribute within the Config Template
:param var_name:
:return:
"""
result = TemplateVariable.query.filter_by(var_name_slug=var_name, config_template=self).first()
if not result:
raise TemplateVariableNotFoundException("Variable '%s' not found in Template '%s'" % (var_name, self.name))
return result
def update_template_variable(self, var_name, description="", auto_convert_var_name=True):
"""add or update a Template Variable for the Config Template
:param var_name:
:param description:
:param auto_convert_var_name: enables or disables the automatic conversion of the variable names
:return: name of the variable that was updated (automatic conversion)
"""
# convert string
if auto_convert_var_name:
var_name = self.convert_variable_name(var_name)
if var_name not in self.get_template_variable_names():
# variable not found, create new one (automatic conversion is then enforced)
var_name = self.convert_variable_name(var_name)
new_var = TemplateVariable(self, var_name, description)
db.session.add(new_var)
db.session.commit()
else:
# update existing variable
tpl_var = TemplateVariable.query.filter_by(var_name_slug=var_name, config_template=self).first()
tpl_var.description = description
db.session.commit()
return var_name
def is_variable_defined(self, var_name):
"""checks if the given variable is defined on the Config Template
:param var_name:
:return:
"""
return var_name in self.get_template_variable_names()
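# Illustrative usage sketch (not part of the original model definitions): the names
# below ("access-switch", "ntp_server", ...) are made up, an active Flask app and
# database session are assumed (the constructors commit rows as a side effect), and
# Mako-style ${...} placeholders are assumed to be what MakoConfigGenerator expects.
def _example_render_configuration(project):
    template = ConfigTemplate(
        name="access-switch",
        project=project,
        template_content="hostname ${hostname}\nntp server ${ntp_server}\n",
    )
    value_set = TemplateValueSet(hostname="switch-01", config_template=template)
    # "hostname" is pre-populated from the value set name; the remaining variables
    # are copied from the template and can be filled in afterwards.
    value_set.update_variable_value("ntp_server", "10.0.0.1")
    return value_set.get_configuration_result()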
| mit | 225,298,349,692,474,800 | 34.963636 | 119 | 0.612045 | false |
alexfalcucc/anaconda | anaconda_server/lib/registry.py | 9 | 1594 |
# Copyright (C) 2014 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
import os
import sys
import logging
class HandlerRegistry(object):
"""Register anaconda JsonServer handlers
"""
initialized = False
def __init__(self):
self._handlers = {}
def initialize(self):
"""Load handlers from anaconda installed plugins
"""
if self.initialized:
return
self._import_plugin_handlers()
self.initialized = True
def get(self, handler_type):
"""Retrieve the given handler type or none
"""
return self._handlers.get(handler_type)
def register(self, handler):
"""Register a new handler
"""
self._handlers[handler.__handler_type__] = handler
def _import_plugin_handlers(self):
"""Import hadnlers from anaconda plugins
"""
path = os.path.join(os.path.dirname(__file__), '../../../')
packages = [
os.path.join(path, f) for f in os.listdir(path)
if f.startswith('anaconda_')
]
for package in packages:
if 'vagrant' in package or not os.path.isdir(package):
continue
lang = package.rsplit('anaconda_', 1)[1]
sys.path.append('{}/plugin'.format(package))
mod_name = 'handlers_{}'.format(lang)
mod = __import__(mod_name, globals(), locals())
logging.info(
'[anaconda_plugins] imported handlers for {}'.format(mod)
)
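# Illustrative sketch (hypothetical handler, not shipped with any anaconda plugin):
# any object exposing a ``__handler_type__`` attribute can be registered and later
# looked up by that type string.
class _ExampleHandler(object):
    __handler_type__ = 'example'
    @staticmethod
    def run(data):
        return data
def _example_registry_usage():
    registry = HandlerRegistry()
    registry.register(_ExampleHandler)
    return registry.get('example')  # -> _ExampleHandler, or None for unknown types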
| gpl-3.0 | -3,983,221,470,204,304,000 | 25.566667 | 73 | 0.566499 | false |
google/llvm-premerge-checks | scripts/metrics/buildbot_status_emails.py | 1 | 5577 | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import datetime
import gzip
import os
import mailbox
import requests
import re
from typing import List, Dict, Set, Iterator, Optional
EMAIL_ARCHIVE_URL = 'http://lists.llvm.org/pipermail/llvm-dev/{year}-{month}.txt.gz'
TMP_DIR = os.path.join(os.path.dirname(__file__), 'tmp')
class LLVMBotArchiveScanner:
def __init__(self):
self._tmpdir = TMP_DIR
@staticmethod
def _generate_archive_url(month: datetime.date) -> str:
return EMAIL_ARCHIVE_URL.format(year=month.year, month=month.strftime('%B'))
def _download_archive(self, month: datetime.date):
os.makedirs(self._tmpdir, exist_ok=True)
filename = os.path.join(self._tmpdir, 'llvmdev-{year}-{month:02d}.txt'.format(year=month.year, month=month.month))
url = self._generate_archive_url(month)
# FIXME: decompress the files
self.download(url, filename)
def get_archives(self, start_month: datetime.date):
print('Downloading data...')
month = start_month
today = datetime.date.today()
while month < today:
self._download_archive(month)
if month.month < 12:
month = datetime.date(year=month.year, month=month.month+1, day=1)
else:
month = datetime.date(year=month.year+1, month=1, day=1)
    def extract_emails(self) -> Iterator[Optional[mailbox.Message]]:
result = []
for archive_name in (d for d in os.listdir(self._tmpdir) if d.startswith('llvmdev-')):
print('Scanning {}'.format(archive_name))
mb = mailbox.mbox(os.path.join(self._tmpdir, archive_name), factory=mbox_reader)
for mail in mb.values():
subject = mail.get('subject')
if subject is None:
continue
if 'Buildbot numbers' in mail['subject']:
yield(mail)
yield
def get_attachments(self, email: mailbox.Message):
if email is None:
return
week_str = re.search(r'(\d+/\d+/\d+)', email['subject']).group(1)
week = datetime.datetime.strptime(week_str, '%m/%d/%Y').date()
attachment_url = re.search(r'Name: completed_failed_avr_time.csv[^<]*URL: <([^>]+)>', email.get_payload(), re.DOTALL).group(1)
filename = os.path.join(self._tmpdir, 'buildbot_stats_{}.csv'.format(week.isoformat()))
self.download(attachment_url, filename)
@staticmethod
def download(url, filename):
if os.path.exists(filename):
return
r = requests.get(url)
print('Getting {}'.format(filename))
with open(filename, 'wb') as f:
f.write(r.content)
def merge_results(self):
def _convert_int(s: str) -> int:
if len(s) == 0:
return 0
return int(s)
bot_stats = {} # type: Dict[str, Dict[datetime.date, float]]
weeks = set() # type: Set[datetime.date]
for csv_filename in (d for d in os.listdir(self._tmpdir) if d.startswith('buildbot_stats_')):
week_str = re.search(r'(\d+-\d+-\d+)', csv_filename).group(1)
week = datetime.datetime.fromisoformat(week_str).date()
weeks.add(week)
with open(os.path.join(self._tmpdir, csv_filename)) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
name = row['name']
red_build = _convert_int(row['red_builds'])
all_builds = _convert_int(row['all_builds'])
percentage = 100.0 * red_build / all_builds
bot_stats.setdefault(name, {})
bot_stats[name][week] = percentage
with open(os.path.join(self._tmpdir, 'buildbot_weekly.csv'), 'w') as csv_file:
fieldnames = ['week']
filtered_bots = sorted(b for b in bot_stats.keys()) # if len(bot_stats[b]) == len(weeks)
fieldnames.extend(filtered_bots)
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for week in sorted(weeks):
row = {'week': week.isoformat()}
for bot in filtered_bots:
percentage = bot_stats[bot].get(week)
if percentage is None:
continue
row[bot] = percentage
writer.writerow(row)
def mbox_reader(stream):
"""Read a non-ascii message from mailbox.
Based on https://stackoverflow.com/questions/37890123/how-to-trap-an-exception-that-occurs-in-code-underlying-python-for-loop
"""
data = stream.read()
text = data.decode(encoding="utf-8")
return mailbox.mboxMessage(text)
if __name__ == '__main__':
scanner = LLVMBotArchiveScanner()
scanner.get_archives(datetime.date(year=2019, month=8, day=1))
for message in scanner.extract_emails():
scanner.get_attachments(message)
scanner.merge_results() | apache-2.0 | -9,055,137,606,409,177,000 | 38.560284 | 134 | 0.59835 | false |
csutorasr/BMEVIAUAL00 | model/rl.py | 1 | 8350 | from tkinter import *
import tkinter as tk
import tkinter.font as tk_font
from tkinter.filedialog import askopenfilename
from tkinter.messagebox import showerror
import xml.etree.ElementTree as ElementTree
import alg
import util
import xmlh
window_width = 1280
window_height = 720
def convert_coordinates(text):
"""
Transforms the given text's coordinates to the scale of the canvas,
which is used in the application.
"""
# Calculating the four edges of the text.
minx = min([float(x) for ((x, y), time) in text[0].coordinates])
miny = min([float(y) for ((x, y), time) in text[0].coordinates])
maxx = max([float(x) for ((x, y), time) in text[0].coordinates])
maxy = max([float(y) for ((x, y), time) in text[0].coordinates])
for i in range(1, len(text)):
temp_x_min = [float(x) for ((x, y), time) in text[i].coordinates if float(x) < minx]
temp_y_min = [float(y) for ((x, y), time) in text[i].coordinates if float(y) < miny]
temp_x_max = [float(x) for ((x, y), time) in text[i].coordinates if float(x) > maxx]
temp_y_max = [float(y) for ((x, y), time) in text[i].coordinates if float(y) > maxy]
minx = minx if len(temp_x_min) == 0 else min(temp_x_min)
miny = miny if len(temp_y_min) == 0 else min(temp_y_min)
maxx = maxx if len(temp_x_max) == 0 else max(temp_x_max)
maxy = maxy if len(temp_y_max) == 0 else max(temp_y_max)
# Calculating the scaling
scale = 1/(maxy-miny) * 4/5 * window_height
if (maxx-minx)*scale > window_width:
scale = scale*1/((maxx-minx)*scale)*4/5*window_width
# Calculating the offset value
bias = (-minx*scale + (window_width/2 - (maxx-minx)*scale/2), -miny*scale + (window_height/2 - (maxy-miny)*scale/2))
return scale, bias
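def _example_convert_coordinates():
    """Illustrative sketch with made-up stroke data. util.Stroke is assumed to take
    start/end times plus a ``coordinates`` list of ((x, y), time) tuples, exactly as
    built in Gui.extract_data below; a point (x, y) is then drawn on the canvas at
    (x * scale + bias[0], y * scale + bias[1])."""
    strokes = [util.Stroke(0.0, 0.5, coordinates=[(("0", "0"), "0.0"),
                                                  (("100", "50"), "0.5")])]
    scale, bias = convert_coordinates(strokes)
    return scale, bias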
class Gui(tk.Frame):
"""
Gui class, for debugging purposes. The class serves only as an interface for the user,
hence the model is not implemented here, but in the alg.py file.
"""
def __init__(self, root):
tk.Frame.__init__(self, root)
self.root = root
self.root.title('RightLeft')
self.create_menu()
self.canvas = Canvas(self.root, width=window_width, height=window_height)
self.file_name = None
# The alg variable is the port between the model, and the gui.
self.alg = None
# Variable that stores the stroke data in the previously defined Stroke class as a list.
self.strokes = []
# Binding the move functions to the action listener.
self.canvas.bind("<ButtonPress-1>", self.move_start)
self.canvas.bind("<B1-Motion>", self.move_move)
# Binding zoom (Linux)
self.canvas.bind("<Button-4>", self.zoom_p)
self.canvas.bind("<Button-5>", self.zoom_m)
# Binding zoom (Windows)
self.canvas.bind("<MouseWheel>", self.zoom)
self.canvas.pack()
self.center()
def move_start(self, event):
self.canvas.scan_mark(event.x, event.y)
def move_move(self, event):
self.canvas.scan_dragto(event.x, event.y, gain=1)
def zoom_p(self, event):
self.canvas.scale("all", event.x, event.y, 1.1, 1.1)
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def zoom_m(self, event):
self.canvas.scale("all", event.x, event.y, 0.9, 0.9)
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def zoom(self, event):
if event.delta > 0:
self.canvas.scale("all", event.x, event.y, 1.1, 1.1)
elif event.delta < 0:
self.canvas.scale("all", event.x, event.y, 0.9, 0.9)
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def center(self):
"""
Adjusts the window to the center of the screen.
"""
self.root.update_idletasks()
width = self.root.winfo_width()
height = self.root.winfo_height()
x = (self.root.winfo_screenwidth() // 2) - (width // 2)
y = (self.root.winfo_screenheight() // 2) - (height // 2)
self.root.geometry('{}x{}+{}+{}'.format(width, height, x, y))
def create_menu(self):
menu = Menu(self.root)
file_menu = Menu(menu)
file_menu.add_command(label="Load File", command=self.load)
file_menu.add_command(label="Remove Error", command=self.clear)
file_menu.add_separator()
file_menu.add_command(label="Exit", command=self.root.quit)
menu.add_cascade(label="File", menu=file_menu)
self.root.config(menu=menu)
def clear(self):
if self.file_name is not None:
xmlh.remove_outliers(self.file_name)
self.alg = alg.Algorithm(self.file_name)
self.extract_data(self.file_name)
self.update()
def update(self):
self.canvas.delete('all')
self.canvas.create_rectangle(0, 0, window_width, window_height, fill="white")
self.draw()
def draw(self):
scale, bias = convert_coordinates(self.strokes)
if self.file_name is not None:
self.canvas.create_text(float(self.root.winfo_width()/2),
float(20), text=self.file_name, fill="black",
font=tk_font.Font(size=11, weight="bold"))
for j, stroke in enumerate(self.strokes):
# Draws the index of each stroke onto the canvas.
self.canvas.create_text(float(stroke.coordinates[0][0][0]) * scale + bias[0],
float(stroke.coordinates[0][0][1]) * scale + bias[1], text=str(j), fill="red",
font=tk_font.Font(size=12, weight="bold"))
# Connecting the stored stroke coordinates
for i in range(len(stroke.coordinates)-1):
x1 = float(stroke.coordinates[i][0][0]) * scale + bias[0]
y1 = float(stroke.coordinates[i][0][1]) * scale + bias[1]
x2 = float(stroke.coordinates[i+1][0][0]) * scale + bias[0]
y2 = float(stroke.coordinates[i+1][0][1]) * scale + bias[1]
self.canvas.create_line(x1, y1, x2, y2, width=3)
# Calculating the indexes of horizontal strokes
h_line_indexes = self.alg.get_horizontal_lines()
for index, stroke in enumerate(self.strokes):
if index in h_line_indexes:
self.canvas.create_oval(float(stroke.coordinates[0][0][0]) * scale + bias[0] - 2,
float(stroke.coordinates[0][0][1]) * scale + bias[1] - 2,
float(stroke.coordinates[0][0][0]) * scale + bias[0] + 2,
float(stroke.coordinates[0][0][1]) * scale + bias[1] + 2,
fill="green")
def load(self):
file_name = askopenfilename(filetypes=(("XML files", "*.xml"), ("All files", "*.*")),
initialdir='/home/patrik/Documents/Data')
if file_name:
try:
self.alg = alg.Algorithm(str(file_name))
self.extract_data(str(file_name))
self.file_name = str(file_name)
self.update()
except IOError:
showerror("Open Source File", "Failed to read file\n'%s'" % file_name)
def extract_data(self, file):
"""
Gathers the stroke data from the xml.
:param file: String, containing the absolute path of the file.
"""
tree = ElementTree.parse(file)
xml_root = tree.getroot()
stroke_set = None
for child in xml_root:
if str(child.tag) == 'StrokeSet':
stroke_set = child
break
if stroke_set is None:
return
self.strokes = []
for stroke in stroke_set:
coordinates = []
for point in stroke:
coordinates.append(((point.attrib['x'], point.attrib['y']), point.attrib['time']))
self.strokes.append(util.Stroke(float(stroke.attrib['start_time']), float(stroke.attrib['end_time']),
coordinates=coordinates))
def main():
root = tk.Tk()
Gui(root).pack()
root.mainloop()
if __name__ == "__main__":
main()
| mit | -7,374,649,057,308,071,000 | 36.276786 | 120 | 0.566108 | false |
PremiumGraphics/DirectView | ThirdParty/wxWidgets-3.0.2/docs/doxygen/scripts/common.py | 23 | 4360 | # format: class : {method : (prototype1, prototype2)}
# using a "*" means all prototypes
ignored_methods = {
"wxIcon": {'wxIcon': (['const char', 'int', 'int'], )},
}
# these classes are either replaced by different data types in bindings, or have equivalent / better
# functionality provided by the target language.
excluded_classes = [
"wxAny",
"wxAnyValueType",
"wxArchiveClassFactory",
"wxArchiveEntry",
"wxArchiveInputStream",
"wxArchiveIterator",
"wxArchiveNotifier",
"wxArchiveOutputStream",
"wxArray< T >",
"wxArrayString",
"wxAutomationObject",
"wxBufferedInputStream",
"wxBufferedOutputStream",
"wxCharBuffer",
"wxCharTypeBuffer",
"wxClassInfo",
"wxCmdLineParser",
"wxCondition",
"wxConnection",
"wxConnectionBase",
"wxConvAuto",
"wxCountingOutputStream",
"wxCriticalSection",
"wxCriticalSectionLocker",
"wxCSConv",
"wxDatagramSocket",
"wxDataInputStream",
"wxDataOutputStream",
"wxDir",
"wxDirTraverser",
"wxFFile",
"wxFFileInputStream",
"wxFFileOutputStream",
"wxFile",
"wxFileInputStream",
"wxFileName",
"wxFileOutputStream",
"wxFileStream",
"wxFilterClassFactory",
"wxFilterInputStream",
"wxFilterOutputStream",
"wxFSFile",
"wxFSVolume",
"wxFTP",
"wxHashMap",
"wxHashSet",
"wxHashTable",
"wxHTTP",
"wxImage::HSVValue",
"wxImage::RGBValue",
"wxInputStream",
"wxIPAddress",
"wxIPV4Address",
"wxList< T >",
"wxLongLong",
"wxMBConv",
"wxMBConvFile",
"wxMBConvUTF7",
"wxMBConvUTF8",
"wxMBConvUTF16",
"wxMBConvUTF32",
"wxMemoryBuffer",
"wxMemoryFSHandler",
"wxMemoryInputStream",
"wxMemoryOutputStream",
"wxMessageQueue< T >",
"wxModule",
"wxMutex",
"wxMutexLocker",
"wxNode< T >",
"wxObjectDataPtr< T >",
"wxObjectRefData",
"wxOutputStream",
"wxProcess",
"wxProcessEvent",
"wxProtocol",
"wxProtocolLog",
"wxRecursionGuard",
"wxRecursionGuardFlag",
"wxRegKey",
"wxScopedArray",
"wxScopedCharTypeBuffer",
"wxScopedPtr",
"wxScopedPtr< T >",
"wxSharedPtr< T >",
"wxServer",
"wxSockAddress",
"wxSocketBase",
"wxSocketClient",
"wxSocketEvent",
"wxSocketInputStream",
"wxSocketOutputStream",
"wxSortedArrayString",
"wxStopWatch",
"wxStreamBase",
"wxStreamBuffer",
"wxStreamToTextRedirector",
"wxString",
"wxStringBuffer",
"wxStringBufferLength",
"wxStringClientData",
"wxStringInputStream",
"wxStringOutputStream",
"wxTarClassFactory",
"wxTarEntry",
"wxTarInputStream",
"wxTarOutputStream",
"wxTCPClient",
"wxTCPConnection",
"wxTCPServer",
"wxTempFile",
"wxTempFileOutputStream",
"wxTextInputStream",
"wxTextOutputStream",
"wxThread",
"wxThreadEvent",
"wxThreadHelper",
"wxULongLong",
"wxUniChar",
"wxUniCharRef",
"wxURI",
"wxURL",
"wxUString",
"wxVariant",
"wxVariantData",
"wxVector< T >",
"wxVector< T >::reverse_iterator",
"wxWCharBuffer",
"wxWeakRef< T >",
"wxWeakRefDynamic< T >",
"wxZipInputStream",
"wxZipOutputStream",
"wxZlibInputStream",
"wxZlibOutputStream",
]
def get_first_value(alist):
if len(alist) > 0:
return alist[0]
else:
return ""
def make_enums(aclass):
retval = ""
for enum in aclass.enums:
retval += "enum %s {\n" % enum
num_values = len(aclass.enums[enum])
for value in aclass.enums[enum]:
retval += " %s" % value
if not value == aclass.enums[enum][-1]:
retval += ", "
retval += "\n"
retval += "};\n\n"
return retval
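def _example_make_enums():
    # Illustrative sketch (hypothetical input): make_enums() only relies on an
    # ``enums`` attribute mapping each enum name to the list of its values, so a
    # small stand-in class is enough to see the generated C-style declaration.
    class _FakeClass(object):
        enums = {'wxOrientation': ['wxHORIZONTAL', 'wxVERTICAL']}
    return make_enums(_FakeClass)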
| lgpl-3.0 | -7,107,579,167,622,636,000 | 25.585366 | 100 | 0.532798 | false |
lesserwhirls/scipy-cwt | scipy/weave/examples/vtk_example.py | 12 | 3587 | """ A simple example to show how to use weave with VTK. This lets one
create VTK objects using the standard VTK-Python API (via 'import
vtk') and then accelerate any of the computations by inlining C++ code
inside Python.
Please note the use of the `inc_dirs` and the `lib_dirs` variables in
the call to weave.inline. Point these to where your VTK headers are
and where the shared libraries are.
For every VTK object encountered the corresponding VTK header is
automatically added to the C++ code generated. If you need to add
other headers, specify them like so::
headers=['"vtkHeader1.h"', '"vtkHeader2.h"']
in the keyword arguments to weave.inline. Similarly, by default,
vtkCommon is linked into the generated module. If you need to link to
any of the other vtk libraries, add something like so::
libraries=['vtkHybrid', 'vtkFiltering']
in the keyword arguments to weave.inline. For example::
weave.inline(code, ['arr', 'v_arr'],
include_dirs = ['/usr/local/include/vtk'],
library_dirs = ['/usr/local/lib/vtk'],
headers=['"vtkHeader1.h"', '"vtkHeader2.h"'],
libraries=['vtkHybrid', 'vtkFiltering'])
This module has been tested to work with VTK-4.2 and VTK-4.4 under
Linux. YMMV on other platforms.
Author: Prabhu Ramachandran
Copyright (c) 2004, Prabhu Ramachandran
License: BSD Style.
"""
import scipy.weave as weave
import vtk
import numpy
import sys
import time
# Please change these to suit your needs. If not, this example will
# not compile.
inc_dirs = ['/usr/local/include/vtk', '/usr/include/vtk']
lib_dirs = ['/usr/local/lib/vtk', '/usr/lib/vtk']
def simple_test():
"""A simple example of how you can access the methods of a VTK
object created from Python in C++ using weave.inline.
"""
a = vtk.vtkStructuredPoints()
a.SetOrigin(1.0, 1.0, 1.0)
print "sys.getrefcount(a) = ", sys.getrefcount(a)
code=r"""
printf("a->ClassName() == %s\n", a->GetClassName());
printf("a->GetReferenceCount() == %d\n", a->GetReferenceCount());
double *origin = a->GetOrigin();
printf("Origin = %f, %f, %f\n", origin[0], origin[1], origin[2]);
"""
weave.inline(code, ['a'], include_dirs=inc_dirs, library_dirs=lib_dirs)
print "sys.getrefcount(a) = ", sys.getrefcount(a)
def array_test():
"""Tests if a large numpy array can be copied into a
vtkFloatArray rapidly by using weave.inline.
"""
# Create a large numpy array.
arr = numpy.arange(0, 10, 0.0001, 'f')
print "Number of elements in array = ", arr.shape[0]
# Copy it into a vtkFloatArray and time the process.
v_arr = vtk.vtkFloatArray()
ts = time.clock()
for i in range(arr.shape[0]):
v_arr.InsertNextValue(arr[i])
print "Time taken to do it in pure Python =", time.clock() - ts
# Now do the same thing using weave.inline
v_arr = vtk.vtkFloatArray()
code = """
int size = Narr[0];
for (int i=0; i<size; ++i)
v_arr->InsertNextValue(arr[i]);
"""
ts = time.clock()
# Note the use of the include_dirs and library_dirs.
weave.inline(code, ['arr', 'v_arr'], include_dirs=inc_dirs,
library_dirs=lib_dirs)
print "Time taken to do it using Weave =", time.clock() - ts
# Test the data to make certain that we have done it right.
print "Checking data."
for i in range(v_arr.GetNumberOfTuples()):
val = (v_arr.GetValue(i) -arr[i] )
assert (val < 1e-6), "i = %d, val= %f"%(i, val)
print "OK."
if __name__ == "__main__":
simple_test()
array_test()
| bsd-3-clause | -7,657,629,103,937,554,000 | 29.922414 | 75 | 0.648453 | false |
SUSE-Cloud/nova | nova/tests/api/ec2/test_ec2_validate.py | 11 | 10954 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudscaling, Inc.
# Author: Joe Gordon <[email protected]>
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo.config import cfg
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import timeutils
from nova import test
from nova.tests import cast_as_call
from nova.tests import fake_network
from nova.tests.image import fake
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
class EC2ValidateTestCase(test.TestCase):
def setUp(self):
super(EC2ValidateTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver')
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
fake_network.set_stub_network_methods(self.stubs)
# set up our cloud
self.cloud = cloud.CloudController()
# set up services
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
self.image_service = fake.FakeImageService()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.EC2_MALFORMED_IDS = ['foobar', '', 123]
self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']
self.ec2_id_exception_map = [(x,
exception.InvalidInstanceIDMalformed)
for x in self.EC2_MALFORMED_IDS]
self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
for x in self.EC2_VALID__IDS])
self.volume_id_exception_map = [(x,
exception.InvalidInstanceIDMalformed)
for x in self.EC2_MALFORMED_IDS]
self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
for x in self.EC2_VALID__IDS])
def fake_show(meh, context, id):
return {'id': id,
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available'}}
def fake_detail(self, context, **kwargs):
image = fake_show(self, context, None)
image['name'] = kwargs.get('name')
return [image]
fake.stub_out_image_service(self.stubs)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
# make sure we can map ami-00000001/2 to a uuid in FakeImageService
db.s3_image_create(self.context,
'cedef40a-ed67-4d10-800e-17455edce175')
db.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
super(EC2ValidateTestCase, self).tearDown()
fake.FakeImageService_reset()
#EC2_API tests (InvalidInstanceID.Malformed)
def test_console_output(self):
for ec2_id, e in self.ec2_id_exception_map:
self.assertRaises(e,
self.cloud.get_console_output,
context=self.context,
instance_id=[ec2_id])
def test_describe_instance_attribute(self):
for ec2_id, e in self.ec2_id_exception_map:
self.assertRaises(e,
self.cloud.describe_instance_attribute,
context=self.context,
instance_id=ec2_id,
attribute='kernel')
def test_instance_lifecycle(self):
lifecycle = [self.cloud.terminate_instances,
self.cloud.reboot_instances,
self.cloud.stop_instances,
self.cloud.start_instances,
]
for cmd in lifecycle:
for ec2_id, e in self.ec2_id_exception_map:
self.assertRaises(e,
cmd,
context=self.context,
instance_id=[ec2_id])
def test_create_image(self):
for ec2_id, e in self.ec2_id_exception_map:
self.assertRaises(e,
self.cloud.create_image,
context=self.context,
instance_id=ec2_id)
def test_create_snapshot(self):
for ec2_id, e in self.volume_id_exception_map:
self.assertRaises(e,
self.cloud.create_snapshot,
context=self.context,
volume_id=ec2_id)
def test_describe_volumes(self):
for ec2_id, e in self.volume_id_exception_map:
self.assertRaises(e,
self.cloud.describe_volumes,
context=self.context,
volume_id=[ec2_id])
def test_delete_volume(self):
for ec2_id, e in self.volume_id_exception_map:
self.assertRaises(e,
self.cloud.delete_volume,
context=self.context,
volume_id=ec2_id)
def test_detach_volume(self):
for ec2_id, e in self.volume_id_exception_map:
self.assertRaises(e,
self.cloud.detach_volume,
context=self.context,
volume_id=ec2_id)
class EC2TimestampValidationTestCase(test.TestCase):
"""Test case for EC2 request timestamp validation."""
def test_validate_ec2_timestamp_valid(self):
params = {'Timestamp': '2011-04-22T11:29:49Z'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertFalse(expired)
def test_validate_ec2_timestamp_old_format(self):
params = {'Timestamp': '2011-04-22T11:29:49'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertTrue(expired)
def test_validate_ec2_timestamp_not_set(self):
params = {}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertFalse(expired)
def test_validate_ec2_timestamp_ms_time_regex(self):
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123Z')
self.assertIsNotNone(result)
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123456Z')
self.assertIsNotNone(result)
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.1234567Z')
self.assertIsNone(result)
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123')
self.assertIsNone(result)
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49Z')
self.assertIsNone(result)
def test_validate_ec2_timestamp_aws_sdk_format(self):
params = {'Timestamp': '2011-04-22T11:29:49.123Z'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertFalse(expired)
expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
self.assertTrue(expired)
def test_validate_ec2_timestamp_invalid_format(self):
params = {'Timestamp': '2011-04-22T11:29:49.000P'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertTrue(expired)
def test_validate_ec2_timestamp_advanced_time(self):
#EC2 request with Timestamp in advanced time
timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
params = {'Timestamp': timeutils.strtime(timestamp,
"%Y-%m-%dT%H:%M:%SZ")}
expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
self.assertFalse(expired)
def test_validate_ec2_timestamp_advanced_time_expired(self):
timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
params = {'Timestamp': timeutils.strtime(timestamp,
"%Y-%m-%dT%H:%M:%SZ")}
expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
self.assertTrue(expired)
def test_validate_ec2_req_timestamp_not_expired(self):
params = {'Timestamp': timeutils.isotime()}
expired = ec2utils.is_ec2_timestamp_expired(params, expires=15)
self.assertFalse(expired)
def test_validate_ec2_req_timestamp_expired(self):
params = {'Timestamp': '2011-04-22T12:00:00Z'}
compare = ec2utils.is_ec2_timestamp_expired(params, expires=300)
self.assertTrue(compare)
def test_validate_ec2_req_expired(self):
params = {'Expires': timeutils.isotime()}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertTrue(expired)
def test_validate_ec2_req_not_expired(self):
expire = timeutils.utcnow() + datetime.timedelta(seconds=350)
params = {'Expires': timeutils.strtime(expire, "%Y-%m-%dT%H:%M:%SZ")}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertFalse(expired)
def test_validate_Expires_timestamp_invalid_format(self):
#EC2 request with invalid Expires
params = {'Expires': '2011-04-22T11:29:49'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertTrue(expired)
def test_validate_ec2_req_timestamp_Expires(self):
#EC2 request with both Timestamp and Expires
params = {'Timestamp': '2011-04-22T11:29:49Z',
'Expires': timeutils.isotime()}
self.assertRaises(exception.InvalidRequest,
ec2utils.is_ec2_timestamp_expired,
params)
| apache-2.0 | -8,973,099,646,914,058,000 | 39.57037 | 78 | 0.591108 | false |
xgin/letsencrypt | acme/acme/challenges.py | 9 | 19974 | """ACME Identifier Validation Challenges."""
import abc
import functools
import hashlib
import logging
import socket
from cryptography.hazmat.primitives import hashes
import OpenSSL
import requests
from acme import errors
from acme import crypto_util
from acme import fields
from acme import jose
from acme import other
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class Challenge(jose.TypedJSONObjectWithFields):
# _fields_to_partial_json | pylint: disable=abstract-method
"""ACME challenge."""
TYPES = {}
@classmethod
def from_json(cls, jobj):
try:
return super(Challenge, cls).from_json(jobj)
except jose.UnrecognizedTypeError as error:
logger.debug(error)
return UnrecognizedChallenge.from_json(jobj)
class ContinuityChallenge(Challenge): # pylint: disable=abstract-method
"""Client validation challenges."""
class DVChallenge(Challenge): # pylint: disable=abstract-method
"""Domain validation challenges."""
class ChallengeResponse(jose.TypedJSONObjectWithFields):
# _fields_to_partial_json | pylint: disable=abstract-method
"""ACME challenge response."""
TYPES = {}
resource_type = 'challenge'
resource = fields.Resource(resource_type)
class UnrecognizedChallenge(Challenge):
"""Unrecognized challenge.
ACME specification defines a generic framework for challenges and
defines some standard challenges that are implemented in this
module. However, other implementations (including peers) might
define additional challenge types, which should be ignored if
unrecognized.
:ivar jobj: Original JSON decoded object.
"""
def __init__(self, jobj):
super(UnrecognizedChallenge, self).__init__()
object.__setattr__(self, "jobj", jobj)
def to_partial_json(self):
# pylint: disable=no-member
return self.jobj
@classmethod
def from_json(cls, jobj):
return cls(jobj)
class _TokenDVChallenge(DVChallenge):
"""DV Challenge with token.
:ivar bytes token:
"""
TOKEN_SIZE = 128 / 8 # Based on the entropy value from the spec
"""Minimum size of the :attr:`token` in bytes."""
# TODO: acme-spec doesn't specify token as base64-encoded value
token = jose.Field(
"token", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=TOKEN_SIZE, minimum=True))
# XXX: rename to ~token_good_for_url
@property
def good_token(self): # XXX: @token.decoder
"""Is `token` good?
.. todo:: acme-spec wants "It MUST NOT contain any non-ASCII
characters", but it should also warrant that it doesn't
contain ".." or "/"...
"""
# TODO: check that path combined with uri does not go above
# URI_ROOT_PATH!
return b'..' not in self.token and b'/' not in self.token
class KeyAuthorizationChallengeResponse(ChallengeResponse):
"""Response to Challenges based on Key Authorization.
:param unicode key_authorization:
"""
key_authorization = jose.Field("keyAuthorization")
thumbprint_hash_function = hashes.SHA256
def verify(self, chall, account_public_key):
"""Verify the key authorization.
:param KeyAuthorization chall: Challenge that corresponds to
this response.
:param JWK account_public_key:
:return: ``True`` iff verification of the key authorization was
successful.
:rtype: bool
"""
parts = self.key_authorization.split('.') # pylint: disable=no-member
if len(parts) != 2:
logger.debug("Key authorization (%r) is not well formed",
self.key_authorization)
return False
if parts[0] != chall.encode("token"):
logger.debug("Mismatching token in key authorization: "
"%r instead of %r", parts[0], chall.encode("token"))
return False
thumbprint = jose.b64encode(account_public_key.thumbprint(
hash_function=self.thumbprint_hash_function)).decode()
if parts[1] != thumbprint:
logger.debug("Mismatching thumbprint in key authorization: "
"%r instead of %r", parts[0], thumbprint)
return False
return True
class KeyAuthorizationChallenge(_TokenDVChallenge):
# pylint: disable=abstract-class-little-used,too-many-ancestors
"""Challenge based on Key Authorization.
:param response_cls: Subclass of `KeyAuthorizationChallengeResponse`
that will be used to generate `response`.
"""
__metaclass__ = abc.ABCMeta
response_cls = NotImplemented
thumbprint_hash_function = (
KeyAuthorizationChallengeResponse.thumbprint_hash_function)
def key_authorization(self, account_key):
"""Generate Key Authorization.
:param JWK account_key:
:rtype unicode:
"""
return self.encode("token") + "." + jose.b64encode(
account_key.thumbprint(
hash_function=self.thumbprint_hash_function)).decode()
def response(self, account_key):
"""Generate response to the challenge.
:param JWK account_key:
:returns: Response (initialized `response_cls`) to the challenge.
:rtype: KeyAuthorizationChallengeResponse
"""
return self.response_cls(
key_authorization=self.key_authorization(account_key))
@abc.abstractmethod
def validation(self, account_key, **kwargs):
"""Generate validation for the challenge.
Subclasses must implement this method, but they are likely to
return completely different data structures, depending on what's
        necessary to complete the challenge. Interpretation of that
return value must be known to the caller.
:param JWK account_key:
:returns: Challenge-specific validation.
"""
raise NotImplementedError() # pragma: no cover
def response_and_validation(self, account_key, *args, **kwargs):
"""Generate response and validation.
        Convenience function that returns the results of `response` and
`validation`.
:param JWK account_key:
:rtype: tuple
"""
return (self.response(account_key),
self.validation(account_key, *args, **kwargs))
@ChallengeResponse.register
class HTTP01Response(KeyAuthorizationChallengeResponse):
"""ACME http-01 challenge response."""
typ = "http-01"
PORT = 80
"""Verification port as defined by the protocol.
You can override it (e.g. for testing) by passing ``port`` to
`simple_verify`.
"""
WHITESPACE_CUTSET = "\n\r\t "
"""Whitespace characters which should be ignored at the end of the body."""
def simple_verify(self, chall, domain, account_public_key, port=None):
"""Simple verify.
        :param challenges.HTTP01 chall: Corresponding challenge.
        :param unicode domain: Domain name being verified.
        :param JWK account_public_key: Public key for the key pair
            being authorized. If ``None`` key verification is not
            performed!
        :param int port: Port used in the validation.
:returns: ``True`` iff validation is successful, ``False``
otherwise.
:rtype: bool
"""
if not self.verify(chall, account_public_key):
logger.debug("Verification of key authorization in response failed")
return False
# TODO: ACME specification defines URI template that doesn't
# allow to use a custom port... Make sure port is not in the
# request URI, if it's standard.
if port is not None and port != self.PORT:
logger.warning(
"Using non-standard port for http-01 verification: %s", port)
domain += ":{0}".format(port)
uri = chall.uri(domain)
logger.debug("Verifying %s at %s...", chall.typ, uri)
try:
http_response = requests.get(uri)
except requests.exceptions.RequestException as error:
logger.error("Unable to reach %s: %s", uri, error)
return False
logger.debug("Received %s: %s. Headers: %s", http_response,
http_response.text, http_response.headers)
challenge_response = http_response.text.rstrip(self.WHITESPACE_CUTSET)
if self.key_authorization != challenge_response:
logger.debug("Key authorization from response (%r) doesn't match "
"HTTP response (%r)", self.key_authorization,
challenge_response)
return False
return True
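# A minimal illustrative sketch with hypothetical values: ``simple_verify``
# only strips trailing whitespace from the fetched body before comparing it
# with the key authorization, mirroring the check performed above.
def _example_http01_body_matches(key_authorization, body):
    """Return True when an HTTP response body satisfies the challenge."""
    return key_authorization == body.rstrip(HTTP01Response.WHITESPACE_CUTSET)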
@Challenge.register # pylint: disable=too-many-ancestors
class HTTP01(KeyAuthorizationChallenge):
"""ACME http-01 challenge."""
response_cls = HTTP01Response
typ = response_cls.typ
URI_ROOT_PATH = ".well-known/acme-challenge"
"""URI root path for the server provisioned resource."""
@property
def path(self):
"""Path (starting with '/') for provisioned resource.
:rtype: string
"""
return '/' + self.URI_ROOT_PATH + '/' + self.encode('token')
    def uri(self, domain):
        """Create a URI to the provisioned resource.
        Forms a URI to the HTTP server provisioned resource
        (containing :attr:`~HTTP01.token`).
:param unicode domain: Domain name being verified.
:rtype: string
"""
return "http://" + domain + self.path
def validation(self, account_key, **unused_kwargs):
"""Generate validation.
:param JWK account_key:
:rtype: unicode
"""
return self.key_authorization(account_key)
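# A minimal illustrative sketch (hypothetical domain and token): the http-01
# resource is always provisioned under URI_ROOT_PATH, so the validation URI
# is composed from plain string parts exactly as ``path`` and ``uri`` above do.
def _example_http01_uri(domain, encoded_token):
    """Compose the http-01 validation URI for an already encoded token."""
    return "http://" + domain + "/" + HTTP01.URI_ROOT_PATH + "/" + encoded_token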
@ChallengeResponse.register
class TLSSNI01Response(KeyAuthorizationChallengeResponse):
"""ACME tls-sni-01 challenge response."""
typ = "tls-sni-01"
DOMAIN_SUFFIX = b".acme.invalid"
"""Domain name suffix."""
PORT = 443
"""Verification port as defined by the protocol.
You can override it (e.g. for testing) by passing ``port`` to
`simple_verify`.
"""
@property
def z(self):
"""``z`` value used for verification.
:rtype bytes:
"""
return hashlib.sha256(
self.key_authorization.encode("utf-8")).hexdigest().lower().encode()
@property
def z_domain(self):
"""Domain name used for verification, generated from `z`.
:rtype bytes:
"""
return self.z[:32] + b'.' + self.z[32:] + self.DOMAIN_SUFFIX
def gen_cert(self, key=None, bits=2048):
"""Generate tls-sni-01 certificate.
:param OpenSSL.crypto.PKey key: Optional private key used in
certificate generation. If not provided (``None``), then
fresh key will be generated.
:param int bits: Number of bits for newly generated key.
:rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey`
"""
if key is None:
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)
return crypto_util.gen_ss_cert(key, [
# z_domain is too big to fit into CN, hence first dummy domain
'dummy', self.z_domain.decode()], force_san=True), key
def probe_cert(self, domain, **kwargs):
"""Probe tls-sni-01 challenge certificate.
:param unicode domain:
"""
# TODO: domain is not necessary if host is provided
if "host" not in kwargs:
host = socket.gethostbyname(domain)
logging.debug('%s resolved to %s', domain, host)
kwargs["host"] = host
kwargs.setdefault("port", self.PORT)
kwargs["name"] = self.z_domain
# TODO: try different methods?
# pylint: disable=protected-access
return crypto_util.probe_sni(**kwargs)
def verify_cert(self, cert):
"""Verify tls-sni-01 challenge certificate."""
# pylint: disable=protected-access
sans = crypto_util._pyopenssl_cert_or_req_san(cert)
logging.debug('Certificate %s. SANs: %s', cert.digest('sha1'), sans)
return self.z_domain.decode() in sans
def simple_verify(self, chall, domain, account_public_key,
cert=None, **kwargs):
"""Simple verify.
Verify ``validation`` using ``account_public_key``, optionally
probe tls-sni-01 certificate and check using `verify_cert`.
:param .challenges.TLSSNI01 chall: Corresponding challenge.
:param str domain: Domain name being validated.
:param JWK account_public_key:
:param OpenSSL.crypto.X509 cert: Optional certificate. If not
provided (``None``) certificate will be retrieved using
`probe_cert`.
:param int port: Port used to probe the certificate.
:returns: ``True`` iff client's control of the domain has been
verified, ``False`` otherwise.
:rtype: bool
"""
if not self.verify(chall, account_public_key):
logger.debug("Verification of key authorization in response failed")
return False
if cert is None:
try:
cert = self.probe_cert(domain=domain, **kwargs)
except errors.Error as error:
logger.debug(error, exc_info=True)
return False
return self.verify_cert(cert)
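# A minimal illustrative sketch (hypothetical key authorization value): ``z``
# is the lowercase hex SHA-256 of the key authorization, and ``z_domain``
# splits it in half with a dot before appending the ".acme.invalid" suffix,
# matching the two properties defined above.
def _example_tls_sni_01_z_domain(key_authorization):
    """Return the tls-sni-01 validation domain for a key authorization."""
    z = hashlib.sha256(key_authorization.encode("utf-8")).hexdigest().lower()
    z = z.encode()
    return z[:32] + b'.' + z[32:] + TLSSNI01Response.DOMAIN_SUFFIX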
@Challenge.register # pylint: disable=too-many-ancestors
class TLSSNI01(KeyAuthorizationChallenge):
"""ACME tls-sni-01 challenge."""
response_cls = TLSSNI01Response
typ = response_cls.typ
# boulder#962, ietf-wg-acme#22
#n = jose.Field("n", encoder=int, decoder=int)
def validation(self, account_key, **kwargs):
"""Generate validation.
:param JWK account_key:
:param OpenSSL.crypto.PKey cert_key: Optional private key used
in certificate generation. If not provided (``None``), then
fresh key will be generated.
:rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey`
"""
return self.response(account_key).gen_cert(key=kwargs.get('cert_key'))
@Challenge.register
class RecoveryContact(ContinuityChallenge):
"""ACME "recoveryContact" challenge.
:ivar unicode activation_url:
:ivar unicode success_url:
:ivar unicode contact:
"""
typ = "recoveryContact"
activation_url = jose.Field("activationURL", omitempty=True)
success_url = jose.Field("successURL", omitempty=True)
contact = jose.Field("contact", omitempty=True)
@ChallengeResponse.register
class RecoveryContactResponse(ChallengeResponse):
"""ACME "recoveryContact" challenge response.
:ivar unicode token:
"""
typ = "recoveryContact"
token = jose.Field("token", omitempty=True)
@Challenge.register
class ProofOfPossession(ContinuityChallenge):
"""ACME "proofOfPossession" challenge.
:ivar .JWAAlgorithm alg:
:ivar bytes nonce: Random data, **not** base64-encoded.
:ivar hints: Various clues for the client (:class:`Hints`).
"""
typ = "proofOfPossession"
NONCE_SIZE = 16
class Hints(jose.JSONObjectWithFields):
"""Hints for "proofOfPossession" challenge.
:ivar JWK jwk: JSON Web Key
:ivar tuple cert_fingerprints: `tuple` of `unicode`
:ivar tuple certs: Sequence of :class:`acme.jose.ComparableX509`
certificates.
:ivar tuple subject_key_identifiers: `tuple` of `unicode`
:ivar tuple issuers: `tuple` of `unicode`
:ivar tuple authorized_for: `tuple` of `unicode`
"""
jwk = jose.Field("jwk", decoder=jose.JWK.from_json)
cert_fingerprints = jose.Field(
"certFingerprints", omitempty=True, default=())
certs = jose.Field("certs", omitempty=True, default=())
subject_key_identifiers = jose.Field(
"subjectKeyIdentifiers", omitempty=True, default=())
serial_numbers = jose.Field("serialNumbers", omitempty=True, default=())
issuers = jose.Field("issuers", omitempty=True, default=())
authorized_for = jose.Field("authorizedFor", omitempty=True, default=())
@certs.encoder
def certs(value): # pylint: disable=missing-docstring,no-self-argument
return tuple(jose.encode_cert(cert) for cert in value)
@certs.decoder
def certs(value): # pylint: disable=missing-docstring,no-self-argument
return tuple(jose.decode_cert(cert) for cert in value)
alg = jose.Field("alg", decoder=jose.JWASignature.from_json)
nonce = jose.Field(
"nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=NONCE_SIZE))
hints = jose.Field("hints", decoder=Hints.from_json)
@ChallengeResponse.register
class ProofOfPossessionResponse(ChallengeResponse):
"""ACME "proofOfPossession" challenge response.
:ivar bytes nonce: Random data, **not** base64-encoded.
    :ivar acme.other.Signature signature: Signature of this message.
"""
typ = "proofOfPossession"
NONCE_SIZE = ProofOfPossession.NONCE_SIZE
nonce = jose.Field(
"nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=NONCE_SIZE))
signature = jose.Field("signature", decoder=other.Signature.from_json)
def verify(self):
"""Verify the challenge."""
# self.signature is not Field | pylint: disable=no-member
return self.signature.verify(self.nonce)
@Challenge.register # pylint: disable=too-many-ancestors
class DNS(_TokenDVChallenge):
"""ACME "dns" challenge."""
typ = "dns"
LABEL = "_acme-challenge"
"""Label clients prepend to the domain name being validated."""
def gen_validation(self, account_key, alg=jose.RS256, **kwargs):
"""Generate validation.
:param .JWK account_key: Private account key.
:param .JWA alg:
:returns: This challenge wrapped in `.JWS`
:rtype: .JWS
"""
return jose.JWS.sign(
payload=self.json_dumps(sort_keys=True).encode('utf-8'),
key=account_key, alg=alg, **kwargs)
def check_validation(self, validation, account_public_key):
"""Check validation.
:param JWS validation:
:param JWK account_public_key:
:rtype: bool
"""
if not validation.verify(key=account_public_key):
return False
try:
return self == self.json_loads(
validation.payload.decode('utf-8'))
except jose.DeserializationError as error:
logger.debug("Checking validation for DNS failed: %s", error)
return False
def gen_response(self, account_key, **kwargs):
"""Generate response.
:param .JWK account_key: Private account key.
:param .JWA alg:
:rtype: DNSResponse
"""
        return DNSResponse(validation=self.gen_validation(
            account_key, **kwargs))
def validation_domain_name(self, name):
"""Domain name for TXT validation record.
:param unicode name: Domain name being validated.
"""
return "{0}.{1}".format(self.LABEL, name)
@ChallengeResponse.register
class DNSResponse(ChallengeResponse):
"""ACME "dns" challenge response.
:param JWS validation:
"""
typ = "dns"
validation = jose.Field("validation", decoder=jose.JWS.from_json)
def check_validation(self, chall, account_public_key):
"""Check validation.
:param challenges.DNS chall:
:param JWK account_public_key:
:rtype: bool
"""
return chall.check_validation(self.validation, account_public_key)
| apache-2.0 | 8,573,538,049,007,753,000 | 30.30721 | 80 | 0.631871 | false |
maas/maas | src/maasserver/websockets/handlers/__init__.py | 1 | 3033 | # Copyright 2015-2020 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Handlers for the WebSocket connections."""
# Note: please keep this array in a consistent order with the imports below,
# so that it's easy to sanity-check.
__all__ = [
    "BootResourceHandler",
    "ConfigHandler",
    "ControllerHandler",
    "DHCPSnippetHandler",
    "DeviceHandler",
    "DiscoveryHandler",
    "DomainHandler",
    "EventHandler",
    "FabricHandler",
    "GeneralHandler",
    "IPRangeHandler",
    "MachineHandler",
    "NodeDeviceHandler",
    "NodeResultHandler",
    "NotificationHandler",
    "PackageRepositoryHandler",
    "PodHandler",
    "ResourcePoolHandler",
    "SSHKeyHandler",
    "SSLKeyHandler",
    "ScriptHandler",
    "ServiceHandler",
    "SpaceHandler",
    "StaticRouteHandler",
    "SubnetHandler",
    "TagHandler",
    "TokenHandler",
    "UserHandler",
    "VLANHandler",
    "ZoneHandler",
]
from maasserver.websockets.handlers.bootresource import BootResourceHandler
from maasserver.websockets.handlers.config import ConfigHandler
from maasserver.websockets.handlers.controller import ControllerHandler
from maasserver.websockets.handlers.device import DeviceHandler
from maasserver.websockets.handlers.dhcpsnippet import DHCPSnippetHandler
from maasserver.websockets.handlers.discovery import DiscoveryHandler
from maasserver.websockets.handlers.domain import DomainHandler
from maasserver.websockets.handlers.event import EventHandler
from maasserver.websockets.handlers.fabric import FabricHandler
from maasserver.websockets.handlers.general import GeneralHandler
from maasserver.websockets.handlers.iprange import IPRangeHandler
from maasserver.websockets.handlers.machine import MachineHandler
from maasserver.websockets.handlers.node_device import NodeDeviceHandler
from maasserver.websockets.handlers.node_result import NodeResultHandler
from maasserver.websockets.handlers.notification import NotificationHandler
from maasserver.websockets.handlers.packagerepository import (
PackageRepositoryHandler,
)
from maasserver.websockets.handlers.pod import PodHandler
from maasserver.websockets.handlers.resourcepool import ResourcePoolHandler
from maasserver.websockets.handlers.script import ScriptHandler
from maasserver.websockets.handlers.service import ServiceHandler
from maasserver.websockets.handlers.space import SpaceHandler
from maasserver.websockets.handlers.sshkey import SSHKeyHandler
from maasserver.websockets.handlers.sslkey import SSLKeyHandler
from maasserver.websockets.handlers.staticroute import StaticRouteHandler
from maasserver.websockets.handlers.subnet import SubnetHandler
from maasserver.websockets.handlers.tag import TagHandler
from maasserver.websockets.handlers.token import TokenHandler
from maasserver.websockets.handlers.user import UserHandler
from maasserver.websockets.handlers.vlan import VLANHandler
from maasserver.websockets.handlers.zone import ZoneHandler
| agpl-3.0 | 1,806,827,872,331,416,600 | 40.547945 | 76 | 0.817343 | false |
kalvdans/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_Z.py | 47 | 6803 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, sign, arange
from .go_benchmark import Benchmark
class Zacharov(Benchmark):
r"""
Zacharov objective function.
This class defines the Zacharov [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Zacharov}}(x) = \sum_{i=1}^{n} x_i^2 + \left ( \frac{1}{2}
\sum_{i=1}^{n} i x_i \right )^2
+ \left ( \frac{1}{2} \sum_{i=1}^{n} i x_i
\right )^4
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([-1, 1], [-1, 1])
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
u = sum(x ** 2)
v = sum(arange(1, self.N + 1) * x)
return u + (0.5 * v) ** 2 + (0.5 * v) ** 4
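# A minimal illustrative sketch, not part of the benchmark suite: evaluating
# the Zacharov instance at its documented optimum reproduces ``fglob``.
# ``bench.N`` and ``nfev`` bookkeeping come from the Benchmark base class.
def _example_zacharov_at_optimum():
    """Evaluate Zacharov at [0, 0]; the returned value equals ``fglob``."""
    bench = Zacharov(dimensions=2)
    return bench.fun(0.0 * arange(bench.N))  # -> 0.0 == bench.fglob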
class ZeroSum(Benchmark):
r"""
ZeroSum objective function.
This class defines the ZeroSum [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{ZeroSum}}(x) = \begin{cases}
0 & \textrm{if} \sum_{i=1}^n x_i = 0 \\
1 + \left(10000 \left |\sum_{i=1}^n x_i\right|
\right)^{0.5} & \textrm{otherwise}
\end{cases}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` where :math:`\sum_{i=1}^n x_i = 0`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
if abs(sum(x)) < 3e-16:
return 0.0
return 1.0 + (10000.0 * abs(sum(x))) ** 0.5
class Zettl(Benchmark):
r"""
Zettl objective function.
This class defines the Zettl [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Zettl}}(x) = \frac{1}{4} x_{1} + \left(x_{1}^{2} - 2 x_{1}
+ x_{2}^{2}\right)^{2}
with :math:`x_i \in [-1, 5]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.0037912` for :math:`x = [-0.029896, 0.0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-0.02989597760285287, 0.0]]
self.fglob = -0.003791237220468656
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + x[1] ** 2 - 2 * x[0]) ** 2 + 0.25 * x[0]
class Zimmerman(Benchmark):
r"""
Zimmerman objective function.
This class defines the Zimmerman [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Zimmerman}}(x) = \max \left[Zh1(x), Zp(Zh2(x))
\textrm{sgn}(Zh2(x)), Zp(Zh3(x))
\textrm{sgn}(Zh3(x)),
Zp(-x_1)\textrm{sgn}(x_1),
Zp(-x_2)\textrm{sgn}(x_2) \right]
Where, in this exercise:
.. math::
        \begin{cases}
        Zh1(x) = 9 - x_1 - x_2 \\
        Zh2(x) = (x_1 - 3)^2 + (x_2 - 2)^2 - 16 \\
        Zh3(x) = x_1x_2 - 14 \\
        Zp(t) = 100(1 + t)
        \end{cases}
Where :math:`x` is a vector and :math:`t` is a scalar.
Here, :math:`x_i \in [0, 100]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [7, 2]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO implementation from Gavana
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [100.0] * self.N))
self.custom_bounds = ([0.0, 8.0], [0.0, 8.0])
self.global_optimum = [[7.0, 2.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
Zh1 = lambda x: 9.0 - x[0] - x[1]
Zh2 = lambda x: (x[0] - 3.0) ** 2.0 + (x[1] - 2.0) ** 2.0 - 16.0
Zh3 = lambda x: x[0] * x[1] - 14.0
Zp = lambda x: 100.0 * (1.0 + x)
return max(Zh1(x),
Zp(Zh2(x)) * sign(Zh2(x)),
Zp(Zh3(x)) * sign(Zh3(x)),
Zp(-x[0]) * sign(x[0]),
Zp(-x[1]) * sign(x[1]))
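# A minimal illustrative sketch: at the documented optimum [7, 2] the terms
# Zh1, Zh2 and Zh3 above all evaluate to zero, so the maximum over the
# penalty terms is zero as well and the returned value equals ``fglob``.
def _example_zimmerman_at_optimum():
    """Evaluate Zimmerman at [7.0, 2.0]; the returned value is 0.0."""
    bench = Zimmerman(dimensions=2)
    return bench.fun([7.0, 2.0])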
class Zirilli(Benchmark):
r"""
    Zirilli objective function.
This class defines the Zirilli [1]_ global optimization problem. This is a
unimodal minimization problem defined as follows:
.. math::
f_{\text{Zirilli}}(x) = 0.25x_1^4 - 0.5x_1^2 + 0.1x_1 + 0.5x_2^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.3523` for :math:`x = [-1.0465, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
self.global_optimum = [[-1.0465, 0.0]]
self.fglob = -0.35238603
def fun(self, x, *args):
self.nfev += 1
return 0.25 * x[0] ** 4 - 0.5 * x[0] ** 2 + 0.1 * x[0] + 0.5 * x[1] ** 2
| bsd-3-clause | 1,862,371,237,927,457,000 | 28.837719 | 80 | 0.51345 | false |
r-o-b-b-i-e/pootle | pootle/apps/accounts/managers.py | 1 | 3833 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.contrib.auth.models import BaseUserManager
from django.db.models import Q
from django.utils import timezone
from django.utils.lru_cache import lru_cache
from pootle_app.models.permissions import check_user_permission
from pootle_translationproject.models import TranslationProject
from . import utils
__all__ = ('UserManager', )
class UserManager(BaseUserManager):
"""Pootle User manager.
This manager hides the 'nobody' and 'default' users for normal
queries, since they are special users. Code that needs access to these
users should use the methods get_default_user and get_nobody_user.
"""
PERMISSION_USERS = ('default', 'nobody')
META_USERS = ('default', 'nobody', 'system')
def _create_user(self, username, email, password, is_superuser,
**extra_fields):
"""Creates and saves a User with the given username, email,
password and superuser status.
Adapted from the core ``auth.User`` model's ``UserManager``: we
have no use for the ``is_staff`` field.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
utils.validate_email_unique(email)
user = self.model(username=username, email=email,
is_active=True, is_superuser=is_superuser,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
return self._create_user(username, email, password, False,
**extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
return self._create_user(username, email, password, True,
**extra_fields)
@lru_cache()
def get_default_user(self):
return self.get_queryset().get(username='default')
@lru_cache()
def get_nobody_user(self):
return self.get_queryset().get(username='nobody')
@lru_cache()
def get_system_user(self):
return self.get_queryset().get(username='system')
def hide_permission_users(self):
return self.get_queryset().exclude(username__in=self.PERMISSION_USERS)
def hide_meta(self):
return self.get_queryset().exclude(username__in=self.META_USERS)
def meta_users(self):
return self.get_queryset().filter(username__in=self.META_USERS)
def get_users_with_permission(self, permission_code, project, language,
tp=None):
default = self.get_default_user()
tp = (
tp
or TranslationProject.objects.get(
project=project,
language=language))
directory = tp.directory
if check_user_permission(default, permission_code, directory):
return self.hide_meta().filter(is_active=True)
user_filter = Q(
permissionset__positive_permissions__codename=permission_code
)
user_filter &= (
Q(permissionset__directory__pootle_path=directory.pootle_path)
| Q(permissionset__directory__pootle_path=language.pootle_path)
| Q(permissionset__directory__pootle_path=project.pootle_path)
)
user_filter |= Q(is_superuser=True)
return self.get_queryset().filter(user_filter).distinct()
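# A minimal illustrative sketch (hypothetical data, assumes a configured
# Django project with this manager on the swapped-in User model): typical
# calls look like the ones below. Nothing here runs at import time.
def _example_manager_usage():
    """Create a user and return the queryset without the meta users."""
    from django.contrib.auth import get_user_model
    user_model = get_user_model()
    user_model.objects.create_user(
        "translator", email="translator@example.com", password="s3cret")
    return user_model.objects.hide_meta()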
| gpl-3.0 | -9,179,225,736,352,510,000 | 33.531532 | 79 | 0.634751 | false |
run2/citytour | 4symantec/Lib/site-packages/numpy-1.9.2-py2.7-win-amd64.egg/numpy/core/fromnumeric.py | 23 | 90688 | """Module containing non-deprecated functions borrowed from Numeric.
"""
from __future__ import division, absolute_import, print_function
import types
import warnings
from .. import VisibleDeprecationWarning
from . import multiarray as mu
from . import umath as um
from . import numerictypes as nt
from .numeric import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
try:
_gentype = types.GeneratorType
except AttributeError:
_gentype = type(None)
# save away Python sum
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
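    # Dispatch helper shared by the functions below: look up ``method`` on
    # ``asarray(obj)``, call it, and pass the result back through the object's
    # ``__array_wrap__`` (when defined) so ndarray subclasses such as matrix
    # or masked arrays survive the function-style API.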
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
This function does the same thing as "fancy" indexing (indexing arrays
using arrays); however, it can be easier to use if you need elements
along a given axis.
Parameters
----------
a : array_like
The source array.
indices : array_like
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
try:
take = a.take
except AttributeError:
return _wrapit(a, 'take', indices, axis, out, mode)
return take(indices, axis, out, mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is inferred
from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the elements
into the reshaped array using this index order. 'C' means to
read / write the elements using C-like index order, with the last axis index
changing fastest, back to the first axis index changing slowest. 'F'
means to read / write the elements using Fortran-like index order, with
the first index changing fastest, and the last index changing slowest.
Note that the 'C' and 'F' options take no account of the memory layout
of the underlying array, and only refer to the order of indexing. 'A'
means to read / write the elements in Fortran-like index order if `a` is
Fortran *contiguous* in memory, C-like order otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
    copying the data. If you want an error to be raised if the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose make the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying the
# initial object.
>>> c = b.view()
>>> c.shape = (20)
AttributeError: incompatible shape for a non-contiguous array
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array. For example,
let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
try:
reshape = a.reshape
except AttributeError:
return _wrapit(a, 'reshape', newshape, order=order)
return reshape(newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of `n` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each `i`. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode=raise`` (the default), then, first of all, each element of
`a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
`i` (in that range) is the value at the `(j0, j1, ..., jm)` position
in `Ba` - then the value at the same position in the new array is the
value in `Bchoices[i]` at that same position;
* if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
integer; negative integers are mapped to 0; values greater than `n-1`
are mapped to `n-1`; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in `[0, n-1]`, where `n` is the number
of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod `n`
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
try:
choose = a.choose
except AttributeError:
return _wrapit(a, 'choose', choices, out=out, mode=mode)
return choose(choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : {int, array of ints}
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
try:
repeat = a.repeat
except AttributeError:
return _wrapit(a, 'repeat', repeats, axis)
return repeat(repeats, axis)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
See Also
--------
putmask, place
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
return a.put(ind, v, mode)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
If `a` is an ndarray, then a view of `a` is returned; otherwise
a new array is created.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
try:
swapaxes = a.swapaxes
except AttributeError:
return _wrapit(a, 'swapaxes', axis1, axis2)
return swapaxes(axis1, axis2)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
rollaxis
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
try:
transpose = a.transpose
except AttributeError:
return _wrapit(a, 'transpose', axes)
return transpose(axes)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a way that
the value of the element in kth position is in the position it would be in
a sorted array. All elements smaller than the kth element are moved before
this element and all equal or greater are moved behind it. The ordering of
the elements in the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The kth value of the element will be in
its final sorted position and all smaller elements will be moved before
it and all equal or greater elements behind it.
        The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative order. The
available algorithms have the following properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently, partitioning
along the last axis is faster and uses less space than partitioning
along any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the algorithm
specified by the `kind` keyword. It returns an array of indices of the
same shape as `a` that index data along the given axis in partitioned
order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The kth element will be in its final
sorted position and all smaller elements will be moved before it and
all larger elements behind it.
        The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all of them into
their sorted position at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
"""
try:
argpartition = a.argpartition
except AttributeError:
return _wrapit(a, 'argpartition',kth, axis, kind, order)
return argpartition(kth, axis, kind=kind, order=order)
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The three available algorithms have the following
properties:
=========== ======= ============= ============ =======
kind speed worst case work space stable
=========== ======= ============= ============ =======
'quicksort' 1 O(n^2) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
=========== ======= ============= ============ =======
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.sort(axis, kind, order)
return a
def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
try:
argsort = a.argsort
except AttributeError:
return _wrapit(a, 'argsort', axis, kind, order)
return argsort(axis, kind, order)
def argmax(a, axis=None):
"""
Indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
"""
try:
argmax = a.argmax
except AttributeError:
return _wrapit(a, 'argmax', axis)
return argmax(axis)
def argmin(a, axis=None):
"""
Return the indices of the minimum values along an axis.
See Also
--------
argmax : Similar function. Please refer to `numpy.argmax` for detailed
documentation.
"""
try:
argmin = a.argmin
except AttributeError:
return _wrapit(a, 'argmin', axis)
return argmin(axis)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
.. versionadded:: 1.7.0
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
try:
searchsorted = a.searchsorted
except AttributeError:
return _wrapit(a, 'searchsorted', v, side, sorter)
return searchsorted(v, side, sorter)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
if not Na: return mu.zeros(new_shape, a.dtype.char)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
if total_size == 0:
return a[:0]
if extra != 0:
n_copies = n_copies+1
extra = Na-extra
a = concatenate( (a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
def squeeze(a, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=(2,)).shape
(1, 3)
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
try:
# First try to use the new axis= parameter
return squeeze(axis=axis)
except TypeError:
# For backwards compatibility
return squeeze()
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
In NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
    In NumPy 1.10, it will return a read/write view, and writing to the
    returned array will alter your original array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead of
just ``np.diagonal(a)``. This will work with both past and future versions
of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, a 1-D array containing the diagonal is returned.
If the dimension of `a` is larger, then an array of diagonals is
returned, "packed" from left-most dimension to right-most (e.g.,
if `a` is 3-D, then the diagonals are "packed" along rows).
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
"""
return asarray(a).diagonal(offset, axis1, axis2)
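# --- Illustrative sketch (not part of the NumPy API): the defensive-copy idiom
# recommended above, so code behaves the same whether ``diagonal`` returns a
# copy or a (read-only) view.
def _diagonal_copy_example():
    import numpy as np
    a = np.arange(4).reshape(2, 2)
    d = np.diagonal(a).copy()  # safe to modify on past and future NumPy versions
    d[0] = 99                  # never alters ``a``
    return a, d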
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
return asarray(a).trace(offset, axis1, axis2, dtype, out)
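# --- Illustrative sketch (not part of the NumPy API): exercises the ``offset``
# argument, which is documented above but not shown in the examples.
def _trace_offset_example():
    import numpy as np
    a = np.arange(9).reshape(3, 3)
    main = np.trace(a)               # 0 + 4 + 8 == 12
    upper = np.trace(a, offset=1)    # 1 + 5 == 6
    lower = np.trace(a, offset=-1)   # 3 + 7 == 10
    return main, upper, lower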
def ravel(a, order='C'):
"""
Return a flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index changing
fastest, back to the first axis index changing slowest. 'F' means to
index the elements in Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that the 'C'
and 'F' options take no account of the memory layout of the underlying
array, and only refer to the order of axis indexing. 'A' means to read
the elements in Fortran-like index order if `a` is Fortran *contiguous*
in memory, C-like order otherwise. 'K' means to read the elements in
the order they occur in memory, except for reversing the data when
strides are negative. By default, 'C' index order is used.
Returns
-------
1d_array : ndarray
Output of the same dtype as `a`, and of shape ``(a.size,)``.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
Notes
-----
In C-like (row-major) order, in two dimensions, the row index varies the
slowest, and the column index the quickest. This can be generalized to
multiple dimensions, where row-major order implies that the index along the
first axis varies slowest, and the index along the last quickest. The
opposite holds for Fortran-like, or column-major, index ordering.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print np.ravel(x)
[1 2 3 4 5 6]
>>> print x.reshape(-1)
[1 2 3 4 5 6]
>>> print np.ravel(x, order='F')
[1 4 2 5 3 6]
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> print np.ravel(x.T)
[1 4 2 5 3 6]
>>> print np.ravel(x.T, order='A')
[1 2 3 4 5 6]
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
return asarray(a).ravel(order)
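# --- Illustrative sketch (not part of the NumPy API): makes the "copy is made
# only if needed" remark concrete by contrasting ``ravel`` (a view for
# contiguous input) with ``flatten`` (always an independent copy).
def _ravel_view_example():
    import numpy as np
    x = np.arange(6).reshape(2, 3)
    r = np.ravel(x)    # contiguous C-ordered input, so this is a view
    f = x.flatten()    # always a copy
    r[0] = 99          # visible through ``x`` because ``r`` shares its data
    return x[0, 0] == 99 and f[0] == 0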
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`, containing
the indices of the non-zero elements in that dimension. The
corresponding non-zero values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> x = np.eye(3)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> np.nonzero(x)
(array([0, 1, 2]), array([0, 1, 2]))
>>> x[np.nonzero(x)]
array([ 1., 1., 1.])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the boolean array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
try:
nonzero = a.nonzero
except AttributeError:
res = _wrapit(a, 'nonzero')
else:
res = nonzero()
return res
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
alen
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
np.extract: Equivalent method when working on 1-D arrays
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
try:
compress = a.compress
except AttributeError:
return _wrapit(a, 'compress', condition, axis, out)
return compress(condition, axis, out)
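# --- Illustrative sketch (not part of the NumPy API): shows the 1-D
# equivalence between ``compress`` and ``extract`` mentioned above.
def _compress_extract_example():
    import numpy as np
    a = np.array([10, 20, 30, 40])
    cond = np.array([False, True, False, True])
    by_compress = np.compress(cond, a)  # array([20, 40])
    by_extract = np.extract(cond, a)    # same result for 1-D input
    return np.array_equal(by_compress, by_extract)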
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : scalar or array_like
Minimum value.
a_max : scalar or array_like
Maximum value. If `a_min` or `a_max` are array_like, then they will
be broadcasted to the shape of `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
try:
clip = a.clip
except AttributeError:
return _wrapit(a, 'clip', a_min, a_max, out)
return clip(a_min, a_max, out)
def sum(a, axis=None, dtype=None, out=None, keepdims=False):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed.
        The default (`axis` = `None`) is to perform a sum over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a sum is performed on multiple
axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which
the elements are summed. By default, the dtype of `a` is used.
An exception is when `a` has an integer type with less precision
than the default platform integer. In that case, the default
platform integer is used instead.
out : ndarray, optional
Array into which the output is placed. By default, a new array is
created. If `out` is given, it must be of the appropriate shape
(the shape of `a` with `axis` removed, i.e.,
``numpy.delete(a.shape, axis)``). Its type is preserved. See
`doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
"""
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
elif type(a) is not mu.ndarray:
try:
sum = a.sum
except AttributeError:
return _methods._sum(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
# NOTE: Dropping the keepdims parameters here...
return sum(axis=axis, dtype=dtype, out=out)
else:
return _methods._sum(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
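# --- Illustrative sketch (not part of the NumPy API): exercises the
# tuple-of-ints ``axis`` form (added in 1.7) and ``keepdims``, neither of which
# appears in the examples above.
def _sum_axis_tuple_example():
    import numpy as np
    a = np.arange(24, dtype=float).reshape(2, 3, 4)
    over_two_axes = np.sum(a, axis=(0, 2))        # shape (3,)
    kept = np.sum(a, axis=(0, 2), keepdims=True)  # shape (1, 3, 1)
    normalised = a / kept                         # keepdims lets this broadcast
    return over_two_axes.shape, kept.shape, normalised.shape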
def product (a, axis=None, dtype=None, out=None, keepdims=False):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def sometrue(a, axis=None, out=None, keepdims=False):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function
"""
arr = asanyarray(a)
try:
return arr.any(axis=axis, out=out, keepdims=keepdims)
except TypeError:
return arr.any(axis=axis, out=out)
def alltrue (a, axis=None, out=None, keepdims=False):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
arr = asanyarray(a)
try:
return arr.all(axis=axis, out=out, keepdims=keepdims)
except TypeError:
return arr.all(axis=axis, out=out)
def any(a, axis=None, out=None, keepdims=False):
"""
Test whether any array element along a given axis evaluates to True.
Returns single boolean unless `axis` is not ``None``
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (`axis` = `None`) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See `doc.ufuncs` (Section "Output arguments") for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False], dtype=bool)
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array([False])
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array([ True], dtype=bool), array([ True], dtype=bool))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
arr = asanyarray(a)
try:
return arr.any(axis=axis, out=out, keepdims=keepdims)
except TypeError:
return arr.any(axis=axis, out=out)
def all(a, axis=None, out=None, keepdims=False):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (`axis` = `None`) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
"Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False], dtype=bool)
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array([False])
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z # doctest: +SKIP
(28293632, 28293632, array([ True], dtype=bool))
"""
arr = asanyarray(a)
try:
return arr.all(axis=axis, out=out, keepdims=keepdims)
except TypeError:
return arr.all(axis=axis, out=out)
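# --- Illustrative sketch (not part of the NumPy API): demonstrates the
# ``keepdims`` flag shared by ``any`` and ``all``; the kept axis lets the
# result broadcast against the original array, as the parameter docs state.
def _all_keepdims_example():
    import numpy as np
    a = np.array([[True, False], [True, True]])
    per_row = np.all(a, axis=1, keepdims=True)  # shape (2, 1) instead of (2,)
    masked = a & per_row                        # broadcasts row-wise against ``a``
    return per_row.shape, masked.shape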
def cumsum (a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th order discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
try:
cumsum = a.cumsum
except AttributeError:
return _wrapit(a, 'cumsum', axis, dtype, out)
return cumsum(axis, dtype, out)
def cumproduct(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ptp(a, axis=None, out=None):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
Parameters
----------
a : array_like
Input values.
axis : int, optional
Axis along which to find the peaks. By default, flatten the
array.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.ptp(x, axis=0)
array([2, 2])
>>> np.ptp(x, axis=1)
array([1, 1])
"""
try:
ptp = a.ptp
except AttributeError:
return _wrapit(a, 'ptp', axis, out)
return ptp(axis, out)
def amax(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.nanmax(b)
4.0
"""
if type(a) is not mu.ndarray:
try:
amax = a.max
except AttributeError:
return _methods._amax(a, axis=axis,
out=out, keepdims=keepdims)
# NOTE: Dropping the keepdims parameter
return amax(axis=axis, out=out)
else:
return _methods._amax(a, axis=axis,
out=out, keepdims=keepdims)
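# --- Illustrative sketch (not part of the NumPy API): contrasts the reduction
# ``amax`` with the element-wise ``maximum`` recommended in the notes above.
def _amax_vs_maximum_example():
    import numpy as np
    a = np.array([[1, 5, 2],
                  [4, 3, 6]])
    reduced = np.amax(a, axis=0)           # array([4, 5, 6])
    elementwise = np.maximum(a[0], a[1])   # same values, computed pairwise
    return np.array_equal(reduced, elementwise)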
def amin(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.nanmin(b)
0.0
"""
if type(a) is not mu.ndarray:
try:
amin = a.min
except AttributeError:
return _methods._amin(a, axis=axis,
out=out, keepdims=keepdims)
# NOTE: Dropping the keepdims parameter
return amin(axis=axis, out=out)
else:
return _methods._amin(a, axis=axis,
out=out, keepdims=keepdims)
def alen(a):
"""
Return the length of the first dimension of the input array.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def prod(a, axis=None, dtype=None, out=None, keepdims=False):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed.
        The default (`axis` = `None`) is to perform a product over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a product is performed on multiple
axes, instead of a single axis or all the axes as before.
dtype : data-type, optional
The data-type of the returned array, as well as of the accumulator
in which the elements are multiplied. By default, if `a` is of
integer type, `dtype` is the default platform integer. (Note: if
the type of `a` is unsigned, then so is `dtype`.) Otherwise,
the dtype is the same as that of `a`.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the
output values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x) #random
16
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == np.int
True
"""
if type(a) is not mu.ndarray:
try:
prod = a.prod
except AttributeError:
return _methods._prod(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
return prod(axis=axis, dtype=dtype, out=out)
else:
return _methods._prod(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def rank(a):
"""
Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
.. note::
This function is deprecated in NumPy 1.9 to avoid confusion with
`numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
should be used instead.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in Numpy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0
"""
warnings.warn(
"`rank` is deprecated; use the `ndim` attribute or function instead. "
"To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
VisibleDeprecationWarning)
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, Numpy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
Refer to `around` for full documentation.
See Also
--------
around : equivalent function
"""
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See `doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([ 2., 3.])
>>> np.mean(a, axis=1)
array([ 1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.546875
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806
"""
if type(a) is not mu.ndarray:
try:
mean = a.mean
return mean(axis=axis, dtype=dtype, out=out)
except AttributeError:
pass
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : int, optional
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
the divisor ``N - ddof`` is used instead. In standard statistical
practice, ``ddof=1`` provides an unbiased estimator of the variance
of the infinite population. ``ddof=0`` provides a maximum likelihood
estimate of the variance for normally distributed variables. The
standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949
>>> np.std(a, axis=0)
array([ 1., 1.])
>>> np.std(a, axis=1)
array([ 0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2,512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.std(a)
0.45172946707416706
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925552653
"""
if type(a) is not mu.ndarray:
try:
std = a.std
return std(axis=axis, dtype=dtype, out=out, ddof=ddof)
except AttributeError:
pass
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
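# --- Illustrative sketch (not part of the NumPy API): shows how ``ddof``
# switches the divisor between N and N - 1, as described in the notes above.
def _std_ddof_example():
    import numpy as np
    x = np.array([1.0, 2.0, 3.0, 4.0])
    population = np.std(x, ddof=0)  # divisor N, maximum-likelihood estimate
    sample = np.std(x, ddof=1)      # divisor N - 1, unbiased variance estimate
    return population, sample       # approximately (1.118, 1.291)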
def var(a, axis=None, dtype=None, out=None, ddof=0,
keepdims=False):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the variance is computed. The default is to compute
the variance of the flattened array.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
        the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std , mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1,2],[3,4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([ 1., 1.])
>>> np.var(a, axis=1)
array([ 0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2,512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.var(a)
0.20405951142311096
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932997387
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.20250000000000001
"""
if type(a) is not mu.ndarray:
try:
var = a.var
return var(axis=axis, dtype=dtype, out=out, ddof=ddof)
except AttributeError:
pass
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
| mit | -4,679,614,008,861,307,000 | 29.867257 | 84 | 0.589461 | false |
ecino/compassion-switzerland | partner_communication_switzerland/wizards/sub_sponsorship_wizard.py | 2 | 3563 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2017 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models
class SubSponsorshipWizard(models.TransientModel):
_inherit = "sds.subsponsorship.wizard"
@api.multi
def create_subsponsorship(self):
res = super(SubSponsorshipWizard, self).create_subsponsorship()
if self.child_id:
            # In this case the sponsorship is already made,
            # so we generate the departure letter.
sponsorship_id = self.env.context.get('active_id')
sponsorship = self.env['recurring.contract'].browse(sponsorship_id)
res = self.send_sub_communication(sponsorship) or res
return res
@api.multi
def no_sub(self):
""" No SUB for the sponsorship. """
res = super(SubSponsorshipWizard, self).no_sub()
sponsorship_id = self.env.context.get('active_id')
contract = self.env['recurring.contract'].browse(sponsorship_id)
res = self.send_sub_communication(contract) or res
return res
@api.model
def send_sub_communication(self, sponsorship):
"""
        Selects and sends the correct communication after making a sub sponsorship.
:param sponsorship: recurring.contract record
:return: Action for opening generated communication or False if no
communication was generated
"""
config = False
res = False
if sponsorship.state != 'active':
# Make sure new child has all info
sponsorship.sub_sponsorship_id.child_id.with_context(
async_mode=False).get_infos()
# Generate depart letter
child = sponsorship.child_id
lifecycle = child.lifecycle_ids and child.lifecycle_ids[0]
lifecycle_type = lifecycle and lifecycle.type or 'Unplanned Exit'
if lifecycle_type == 'Planned Exit':
config = self.env.ref(
'partner_communication_switzerland.'
'lifecycle_child_planned_exit'
)
elif lifecycle_type == 'Unplanned Exit':
config = self.env.ref(
'partner_communication_switzerland.'
'lifecycle_child_unplanned_exit'
)
if lifecycle and lifecycle.request_reason == 'deceased':
sponsorship = sponsorship.with_context(
default_need_call=True)
else:
# In case of No SUB, the contract can still be active.
# Contract is active -> generate no sub confirmation
config = self.env.ref(
'partner_communication_switzerland.planned_no_sub')
if config:
communications = sponsorship.send_communication(config)
res = {
'name': communications[0].subject,
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'partner.communication.job',
'domain': [('id', 'in', communications.ids)],
'target': 'current',
'context': self.env.context
}
return res
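# --- Illustrative sketch (not part of this module): one way the wizard could be
# driven from server-side code. The model name comes from ``_inherit`` above and
# ``active_id`` mirrors how the methods read the sponsorship from the context;
# the empty ``create`` values are an assumption and may need adjusting for
# required fields of the base wizard.
def _example_decline_sub(env, sponsorship):
    wizard = env['sds.subsponsorship.wizard'].with_context(
        active_id=sponsorship.id).create({})
    # Declining the SUB triggers the no-sub / departure communication above.
    return wizard.no_sub()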
| agpl-3.0 | 7,113,222,375,823,380,000 | 40.430233 | 79 | 0.552063 | false |
MakeHer/edx-platform | lms/djangoapps/courseware/tests/test_video_mongo.py | 11 | 53200 | # -*- coding: utf-8 -*-
"""Video xmodule tests in mongo."""
import ddt
import itertools
import json
from collections import OrderedDict
from path import Path as path
from lxml import etree
from mock import patch, MagicMock, Mock
from nose.plugins.attrib import attr
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from xmodule.video_module import VideoDescriptor, bumper_utils, video_utils, rewrite_video_url
from xmodule.x_module import STUDENT_VIEW
from xmodule.tests.test_video import VideoDescriptorTestBase, instantiate_descriptor
from xmodule.tests.test_import import DummySystem
from xmodule.video_module.transcripts_utils import save_to_store, Transcript
from xmodule.modulestore.inheritance import own_metadata
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE
)
from edxval.api import (
create_profile, create_video, get_video_info, ValCannotCreateError, ValVideoNotFoundError
)
from . import BaseTestXmodule
from .test_video_xml import SOURCE_XML
from .test_video_handlers import TestVideo
@attr('shard_1')
class TestVideoYouTube(TestVideo):
METADATA = {}
def test_video_constructor(self):
"""Make sure that all parameters extracted correctly from xml"""
context = self.item_descriptor.render(STUDENT_VIEW).content
sources = [u'example.mp4', u'example.webm']
expected_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
"saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state",
"autoplay": False,
"streams": "0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg",
"sub": "a_sub_file.srt.sjson",
"sources": sources,
"captionDataDir": None,
"showCaptions": "true",
"generalSpeed": 1.0,
"speed": None,
"savedVideoPosition": 0.0,
"start": 3603.0,
"end": 3610.0,
"transcriptLanguage": "en",
"transcriptLanguages": OrderedDict({"en": "English", "uk": u"Українська"}),
"ytTestTimeout": 1500,
"ytApiUrl": "https://www.youtube.com/iframe_api",
"ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
"ytKey": None,
"transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
"transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
"autohideHtml5": False,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
)
@attr('shard_1')
class TestVideoNonYouTube(TestVideo):
"""Integration tests: web client + mongo."""
DATA = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson"
download_video="true"
start_time="01:00:03" end_time="01:00:10"
>
<source src="example.mp4"/>
<source src="example.webm"/>
</video>
"""
MODEL_DATA = {
'data': DATA,
}
METADATA = {}
def test_video_constructor(self):
"""Make sure that if the 'youtube' attribute is omitted in XML, then
the template generates an empty string for the YouTube streams.
"""
context = self.item_descriptor.render(STUDENT_VIEW).content
sources = [u'example.mp4', u'example.webm']
expected_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
"saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state",
"autoplay": False,
"streams": "1.00:3_yD_cEKoCk",
"sub": "a_sub_file.srt.sjson",
"sources": sources,
"captionDataDir": None,
"showCaptions": "true",
"generalSpeed": 1.0,
"speed": None,
"savedVideoPosition": 0.0,
"start": 3603.0,
"end": 3610.0,
"transcriptLanguage": "en",
"transcriptLanguages": OrderedDict({"en": "English"}),
"ytTestTimeout": 1500,
"ytApiUrl": "https://www.youtube.com/iframe_api",
"ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
"ytKey": None,
"transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
"transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
"autohideHtml5": False,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
)
@attr('shard_1')
class TestGetHtmlMethod(BaseTestXmodule):
'''
Make sure that `get_html` works correctly.
'''
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super(TestGetHtmlMethod, self).setUp()
self.setup_course()
self.default_metadata_dict = OrderedDict({
"saveStateUrl": "",
"autoplay": settings.FEATURES.get('AUTOPLAY_VIDEOS', True),
"streams": "1.00:3_yD_cEKoCk",
"sub": "a_sub_file.srt.sjson",
"sources": '[]',
"captionDataDir": None,
"showCaptions": "true",
"generalSpeed": 1.0,
"speed": None,
"savedVideoPosition": 0.0,
"start": 3603.0,
"end": 3610.0,
"transcriptLanguage": "en",
"transcriptLanguages": OrderedDict({"en": "English"}),
"ytTestTimeout": 1500,
"ytApiUrl": "https://www.youtube.com/iframe_api",
"ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
"ytKey": None,
"transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
"transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
"autohideHtml5": False,
})
def test_get_html_track(self):
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="{sub}" download_track="{download_track}"
start_time="01:00:03" end_time="01:00:10" download_video="true"
>
<source src="example.mp4"/>
<source src="example.webm"/>
{track}
{transcripts}
</video>
"""
cases = [
{
'download_track': u'true',
'track': u'<track src="http://www.example.com/track"/>',
'sub': u'a_sub_file.srt.sjson',
'expected_track_url': u'http://www.example.com/track',
'transcripts': '',
},
{
'download_track': u'true',
'track': u'',
'sub': u'a_sub_file.srt.sjson',
'expected_track_url': u'a_sub_file.srt.sjson',
'transcripts': '',
},
{
'download_track': u'true',
'track': u'',
'sub': u'',
'expected_track_url': None,
'transcripts': '',
},
{
'download_track': u'false',
'track': u'<track src="http://www.example.com/track"/>',
'sub': u'a_sub_file.srt.sjson',
'expected_track_url': None,
'transcripts': '',
},
{
'download_track': u'true',
'track': u'',
'sub': u'',
'expected_track_url': u'a_sub_file.srt.sjson',
'transcripts': '<transcript language="uk" src="ukrainian.srt" />',
},
]
sources = [u'example.mp4', u'example.webm']
expected_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': '',
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
for data in cases:
metadata = self.default_metadata_dict
metadata['sources'] = sources
DATA = SOURCE_XML.format(
download_track=data['download_track'],
track=data['track'],
sub=data['sub'],
transcripts=data['transcripts'],
)
self.initialize_module(data=DATA)
track_url = self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'download'
).rstrip('/?')
context = self.item_descriptor.render(STUDENT_VIEW).content
metadata.update({
'transcriptLanguages': {"en": "English"} if not data['transcripts'] else {"uk": u'Українська'},
'transcriptLanguage': u'en' if not data['transcripts'] or data.get('sub') else u'uk',
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sub': data['sub'],
})
expected_context.update({
'transcript_download_format': (
None if self.item_descriptor.track and self.item_descriptor.download_track else 'srt'
),
'track': (
track_url if data['expected_track_url'] == u'a_sub_file.srt.sjson' else data['expected_track_url']
),
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(metadata)
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
)
def test_get_html_source(self):
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
>
{sources}
</video>
"""
cases = [
# self.download_video == True
{
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'download_video_link': u'example_source.mp4',
'sources': [u'example.mp4', u'example.webm'],
},
},
{
'download_video': 'true',
'source': '',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'download_video_link': u'example.mp4',
'sources': [u'example.mp4', u'example.webm'],
},
},
{
'download_video': 'true',
'source': '',
'sources': [],
'result': {},
},
# self.download_video == False
{
'download_video': 'false',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'sources': [u'example.mp4', u'example.webm'],
},
},
]
initial_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': self.default_metadata_dict,
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
for data in cases:
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources']
)
self.initialize_module(data=DATA)
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result'].get('sources', []),
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result'].get('download_video_link'),
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
def test_get_html_with_non_existent_edx_video_id(self):
"""
Tests the VideoModule get_html where a edx_video_id is given but a video is not found
"""
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
no_video_data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "meow",
'result': {
'download_video_link': u'example_source.mp4',
'sources': [u'example.mp4', u'example.webm'],
}
}
DATA = SOURCE_XML.format(
download_video=no_video_data['download_video'],
source=no_video_data['source'],
sources=no_video_data['sources'],
edx_video_id=no_video_data['edx_video_id']
)
self.initialize_module(data=DATA)
# Referencing a non-existent VAL ID in courseware won't cause an error --
# it'll just fall back to the values in the VideoDescriptor.
self.assertIn("example_source.mp4", self.item_descriptor.render(STUDENT_VIEW).content)
@patch('edxval.api.get_video_info')
def test_get_html_with_mocked_edx_video_id(self, mock_get_video_info):
mock_get_video_info.return_value = {
'url': '/edxval/video/example',
'edx_video_id': u'example',
'duration': 111.0,
'client_video_id': u'The example video',
'encoded_videos': [
{
'url': u'http://www.meowmix.com',
'file_size': 25556,
'bitrate': 9600,
'profile': u'desktop_mp4'
}
]
}
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
data = {
# test with download_video set to false and make sure download_video_link is not set (is None)
'download_video': 'false',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "mock item",
'result': {
'download_video_link': None,
# make sure the desktop_mp4 url is included as part of the alternative sources.
'sources': [u'example.mp4', u'example.webm', u'http://www.meowmix.com'],
}
}
# Video found for edx_video_id
metadata = self.default_metadata_dict
metadata['autoplay'] = False
metadata['sources'] = ""
initial_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
'metadata': metadata
}
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id']
)
self.initialize_module(data=DATA)
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result']['sources'],
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result']['download_video_link'],
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
def test_get_html_with_existing_edx_video_id(self):
# create test profiles and their encodings
encoded_videos = []
for profile, extension in [("desktop_webm", "webm"), ("desktop_mp4", "mp4")]:
create_profile(profile)
encoded_videos.append(
dict(
url=u"http://fake-video.edx.org/thundercats.{}".format(extension),
file_size=9000,
bitrate=42,
profile=profile,
)
)
result = create_video(
dict(
client_video_id="Thunder Cats",
duration=111,
edx_video_id="thundercats",
status='test',
encoded_videos=encoded_videos
)
)
self.assertEqual(result, "thundercats")
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "thundercats",
'result': {
'download_video_link': u'http://fake-video.edx.org/thundercats.mp4',
# make sure the urls for the various encodings are included as part of the alternative sources.
'sources': [u'example.mp4', u'example.webm'] +
[video['url'] for video in encoded_videos],
}
}
# Video found for edx_video_id
metadata = self.default_metadata_dict
metadata['sources'] = ""
initial_context = {
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
'metadata': metadata,
}
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id']
)
self.initialize_module(data=DATA)
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result']['sources'],
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result']['download_video_link'],
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
# pylint: disable=invalid-name
@patch('xmodule.video_module.video_module.BrandingInfoConfig')
@patch('xmodule.video_module.video_module.rewrite_video_url')
def test_get_html_cdn_source(self, mocked_get_video, mock_BrandingInfoConfig):
"""
Test if sources got from CDN
"""
mock_BrandingInfoConfig.get_config.return_value = {
"CN": {
'url': 'http://www.xuetangx.com',
'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
'logo_tag': 'Video hosted by XuetangX.com'
}
}
def side_effect(*args, **kwargs):
cdn = {
'http://example.com/example.mp4': 'http://cdn-example.com/example.mp4',
'http://example.com/example.webm': 'http://cdn-example.com/example.webm',
}
return cdn.get(args[1])
mocked_get_video.side_effect = side_effect
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
edx_video_id="{edx_video_id}"
start_time="01:00:03" end_time="01:00:10"
>
{sources}
</video>
"""
case_data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="http://example.com/example.mp4"/>
<source src="http://example.com/example.webm"/>
""",
'result': {
'download_video_link': u'example_source.mp4',
'sources': [
u'http://cdn-example.com/example.mp4',
u'http://cdn-example.com/example.webm'
],
},
}
# test with and without edx_video_id specified.
cases = [
dict(case_data, edx_video_id=""),
dict(case_data, edx_video_id="vid-v1:12345"),
]
initial_context = {
'branding_info': {
'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
'logo_tag': 'Video hosted by XuetangX.com',
'url': 'http://www.xuetangx.com'
},
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': None,
'handout': None,
'id': None,
'metadata': self.default_metadata_dict,
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
for data in cases:
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id'],
)
self.initialize_module(data=DATA)
self.item_descriptor.xmodule_runtime.user_location = 'CN'
context = self.item_descriptor.render('student_view').content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result'].get('sources', []),
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result'].get('download_video_link'),
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
@attr('shard_1')
class TestVideoCDNRewriting(BaseTestXmodule):
"""
Tests for Video CDN.
"""
def setUp(self, *args, **kwargs):
super(TestVideoCDNRewriting, self).setUp(*args, **kwargs)
self.original_video_file = "original_video.mp4"
self.original_video_url = "http://www.originalvideo.com/" + self.original_video_file
@patch.dict("django.conf.settings.CDN_VIDEO_URLS",
{"CN": "https://chinacdn.cn/"})
def test_rewrite_video_url_success(self):
"""
Test successful CDN request.
"""
cdn_response_video_url = settings.CDN_VIDEO_URLS["CN"] + self.original_video_file
self.assertEqual(
rewrite_video_url(settings.CDN_VIDEO_URLS["CN"], self.original_video_url),
cdn_response_video_url
)
@patch.dict("django.conf.settings.CDN_VIDEO_URLS",
{"CN": "https://chinacdn.cn/"})
def test_rewrite_url_concat(self):
"""
        Test that rewritten URLs are returned clean even when the CDN base URL has trailing slashes
"""
cdn_response_video_url = settings.CDN_VIDEO_URLS["CN"] + "original_video.mp4"
self.assertEqual(
rewrite_video_url(settings.CDN_VIDEO_URLS["CN"] + "///", self.original_video_url),
cdn_response_video_url
)
def test_rewrite_video_url_invalid_url(self):
"""
Test if no alternative video in CDN exists.
"""
invalid_cdn_url = 'http://http://fakecdn.com/'
self.assertIsNone(rewrite_video_url(invalid_cdn_url, self.original_video_url))
def test_none_args(self):
"""
Ensure None args return None
"""
self.assertIsNone(rewrite_video_url(None, None))
def test_emptystring_args(self):
"""
        Ensure empty string args return None
"""
self.assertIsNone(rewrite_video_url("", ""))
@attr('shard_1')
class TestVideoDescriptorInitialization(BaseTestXmodule):
"""
Make sure that module initialization works correctly.
"""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super(TestVideoDescriptorInitialization, self).setUp()
self.setup_course()
def test_source_not_in_html5sources(self):
metadata = {
'source': 'http://example.org/video.mp4',
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertIn('source', fields)
self.assertEqual(self.item_descriptor.source, 'http://example.org/video.mp4')
self.assertTrue(self.item_descriptor.download_video)
self.assertTrue(self.item_descriptor.source_visible)
def test_source_in_html5sources(self):
metadata = {
'source': 'http://example.org/video.mp4',
'html5_sources': ['http://example.org/video.mp4'],
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertNotIn('source', fields)
self.assertTrue(self.item_descriptor.download_video)
self.assertFalse(self.item_descriptor.source_visible)
def test_download_video_is_explicitly_set(self):
metadata = {
'track': u'http://some_track.srt',
'source': 'http://example.org/video.mp4',
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
'download_video': False,
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertIn('source', fields)
self.assertIn('download_video', fields)
self.assertFalse(self.item_descriptor.download_video)
self.assertTrue(self.item_descriptor.source_visible)
self.assertTrue(self.item_descriptor.download_track)
def test_source_is_empty(self):
metadata = {
'source': '',
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertNotIn('source', fields)
self.assertFalse(self.item_descriptor.download_video)
@attr('shard_1')
@ddt.ddt
class TestEditorSavedMethod(BaseTestXmodule):
"""
Make sure that `editor_saved` method works correctly.
"""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super(TestEditorSavedMethod, self).setUp()
self.setup_course()
self.metadata = {
'source': 'http://youtu.be/3_yD_cEKoCk',
'html5_sources': ['http://example.org/video.mp4'],
}
# path to subs_3_yD_cEKoCk.srt.sjson file
self.file_name = 'subs_3_yD_cEKoCk.srt.sjson'
# pylint: disable=no-value-for-parameter
self.test_dir = path(__file__).abspath().dirname().dirname().dirname().dirname().dirname()
self.file_path = self.test_dir + '/common/test/data/uploads/' + self.file_name
@ddt.data(TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE)
def test_editor_saved_when_html5_sub_not_exist(self, default_store):
"""
        When a youtube_sub exists but no html5_sub is present for
        html5_sources, the editor_saved function will generate a new
        html5_sub for the video.
"""
self.MODULESTORE = default_store # pylint: disable=invalid-name
self.initialize_module(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
with open(self.file_path, "r") as myfile:
save_to_store(myfile.read(), self.file_name, 'text/sjson', item.location)
item.sub = "3_yD_cEKoCk"
# subs_video.srt.sjson does not exist before calling editor_saved function
with self.assertRaises(NotFoundError):
Transcript.get_asset(item.location, 'subs_video.srt.sjson')
old_metadata = own_metadata(item)
# calling editor_saved will generate new file subs_video.srt.sjson for html5_sources
item.editor_saved(self.user, old_metadata, None)
self.assertIsInstance(Transcript.get_asset(item.location, 'subs_3_yD_cEKoCk.srt.sjson'), StaticContent)
self.assertIsInstance(Transcript.get_asset(item.location, 'subs_video.srt.sjson'), StaticContent)
@ddt.data(TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE)
def test_editor_saved_when_youtube_and_html5_subs_exist(self, default_store):
"""
        When both youtube_sub and html5_sub already exist, no new sub
        will be generated by the editor_saved function.
"""
self.MODULESTORE = default_store
self.initialize_module(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
        with open(self.file_path, "r") as myfile:
            # read once: a second read() on the same handle returns an empty
            # string, which would store an empty subs_video.srt.sjson
            sjson_content = myfile.read()
            save_to_store(sjson_content, self.file_name, 'text/sjson', item.location)
            save_to_store(sjson_content, 'subs_video.srt.sjson', 'text/sjson', item.location)
item.sub = "3_yD_cEKoCk"
# subs_3_yD_cEKoCk.srt.sjson and subs_video.srt.sjson already exist
self.assertIsInstance(Transcript.get_asset(item.location, self.file_name), StaticContent)
self.assertIsInstance(Transcript.get_asset(item.location, 'subs_video.srt.sjson'), StaticContent)
old_metadata = own_metadata(item)
with patch('xmodule.video_module.video_module.manage_video_subtitles_save') as manage_video_subtitles_save:
item.editor_saved(self.user, old_metadata, None)
self.assertFalse(manage_video_subtitles_save.called)
@ddt.ddt
class TestVideoDescriptorStudentViewJson(TestCase):
"""
Tests for the student_view_data method on VideoDescriptor.
"""
TEST_DURATION = 111.0
TEST_PROFILE = "mobile"
TEST_SOURCE_URL = "http://www.example.com/source.mp4"
TEST_LANGUAGE = "ge"
TEST_ENCODED_VIDEO = {
'profile': TEST_PROFILE,
'bitrate': 333,
'url': 'http://example.com/video',
'file_size': 222,
}
TEST_EDX_VIDEO_ID = 'test_edx_video_id'
def setUp(self):
super(TestVideoDescriptorStudentViewJson, self).setUp()
sample_xml = (
"<video display_name='Test Video'> " +
"<source src='" + self.TEST_SOURCE_URL + "'/> " +
"<transcript language='" + self.TEST_LANGUAGE + "' src='german_translation.srt' /> " +
"</video>"
)
self.transcript_url = "transcript_url"
self.video = instantiate_descriptor(data=sample_xml)
self.video.runtime.handler_url = Mock(return_value=self.transcript_url)
def setup_val_video(self, associate_course_in_val=False):
"""
Creates a video entry in VAL.
Arguments:
            associate_course_in_val - If True, associates the test course with the video in VAL.
"""
create_profile('mobile')
create_video({
'edx_video_id': self.TEST_EDX_VIDEO_ID,
'client_video_id': 'test_client_video_id',
'duration': self.TEST_DURATION,
'status': 'dummy',
'encoded_videos': [self.TEST_ENCODED_VIDEO],
'courses': [self.video.location.course_key] if associate_course_in_val else [],
})
self.val_video = get_video_info(self.TEST_EDX_VIDEO_ID) # pylint: disable=attribute-defined-outside-init
def get_result(self, allow_cache_miss=True):
"""
Returns the result from calling the video's student_view_data method.
Arguments:
allow_cache_miss is passed in the context to the student_view_data method.
"""
context = {
"profiles": [self.TEST_PROFILE],
"allow_cache_miss": "True" if allow_cache_miss else "False"
}
return self.video.student_view_data(context)
def verify_result_with_fallback_url(self, result):
"""
Verifies the result is as expected when returning "fallback" video data (not from VAL).
"""
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": None,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
"encoded_videos": {"fallback": {"url": self.TEST_SOURCE_URL, "file_size": 0}},
}
)
def verify_result_with_val_profile(self, result):
"""
Verifies the result is as expected when returning video data from VAL.
"""
self.assertDictContainsSubset(
result.pop("encoded_videos")[self.TEST_PROFILE],
self.TEST_ENCODED_VIDEO,
)
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": self.TEST_DURATION,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
}
)
def test_only_on_web(self):
self.video.only_on_web = True
result = self.get_result()
self.assertDictEqual(result, {"only_on_web": True})
def test_no_edx_video_id(self):
result = self.get_result()
self.verify_result_with_fallback_url(result)
@ddt.data(
*itertools.product([True, False], [True, False], [True, False])
)
@ddt.unpack
def test_with_edx_video_id(self, allow_cache_miss, video_exists_in_val, associate_course_in_val):
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
if video_exists_in_val:
self.setup_val_video(associate_course_in_val)
result = self.get_result(allow_cache_miss)
if video_exists_in_val and (associate_course_in_val or allow_cache_miss):
self.verify_result_with_val_profile(result)
else:
self.verify_result_with_fallback_url(result)
@attr('shard_1')
class VideoDescriptorTest(TestCase, VideoDescriptorTestBase):
"""
Tests for video descriptor that requires access to django settings.
"""
def setUp(self):
super(VideoDescriptorTest, self).setUp()
self.descriptor.runtime.handler_url = MagicMock()
    def test_get_context(self):
        """
        Test get_context.
        This test is located here and not in xmodule.tests because get_context
        calls editable_metadata_fields, which, in turn, uses settings.LANGUAGES
        from the Django settings.
        """
correct_tabs = [
{
'name': "Basic",
'template': "video/transcripts.html",
'current': True
},
{
'name': 'Advanced',
'template': 'tabs/metadata-edit-tab.html'
}
]
rendered_context = self.descriptor.get_context()
self.assertListEqual(rendered_context['tabs'], correct_tabs)
def test_export_val_data(self):
self.descriptor.edx_video_id = 'test_edx_video_id'
create_profile('mobile')
create_video({
'edx_video_id': self.descriptor.edx_video_id,
'client_video_id': 'test_client_video_id',
'duration': 111,
'status': 'dummy',
'encoded_videos': [{
'profile': 'mobile',
'url': 'http://example.com/video',
'file_size': 222,
'bitrate': 333,
}],
})
actual = self.descriptor.definition_to_xml(resource_fs=None)
expected_str = """
<video download_video="false" url_name="SampleProblem">
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
</video_asset>
</video>
"""
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
def test_export_val_data_not_found(self):
self.descriptor.edx_video_id = 'nonexistent'
actual = self.descriptor.definition_to_xml(resource_fs=None)
expected_str = """<video download_video="false" url_name="SampleProblem"/>"""
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
def test_import_val_data(self):
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video edx_video_id="test_edx_video_id">
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
</video_asset>
</video>
"""
id_generator = Mock()
id_generator.target_course_id = "test_course_id"
video = VideoDescriptor.from_xml(xml_data, module_system, id_generator)
self.assertEqual(video.edx_video_id, 'test_edx_video_id')
video_data = get_video_info(video.edx_video_id)
self.assertEqual(video_data['client_video_id'], 'test_client_video_id')
self.assertEqual(video_data['duration'], 111)
self.assertEqual(video_data['status'], 'imported')
self.assertEqual(video_data['courses'], [id_generator.target_course_id])
self.assertEqual(video_data['encoded_videos'][0]['profile'], 'mobile')
self.assertEqual(video_data['encoded_videos'][0]['url'], 'http://example.com/video')
self.assertEqual(video_data['encoded_videos'][0]['file_size'], 222)
self.assertEqual(video_data['encoded_videos'][0]['bitrate'], 333)
def test_import_val_data_invalid(self):
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
# Negative file_size is invalid
xml_data = """
<video edx_video_id="test_edx_video_id">
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="-222" bitrate="333"/>
</video_asset>
</video>
"""
with self.assertRaises(ValCannotCreateError):
VideoDescriptor.from_xml(xml_data, module_system, id_generator=Mock())
with self.assertRaises(ValVideoNotFoundError):
get_video_info("test_edx_video_id")
class TestVideoWithBumper(TestVideo):
"""
Tests rendered content in presence of video bumper.
"""
CATEGORY = "video"
METADATA = {}
FEATURES = settings.FEATURES
@patch('xmodule.video_module.bumper_utils.get_bumper_settings')
def test_is_bumper_enabled(self, get_bumper_settings):
"""
        Check that the bumper is shown when ENABLE_VIDEO_BUMPER is True
        and not shown when it is False.
Assume that bumper settings are correct.
"""
self.FEATURES.update({
"SHOW_BUMPER_PERIODICITY": 1,
"ENABLE_VIDEO_BUMPER": True,
})
get_bumper_settings.return_value = {
"video_id": "edx_video_id",
"transcripts": {},
}
with override_settings(FEATURES=self.FEATURES):
self.assertTrue(bumper_utils.is_bumper_enabled(self.item_descriptor))
self.FEATURES.update({"ENABLE_VIDEO_BUMPER": False})
with override_settings(FEATURES=self.FEATURES):
self.assertFalse(bumper_utils.is_bumper_enabled(self.item_descriptor))
@patch('xmodule.video_module.bumper_utils.is_bumper_enabled')
@patch('xmodule.video_module.bumper_utils.get_bumper_settings')
@patch('edxval.api.get_urls_for_profiles')
def test_bumper_metadata(self, get_url_for_profiles, get_bumper_settings, is_bumper_enabled):
"""
Test content with rendered bumper metadata.
"""
get_url_for_profiles.return_value = {
"desktop_mp4": "http://test_bumper.mp4",
"desktop_webm": "",
}
get_bumper_settings.return_value = {
"video_id": "edx_video_id",
"transcripts": {},
}
is_bumper_enabled.return_value = True
content = self.item_descriptor.render(STUDENT_VIEW).content
sources = [u'example.mp4', u'example.webm']
expected_context = {
'branding_info': None,
'license': None,
'bumper_metadata': json.dumps(OrderedDict({
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
"showCaptions": "true",
"sources": ["http://test_bumper.mp4"],
'streams': '',
"transcriptLanguage": "en",
"transcriptLanguages": {"en": "English"},
"transcriptTranslationUrl": video_utils.set_query_parameter(
self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'), 'is_bumper', 1
),
"transcriptAvailableTranslationsUrl": video_utils.set_query_parameter(
self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'), 'is_bumper', 1
),
})),
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
"saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state",
"autoplay": False,
"streams": "0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg",
"sub": "a_sub_file.srt.sjson",
"sources": sources,
"captionDataDir": None,
"showCaptions": "true",
"generalSpeed": 1.0,
"speed": None,
"savedVideoPosition": 0.0,
"start": 3603.0,
"end": 3610.0,
"transcriptLanguage": "en",
"transcriptLanguages": OrderedDict({"en": "English", "uk": u"Українська"}),
"ytTestTimeout": 1500,
"ytApiUrl": "https://www.youtube.com/iframe_api",
"ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
"ytKey": None,
"transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
"transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
"autohideHtml5": False,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': json.dumps(OrderedDict({
"url": "http://img.youtube.com/vi/ZwkTiUPN0mg/0.jpg",
"type": "youtube"
}))
}
expected_content = self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
self.assertEqual(content, expected_content)
| agpl-3.0 | -7,379,228,736,660,660,000 | 38.61997 | 118 | 0.542091 | false |
kidburglar/youtube-dl | youtube_dl/extractor/acast.py | 6 | 4722 | # coding: utf-8
from __future__ import unicode_literals
import re
import functools
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
int_or_none,
try_get,
unified_timestamp,
OnDemandPagedList,
)
class ACastIE(InfoExtractor):
IE_NAME = 'acast'
_VALID_URL = r'''(?x)
https?://
(?:
(?:(?:embed|www)\.)?acast\.com/|
play\.acast\.com/s/
)
(?P<channel>[^/]+)/(?P<id>[^/#?]+)
'''
_TESTS = [{
'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
'md5': 'a02393c74f3bdb1801c3ec2695577ce0',
'info_dict': {
'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
'ext': 'mp3',
'title': '2. Raggarmordet - Röster ur det förflutna',
'description': 'md5:4f81f6d8cf2e12ee21a321d8bca32db4',
'timestamp': 1477346700,
'upload_date': '20161024',
'duration': 2766.602563,
'creator': 'Anton Berg & Martin Johnson',
'series': 'Spår',
'episode': '2. Raggarmordet - Röster ur det förflutna',
}
}, {
'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015',
'only_matching': True,
}, {
'url': 'https://play.acast.com/s/rattegangspodden/s04e09-styckmordet-i-helenelund-del-22',
'only_matching': True,
}]
def _real_extract(self, url):
channel, display_id = re.match(self._VALID_URL, url).groups()
s = self._download_json(
'https://play-api.acast.com/stitch/%s/%s' % (channel, display_id),
display_id)['result']
media_url = s['url']
cast_data = self._download_json(
'https://play-api.acast.com/splash/%s/%s' % (channel, display_id),
display_id)['result']
e = cast_data['episode']
title = e['name']
return {
'id': compat_str(e['id']),
'display_id': display_id,
'url': media_url,
'title': title,
'description': e.get('description') or e.get('summary'),
'thumbnail': e.get('image'),
'timestamp': unified_timestamp(e.get('publishingDate')),
'duration': float_or_none(s.get('duration') or e.get('duration')),
'filesize': int_or_none(e.get('contentLength')),
'creator': try_get(cast_data, lambda x: x['show']['author'], compat_str),
'series': try_get(cast_data, lambda x: x['show']['name'], compat_str),
'season_number': int_or_none(e.get('seasonNumber')),
'episode': title,
'episode_number': int_or_none(e.get('episodeNumber')),
}
class ACastChannelIE(InfoExtractor):
IE_NAME = 'acast:channel'
_VALID_URL = r'''(?x)
https?://
(?:
(?:www\.)?acast\.com/|
play\.acast\.com/s/
)
(?P<id>[^/#?]+)
'''
_TESTS = [{
'url': 'https://www.acast.com/todayinfocus',
'info_dict': {
'id': '4efc5294-5385-4847-98bd-519799ce5786',
'title': 'Today in Focus',
'description': 'md5:9ba5564de5ce897faeb12963f4537a64',
},
'playlist_mincount': 35,
}, {
'url': 'http://play.acast.com/s/ft-banking-weekly',
'only_matching': True,
}]
_API_BASE_URL = 'https://play.acast.com/api/'
_PAGE_SIZE = 10
@classmethod
def suitable(cls, url):
return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
def _fetch_page(self, channel_slug, page):
casts = self._download_json(
self._API_BASE_URL + 'channels/%s/acasts?page=%s' % (channel_slug, page),
channel_slug, note='Download page %d of channel data' % page)
for cast in casts:
yield self.url_result(
'https://play.acast.com/s/%s/%s' % (channel_slug, cast['url']),
'ACast', cast['id'])
def _real_extract(self, url):
channel_slug = self._match_id(url)
channel_data = self._download_json(
self._API_BASE_URL + 'channels/%s' % channel_slug, channel_slug)
entries = OnDemandPagedList(functools.partial(
self._fetch_page, channel_slug), self._PAGE_SIZE)
return self.playlist_result(entries, compat_str(
channel_data['id']), channel_data['name'], channel_data.get('description'))
| unlicense | 9,202,544,453,285,693,000 | 36.736 | 98 | 0.519186 | false |
onelife/rt-thread | bsp/stm32/stm32f769-st-disco/rtconfig.py | 4 | 4042 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m7'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
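# Example invocation (assumption -- this BSP is normally built with scons, which
# imports rtconfig.py): the toolchain can be selected from the environment, e.g.
#   RTT_CC=gcc RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin scons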
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler executable path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M7.fp.sp'
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M7'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv5_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M7'
AFLAGS += ' --fpu VFPv5_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT)
| gpl-2.0 | 4,391,509,777,458,285,600 | 26.127517 | 152 | 0.560861 | false |
onebit0fme/conveyance-tornado | main.py | 1 | 1523 | import tornado.ioloop
import tornado.web
import sys
import json
import jsonschema
from conveyance import Conveyance, ValidationError
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, I'm Conveyance")
def post(self, *args, **kwargs):
self.set_header("Content-Type", "application/json")
try:
data = json.loads(self.request.body.decode("utf8"))
conv = Conveyance(data)
compose = conv.compose()(conv.definitions, conv.resources)
except (ValidationError, jsonschema.ValidationError) as e:
# print('this')
# raise tornado.web.HTTPError(404, reason=e.args[0])
# compose = {
# "validation_error": e
# }
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
data = {
"error": str(e)
}
self.write(json.dumps(data))
raise tornado.web.Finish()
except:
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
            data = {
                # the exception class object is not JSON serializable, so report its name
                "error": str(sys.exc_info()[0])
            }
self.write(json.dumps(data))
raise tornado.web.Finish()
self.write(json.dumps(compose))
application = tornado.web.Application([
(r"/", MainHandler),
])
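# Illustrative request (hypothetical payload -- the exact document layout is
# defined by the Conveyance schema, not by this file): POST a JSON body to the
# root handler and read back the composed result, or a JSON error with HTTP 401:
#   curl -X POST http://localhost:8888/ -d '{"definitions": {}, "resources": {}}'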
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
| gpl-2.0 | -5,091,105,326,290,818,000 | 26.690909 | 74 | 0.563362 | false |
qsnake/gpaw | doc/install/BGP/bgp_gcc.py | 3 | 1315 | #!/usr/bin/env python
"""bgp_gcc.py is a wrapper for the BGP gcc compiler,
converting/removing incompatible gcc args. """
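# Illustrative rewrite (hypothetical file name): a call such as
#   bgp_gcc.py -O3 -std=c99 -fPIC -c c/libxc/src/foo.c
# drops the listed flags and, because the file matches the non-C99 libxc glob,
# re-issues the command as "mpicc -g -O3 -fPIC ... -c c/libxc/src/foo.c".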
import sys
from subprocess import call
from glob import glob
args2change = {"-fno-strict-aliasing":"",
"-fmessage-length=0":"",
"-Wall":"",
"-std=c99":"",
"-fPIC":"",
"-g":"",
"-D_FORTIFY_SOURCE=2":"",
"-DNDEBUG":"",
"-UNDEBUG":"",
"-pthread":"",
"-shared":"",
"-Xlinker":"",
"-export-dynamic":"",
"-Wstrict-prototypes":"",
"-dynamic":"",
"-O3":"",
"-O2":"",
"-O1":""}
fragile_files = ["test.c"]
non_c99files = glob('c/libxc/src/*.c')
cmd = ""
opt = 1
for arg in sys.argv[1:]:
cmd += " "
t = arg.strip()
if t in fragile_files:
opt = 2
if t in non_c99files:
opt = 3
if t in args2change:
cmd += args2change[t]
else:
cmd += arg
flags_list = {1: "-g -O3 -std=c99 -fPIC",
2: "-g -O2 -std=c99 -fPIC",
3: "-g -O3 -fPIC",
}
flags = flags_list[opt]
cmd = "mpicc %s %s"%(flags, cmd)
print "\nexecmd: %s\n"%cmd
call(cmd, shell=True)
| gpl-3.0 | -5,352,558,975,580,986,000 | 22.909091 | 52 | 0.428137 | false |
PressLabs/silver | silver/models/documents/proforma.py | 1 | 5565 | # Copyright (c) 2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from django_fsm import transition
from django.core.exceptions import ValidationError
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from silver.models.billing_entities import Provider
from silver.models.documents.base import (
BillingDocumentBase, BillingDocumentManager, BillingDocumentQuerySet)
from silver.models.documents.entries import DocumentEntry
from silver.models.documents.invoice import Invoice
class ProformaManager(BillingDocumentManager):
def get_queryset(self):
queryset = super(BillingDocumentManager, self).get_queryset()
return queryset.filter(kind='proforma').prefetch_related('proforma_entries__product_code',
'proforma_entries__invoice')
class Proforma(BillingDocumentBase):
objects = ProformaManager.from_queryset(BillingDocumentQuerySet)()
class Meta:
proxy = True
def __init__(self, *args, **kwargs):
super(Proforma, self).__init__(*args, **kwargs)
provider_field = self._meta.get_field("provider")
provider_field.related_name = "proformas"
customer_field = self._meta.get_field("customer")
customer_field.related_name = "proformas"
@property
def transactions(self):
return self.proforma_transactions.all()
def clean(self):
super(Proforma, self).clean()
if not self.series:
if not hasattr(self, 'provider'):
# the clean method is called even if the clean_fields method
# raises exceptions, to we check if the provider was specified
pass
elif not self.provider.proforma_series:
err_msg = {'series': 'You must either specify the series or '
'set a default proforma_series for the '
'provider.'}
raise ValidationError(err_msg)
@transition(field='state', source=BillingDocumentBase.STATES.DRAFT,
target=BillingDocumentBase.STATES.ISSUED)
def issue(self, issue_date=None, due_date=None):
self.archived_provider = self.provider.get_proforma_archivable_field_values()
super(Proforma, self)._issue(issue_date, due_date)
@transition(field='state', source=BillingDocumentBase.STATES.ISSUED,
target=BillingDocumentBase.STATES.PAID)
def pay(self, paid_date=None):
super(Proforma, self)._pay(paid_date)
if not self.related_document:
self.related_document = self._new_invoice()
self.related_document.issue()
self.related_document.pay(paid_date=paid_date)
# if the proforma is paid, the invoice due_date should be issue_date
self.related_document.due_date = self.related_document.issue_date
self.related_document.save()
self.save()
def create_invoice(self):
if self.state != BillingDocumentBase.STATES.ISSUED:
raise ValueError("You can't create an invoice from a %s proforma, "
"only from an issued one" % self.state)
if self.related_document:
raise ValueError("This proforma already has an invoice { %s }"
% self.related_document)
self.related_document = self._new_invoice()
self.related_document.issue()
self.save()
return self.related_document
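    # Illustrative call sequence (not part of the original module); "pk" is a
    # hypothetical primary key:
    #
    #   proforma = Proforma.objects.get(pk=pk)
    #   proforma.issue()                      # DRAFT -> ISSUED transition
    #   invoice = proforma.create_invoice()   # returns the linked, issued Invoice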
def _new_invoice(self):
# Generate the new invoice based this proforma
invoice_fields = self.fields_for_automatic_invoice_generation
invoice_fields.update({'related_document': self})
invoice = Invoice.objects.create(**invoice_fields)
# For all the entries in the proforma => add the link to the new
# invoice
DocumentEntry.objects.filter(proforma=self).update(invoice=invoice)
return invoice
@property
def _starting_number(self):
return self.provider.proforma_starting_number
@property
def default_series(self):
try:
return self.provider.proforma_series
except Provider.DoesNotExist:
return ''
@property
def fields_for_automatic_invoice_generation(self):
fields = ['customer', 'provider', 'archived_customer',
'archived_provider', 'paid_date', 'cancel_date',
'sales_tax_percent', 'sales_tax_name', 'currency',
'transaction_currency', 'transaction_xe_rate',
'transaction_xe_date']
return {field: getattr(self, field, None) for field in fields}
@property
def entries(self):
return self.proforma_entries.all()
@receiver(pre_delete, sender=Proforma)
def delete_proforma_pdf_from_storage(sender, instance, **kwargs):
if instance.pdf:
# Delete the proforma's PDF
instance.pdf.pdf_file.delete(False)
| apache-2.0 | -6,906,302,606,249,241,000 | 36.601351 | 98 | 0.649057 | false |
reinaldomaslim/Singaboat_RobotX2016 | robotx_control/nodes/lis_cmdvel_boat.py | 3 | 9051 | #!/usr/bin/env python
# ren ye 20160517
# reference:
# http://answers.ros.org/question/29706/twist-message-example-and-cmd_vel/
# http://blog.csdn.net/heyijia0327/article/details/41823809
# changelog: use arduino + rosserial to drive the ESC
# import roslib
import rospy
import math
# import tf.transformations
from geometry_msgs.msg import Twist
from std_msgs.msg import UInt16MultiArray, MultiArrayDimension # for PWM signal
from dynamic_reconfigure.server import Server
from robotx_control.cfg import CalibrateMotorConfig
# from auv_ros_control.PCA9685PW import PCA9685PW
# from auv_ros_control.SC16IS750_I2C import SC16IS750
""" use ROS to send cmd_vel to AUV boat with
1) differential propeller drive
2) one propeller and one rudder
3) vector setup with 4 propellers
"""
class Differential(object):
    """ two identical propellers mounted at the bow,
speed controlled by PWM via Pi's I2C and PCA9685"""
_WHEEL_W = 0.3
def __init__(self, mode=1, i2c_address=0x40, pwm_channels=[0, 1]):
rospy.init_node('auv_driver', anonymous=True)
self.mode = rospy.get_param("~mode", mode)
if self.mode == 0: # i2c mode
self.pwm = PCA9685PW(address=i2c_address, debug=True)
self.pwm.initial_esc(pwm_channels)
self.pwm_channels = pwm_channels
else: # rosserial mode
self.pwm_pub = rospy.Publisher("pwm", UInt16MultiArray, queue_size=10)
    def constrain(self, x):
        """ constrain the cmd_vel value to max speed """
if x <= (-1 * self.max_speed):
return -1 * self.max_speed
elif x >= self.max_speed:
return self.max_speed
else:
return x
def dynamic_callback(self, config, level):
rospy.loginfo("""Reconfigure Request: \
{left_throttle_scale}, {right_throttle_scale}, \
{left_throttle_offset}, {right_throttle_offset}, \
{max_speed}, {yaw_rate}, {steering_direction}, \
{left_throttle_polarity}, {right_throttle_polarity}, \
""".format(**config))
self.left_scale_factor = config["left_throttle_scale"]
self.right_scale_factor = config["right_throttle_scale"]
self.left_offset = config["left_throttle_offset"]
self.right_offset = config["right_throttle_offset"]
self.max_speed = config["max_speed"]
self._D_T = config["yaw_rate"]
self.steering_direction = config["steering_direction"]
self.left_polarity = config["left_throttle_polarity"]
self.right_polarity = config["right_throttle_polarity"]
return config
def callback(self, msg):
""" callback the subscribe, get Twist data
and output to PWM signal """
# rospy.loginfo("Received a /cmd_vel message!")
# rospy.loginfo("L: [%f, %f, %f], A:[%f, %f, %f]"
# % (msg.linear.x, msg.linear.y, msg.linear.z,
# msg.angular.x, msg.angular.y, msg.angular.z))
cmd_twist_rotation = msg.angular.z # rotation
cmd_twist_x = msg.linear.x
cmd_twist_y = msg.linear.y
# Twist -> differential motor speed
propellerspeed = self.odom_to_speed(cmd_twist_x, cmd_twist_y, cmd_twist_rotation)
# correct the speed
corrected_speed = self.correct_speed(propellerspeed)
# rospy.loginfo(corrected_speed)
# send to the pwm chip
if self.mode == 0:
self.i2c_send(corrected_speed)
else:
self.arduino_send(corrected_speed)
def correct_speed(self, propellerspeed):
""" correct by scaling, offset and constrain """
# correct polarity
if self.left_polarity:
corrected_left_speed = propellerspeed[0] * \
self.left_scale_factor + self.left_offset
else:
corrected_left_speed = -1 * propellerspeed[0] * \
self.left_scale_factor + self.left_offset
if self.right_polarity:
corrected_right_speed = propellerspeed[1] * \
self.right_scale_factor + self.right_offset
else:
corrected_right_speed = -1 * propellerspeed[1] * \
self.right_scale_factor + self.right_offset
# constrain
return (self.constrain(corrected_left_speed),
self.constrain(corrected_right_speed))
def i2c_send(self, speed):
""" output differential signals to pca9685pw
with max speed and zero speed, check PCA9685 for details """
pwm = self.speed_to_pwm(speed)
for i in range(len(speed)):
print pwm[i] / 1000.0
self.pwm.setServoPulse(self.pwm_channels[i], pwm[i] / 1000.0)
def arduino_send(self, speed):
pwm_msg = UInt16MultiArray()
# pwm_msg.layout.dim = MultiArrayDimension()
# pwm_msg.layout.dim.label = "pwm_value"
# pwm_msg.layout.dim.size = len(self.pwm_channels)
pwm_msg.data = self.speed_to_pwm(speed)
# rospy.loginfo(pwm_msg.data)
self.pwm_pub.publish(pwm_msg)
def speed_to_pwm(self, speed):
# by experiment 20161130 nanyang lake
min_pwm = 1200
neutral_pwm = 1500
max_pwm = 1750
# pwm = k * speed + b
k = (max_pwm - min_pwm) / (self.max_speed + self.max_speed)
b = neutral_pwm
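        # Worked example (assuming max_speed was set to 2.0 via dynamic
        # reconfigure): k = (1750 - 1200) / 4.0 = 137.5, so a speed of 1.0 m/s
        # maps to int(137.5 * 1.0 + 1500) = 1637 us and -2.0 m/s maps to 1225 us.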
return [int(k * s + b) for s in speed]
def odom_to_speed(self, cmd_twist_x=0, cmd_twist_y=0, cmd_twist_rotation=0):
cent_speed = cmd_twist_x # center speed (m/s)
# differential speed
yawrate2 = self.yawrate_to_speed(cmd_twist_rotation)
if self.steering_direction:
Lwheelspeed = cent_speed + yawrate2 / 2
Rwheelspeed = cent_speed - yawrate2 / 2
else:
Lwheelspeed = cent_speed - yawrate2 / 2
Rwheelspeed = cent_speed + yawrate2 / 2
return Lwheelspeed, Rwheelspeed
def yawrate_to_speed(self, yawrate):
""" yawrate=dt*(RWs-LWs)/d """
return yawrate * self._WHEEL_W / self._D_T
def listener(self):
rospy.Subscriber("/cmd_vel", Twist, self.callback) # /cmd_vel
self.srv = Server(CalibrateMotorConfig, self.dynamic_callback)
rospy.spin()
class Vector4(Differential):
    """4 thrusters in a vectored layout: the front two take outer 45 deg as +ve,
    the rear two take inner 45 deg as +ve:
/(PWM1) \(PWM2)
\(PWM3) /(PWM4)
"""
def __init__(self, mode=0, i2c_address=0x40, pwm_channels=[0, 1, 2, 3]):
        super(Vector4, self).__init__(mode, i2c_address, pwm_channels)  # super() must name this class, not the parent
def odom_to_speed(self, cmd_twist_x=0, cmd_twist_y=0,
cmd_twist_rotation=0):
twist_mag = math.sqrt(cmd_twist_x ** 2 + cmd_twist_y ** 2)
        twist_ang = math.acos(cmd_twist_x / twist_mag)  # acos: recover the heading angle from its cosine
twist_14 = twist_ang / math.cos(twist_ang - math.pi / 4)
twist_23 = twist_ang / math.sin(twist_ang - math.pi / 4)
# differential speed
yawrate2 = self.yawrate_to_speed(cmd_twist_rotation)
if self.steering_direction:
twist_1 = twist_14 - yawrate2 / 4
twist_4 = twist_14 + yawrate2 / 4
twist_2 = twist_23 - yawrate2 / 4
twist_3 = twist_23 + yawrate2 / 4
else:
twist_1 = twist_14 + yawrate2 / 4
twist_4 = twist_14 - yawrate2 / 4
twist_2 = twist_23 + yawrate2 / 4
twist_3 = twist_23 - yawrate2 / 4
return twist_1, twist_2, twist_3, twist_4
def correct_speed(self, propellerspeed):
""" correct by scaling, offset and constrain """
# correct polarity
if self.left_polarity:
corrected_twist_1 = propellerspeed[0] * \
self.left_scale_factor + self.left_offset
corrected_twist_3 = propellerspeed[2] * \
self.left_scale_factor + self.left_offset
else:
corrected_twist_1 = -1 * propellerspeed[0] * \
self.left_scale_factor + self.left_offset
corrected_twist_3 = -1 * propellerspeed[2] * \
self.left_scale_factor + self.left_offset
if self.right_polarity:
corrected_twist_2 = propellerspeed[1] * \
self.right_scale_factor + self.right_offset
corrected_twist_4 = propellerspeed[3] * \
self.right_scale_factor + self.right_offset
else:
corrected_twist_2 = -1 * propellerspeed[1] * \
self.right_scale_factor + self.right_offset
corrected_twist_4 = -1 * propellerspeed[3] * \
self.right_scale_factor + self.right_offset
# constrain
return (self.constrain(corrected_twist_1),
self.constrain(corrected_twist_2),
self.constrain(corrected_twist_3),
self.constrain(corrected_twist_4))
if __name__ == '__main__':
try:
auv_control = Differential(mode=1)
# auv_control = Vector4()
auv_control.listener()
except rospy.ROSInterruptException:
pass
| gpl-3.0 | -7,298,233,612,400,898,000 | 36.400826 | 89 | 0.59463 | false |
piffey/ansible | lib/ansible/modules/network/f5/bigip_gtm_wide_ip.py | 27 | 21011 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_gtm_wide_ip
short_description: Manages F5 BIG-IP GTM wide ip
description:
- Manages F5 BIG-IP GTM wide ip.
version_added: "2.0"
options:
pool_lb_method:
description:
- Specifies the load balancing method used to select a pool in this wide
IP. This setting is relevant only when multiple pools are configured
for a wide IP.
required: True
aliases: ['lb_method']
choices:
- round-robin
- ratio
- topology
- global-availability
version_added: 2.5
name:
description:
- Wide IP name. This name must be formatted as a fully qualified
domain name (FQDN). You can also use the alias C(wide_ip) but this
is deprecated and will be removed in a future Ansible version.
required: True
aliases:
- wide_ip
type:
description:
- Specifies the type of wide IP. GTM wide IPs need to be keyed by query
type in addition to name, since pool members need different attributes
depending on the response RDATA they are meant to supply. This value
is required if you are using BIG-IP versions >= 12.0.0.
choices:
- a
- aaaa
- cname
- mx
- naptr
- srv
version_added: 2.4
state:
description:
- When C(present) or C(enabled), ensures that the Wide IP exists and
is enabled.
- When C(absent), ensures that the Wide IP has been removed.
- When C(disabled), ensures that the Wide IP exists and is disabled.
default: present
choices:
- present
- absent
- disabled
- enabled
version_added: 2.4
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
pools:
description:
- The pools that you want associated with the Wide IP.
- If C(ratio) is not provided when creating a new Wide IP, it will default
to 1.
suboptions:
name:
description:
- The name of the pool to include
required: true
ratio:
description:
- Ratio for the pool.
- The system uses this number with the Ratio load balancing method.
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Set lb method
bigip_gtm_wide_ip:
server: lb.mydomain.com
user: admin
password: secret
lb_method: round-robin
name: my-wide-ip.example.com
delegate_to: localhost
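
# Hypothetical second task (not part of the original module docs) showing the
# documented "type" and "pools" options; the pool name is an assumption.
- name: Add a ratio-weighted pool to the wide IP
  bigip_gtm_wide_ip:
    server: lb.mydomain.com
    user: admin
    password: secret
    pool_lb_method: ratio
    name: my-wide-ip.example.com
    type: a
    pools:
      - name: pool1
        ratio: 100
  delegate_to: localhost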
'''
RETURN = r'''
lb_method:
description: The new load balancing method used by the wide IP.
returned: changed
type: string
sample: topology
state:
description: The new state of the wide IP.
returned: changed
type: string
sample: disabled
'''
import re
from ansible.module_utils.six import iteritems
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'poolLbMode': 'pool_lb_method'
}
updatables = ['pool_lb_method', 'state', 'pools']
returnables = ['name', 'pool_lb_method', 'state', 'pools']
api_attributes = ['poolLbMode', 'enabled', 'disabled', 'pools']
def _fqdn_name(self, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(self.partition, value)
return value
class ApiParameters(Parameters):
@property
def disabled(self):
if self._values['disabled'] is True:
return True
return False
@property
def enabled(self):
if self._values['enabled'] is True:
return True
return False
@property
def pools(self):
result = []
if self._values['pools'] is None:
return None
pools = sorted(self._values['pools'], key=lambda x: x['order'])
for item in pools:
pool = dict()
pool.update(item)
name = '/{0}/{1}'.format(item['partition'], item['name'])
del pool['nameReference']
del pool['order']
del pool['name']
del pool['partition']
pool['name'] = name
result.append(pool)
return result
class ModuleParameters(Parameters):
@property
def pool_lb_method(self):
deprecated = [
'return_to_dns', 'null', 'static_persist', 'vs_capacity',
'least_conn', 'lowest_rtt', 'lowest_hops', 'packet_rate', 'cpu',
'hit_ratio', 'qos', 'bps', 'drop_packet', 'explicit_ip',
'connection_rate', 'vs_score'
]
if self._values['lb_method'] is None:
return None
lb_method = str(self._values['lb_method'])
if lb_method in deprecated:
raise F5ModuleError(
"The provided lb_method is not supported"
)
elif lb_method == 'global_availability':
if self._values['__warnings'] is None:
self._values['__warnings'] = []
self._values['__warnings'].append(
dict(
msg='The provided lb_method is deprecated',
version='2.4'
)
)
lb_method = 'global-availability'
elif lb_method == 'round_robin':
if self._values['__warnings'] is None:
self._values['__warnings'] = []
self._values['__warnings'].append(
dict(
msg='The provided lb_method is deprecated',
version='2.4'
)
)
lb_method = 'round-robin'
return lb_method
@property
def type(self):
if self._values['type'] is None:
return None
return str(self._values['type'])
@property
def name(self):
if self._values['name'] is None:
return None
if not re.search(r'.*\..*\..*', self._values['name']):
raise F5ModuleError(
"The provided name must be a valid FQDN"
)
return self._values['name']
@property
def state(self):
if self._values['state'] == 'enabled':
return 'present'
return self._values['state']
@property
def enabled(self):
if self._values['state'] == 'disabled':
return False
elif self._values['state'] in ['present', 'enabled']:
return True
else:
return None
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
elif self._values['state'] in ['present', 'enabled']:
return False
else:
return None
@property
def pools(self):
result = []
if self._values['pools'] is None:
return None
for item in self._values['pools']:
pool = dict()
if 'ratio' in item:
pool['ratio'] = item['ratio']
pool['name'] = self._fqdn_name(item['name'])
result.append(pool)
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
change = getattr(self, returnable)
if isinstance(change, dict):
result.update(change)
else:
result[returnable] = change
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def pool_lb_method(self):
result = dict(
lb_method=self._values['pool_lb_method'],
pool_lb_method=self._values['pool_lb_method'],
)
return result
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
def to_tuple(self, items):
result = []
for x in items:
tmp = [(str(k), str(v)) for k, v in iteritems(x)]
result += tmp
return result
def _diff_complex_items(self, want, have):
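        # Flatten both pool lists into (key, value) string tuples and treat the
        # comparison as a subset check: if everything requested is already
        # present on the device, no change is reported.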
if want == [] and have is None:
return None
if want is None:
return None
w = self.to_tuple(want)
h = self.to_tuple(have)
if set(w).issubset(set(h)):
return None
else:
return want
@property
def state(self):
if self.want.state == 'disabled' and self.have.enabled:
return self.want.state
elif self.want.state in ['present', 'enabled'] and self.have.disabled:
return self.want.state
@property
def pools(self):
result = self._diff_complex_items(self.want.pools, self.have.pools)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
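        # BIG-IP 12.0.0 introduced typed wide IPs (a, aaaa, cname, ...); older
        # releases expose a single untyped wide IP collection, so pick the
        # manager implementation based on the device's TMOS version.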
if self.version_is_less_than_12():
manager = self.get_manager('untyped')
else:
manager = self.get_manager('typed')
return manager.exec_module()
def get_manager(self, type):
if type == 'typed':
return TypedManager(**self.kwargs)
elif type == 'untyped':
return UntypedManager(**self.kwargs)
def version_is_less_than_12(self):
version = self.client.api.tmos_version
if LooseVersion(version) < LooseVersion('12.0.0'):
return True
else:
return False
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state in ["present", "disabled"]:
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.want.lb_method is None:
raise F5ModuleError(
"The 'lb_method' option is required when state is 'present'"
)
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the Wide IP")
return True
class UntypedManager(BaseManager):
def exists(self):
return self.client.api.tm.gtm.wideips.wideip.exists(
name=self.want.name,
partition=self.want.partition
)
def update_on_device(self):
params = self.want.api_params()
        result = self.client.api.tm.gtm.wideips.wideip.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def read_current_from_device(self):
resource = self.client.api.tm.gtm.wideips.wideip.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return ApiParameters(params=result)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.gtm.wideips.wideip.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
result = self.client.api.tm.gtm.wideips.wideip.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
class TypedManager(BaseManager):
def __init__(self, *args, **kwargs):
super(TypedManager, self).__init__(**kwargs)
if self.want.type is None:
raise F5ModuleError(
"The 'type' option is required for BIG-IP instances "
"greater than or equal to 12.x"
)
type_map = dict(
a='a_s',
aaaa='aaaas',
cname='cnames',
mx='mxs',
naptr='naptrs',
srv='srvs'
)
self.collection = type_map[self.want.type]
def exists(self):
wideips = self.client.api.tm.gtm.wideips
collection = getattr(wideips, self.collection)
resource = getattr(collection, self.want.type)
result = resource.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.want.api_params()
wideips = self.client.api.tm.gtm.wideips
collection = getattr(wideips, self.collection)
resource = getattr(collection, self.want.type)
result = resource.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def read_current_from_device(self):
wideips = self.client.api.tm.gtm.wideips
collection = getattr(wideips, self.collection)
resource = getattr(collection, self.want.type)
result = resource.load(
name=self.want.name,
partition=self.want.partition
)
result = result.attrs
return ApiParameters(params=result)
def create_on_device(self):
params = self.want.api_params()
wideips = self.client.api.tm.gtm.wideips
collection = getattr(wideips, self.collection)
resource = getattr(collection, self.want.type)
resource.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
wideips = self.client.api.tm.gtm.wideips
collection = getattr(wideips, self.collection)
resource = getattr(collection, self.want.type)
result = resource.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
class ArgumentSpec(object):
def __init__(self):
deprecated = [
'return_to_dns', 'null', 'round_robin', 'static_persist',
'global_availability', 'vs_capacity', 'least_conn', 'lowest_rtt',
'lowest_hops', 'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score'
]
supported = [
'round-robin', 'topology', 'ratio', 'global-availability'
]
lb_method_choices = deprecated + supported
self.supports_check_mode = True
argument_spec = dict(
pool_lb_method=dict(
choices=lb_method_choices,
aliases=['lb_method']
),
name=dict(
required=True,
aliases=['wide_ip']
),
type=dict(
choices=[
'a', 'aaaa', 'cname', 'mx', 'naptr', 'srv'
]
),
state=dict(
default='present',
choices=['absent', 'present', 'enabled', 'disabled']
),
pools=dict(
type='list',
options=dict(
name=dict(required=True),
ratio=dict(type='int')
)
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as e:
cleanup_tokens(client)
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | 862,586,670,232,281,700 | 29.144907 | 91 | 0.567417 | false |
openvstorage/arakoon | src/client/python/ArakoonValidators.py | 4 | 2748 | """
Copyright (2010-2014) INCUBAID BVBA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ArakoonExceptions import ArakoonInvalidArguments
import ArakoonProtocol
import logging
from functools import wraps
class SignatureValidator :
def __init__ (self, *args ):
self.param_types = args
self.param_native_type_mapping = {
'int': int,
'string': str,
'bool': bool
}
def __call__ (self, f ):
@wraps(f)
def my_new_f ( *args, **kwargs ) :
new_args = list( args[1:] )
missing_args = f.func_code.co_varnames[len(args):]
for missing_arg in missing_args:
if( len(new_args) == len(self.param_types) ) :
break
if( kwargs.has_key(missing_arg) ) :
pos = f.func_code.co_varnames.index( missing_arg )
# if pos > len(new_args):
# new_args.append( None )
new_args.insert(pos, kwargs[missing_arg])
del kwargs[missing_arg]
if len( kwargs ) > 0:
raise ArakoonInvalidArguments( f.func_name, list(kwargs.iteritems()) )
i = 0
error_key_values = []
for (arg, arg_type) in zip(new_args, self.param_types) :
if not self.validate(arg, arg_type):
error_key_values.append( (f.func_code.co_varnames[i+1],new_args[i]) )
i += 1
if len(error_key_values) > 0 :
raise ArakoonInvalidArguments( f.func_name, error_key_values )
return f( args[0], *new_args )
return my_new_f
def validate(self,arg,arg_type):
if self.param_native_type_mapping.has_key( arg_type ):
return isinstance(arg,self.param_native_type_mapping[arg_type] )
elif arg_type == 'string_option' :
return isinstance( arg, str ) or arg is None
elif arg_type == 'sequence' :
return isinstance( arg, ArakoonProtocol.Sequence )
else:
raise RuntimeError( "Invalid argument type supplied: %s" % arg_type )
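# Illustrative usage sketch (not part of the original file): the decorator is
# intended to wrap client methods so argument types are checked before the
# call proceeds. The class and method names below are assumptions.
#
#   class ArakoonClient:
#       @SignatureValidator('string', 'string')
#       def set(self, key, value):
#           ...
#
#   client.set('foo', 'bar')   # passes validation
#   client.set('foo', 42)      # raises ArakoonInvalidArguments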
| apache-2.0 | -5,356,192,753,668,729,000 | 33.797468 | 89 | 0.562591 | false |
apple/swift-lldb | packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteSingleStep.py | 5 | 1375 | from __future__ import print_function
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteSingleStep(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
@debugserver_test
def test_single_step_only_steps_one_instruction_with_s_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.single_step_only_steps_one_instruction(
use_Hc_packet=True, step_instruction="s")
@skipIfWindows # No pty support to test any inferior std -i/e/o
@llgs_test
@expectedFailureAndroid(
bugnumber="llvm.org/pr24739",
archs=[
"arm",
"aarch64"])
@expectedFailureAll(
oslist=["linux"],
archs=[
"arm",
"aarch64"],
bugnumber="llvm.org/pr24739")
@skipIf(triple='^mips')
def test_single_step_only_steps_one_instruction_with_s_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.single_step_only_steps_one_instruction(
use_Hc_packet=True, step_instruction="s")
| apache-2.0 | 5,775,667,500,125,585,000 | 31.738095 | 106 | 0.653818 | false |
thirumoorthir/My_python_programs | app1_version.1.py | 1 | 1312 | #!/usr/bin/python
# This is a simple Python program that reads a balance from a file, updates it, and stores the result.
import decimal
def amount_hand():
print "The amount in your hand is %d" %balance
def write():
    # Open the balance file and overwrite it with the new balance.
    f = open('balance1','w')
    f.write('%d' %balance)
    f.close()
while True:
    choice = raw_input("What do you want to do?\n 1.Add an amount \n 2.Subtract an amount \n 3.Print the balance \n 4.Exit this program \n Enter your choice:")
f = open('balance1','r')
num_lines = sum(1 for line in open('balance1'))
    print "There are %d lines in the balance file" %num_lines
balance=f.read()
balance = int (balance)
if choice == '1':
amount = raw_input("Enter amount to add:")
descrb = raw_input("Enter description for that income:")
amount = int (amount)
balance = amount + balance
write()
amount_hand()
elif choice == '2':
amount = raw_input ( "Enter amount to subtract:")
        descrb1 = raw_input("Enter description for that expense:")
amount = float (amount)
balance = balance - amount
write()
amount_hand()
elif choice =='3':
amount_hand()
elif choice =='4':
break
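# Note (illustrative, not part of the original script): the program expects a
# file named 'balance1' in the working directory that already contains a
# single integer balance, e.g. seeded once with:
#
#   echo 100 > balance1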
| apache-2.0 | 1,360,912,161,278,549,000 | 35.444444 | 153 | 0.60061 | false |
OndrejIT/pyload | module/plugins/hoster/NowVideoSx.py | 8 | 1609 | # -*- coding: utf-8 -*-
import re
from ..internal.SimpleHoster import SimpleHoster
class NowVideoSx(SimpleHoster):
__name__ = "NowVideoSx"
__type__ = "hoster"
__version__ = "0.17"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?nowvideo\.[a-zA-Z]{2,}/(video/|mobile/(#/videos/|.+?id=))(?P<ID>\w+)'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool",
"Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """NowVideo.sx hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
URL_REPLACEMENTS = [
(__pattern__ + ".*",
r'http://www.nowvideo.sx/video/\g<ID>')]
NAME_PATTERN = r'<h4>(?P<N>.+?)<'
OFFLINE_PATTERN = r'>This file no longer exists'
LINK_FREE_PATTERN = r'<source src="(.+?)"'
LINK_PREMIUM_PATTERN = r'<div id="content_player" >\s*<a href="(.+?)"'
def setup(self):
self.resume_download = True
self.multiDL = True
def handle_free(self, pyfile):
self.data = self.load(
"http://www.nowvideo.sx/mobile/video.php",
get={
'id': self.info['pattern']['ID']})
m = re.search(self.LINK_FREE_PATTERN, self.data)
if m is not None:
self.link = m.group(1)
| gpl-3.0 | 3,576,412,993,236,050,400 | 32.520833 | 106 | 0.530764 | false |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/twisted/test/test_internet.py | 1 | 50248 | # Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.internet import reactor, protocol, error, abstract, defer
from twisted.internet import interfaces, base
from twisted.internet.tcp import Connection
from twisted.test.time_helpers import Clock
try:
from twisted.internet import ssl
except ImportError:
ssl = None
if ssl and not ssl.supported:
ssl = None
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.python import util, runtime
import os
import sys
import time
import socket
class ThreePhaseEventTests(unittest.TestCase):
"""
Tests for the private implementation helpers for system event triggers.
"""
def setUp(self):
"""
Create a trigger, an argument, and an event to be used by tests.
"""
self.trigger = lambda x: None
self.arg = object()
self.event = base._ThreePhaseEvent()
def test_addInvalidPhase(self):
"""
L{_ThreePhaseEvent.addTrigger} should raise L{KeyError} when called
with an invalid phase.
"""
self.assertRaises(
KeyError,
self.event.addTrigger, 'xxx', self.trigger, self.arg)
def test_addBeforeTrigger(self):
"""
L{_ThreePhaseEvent.addTrigger} should accept C{'before'} as a phase, a
callable, and some arguments and add the callable with the arguments to
the before list.
"""
self.event.addTrigger('before', self.trigger, self.arg)
self.assertEqual(
self.event.before,
[(self.trigger, (self.arg,), {})])
def test_addDuringTrigger(self):
"""
L{_ThreePhaseEvent.addTrigger} should accept C{'during'} as a phase, a
callable, and some arguments and add the callable with the arguments to
the during list.
"""
self.event.addTrigger('during', self.trigger, self.arg)
self.assertEqual(
self.event.during,
[(self.trigger, (self.arg,), {})])
def test_addAfterTrigger(self):
"""
L{_ThreePhaseEvent.addTrigger} should accept C{'after'} as a phase, a
callable, and some arguments and add the callable with the arguments to
the after list.
"""
self.event.addTrigger('after', self.trigger, self.arg)
self.assertEqual(
self.event.after,
[(self.trigger, (self.arg,), {})])
def test_removeTrigger(self):
"""
L{_ThreePhaseEvent.removeTrigger} should accept an opaque object
previously returned by L{_ThreePhaseEvent.addTrigger} and remove the
associated trigger.
"""
handle = self.event.addTrigger('before', self.trigger, self.arg)
self.event.removeTrigger(handle)
self.assertEqual(self.event.before, [])
def test_removeNonexistentTrigger(self):
"""
L{_ThreePhaseEvent.removeTrigger} should raise L{ValueError} when given
an object not previously returned by L{_ThreePhaseEvent.addTrigger}.
"""
self.assertRaises(ValueError, self.event.removeTrigger, object())
def test_removeRemovedTrigger(self):
"""
L{_ThreePhaseEvent.removeTrigger} should raise L{ValueError} the second
time it is called with an object returned by
L{_ThreePhaseEvent.addTrigger}.
"""
handle = self.event.addTrigger('before', self.trigger, self.arg)
self.event.removeTrigger(handle)
self.assertRaises(ValueError, self.event.removeTrigger, handle)
def test_removeAlmostValidTrigger(self):
"""
L{_ThreePhaseEvent.removeTrigger} should raise L{ValueError} if it is
given a trigger handle which resembles a valid trigger handle aside
from its phase being incorrect.
"""
self.assertRaises(
KeyError,
self.event.removeTrigger, ('xxx', self.trigger, (self.arg,), {}))
def test_fireEvent(self):
"""
L{_ThreePhaseEvent.fireEvent} should call I{before}, I{during}, and
I{after} phase triggers in that order.
"""
events = []
self.event.addTrigger('after', events.append, ('first', 'after'))
self.event.addTrigger('during', events.append, ('first', 'during'))
self.event.addTrigger('before', events.append, ('first', 'before'))
self.event.addTrigger('before', events.append, ('second', 'before'))
self.event.addTrigger('during', events.append, ('second', 'during'))
self.event.addTrigger('after', events.append, ('second', 'after'))
self.assertEqual(events, [])
self.event.fireEvent()
self.assertEqual(events,
[('first', 'before'), ('second', 'before'),
('first', 'during'), ('second', 'during'),
('first', 'after'), ('second', 'after')])
def test_asynchronousBefore(self):
"""
L{_ThreePhaseEvent.fireEvent} should wait for any L{Deferred} returned
by a I{before} phase trigger before proceeding to I{during} events.
"""
events = []
beforeResult = Deferred()
self.event.addTrigger('before', lambda: beforeResult)
self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.assertEqual(events, [])
self.event.fireEvent()
self.assertEqual(events, [])
beforeResult.callback(None)
self.assertEqual(events, ['during', 'after'])
def test_beforeTriggerException(self):
"""
If a before-phase trigger raises a synchronous exception, it should be
logged and the remaining triggers should be run.
"""
events = []
class DummyException(Exception):
pass
def raisingTrigger():
raise DummyException()
self.event.addTrigger('before', raisingTrigger)
self.event.addTrigger('before', events.append, 'before')
self.event.addTrigger('during', events.append, 'during')
self.event.fireEvent()
self.assertEqual(events, ['before', 'during'])
errors = self.flushLoggedErrors(DummyException)
self.assertEqual(len(errors), 1)
def test_duringTriggerException(self):
"""
If a during-phase trigger raises a synchronous exception, it should be
logged and the remaining triggers should be run.
"""
events = []
class DummyException(Exception):
pass
def raisingTrigger():
raise DummyException()
self.event.addTrigger('during', raisingTrigger)
self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.event.fireEvent()
self.assertEqual(events, ['during', 'after'])
errors = self.flushLoggedErrors(DummyException)
self.assertEqual(len(errors), 1)
def test_synchronousRemoveAlreadyExecutedBefore(self):
"""
If a before-phase trigger tries to remove another before-phase trigger
which has already run, a warning should be emitted.
"""
events = []
def removeTrigger():
self.event.removeTrigger(beforeHandle)
beforeHandle = self.event.addTrigger('before', events.append, ('first', 'before'))
self.event.addTrigger('before', removeTrigger)
self.event.addTrigger('before', events.append, ('second', 'before'))
self.assertWarns(
DeprecationWarning,
"Removing already-fired system event triggers will raise an "
"exception in a future version of Twisted.",
__file__,
self.event.fireEvent)
self.assertEqual(events, [('first', 'before'), ('second', 'before')])
def test_synchronousRemovePendingBefore(self):
"""
If a before-phase trigger removes another before-phase trigger which
has not yet run, the removed trigger should not be run.
"""
events = []
self.event.addTrigger(
'before', lambda: self.event.removeTrigger(beforeHandle))
beforeHandle = self.event.addTrigger(
'before', events.append, ('first', 'before'))
self.event.addTrigger('before', events.append, ('second', 'before'))
self.event.fireEvent()
self.assertEqual(events, [('second', 'before')])
def test_synchronousBeforeRemovesDuring(self):
"""
If a before-phase trigger removes a during-phase trigger, the
during-phase trigger should not be run.
"""
events = []
self.event.addTrigger(
'before', lambda: self.event.removeTrigger(duringHandle))
duringHandle = self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.event.fireEvent()
self.assertEqual(events, ['after'])
def test_asynchronousBeforeRemovesDuring(self):
"""
If a before-phase trigger returns a L{Deferred} and later removes a
during-phase trigger before the L{Deferred} fires, the during-phase
trigger should not be run.
"""
events = []
beforeResult = Deferred()
self.event.addTrigger('before', lambda: beforeResult)
duringHandle = self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.event.fireEvent()
self.event.removeTrigger(duringHandle)
beforeResult.callback(None)
self.assertEqual(events, ['after'])
def test_synchronousBeforeRemovesConspicuouslySimilarDuring(self):
"""
If a before-phase trigger removes a during-phase trigger which is
identical to an already-executed before-phase trigger aside from their
phases, no warning should be emitted and the during-phase trigger
should not be run.
"""
events = []
def trigger():
events.append('trigger')
self.event.addTrigger('before', trigger)
self.event.addTrigger(
'before', lambda: self.event.removeTrigger(duringTrigger))
duringTrigger = self.event.addTrigger('during', trigger)
self.event.fireEvent()
self.assertEqual(events, ['trigger'])
def test_synchronousRemovePendingDuring(self):
"""
If a during-phase trigger removes another during-phase trigger which
has not yet run, the removed trigger should not be run.
"""
events = []
self.event.addTrigger(
'during', lambda: self.event.removeTrigger(duringHandle))
duringHandle = self.event.addTrigger(
'during', events.append, ('first', 'during'))
self.event.addTrigger(
'during', events.append, ('second', 'during'))
self.event.fireEvent()
self.assertEqual(events, [('second', 'during')])
def test_triggersRunOnce(self):
"""
A trigger should only be called on the first call to
L{_ThreePhaseEvent.fireEvent}.
"""
events = []
self.event.addTrigger('before', events.append, 'before')
self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.event.fireEvent()
self.event.fireEvent()
self.assertEqual(events, ['before', 'during', 'after'])
def test_finishedBeforeTriggersCleared(self):
"""
The temporary list L{_ThreePhaseEvent.finishedBefore} should be emptied
and the state reset to C{'BASE'} before the first during-phase trigger
executes.
"""
events = []
def duringTrigger():
events.append('during')
self.assertEqual(self.event.finishedBefore, [])
self.assertEqual(self.event.state, 'BASE')
self.event.addTrigger('before', events.append, 'before')
self.event.addTrigger('during', duringTrigger)
self.event.fireEvent()
self.assertEqual(events, ['before', 'during'])
class SystemEventTestCase(unittest.TestCase):
"""
Tests for the reactor's implementation of the C{fireSystemEvent},
C{addSystemEventTrigger}, and C{removeSystemEventTrigger} methods of the
L{IReactorCore} interface.
@ivar triggers: A list of the handles to triggers which have been added to
the reactor.
"""
def setUp(self):
"""
Create an empty list in which to store trigger handles.
"""
self.triggers = []
def tearDown(self):
"""
Remove all remaining triggers from the reactor.
"""
while self.triggers:
trigger = self.triggers.pop()
try:
reactor.removeSystemEventTrigger(trigger)
except (ValueError, KeyError):
pass
def addTrigger(self, event, phase, func):
"""
Add a trigger to the reactor and remember it in C{self.triggers}.
"""
t = reactor.addSystemEventTrigger(event, phase, func)
self.triggers.append(t)
return t
def removeTrigger(self, trigger):
"""
Remove a trigger by its handle from the reactor and from
C{self.triggers}.
"""
reactor.removeSystemEventTrigger(trigger)
self.triggers.remove(trigger)
def _addSystemEventTriggerTest(self, phase):
eventType = 'test'
events = []
def trigger():
events.append(None)
self.addTrigger(phase, eventType, trigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, [None])
def test_beforePhase(self):
"""
L{IReactorCore.addSystemEventTrigger} should accept the C{'before'}
phase and not call the given object until the right event is fired.
"""
self._addSystemEventTriggerTest('before')
def test_duringPhase(self):
"""
L{IReactorCore.addSystemEventTrigger} should accept the C{'during'}
phase and not call the given object until the right event is fired.
"""
self._addSystemEventTriggerTest('during')
def test_afterPhase(self):
"""
L{IReactorCore.addSystemEventTrigger} should accept the C{'after'}
phase and not call the given object until the right event is fired.
"""
self._addSystemEventTriggerTest('after')
def test_unknownPhase(self):
"""
L{IReactorCore.addSystemEventTrigger} should reject phases other than
C{'before'}, C{'during'}, or C{'after'}.
"""
eventType = 'test'
self.assertRaises(
KeyError, self.addTrigger, 'xxx', eventType, lambda: None)
def test_beforePreceedsDuring(self):
"""
L{IReactorCore.addSystemEventTrigger} should call triggers added to the
C{'before'} phase before it calls triggers added to the C{'during'}
phase.
"""
eventType = 'test'
events = []
def beforeTrigger():
events.append('before')
def duringTrigger():
events.append('during')
self.addTrigger('before', eventType, beforeTrigger)
self.addTrigger('during', eventType, duringTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, ['before', 'during'])
def test_duringPreceedsAfter(self):
"""
L{IReactorCore.addSystemEventTrigger} should call triggers added to the
C{'during'} phase before it calls triggers added to the C{'after'}
phase.
"""
eventType = 'test'
events = []
def duringTrigger():
events.append('during')
def afterTrigger():
events.append('after')
self.addTrigger('during', eventType, duringTrigger)
self.addTrigger('after', eventType, afterTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, ['during', 'after'])
def test_beforeReturnsDeferred(self):
"""
If a trigger added to the C{'before'} phase of an event returns a
L{Deferred}, the C{'during'} phase should be delayed until it is called
back.
"""
triggerDeferred = Deferred()
eventType = 'test'
events = []
def beforeTrigger():
return triggerDeferred
def duringTrigger():
events.append('during')
self.addTrigger('before', eventType, beforeTrigger)
self.addTrigger('during', eventType, duringTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, [])
triggerDeferred.callback(None)
self.assertEqual(events, ['during'])
def test_multipleBeforeReturnDeferred(self):
"""
If more than one trigger added to the C{'before'} phase of an event
return L{Deferred}s, the C{'during'} phase should be delayed until they
are all called back.
"""
firstDeferred = Deferred()
secondDeferred = Deferred()
eventType = 'test'
events = []
def firstBeforeTrigger():
return firstDeferred
def secondBeforeTrigger():
return secondDeferred
def duringTrigger():
events.append('during')
self.addTrigger('before', eventType, firstBeforeTrigger)
self.addTrigger('before', eventType, secondBeforeTrigger)
self.addTrigger('during', eventType, duringTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, [])
firstDeferred.callback(None)
self.assertEqual(events, [])
secondDeferred.callback(None)
self.assertEqual(events, ['during'])
def test_subsequentBeforeTriggerFiresPriorBeforeDeferred(self):
"""
If a trigger added to the C{'before'} phase of an event calls back a
L{Deferred} returned by an earlier trigger in the C{'before'} phase of
the same event, the remaining C{'before'} triggers for that event
should be run and any further L{Deferred}s waited on before proceeding
to the C{'during'} events.
"""
eventType = 'test'
events = []
firstDeferred = Deferred()
secondDeferred = Deferred()
def firstBeforeTrigger():
return firstDeferred
def secondBeforeTrigger():
firstDeferred.callback(None)
def thirdBeforeTrigger():
events.append('before')
return secondDeferred
def duringTrigger():
events.append('during')
self.addTrigger('before', eventType, firstBeforeTrigger)
self.addTrigger('before', eventType, secondBeforeTrigger)
self.addTrigger('before', eventType, thirdBeforeTrigger)
self.addTrigger('during', eventType, duringTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, ['before'])
secondDeferred.callback(None)
self.assertEqual(events, ['before', 'during'])
def test_removeSystemEventTrigger(self):
"""
A trigger removed with L{IReactorCore.removeSystemEventTrigger} should
not be called when the event fires.
"""
eventType = 'test'
events = []
def firstBeforeTrigger():
events.append('first')
def secondBeforeTrigger():
events.append('second')
self.addTrigger('before', eventType, firstBeforeTrigger)
self.removeTrigger(
self.addTrigger('before', eventType, secondBeforeTrigger))
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, ['first'])
def test_removeNonExistentSystemEventTrigger(self):
"""
Passing an object to L{IReactorCore.removeSystemEventTrigger} which was
not returned by a previous call to
L{IReactorCore.addSystemEventTrigger} or which has already been passed
to C{removeSystemEventTrigger} should result in L{TypeError},
L{KeyError}, or L{ValueError} being raised.
"""
b = self.addTrigger('during', 'test', lambda: None)
self.removeTrigger(b)
self.assertRaises(
TypeError, reactor.removeSystemEventTrigger, None)
self.assertRaises(
ValueError, reactor.removeSystemEventTrigger, b)
self.assertRaises(
KeyError,
reactor.removeSystemEventTrigger,
(b[0], ('xxx',) + b[1][1:]))
def test_interactionBetweenDifferentEvents(self):
"""
L{IReactorCore.fireSystemEvent} should behave the same way for a
particular system event regardless of whether Deferreds are being
waited on for a different system event.
"""
events = []
firstEvent = 'first-event'
firstDeferred = Deferred()
def beforeFirstEvent():
events.append(('before', 'first'))
return firstDeferred
def afterFirstEvent():
events.append(('after', 'first'))
secondEvent = 'second-event'
secondDeferred = Deferred()
def beforeSecondEvent():
events.append(('before', 'second'))
return secondDeferred
def afterSecondEvent():
events.append(('after', 'second'))
self.addTrigger('before', firstEvent, beforeFirstEvent)
self.addTrigger('after', firstEvent, afterFirstEvent)
self.addTrigger('before', secondEvent, beforeSecondEvent)
self.addTrigger('after', secondEvent, afterSecondEvent)
self.assertEqual(events, [])
# After this, firstEvent should be stuck before 'during' waiting for
# firstDeferred.
reactor.fireSystemEvent(firstEvent)
self.assertEqual(events, [('before', 'first')])
# After this, secondEvent should be stuck before 'during' waiting for
# secondDeferred.
reactor.fireSystemEvent(secondEvent)
self.assertEqual(events, [('before', 'first'), ('before', 'second')])
# After this, firstEvent should have finished completely, but
# secondEvent should be at the same place.
firstDeferred.callback(None)
self.assertEqual(events, [('before', 'first'), ('before', 'second'),
('after', 'first')])
# After this, secondEvent should have finished completely.
secondDeferred.callback(None)
self.assertEqual(events, [('before', 'first'), ('before', 'second'),
('after', 'first'), ('after', 'second')])
class TimeTestCase(unittest.TestCase):
"""
Tests for the IReactorTime part of the reactor.
"""
def test_seconds(self):
"""
L{twisted.internet.reactor.seconds} should return something
like a number.
1. This test specifically does not assert any relation to the
"system time" as returned by L{time.time} or
L{twisted.python.runtime.seconds}, because at some point we
may find a better option for scheduling calls than
wallclock-time.
2. This test *also* does not assert anything about the type of
the result, because operations may not return ints or
floats: For example, datetime-datetime == timedelta(0).
"""
now = reactor.seconds()
self.assertEquals(now-now+now, now)
def test_callLaterUsesReactorSecondsInDelayedCall(self):
"""
L{reactor.callLater} should use the reactor's seconds factory
to produce the time at which the DelayedCall will be called.
"""
oseconds = reactor.seconds
reactor.seconds = lambda: 100
try:
call = reactor.callLater(5, lambda: None)
self.assertEquals(call.getTime(), 105)
finally:
reactor.seconds = oseconds
def test_callLaterUsesReactorSecondsAsDelayedCallSecondsFactory(self):
"""
L{reactor.callLater} should propagate its own seconds factory
to the DelayedCall to use as its own seconds factory.
"""
oseconds = reactor.seconds
reactor.seconds = lambda: 100
try:
call = reactor.callLater(5, lambda: None)
self.assertEquals(call.seconds(), 100)
finally:
reactor.seconds = oseconds
def test_callLater(self):
"""
Test that a DelayedCall really calls the function it is
supposed to call.
"""
d = Deferred()
reactor.callLater(0, d.callback, None)
d.addCallback(self.assertEqual, None)
return d
def test_cancelDelayedCall(self):
"""
Test that when a DelayedCall is cancelled it does not run.
"""
called = []
def function():
called.append(None)
call = reactor.callLater(0, function)
call.cancel()
# Schedule a call in two "iterations" to check to make sure that the
# above call never ran.
d = Deferred()
def check():
try:
self.assertEqual(called, [])
except:
d.errback()
else:
d.callback(None)
reactor.callLater(0, reactor.callLater, 0, check)
return d
def test_cancelCancelledDelayedCall(self):
"""
Test that cancelling a DelayedCall which has already been cancelled
raises the appropriate exception.
"""
call = reactor.callLater(0, lambda: None)
call.cancel()
self.assertRaises(error.AlreadyCancelled, call.cancel)
def test_cancelCalledDelayedCallSynchronous(self):
"""
Test that cancelling a DelayedCall in the DelayedCall's function as
that function is being invoked by the DelayedCall raises the
appropriate exception.
"""
d = Deferred()
def later():
try:
self.assertRaises(error.AlreadyCalled, call.cancel)
except:
d.errback()
else:
d.callback(None)
call = reactor.callLater(0, later)
return d
def test_cancelCalledDelayedCallAsynchronous(self):
"""
Test that cancelling a DelayedCall after it has run its function
raises the appropriate exception.
"""
d = Deferred()
def check():
try:
self.assertRaises(error.AlreadyCalled, call.cancel)
except:
d.errback()
else:
d.callback(None)
def later():
reactor.callLater(0, check)
call = reactor.callLater(0, later)
return d
def testCallLaterDelayAndReset(self):
"""
Test that the reactor handles DelayedCalls which have been
reset or delayed.
"""
clock = Clock()
clock.install()
try:
callbackTimes = [None, None]
def resetCallback():
callbackTimes[0] = clock()
def delayCallback():
callbackTimes[1] = clock()
ireset = reactor.callLater(2, resetCallback)
idelay = reactor.callLater(3, delayCallback)
clock.pump(reactor, [0, 1])
self.assertIdentical(callbackTimes[0], None)
self.assertIdentical(callbackTimes[1], None)
ireset.reset(2) # (now)1 + 2 = 3
idelay.delay(3) # (orig)3 + 3 = 6
clock.pump(reactor, [0, 1])
self.assertIdentical(callbackTimes[0], None)
self.assertIdentical(callbackTimes[1], None)
clock.pump(reactor, [0, 1])
self.assertEquals(callbackTimes[0], 3)
self.assertEquals(callbackTimes[1], None)
clock.pump(reactor, [0, 3])
self.assertEquals(callbackTimes[1], 6)
finally:
clock.uninstall()
def testCallLaterTime(self):
d = reactor.callLater(10, lambda: None)
try:
self.failUnless(d.getTime() - (time.time() + 10) < 1)
finally:
d.cancel()
def testCallInNextIteration(self):
calls = []
def f1():
calls.append('f1')
reactor.callLater(0.0, f2)
def f2():
calls.append('f2')
reactor.callLater(0.0, f3)
def f3():
calls.append('f3')
reactor.callLater(0, f1)
self.assertEquals(calls, [])
reactor.iterate()
self.assertEquals(calls, ['f1'])
reactor.iterate()
self.assertEquals(calls, ['f1', 'f2'])
reactor.iterate()
self.assertEquals(calls, ['f1', 'f2', 'f3'])
def testCallLaterOrder(self):
l = []
l2 = []
def f(x):
l.append(x)
def f2(x):
l2.append(x)
def done():
self.assertEquals(l, range(20))
def done2():
self.assertEquals(l2, range(10))
for n in range(10):
reactor.callLater(0, f, n)
for n in range(10):
reactor.callLater(0, f, n+10)
reactor.callLater(0.1, f2, n)
reactor.callLater(0, done)
reactor.callLater(0.1, done2)
d = Deferred()
reactor.callLater(0.2, d.callback, None)
return d
testCallLaterOrder.todo = "See bug 1396"
testCallLaterOrder.skip = "Trial bug, todo doesn't work! See bug 1397"
def testCallLaterOrder2(self):
# This time destroy the clock resolution so that it fails reliably
# even on systems that don't have a crappy clock resolution.
def seconds():
return int(time.time())
base_original = base.seconds
runtime_original = runtime.seconds
base.seconds = seconds
runtime.seconds = seconds
def cleanup(x):
runtime.seconds = runtime_original
base.seconds = base_original
return x
return maybeDeferred(self.testCallLaterOrder).addBoth(cleanup)
testCallLaterOrder2.todo = "See bug 1396"
testCallLaterOrder2.skip = "Trial bug, todo doesn't work! See bug 1397"
def testDelayedCallStringification(self):
# Mostly just make sure str() isn't going to raise anything for
# DelayedCalls within reason.
dc = reactor.callLater(0, lambda x, y: None, 'x', y=10)
str(dc)
dc.reset(5)
str(dc)
dc.cancel()
str(dc)
dc = reactor.callLater(0, lambda: None, x=[({'hello': u'world'}, 10j), reactor], *range(10))
str(dc)
dc.cancel()
str(dc)
def calledBack(ignored):
str(dc)
d = Deferred().addCallback(calledBack)
dc = reactor.callLater(0, d.callback, None)
str(dc)
return d
def testDelayedCallSecondsOverride(self):
"""
Test that the C{seconds} argument to DelayedCall gets used instead of
the default timing function, if it is not None.
"""
def seconds():
return 10
dc = base.DelayedCall(5, lambda: None, (), {}, lambda dc: None,
lambda dc: None, seconds)
self.assertEquals(dc.getTime(), 5)
dc.reset(3)
self.assertEquals(dc.getTime(), 13)
class CallFromThreadTests(unittest.TestCase):
def testWakeUp(self):
# Make sure other threads can wake up the reactor
d = Deferred()
def wake():
time.sleep(0.1)
# callFromThread will call wakeUp for us
reactor.callFromThread(d.callback, None)
reactor.callInThread(wake)
return d
if interfaces.IReactorThreads(reactor, None) is None:
testWakeUp.skip = "Nothing to wake up for without thread support"
def _stopCallFromThreadCallback(self):
self.stopped = True
def _callFromThreadCallback(self, d):
reactor.callFromThread(self._callFromThreadCallback2, d)
reactor.callLater(0, self._stopCallFromThreadCallback)
def _callFromThreadCallback2(self, d):
try:
self.assert_(self.stopped)
except:
# Send the error to the deferred
d.errback()
else:
d.callback(None)
def testCallFromThreadStops(self):
"""
Ensure that callFromThread from inside a callFromThread
callback doesn't sit in an infinite loop and lets other
things happen too.
"""
self.stopped = False
d = defer.Deferred()
reactor.callFromThread(self._callFromThreadCallback, d)
return d
class ReactorCoreTestCase(unittest.TestCase):
"""
Test core functionalities of the reactor.
"""
def test_run(self):
"""
Test that reactor.crash terminates reactor.run
"""
for i in xrange(3):
reactor.callLater(0.01, reactor.crash)
reactor.run()
def test_iterate(self):
"""
Test that reactor.iterate(0) doesn't block
"""
start = time.time()
# twisted timers are distinct from the underlying event loop's
# timers, so this fail-safe probably won't keep a failure from
# hanging the test
t = reactor.callLater(10, reactor.crash)
reactor.iterate(0) # shouldn't block
stop = time.time()
elapsed = stop - start
self.failUnless(elapsed < 8)
t.cancel()
class ReactorFDTestCase(unittest.TestCase):
"""
Tests for L{interfaces.IReactorFDSet}.
"""
def test_getReaders(self):
"""
Check that L{interfaces.IReactorFDSet.getReaders} reflects the actions
made with L{interfaces.IReactorFDSet.addReader} and
L{interfaces.IReactorFDSet.removeReader}.
"""
s = socket.socket()
self.addCleanup(s.close)
c = Connection(s, protocol.Protocol())
self.assertNotIn(c, reactor.getReaders())
reactor.addReader(c)
self.assertIn(c, reactor.getReaders())
reactor.removeReader(c)
self.assertNotIn(c, reactor.getReaders())
def test_getWriters(self):
"""
Check that L{interfaces.IReactorFDSet.getWriters} reflects the actions
made with L{interfaces.IReactorFDSet.addWriter} and
L{interfaces.IReactorFDSet.removeWriter}.
"""
s = socket.socket()
self.addCleanup(s.close)
c = Connection(s, protocol.Protocol())
self.assertNotIn(c, reactor.getWriters())
reactor.addWriter(c)
self.assertIn(c, reactor.getWriters())
reactor.removeWriter(c)
self.assertNotIn(c, reactor.getWriters())
if not interfaces.IReactorFDSet.providedBy(reactor):
ReactorFDTestCase.skip = "Reactor not providing IReactorFDSet"
class DelayedTestCase(unittest.TestCase):
def setUp(self):
self.finished = 0
self.counter = 0
self.timers = {}
self.deferred = defer.Deferred()
# ick. Sometimes there are magic timers already running:
# popsicle.Freezer.tick . Kill off all such timers now so they won't
# interfere with the test. Of course, this kind of requires that
# getDelayedCalls already works, so certain failure modes won't be
# noticed.
if not hasattr(reactor, "getDelayedCalls"):
return
for t in reactor.getDelayedCalls():
t.cancel()
reactor.iterate() # flush timers
def tearDown(self):
for t in self.timers.values():
t.cancel()
def checkTimers(self):
l1 = self.timers.values()
l2 = list(reactor.getDelayedCalls())
# There should be at least the calls we put in. There may be other
# calls that are none of our business and that we should ignore,
# though.
missing = []
for dc in l1:
if dc not in l2:
missing.append(dc)
if missing:
self.finished = 1
self.failIf(missing, "Should have been missing no calls, instead was missing " + repr(missing))
def callback(self, tag):
del self.timers[tag]
self.checkTimers()
def addCallback(self, tag):
self.callback(tag)
self.addTimer(15, self.callback)
def done(self, tag):
self.finished = 1
self.callback(tag)
self.deferred.callback(None)
def addTimer(self, when, callback):
self.timers[self.counter] = reactor.callLater(when * 0.01, callback,
self.counter)
self.counter += 1
self.checkTimers()
def testGetDelayedCalls(self):
if not hasattr(reactor, "getDelayedCalls"):
return
# This is not a race because we don't do anything which might call
# the reactor until we have all the timers set up. If we did, this
# test might fail on slow systems.
self.checkTimers()
self.addTimer(35, self.done)
self.addTimer(20, self.callback)
self.addTimer(30, self.callback)
which = self.counter
self.addTimer(29, self.callback)
self.addTimer(25, self.addCallback)
self.addTimer(26, self.callback)
self.timers[which].cancel()
del self.timers[which]
self.checkTimers()
self.deferred.addCallback(lambda x : self.checkTimers())
return self.deferred
def testActive(self):
dcall = reactor.callLater(0, lambda: None)
self.assertEquals(dcall.active(), 1)
reactor.iterate()
self.assertEquals(dcall.active(), 0)
resolve_helper = """
import %(reactor)s
%(reactor)s.install()
from twisted.internet import reactor
class Foo:
def __init__(self):
reactor.callWhenRunning(self.start)
self.timer = reactor.callLater(3, self.failed)
def start(self):
reactor.resolve('localhost').addBoth(self.done)
def done(self, res):
print 'done', res
reactor.stop()
def failed(self):
print 'failed'
self.timer = None
reactor.stop()
f = Foo()
reactor.run()
"""
class ChildResolveProtocol(protocol.ProcessProtocol):
def __init__(self, onCompletion):
self.onCompletion = onCompletion
def connectionMade(self):
self.output = []
self.error = []
def outReceived(self, out):
self.output.append(out)
def errReceived(self, err):
self.error.append(err)
def processEnded(self, reason):
self.onCompletion.callback((reason, self.output, self.error))
self.onCompletion = None
class Resolve(unittest.TestCase):
def testChildResolve(self):
# I've seen problems with reactor.run under gtk2reactor. Spawn a
# child which just does reactor.resolve after the reactor has
# started, fail if it does not complete in a timely fashion.
helperPath = os.path.abspath(self.mktemp())
helperFile = open(helperPath, 'w')
# Eeueuuggg
reactorName = reactor.__module__
helperFile.write(resolve_helper % {'reactor': reactorName})
helperFile.close()
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
helperDeferred = Deferred()
helperProto = ChildResolveProtocol(helperDeferred)
reactor.spawnProcess(helperProto, sys.executable, ("python", "-u", helperPath), env)
def cbFinished((reason, output, error)):
# If the output is "done 127.0.0.1\n" we don't really care what
# else happened.
output = ''.join(output)
if output != 'done 127.0.0.1\n':
self.fail((
"The child process failed to produce the desired results:\n"
" Reason for termination was: %r\n"
" Output stream was: %r\n"
" Error stream was: %r\n") % (reason.getErrorMessage(), output, ''.join(error)))
helperDeferred.addCallback(cbFinished)
return helperDeferred
if not interfaces.IReactorProcess(reactor, None):
Resolve.skip = "cannot run test: reactor doesn't support IReactorProcess"
class Counter:
index = 0
def add(self):
self.index = self.index + 1
class Order:
stage = 0
def a(self):
if self.stage != 0: raise RuntimeError
self.stage = 1
def b(self):
if self.stage != 1: raise RuntimeError
self.stage = 2
def c(self):
if self.stage != 2: raise RuntimeError
self.stage = 3
class CallFromThreadTestCase(unittest.TestCase):
"""Task scheduling from threads tests."""
if interfaces.IReactorThreads(reactor, None) is None:
skip = "Nothing to test without thread support"
def schedule(self, *args, **kwargs):
"""Override in subclasses."""
reactor.callFromThread(*args, **kwargs)
def testScheduling(self):
c = Counter()
for i in range(100):
self.schedule(c.add)
for i in range(100):
reactor.iterate()
self.assertEquals(c.index, 100)
def testCorrectOrder(self):
o = Order()
self.schedule(o.a)
self.schedule(o.b)
self.schedule(o.c)
reactor.iterate()
reactor.iterate()
reactor.iterate()
self.assertEquals(o.stage, 3)
def testNotRunAtOnce(self):
c = Counter()
self.schedule(c.add)
# scheduled tasks should not be run at once:
self.assertEquals(c.index, 0)
reactor.iterate()
self.assertEquals(c.index, 1)
class MyProtocol(protocol.Protocol):
"""Sample protocol."""
class MyFactory(protocol.Factory):
"""Sample factory."""
protocol = MyProtocol
class ProtocolTestCase(unittest.TestCase):
def testFactory(self):
factory = MyFactory()
protocol = factory.buildProtocol(None)
self.assertEquals(protocol.factory, factory)
self.assert_( isinstance(protocol, factory.protocol) )
class DummyProducer(object):
"""
Very uninteresting producer implementation used by tests to ensure the
right methods are called by the consumer with which it is registered.
@type events: C{list} of C{str}
@ivar events: The producer/consumer related events which have happened to
this producer. Strings in this list may be C{'resume'}, C{'stop'}, or
C{'pause'}. Elements are added as they occur.
"""
def __init__(self):
self.events = []
def resumeProducing(self):
self.events.append('resume')
def stopProducing(self):
self.events.append('stop')
def pauseProducing(self):
self.events.append('pause')
class SillyDescriptor(abstract.FileDescriptor):
"""
A descriptor whose data buffer gets filled very fast.
Useful for testing FileDescriptor's IConsumer interface, since
the data buffer fills as soon as at least four characters are
written to it, and gets emptied in a single doWrite() cycle.
"""
bufferSize = 3
connected = True
def writeSomeData(self, data):
"""
Always write all data.
"""
return len(data)
def startWriting(self):
"""
Do nothing: bypass the reactor.
"""
stopWriting = startWriting
class ReentrantProducer(DummyProducer):
"""
Similar to L{DummyProducer}, but with a resumeProducing method which calls
back into an L{IConsumer} method of the consumer against which it is
registered.
@ivar consumer: The consumer with which this producer has been or will
be registered.
@ivar methodName: The name of the method to call on the consumer inside
C{resumeProducing}.
@ivar methodArgs: The arguments to pass to the consumer method invoked in
C{resumeProducing}.
"""
def __init__(self, consumer, methodName, *methodArgs):
super(ReentrantProducer, self).__init__()
self.consumer = consumer
self.methodName = methodName
self.methodArgs = methodArgs
def resumeProducing(self):
super(ReentrantProducer, self).resumeProducing()
getattr(self.consumer, self.methodName)(*self.methodArgs)
class TestProducer(unittest.TestCase):
"""
Test abstract.FileDescriptor's consumer interface.
"""
def test_doubleProducer(self):
"""
Verify that registering a non-streaming producer invokes its
resumeProducing() method and that you can only register one producer
at a time.
"""
fd = abstract.FileDescriptor()
fd.connected = 1
dp = DummyProducer()
fd.registerProducer(dp, 0)
self.assertEquals(dp.events, ['resume'])
self.assertRaises(RuntimeError, fd.registerProducer, DummyProducer(), 0)
def test_unconnectedFileDescriptor(self):
"""
Verify that registering a producer when the connection has already
been closed invokes its stopProducing() method.
"""
fd = abstract.FileDescriptor()
fd.disconnected = 1
dp = DummyProducer()
fd.registerProducer(dp, 0)
self.assertEquals(dp.events, ['stop'])
def _dontPausePullConsumerTest(self, methodName):
descriptor = SillyDescriptor()
producer = DummyProducer()
descriptor.registerProducer(producer, streaming=False)
self.assertEqual(producer.events, ['resume'])
del producer.events[:]
# Fill up the descriptor's write buffer so we can observe whether or
# not it pauses its producer in that case.
getattr(descriptor, methodName)('1234')
self.assertEqual(producer.events, [])
def test_dontPausePullConsumerOnWrite(self):
"""
Verify that FileDescriptor does not call producer.pauseProducing() on a
non-streaming pull producer in response to a L{IConsumer.write} call
which results in a full write buffer. Issue #2286.
"""
return self._dontPausePullConsumerTest('write')
def test_dontPausePullConsumerOnWriteSequence(self):
"""
Like L{test_dontPausePullConsumerOnWrite}, but for a call to
C{writeSequence} rather than L{IConsumer.write}.
C{writeSequence} is not part of L{IConsumer}, but
L{abstract.FileDescriptor} has supported consumery behavior in response
to calls to L{writeSequence} forever.
"""
return self._dontPausePullConsumerTest('writeSequence')
def _reentrantStreamingProducerTest(self, methodName):
descriptor = SillyDescriptor()
producer = ReentrantProducer(descriptor, methodName, 'spam')
descriptor.registerProducer(producer, streaming=True)
# Start things off by filling up the descriptor's buffer so it will
# pause its producer.
getattr(descriptor, methodName)('spam')
# Sanity check - make sure that worked.
self.assertEqual(producer.events, ['pause'])
del producer.events[:]
# After one call to doWrite, the buffer has been emptied so the
# FileDescriptor should resume its producer. That will result in an
# immediate call to FileDescriptor.write which will again fill the
# buffer and result in the producer being paused.
descriptor.doWrite()
self.assertEqual(producer.events, ['resume', 'pause'])
del producer.events[:]
# After a second call to doWrite, the exact same thing should have
# happened. Prior to the bugfix for which this test was written,
# FileDescriptor would have incorrectly believed its producer was
# already resumed (it was paused) and so not resume it again.
descriptor.doWrite()
self.assertEqual(producer.events, ['resume', 'pause'])
def test_reentrantStreamingProducerUsingWrite(self):
"""
Verify that FileDescriptor tracks producer's paused state correctly.
Issue #811, fixed in revision r12857.
"""
return self._reentrantStreamingProducerTest('write')
def test_reentrantStreamingProducerUsingWriteSequence(self):
"""
Like L{test_reentrantStreamingProducerUsingWrite}, but for calls to
C{writeSequence}.
C{writeSequence} is B{not} part of L{IConsumer}, however
C{abstract.FileDescriptor} has supported consumery behavior in response
to calls to C{writeSequence} forever.
"""
return self._reentrantStreamingProducerTest('writeSequence')
class PortStringification(unittest.TestCase):
if interfaces.IReactorTCP(reactor, None) is not None:
def testTCP(self):
p = reactor.listenTCP(0, protocol.ServerFactory())
portNo = p.getHost().port
self.assertNotEqual(str(p).find(str(portNo)), -1,
"%d not found in %s" % (portNo, p))
return p.stopListening()
if interfaces.IReactorUDP(reactor, None) is not None:
def testUDP(self):
p = reactor.listenUDP(0, protocol.DatagramProtocol())
portNo = p.getHost().port
self.assertNotEqual(str(p).find(str(portNo)), -1,
"%d not found in %s" % (portNo, p))
return p.stopListening()
if interfaces.IReactorSSL(reactor, None) is not None and ssl:
def testSSL(self, ssl=ssl):
pem = util.sibpath(__file__, 'server.pem')
p = reactor.listenSSL(0, protocol.ServerFactory(), ssl.DefaultOpenSSLContextFactory(pem, pem))
portNo = p.getHost().port
self.assertNotEqual(str(p).find(str(portNo)), -1,
"%d not found in %s" % (portNo, p))
return p.stopListening()
| apache-2.0 | -4,301,436,880,554,129,400 | 31.992777 | 106 | 0.611248 | false |
amenonsen/ansible | lib/ansible/modules/remote_management/manageiq/manageiq_tenant.py | 22 | 18428 | #!/usr/bin/python
#
# (c) 2018, Evert Mulder (based on manageiq_user.py by Daniel Korn <[email protected]>)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_tenant
short_description: Management of tenants in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.8'
author: Evert Mulder (@evertmulder)
description:
- The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
requirements:
- manageiq-client
options:
state:
description:
- absent - tenant should not exist, present - tenant should be.
choices: ['absent', 'present']
default: 'present'
name:
description:
- The tenant name.
required: true
default: null
description:
description:
- The tenant description.
required: true
default: null
parent_id:
description:
- The id of the parent tenant. If not supplied the root tenant is used.
      - The C(parent_id) takes precedence over C(parent) when supplied
required: false
default: null
parent:
description:
- The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
required: false
default: null
quotas:
description:
- The tenant quotas.
      - All parameters are case sensitive.
- 'Valid attributes are:'
- ' - C(cpu_allocated) (int): use null to remove the quota.'
- ' - C(mem_allocated) (GB): use null to remove the quota.'
- ' - C(storage_allocated) (GB): use null to remove the quota.'
- ' - C(vms_allocated) (int): use null to remove the quota.'
- ' - C(templates_allocated) (int): use null to remove the quota.'
required: false
default: null
'''
EXAMPLES = '''
- name: Update the root tenant in ManageIQ
manageiq_tenant:
name: 'My Company'
description: 'My company name'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Create a tenant in ManageIQ
manageiq_tenant:
name: 'Dep1'
description: 'Manufacturing department'
parent_id: 1
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Delete a tenant in ManageIQ
manageiq_tenant:
state: 'absent'
name: 'Dep1'
parent_id: 1
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
manageiq_tenant:
name: 'Dep1'
parent_id: 1
quotas:
- cpu_allocated: 100
- mem_allocated: 50
- vms_allocated: null
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Delete a tenant in ManageIQ using a token
manageiq_tenant:
state: 'absent'
name: 'Dep1'
parent_id: 1
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
validate_certs: False
'''
RETURN = '''
tenant:
description: The tenant.
returned: success
type: complex
contains:
id:
description: The tenant id
returned: success
type: int
name:
description: The tenant name
returned: success
type: str
description:
description: The tenant description
returned: success
type: str
parent_id:
description: The id of the parent tenant
returned: success
type: int
quotas:
description: List of tenant quotas
returned: success
type: list
sample:
cpu_allocated: 100
mem_allocated: 50
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQTenant(object):
"""
Object to execute tenant management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
def tenant(self, name, parent_id, parent):
""" Search for tenant object by name and parent_id or parent
or the root tenant if no parent or parent_id is supplied.
Returns:
the parent tenant, None for the root tenant
the tenant or None if tenant was not found.
"""
if parent_id:
parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
if not parent_tenant_res:
self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
parent_tenant = parent_tenant_res[0]
tenants = self.client.collections.tenants.find_by(name=name)
for tenant in tenants:
try:
ancestry = tenant['ancestry']
except AttributeError:
ancestry = None
if ancestry:
tenant_parent_id = int(ancestry.split("/")[-1])
                    if tenant_parent_id == parent_id:
return parent_tenant, tenant
return parent_tenant, None
else:
if parent:
parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
if not parent_tenant_res:
self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
if len(parent_tenant_res) > 1:
self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent)
parent_tenant = parent_tenant_res[0]
parent_id = parent_tenant['id']
tenants = self.client.collections.tenants.find_by(name=name)
for tenant in tenants:
try:
ancestry = tenant['ancestry']
except AttributeError:
ancestry = None
if ancestry:
tenant_parent_id = int(ancestry.split("/")[-1])
if tenant_parent_id == parent_id:
return parent_tenant, tenant
return parent_tenant, None
else:
# No parent or parent id supplied we select the root tenant
return None, self.client.collections.tenants.find_by(ancestry=None)[0]
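    # Illustrative (hypothetical) call, mirroring the module EXAMPLES above:
    #   parent, tenant = self.tenant('Dep1', parent_id=1, parent=None)
    # returns the parent tenant entity and the matching child tenant (or None).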
def compare_tenant(self, tenant, name, description):
""" Compare tenant fields with new field values.
Returns:
            False if the tenant fields differ from the new field values, True otherwise.
"""
found_difference = (
(name and tenant['name'] != name) or
(description and tenant['description'] != description)
)
return not found_difference
def delete_tenant(self, tenant):
""" Deletes a tenant from manageiq.
Returns:
dict with `msg` and `changed`
"""
try:
url = '%s/tenants/%s' % (self.api_url, tenant['id'])
result = self.client.post(url, action='delete')
except Exception as e:
self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))
if result['success'] is False:
self.module.fail_json(msg=result['message'])
return dict(changed=True, msg=result['message'])
def edit_tenant(self, tenant, name, description):
""" Edit a manageiq tenant.
Returns:
dict with `msg` and `changed`
"""
resource = dict(name=name, description=description, use_config_for_attributes=False)
        # check if we need to update (compare_tenant returns True if no difference is found)
if self.compare_tenant(tenant, name, description):
return dict(
changed=False,
msg="tenant %s is not changed." % tenant['name'],
tenant=tenant['_data'])
# try to update tenant
try:
result = self.client.post(tenant['href'], action='edit', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))
return dict(
changed=True,
msg="successfully updated the tenant with id %s" % (tenant['id']))
def create_tenant(self, name, description, parent_tenant):
""" Creates the tenant in manageiq.
Returns:
dict with `msg`, `changed` and `tenant_id`
"""
parent_id = parent_tenant['id']
# check for required arguments
for key, value in dict(name=name, description=description, parent_id=parent_id).items():
if value in (None, ''):
self.module.fail_json(msg="missing required argument: %s" % key)
url = '%s/tenants' % self.api_url
resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
try:
result = self.client.post(url, action='create', resource=resource)
tenant_id = result['results'][0]['id']
except Exception as e:
self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
return dict(
changed=True,
msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
tenant_id=tenant_id)
def tenant_quota(self, tenant, quota_key):
""" Search for tenant quota object by tenant and quota_key.
Returns:
            the list of quotas matching quota_key for the tenant (empty if not found).
"""
tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
return tenant_quotas['resources']
def tenant_quotas(self, tenant):
""" Search for tenant quotas object by tenant.
Returns:
            the list of quotas for the tenant (empty if none exist).
"""
tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
return tenant_quotas['resources']
def update_tenant_quotas(self, tenant, quotas):
""" Creates the tenant quotas in manageiq.
Returns:
dict with `msg` and `changed`
"""
changed = False
messages = []
for quota_key, quota_value in quotas.items():
current_quota_filtered = self.tenant_quota(tenant, quota_key)
if current_quota_filtered:
current_quota = current_quota_filtered[0]
else:
current_quota = None
if quota_value:
                # Convert the GB values to bytes
if quota_key in ['storage_allocated', 'mem_allocated']:
quota_value_int = int(quota_value) * 1024 * 1024 * 1024
else:
quota_value_int = int(quota_value)
if current_quota:
res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
else:
res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
else:
if current_quota:
res = self.delete_tenant_quota(tenant, current_quota)
else:
res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
if res['changed']:
changed = True
messages.append(res['msg'])
return dict(
changed=changed,
msg=', '.join(messages))
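    # Example quotas argument (taken from the module EXAMPLES above); the GB-based
    # keys are converted to bytes before being sent to the API, and a None value
    # removes the quota:
    #   {'cpu_allocated': 100, 'mem_allocated': 50, 'vms_allocated': None}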
def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
""" Update the tenant quotas in manageiq.
Returns:
result
"""
if current_quota['value'] == quota_value:
return dict(
changed=False,
msg="tenant quota %s already has value %s" % (quota_key, quota_value))
else:
url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
resource = {'value': quota_value}
try:
self.client.post(url, action='edit', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
return dict(
changed=True,
msg="successfully updated tenant quota %s" % quota_key)
def create_tenant_quota(self, tenant, quota_key, quota_value):
""" Creates the tenant quotas in manageiq.
Returns:
result
"""
url = '%s/quotas' % (tenant['href'])
resource = {'name': quota_key, 'value': quota_value}
try:
self.client.post(url, action='create', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))
return dict(
changed=True,
msg="successfully created tenant quota %s" % quota_key)
def delete_tenant_quota(self, tenant, quota):
""" deletes the tenant quotas in manageiq.
Returns:
result
"""
try:
result = self.client.post(quota['href'], action='delete')
except Exception as e:
self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))
return dict(changed=True, msg=result['message'])
def create_tenant_response(self, tenant, parent_tenant):
""" Creates the ansible result object from a manageiq tenant entity
Returns:
a dict with the tenant id, name, description, parent id,
            and quotas
"""
tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])
try:
ancestry = tenant['ancestry']
tenant_parent_id = int(ancestry.split("/")[-1])
except AttributeError:
# The root tenant does not return the ancestry attribute
tenant_parent_id = None
return dict(
id=tenant['id'],
name=tenant['name'],
description=tenant['description'],
parent_id=tenant_parent_id,
quotas=tenant_quotas
)
@staticmethod
def create_tenant_quotas_response(tenant_quotas):
""" Creates the ansible result object from a manageiq tenant_quotas entity
Returns:
a dict with the applied quotas, name and value
"""
if not tenant_quotas:
return {}
result = {}
for quota in tenant_quotas:
if quota['unit'] == 'bytes':
value = float(quota['value']) / (1024 * 1024 * 1024)
else:
value = quota['value']
result[quota['name']] = value
return result
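    # Worked example of the conversion above: a quota resource such as
    #   {'name': 'mem_allocated', 'unit': 'bytes', 'value': 53687091200}
    # is reported back as {'mem_allocated': 50.0} (bytes -> GB).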
def main():
argument_spec = dict(
name=dict(required=True, type='str'),
description=dict(required=True, type='str'),
parent_id=dict(required=False, type='int'),
parent=dict(required=False, type='str'),
state=dict(choices=['absent', 'present'], default='present'),
quotas=dict(type='dict', default={})
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(
argument_spec=argument_spec
)
name = module.params['name']
description = module.params['description']
parent_id = module.params['parent_id']
parent = module.params['parent']
state = module.params['state']
quotas = module.params['quotas']
manageiq = ManageIQ(module)
manageiq_tenant = ManageIQTenant(manageiq)
parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)
# tenant should not exist
if state == "absent":
# if we have a tenant, delete it
if tenant:
res_args = manageiq_tenant.delete_tenant(tenant)
# if we do not have a tenant, nothing to do
else:
if parent_id:
msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
else:
msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
res_args = dict(
changed=False,
msg=msg)
# tenant should exist
if state == "present":
# if we have a tenant, edit it
if tenant:
res_args = manageiq_tenant.edit_tenant(tenant, name, description)
# if we do not have a tenant, create it
else:
res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
        # quotas are supplied and we have a tenant
if quotas:
tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
if tenant_quotas_res['changed']:
res_args['changed'] = True
res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
tenant.reload(expand='resources', attributes=['tenant_quotas'])
res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
module.exit_json(**res_args)
if __name__ == "__main__":
main()
| gpl-3.0 | -1,074,665,009,640,361,600 | 32.263538 | 117 | 0.583406 | false |
mPowering/django-orb | orb/views.py | 1 | 30008 | import os
from collections import defaultdict
from django.conf import settings
from django.contrib import messages
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from django.db.models import Count, Q
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from haystack.query import SearchQuerySet
from orb.forms import (ResourceStep1Form, ResourceStep2Form, SearchForm,
ResourceRejectForm, AdvancedSearchForm)
from orb.models import Collection
from orb.models import home_resources
from orb.models import ResourceFile, ResourceTag, ResourceCriteria, ResourceRating
from orb.models import ReviewerRole
from orb.models import Tag, Resource, ResourceURL, Category, TagOwner, SearchTracker
from orb.signals import (resource_viewed, resource_url_viewed, resource_file_viewed,
search, resource_workflow, resource_submitted, tag_viewed)
from orb.tags.forms import TagPageForm
def home_view(request):
topics = []
organized_topics = defaultdict(list)
for tag in Tag.tags.public().top_level():
child_tags = tag.children.values_list('id')
resource_count = Resource.objects.filter(status=Resource.APPROVED).filter(
Q(resourcetag__tag__pk__in=child_tags) | Q(resourcetag__tag=tag)).distinct().count()
for category_slug in ["health-domain"]:
if tag.category.slug == category_slug:
organized_topics[category_slug.replace("-", "_")].append({
'resource_count': resource_count,
'tag': tag,
})
topics.append({
'resource_count': resource_count,
'tag': tag,
})
return render(request, 'orb/home.html', {
'topics': topics,
'organized_topics': home_resources(),
'page_title': _(u'ORB by mPowering'),
})
def partner_view(request):
PARTNERS = ['jhu-ccp', 'digital-campus', 'digital-green',
'global-health-media-project', 'medical-aid-films', 'zinc-ors']
partners = Tag.objects.filter(
category__slug='organisation', slug__in=PARTNERS).order_by('name')
return render(request, 'orb/partners.html', {'partners': partners})
def tag_view(request, tag_slug):
"""
Renders a tag detail page.
    Allows the user to paginate results and sort by preselected options.
Args:
request: HttpRequest
tag_slug: the identifier for the tag
Returns:
Rendered response with a tag's resource list
"""
tag = get_object_or_404(Tag, slug=tag_slug)
filter_params = {
'page': 1,
'order': TagPageForm.CREATED,
}
params_form = TagPageForm(data=request.GET)
if params_form.is_valid():
filter_params.update(params_form.cleaned_data)
order_by = filter_params['order']
if order_by == TagPageForm.RATING:
data = Resource.resources.approved().with_ratings(tag).order_by(order_by)
else:
data = Resource.resources.approved().for_tag(tag).order_by(order_by)
paginator = Paginator(data, settings.ORB_PAGINATOR_DEFAULT)
try:
resources = paginator.page(filter_params['page'])
except (EmptyPage, InvalidPage):
resources = paginator.page(paginator.num_pages)
show_filter_link = tag.category.slug in [slug for name, slug in settings.ADVANCED_SEARCH_CATEGORIES]
tag_viewed.send(sender=tag, tag=tag, request=request)
is_geo_tag = tag.category.name == "Geography"
return render(request, 'orb/tag.html', {
'tag': tag,
'page': resources,
'params_form': params_form,
'show_filter_link': show_filter_link,
'is_geo_tag': is_geo_tag,
})
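# Illustrative request only (URL pattern is hypothetical): GET /tag/<tag_slug>/?page=2&order=...
# is validated by TagPageForm above and paginated at settings.ORB_PAGINATOR_DEFAULT items per page.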
def taxonomy_view(request):
return render(request, 'orb/taxonomy.html')
def resource_permalink_view(request, id):
resource = get_object_or_404(Resource, pk=id)
return resource_view(request, resource.slug)
def resource_view(request, resource_slug):
resource = get_object_or_404(
Resource.objects.approved(user=request.user), slug=resource_slug)
if resource.status == Resource.ARCHIVED:
messages.error(request, _(
u"This resource has been archived by the ORB Content"
u" Review Team, so is not available for users to view"))
elif resource.status != Resource.APPROVED:
messages.error(request, _(
u"This resource is not yet approved by the ORB Content"
u" Review Team, so is not yet available for all users to view"))
options_menu = []
if resource_can_edit(resource, request.user):
om = {}
om['title'] = _(u'Edit')
om['url'] = reverse('orb_resource_edit', args=[resource.id])
options_menu.append(om)
if request.user.is_staff and resource.status == Resource.PENDING:
om = {}
om['title'] = _(u'Reject')
om['url'] = reverse('orb_resource_reject', args=[resource.id])
options_menu.append(om)
om = {}
om['title'] = _(u'Approve')
om['url'] = reverse('orb_resource_approve', args=[resource.id])
options_menu.append(om)
resource_viewed.send(sender=resource, resource=resource, request=request)
user_rating = 0
if request.user.is_authenticated():
try:
user_rating = ResourceRating.objects.get(
resource=resource, user=request.user).rating
except ResourceRating.DoesNotExist:
pass
# get the collections for this resource
collections = Collection.objects.filter(
collectionresource__resource=resource, visibility=Collection.PUBLIC)
# See if bookmarked
bookmarks = Collection.objects.filter(collectionresource__resource=resource,
visibility=Collection.PRIVATE, collectionuser__user__id=request.user.id).count()
    bookmarked = bookmarks > 0
return render(request, 'orb/resource/view.html', {
'resource': resource,
'options_menu': options_menu,
'user_rating': user_rating,
'collections': collections,
'bookmarked': bookmarked,
})
def resource_create_step1_view(request):
if request.user.is_anonymous():
return render(request, 'orb/login_required.html', {
'message': _(u'You need to be logged in to add a resource.'),
})
if request.method == 'POST':
form = ResourceStep1Form(request.POST, request.FILES, request=request)
resource_form_set_choices(form)
if form.is_valid():
# save resource
resource = Resource(status=Resource.PENDING,
create_user=request.user, update_user=request.user)
resource.title = form.cleaned_data.get("title")
resource.description = form.cleaned_data.get("description")
if form.cleaned_data.get("study_time_number") and form.cleaned_data.get("study_time_unit"):
resource.study_time_number = form.cleaned_data.get(
"study_time_number")
resource.study_time_unit = form.cleaned_data.get(
"study_time_unit")
            if 'image' in request.FILES:
resource.image = request.FILES["image"]
resource.attribution = form.cleaned_data.get("attribution")
resource.save()
# add organisation(s)/geography and other tags
resource_add_free_text_tags(
resource, form.cleaned_data.get('organisations'), request.user, 'organisation')
resource_add_free_text_tags(
resource, form.cleaned_data.get('geography'), request.user, 'geography')
resource_add_free_text_tags(
resource, form.cleaned_data.get('languages'), request.user, 'language')
resource_add_free_text_tags(
resource, form.cleaned_data.get('other_tags'), request.user, 'other')
# add tags
resource_add_tags(request, form, resource)
# see if email needs to be sent
resource_workflow.send(sender=resource, resource=resource, request=request,
status=Resource.PENDING, notes="")
resource_submitted.send(sender=resource, resource=resource, request=request)
# redirect to step 2
# Redirect after POST
return HttpResponseRedirect(reverse('orb_resource_create2', args=[resource.id]))
else:
if request.user.userprofile.organisation:
user_org = request.user.userprofile.organisation.name
initial = {'organisations': user_org, }
else:
initial = {}
form = ResourceStep1Form(initial=initial, request=request)
resource_form_set_choices(form)
return render(request, 'orb/resource/create_step1.html', {'form': form})
def resource_create_step2_view(request, id):
if request.user.is_anonymous():
# TODO use contrib.messages
return render(request, 'orb/login_required.html', {
'message': _(u'You need to be logged in to add a resource.'),
})
resource = get_object_or_404(Resource, pk=id)
# check if owner of this resource
if not resource_can_edit(resource, request.user):
raise Http404()
if request.method == 'POST':
form = ResourceStep2Form(request.POST, request.FILES, request=request)
if form.is_valid():
title = form.cleaned_data.get("title")
# add file and url
            if 'file' in request.FILES:
rf = ResourceFile(
resource=resource, create_user=request.user, update_user=request.user)
rf.file = request.FILES["file"]
if title:
rf.title = title
rf.save()
url = form.cleaned_data.get("url")
if url:
ru = ResourceURL(
resource=resource, create_user=request.user, update_user=request.user)
ru.url = url
if title:
ru.title = title
ru.save()
initial = {}
form = ResourceStep2Form(initial=initial, request=request)
resource_files = ResourceFile.objects.filter(resource=resource)
resource_urls = ResourceURL.objects.filter(resource=resource)
return render(request, 'orb/resource/create_step2.html', {
'form': form,
'resource': resource,
'resource_files': resource_files,
'resource_urls': resource_urls,
})
def resource_create_file_delete_view(request, id, file_id):
# check ownership
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
try:
ResourceFile.objects.get(resource=resource, pk=file_id).delete()
except ResourceFile.DoesNotExist:
pass
return HttpResponseRedirect(reverse('orb_resource_create2', args=[id]))
def resource_create_url_delete_view(request, id, url_id):
# check ownership
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
try:
ResourceURL.objects.get(resource=resource, pk=url_id).delete()
except ResourceURL.DoesNotExist:
pass
return HttpResponseRedirect(reverse('orb_resource_create2', args=[id]))
def resource_edit_file_delete_view(request, id, file_id):
# check ownership
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
try:
ResourceFile.objects.get(resource=resource, pk=file_id).delete()
except ResourceFile.DoesNotExist:
pass
return HttpResponseRedirect(reverse('orb_resource_edit2', args=[id]))
def resource_edit_url_delete_view(request, id, url_id):
# check ownership
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
try:
ResourceURL.objects.get(resource=resource, pk=url_id).delete()
except ResourceURL.DoesNotExist:
pass
return HttpResponseRedirect(reverse('orb_resource_edit2', args=[id]))
def resource_create_thanks_view(request, id):
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
return render(request, 'orb/resource/create_thanks.html', {'resource': resource})
def resource_guidelines_view(request):
criteria = []
# get the general criteria
criteria_general = ResourceCriteria.objects.filter(role=None).order_by('order_by')
obj = {}
obj['category'] = _("General")
obj['criteria'] = criteria_general
criteria.append(obj)
for k in ReviewerRole.objects.all():
obj = {}
cat = ResourceCriteria.objects.filter(role=k).order_by('order_by')
obj['category'] = k
obj['criteria'] = cat
criteria.append(obj)
return render(request, 'orb/resource/guidelines.html', {'criteria_categories': criteria})
def resource_approve_view(request, id):
if not request.user.is_staff:
return HttpResponse(status=401, content="Not Authorized")
resource = Resource.objects.get(pk=id)
resource.status = Resource.APPROVED
resource.save()
resource_workflow.send(sender=resource, resource=resource,
request=request, status=Resource.APPROVED, notes="")
return render(request, 'orb/resource/status_updated.html', {'resource': resource})
def resource_reject_view(request, id):
if not request.user.is_staff:
return HttpResponse(status=401, content="Not Authorized")
resource = Resource.objects.get(pk=id)
if request.method == 'POST':
form = ResourceRejectForm(data=request.POST)
form.fields['criteria'].choices = [(t.id, t.description) for t in ResourceCriteria.objects.all(
).order_by('category_order_by', 'order_by')]
if form.is_valid():
resource.status = Resource.REJECTED
resource.save()
notes = form.cleaned_data.get("notes")
criteria = form.cleaned_data.get("criteria")
resource_workflow.send(sender=resource, resource=resource, request=request,
status=Resource.REJECTED, notes=notes, criteria=criteria)
return HttpResponseRedirect(reverse('orb_resource_reject_sent', args=[resource.id]))
else:
form = ResourceRejectForm()
form.fields['criteria'].choices = [(t.id, t.description) for t in ResourceCriteria.objects.all(
).order_by('category_order_by', 'order_by')]
return render(request, 'orb/resource/reject_form.html', {
'resource': resource,
'form': form,
})
def resource_reject_sent_view(request, id):
if not request.user.is_staff:
return HttpResponse(status=401, content="Not Authorized")
resource = Resource.objects.get(pk=id)
return render(request, 'orb/resource/status_updated.html', {'resource': resource, })
def resource_pending_mep_view(request, id):
if not request.user.is_staff:
return HttpResponse(status=401, content="Not Authorized")
resource = Resource.objects.get(pk=id)
resource.status = Resource.PENDING
resource.save()
resource_workflow.send(sender=resource, resource=resource, request=request,
status=Resource.PENDING, notes="")
return render(request, 'orb/resource/status_updated.html', {'resource': resource})
def resource_edit_view(request, resource_id):
resource = get_object_or_404(Resource, pk=resource_id)
if not resource_can_edit(resource, request.user):
raise Http404()
if request.method == 'POST':
form = ResourceStep1Form(data=request.POST, files=request.FILES)
resource_form_set_choices(form)
if form.is_valid():
resource.update_user = request.user
resource.title = form.cleaned_data.get("title")
resource.description = form.cleaned_data.get("description")
if form.cleaned_data.get("study_time_number") and form.cleaned_data.get("study_time_unit"):
resource.study_time_number = form.cleaned_data.get(
"study_time_number")
resource.study_time_unit = form.cleaned_data.get(
"study_time_unit")
resource.attribution = form.cleaned_data.get("attribution")
resource.save()
# update image
image = form.cleaned_data.get("image")
            if image is False:
resource.image = None
resource.save()
            if 'image' in request.FILES:
resource.image = request.FILES["image"]
resource.save()
# update tags - remove all current tags first
ResourceTag.objects.filter(resource=resource).delete()
resource_add_tags(request, form, resource)
resource_add_free_text_tags(
resource, form.cleaned_data.get('organisations'), request.user, 'organisation')
resource_add_free_text_tags(
resource, form.cleaned_data.get('geography'), request.user, 'geography')
resource_add_free_text_tags(
resource, form.cleaned_data.get('languages'), request.user, 'language')
resource_add_free_text_tags(
resource, form.cleaned_data.get('other_tags'), request.user, 'other')
# All successful - now redirect
# Redirect after POST
return HttpResponseRedirect(reverse('orb_resource_edit2', args=[resource.id]))
else:
initial = request.POST.copy()
initial['image'] = resource.image
files = ResourceFile.objects.filter(resource=resource)[:1]
if files:
initial['file'] = files[0].file
form = ResourceStep1Form(
initial=initial, data=request.POST, files=request.FILES)
resource_form_set_choices(form)
else:
data = {}
data['title'] = resource.title
organisations = Tag.objects.filter(
category__slug='organisation', resourcetag__resource=resource).values_list('name', flat=True)
data['organisations'] = ', '.join(organisations)
data['description'] = resource.description
data['image'] = resource.image
data['study_time_number'] = resource.study_time_number
data['study_time_unit'] = resource.study_time_unit
data['attribution'] = resource.attribution
files = ResourceFile.objects.filter(resource=resource)[:1]
if files:
data['file'] = files[0].file
urls = ResourceURL.objects.filter(resource=resource)[:1]
if urls:
data['url'] = urls[0].url
health_topic = Tag.objects.filter(
category__top_level=True, resourcetag__resource=resource).values_list('id', flat=True)
data['health_topic'] = health_topic
resource_type = Tag.objects.filter(
category__slug='type', resourcetag__resource=resource).values_list('id', flat=True)
data['resource_type'] = resource_type
audience = Tag.objects.filter(
category__slug='audience', resourcetag__resource=resource).values_list('id', flat=True)
data['audience'] = audience
geography = Tag.objects.filter(
category__slug='geography', resourcetag__resource=resource).values_list('name', flat=True)
data['geography'] = ', '.join(geography)
languages = Tag.objects.filter(
category__slug='language', resourcetag__resource=resource).values_list('name', flat=True)
data['languages'] = ', '.join(languages)
device = Tag.objects.filter(
category__slug='device', resourcetag__resource=resource).values_list('id', flat=True)
data['device'] = device
license = Tag.objects.filter(
category__slug='license', resourcetag__resource=resource).values_list('id', flat=True)
if license:
data['license'] = license[0]
other_tags = Tag.objects.filter(
resourcetag__resource=resource, category__slug='other').values_list('name', flat=True)
data['other_tags'] = ', '.join(other_tags)
form = ResourceStep1Form(initial=data)
resource_form_set_choices(form)
return render(request, 'orb/resource/edit.html', {'form': form})
def resource_edit_step2_view(request, resource_id):
if request.user.is_anonymous():
# TODO use contrib.messages
return render(request, 'orb/login_required.html', {
'message': _(u'You need to be logged in to add a resource.'),
})
resource = get_object_or_404(Resource, pk=resource_id)
# check if owner of this resource
if not resource_can_edit(resource, request.user):
raise Http404()
if request.method == 'POST':
form = ResourceStep2Form(request.POST, request.FILES, request=request)
if form.is_valid():
title = form.cleaned_data.get("title")
# add file and url
            if 'file' in request.FILES:
rf = ResourceFile(
resource=resource, create_user=request.user, update_user=request.user)
rf.file = request.FILES["file"]
if title:
rf.title = title
rf.save()
url = form.cleaned_data.get("url")
if url:
ru = ResourceURL(
resource=resource, create_user=request.user, update_user=request.user)
ru.url = url
if title:
ru.title = title
ru.save()
initial = {}
form = ResourceStep2Form(initial=initial, request=request)
resource_files = ResourceFile.objects.filter(resource=resource)
resource_urls = ResourceURL.objects.filter(resource=resource)
return render(request, 'orb/resource/edit_step2.html', {
'form': form,
'resource': resource,
'resource_files': resource_files,
'resource_urls': resource_urls,
})
def resource_edit_thanks_view(request, id):
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
return render(request, 'orb/resource/edit_thanks.html', {'resource': resource})
def search_view(request):
search_query = request.GET.get('q', '')
if search_query:
search_results = SearchQuerySet().filter(content=search_query)
else:
search_results = []
data = {}
data['q'] = search_query
form = SearchForm(initial=data)
paginator = Paginator(search_results, settings.ORB_PAGINATOR_DEFAULT)
# Make sure page request is an int. If not, deliver first page.
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
results = paginator.page(page)
except (EmptyPage, InvalidPage):
results = paginator.page(paginator.num_pages)
if search_query:
search.send(sender=search_results, query=search_query,
no_results=search_results.count(), request=request, page=page)
return render(request, 'orb/search.html', {
'form': form,
'query': search_query,
'page': results,
'total_results': paginator.count,
})
def search_advanced_view(request, tag_id=None):
if request.method == 'POST':
form = AdvancedSearchForm(request.POST)
if form.is_valid():
urlparams = request.POST.copy()
# delete these from params as not required
del urlparams['csrfmiddlewaretoken']
del urlparams['submit']
return HttpResponseRedirect(reverse('orb_search_advanced_results') + "?" + urlparams.urlencode())
else:
form = AdvancedSearchForm()
return render(request, 'orb/search_advanced.html', {'form': form})
def search_advanced_results_view(request):
form = AdvancedSearchForm(request.GET)
if form.is_valid():
q = form.cleaned_data.get('q')
results, filter_tags = form.search()
if q:
search_results = SearchQuerySet().filter(content=q).models(Resource).values_list('pk', flat=True)
results = results.filter(pk__in=search_results)
paginator = Paginator(results, settings.ORB_PAGINATOR_DEFAULT)
try:
page = int(request.GET.get('page', 1))
except ValueError:
page = 1
try:
resources = paginator.page(page)
except (EmptyPage, InvalidPage):
resources = paginator.page(paginator.num_pages)
search.send(sender=results, query=q, no_results=results.count(),
request=request, type=SearchTracker.SEARCH_ADV, page=page)
license_tags = form.cleaned_data['license']
else:
filter_tags = Tag.objects.filter(pk=None)
license_tags = []
resources = Resource.objects.filter(pk=None)
paginator = Paginator(resources, settings.ORB_PAGINATOR_DEFAULT)
return render(request, 'orb/search_advanced_results.html', {
'filter_tags': filter_tags,
'license_tags': license_tags,
'q': form.cleaned_data.get('q'),
'page': resources,
'total_results': paginator.count,
})
def collection_view(request, collection_slug):
collection = get_object_or_404(Collection,
slug=collection_slug, visibility=Collection.PUBLIC)
data = Resource.objects.filter(collectionresource__collection=collection,
status=Resource.APPROVED).order_by('collectionresource__order_by')
paginator = Paginator(data, settings.ORB_PAGINATOR_DEFAULT)
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
resources = paginator.page(page)
except (EmptyPage, InvalidPage):
resources = paginator.page(paginator.num_pages)
return render(request, 'orb/collection/view.html', {
'collection': collection,
'page': resources,
'total_results': paginator.count,
})
# Helper functions
def resource_form_set_choices(form):
form.fields['health_topic'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__top_level=True).order_by('order_by', 'name')]
form.fields['resource_type'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug='type').order_by('order_by', 'name')]
form.fields['audience'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug='audience').order_by('order_by', 'name')]
form.fields['device'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug='device').order_by('order_by', 'name')]
form.fields['license'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug='license').order_by('order_by', 'name')]
return form
def advanced_search_form_set_choices(form):
for name, slug in settings.ADVANCED_SEARCH_CATEGORIES:
form.fields[name].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug=slug, resourcetag__resource__status=Resource.APPROVED).distinct().order_by('order_by', 'name')]
form.fields['license'].choices = [
('ND', _(u'Derivatives allowed')), ('NC', _(u'Commercial use allowed'))]
return form
def resource_can_edit(resource, user):
if user.is_staff or user == resource.create_user or user == resource.update_user:
return True
else:
return TagOwner.objects.filter(user__pk=user.id, tag__resourcetag__resource=resource).exists()
def resource_add_free_text_tags(resource, tag_text, user, category_slug):
"""
Adds tags to a resource based on free text and category slugs
Args:
resource: a Resource object
tag_text: string of text including multiple comma separated tags
user: the User object to use for the tags
category_slug: the slug of the related Category
Returns:
None
"""
free_text_tags = [x.strip() for x in tag_text.split(',') if x.strip()]
category = Category.objects.get(slug=category_slug)
for tag_name in free_text_tags:
try:
tag = Tag.tags.rewrite(False).get(name=tag_name)
except Tag.DoesNotExist:
try:
tag = Tag.tags.get(name=tag_name)
except Tag.DoesNotExist:
tag = Tag.tags.create(
name=tag_name,
category=category,
create_user=user,
update_user=user,
)
ResourceTag.objects.get_or_create(
tag=tag, resource=resource, defaults={'create_user': user})
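# Illustrative call (hypothetical tag names): resource_add_free_text_tags(
#     resource, "WHO, UNICEF", request.user, 'organisation')
# splits the text on commas and links (creating them if needed) an 'organisation'
# tag for each name.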
def resource_add_tags(request, form, resource):
"""
Adds structured tags to the resource
Args:
request: the HttpRequest
form: Resource add/edit form that has the tag data
resource: the resource to add the tags
Returns:
None
"""
tag_categories = ["health_topic", "resource_type", "audience", "device"]
for tc in tag_categories:
tag_category = form.cleaned_data.get(tc)
for ht in tag_category:
tag = Tag.objects.get(pk=ht)
ResourceTag.objects.get_or_create(
tag=tag, resource=resource, defaults={'create_user': request.user})
# add license
license = form.cleaned_data.get("license")
tag = Tag.objects.get(pk=license)
ResourceTag(tag=tag, resource=resource, create_user=request.user).save()
| gpl-3.0 | 4,012,170,673,093,506,600 | 35.285369 | 122 | 0.6232 | false |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/v8/tools/v8heapconst.py | 1 | 10545 | # Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file is automatically generated from the V8 source and should not
# be modified manually, run 'make grokdump' instead to update this file.
# List of known V8 instance types.
INSTANCE_TYPES = {
64: "STRING_TYPE",
68: "ASCII_STRING_TYPE",
65: "CONS_STRING_TYPE",
69: "CONS_ASCII_STRING_TYPE",
67: "SLICED_STRING_TYPE",
71: "SLICED_ASCII_STRING_TYPE",
66: "EXTERNAL_STRING_TYPE",
70: "EXTERNAL_ASCII_STRING_TYPE",
74: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
82: "SHORT_EXTERNAL_STRING_TYPE",
86: "SHORT_EXTERNAL_ASCII_STRING_TYPE",
90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
0: "INTERNALIZED_STRING_TYPE",
4: "ASCII_INTERNALIZED_STRING_TYPE",
1: "CONS_INTERNALIZED_STRING_TYPE",
5: "CONS_ASCII_INTERNALIZED_STRING_TYPE",
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
6: "EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
18: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE",
22: "SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
129: "MAP_TYPE",
130: "CODE_TYPE",
131: "ODDBALL_TYPE",
132: "CELL_TYPE",
133: "PROPERTY_CELL_TYPE",
134: "HEAP_NUMBER_TYPE",
135: "FOREIGN_TYPE",
136: "BYTE_ARRAY_TYPE",
137: "FREE_SPACE_TYPE",
138: "EXTERNAL_BYTE_ARRAY_TYPE",
139: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
140: "EXTERNAL_SHORT_ARRAY_TYPE",
141: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
142: "EXTERNAL_INT_ARRAY_TYPE",
143: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
144: "EXTERNAL_FLOAT_ARRAY_TYPE",
145: "EXTERNAL_DOUBLE_ARRAY_TYPE",
146: "EXTERNAL_PIXEL_ARRAY_TYPE",
148: "FILLER_TYPE",
149: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
150: "DECLARED_ACCESSOR_INFO_TYPE",
151: "EXECUTABLE_ACCESSOR_INFO_TYPE",
152: "ACCESSOR_PAIR_TYPE",
153: "ACCESS_CHECK_INFO_TYPE",
154: "INTERCEPTOR_INFO_TYPE",
155: "CALL_HANDLER_INFO_TYPE",
156: "FUNCTION_TEMPLATE_INFO_TYPE",
157: "OBJECT_TEMPLATE_INFO_TYPE",
158: "SIGNATURE_INFO_TYPE",
159: "TYPE_SWITCH_INFO_TYPE",
161: "ALLOCATION_MEMENTO_TYPE",
160: "ALLOCATION_SITE_TYPE",
162: "SCRIPT_TYPE",
163: "CODE_CACHE_TYPE",
164: "POLYMORPHIC_CODE_CACHE_TYPE",
165: "TYPE_FEEDBACK_INFO_TYPE",
166: "ALIASED_ARGUMENTS_ENTRY_TYPE",
167: "BOX_TYPE",
170: "FIXED_ARRAY_TYPE",
147: "FIXED_DOUBLE_ARRAY_TYPE",
171: "SHARED_FUNCTION_INFO_TYPE",
172: "JS_MESSAGE_OBJECT_TYPE",
175: "JS_VALUE_TYPE",
176: "JS_DATE_TYPE",
177: "JS_OBJECT_TYPE",
178: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
179: "JS_GENERATOR_OBJECT_TYPE",
180: "JS_MODULE_TYPE",
181: "JS_GLOBAL_OBJECT_TYPE",
182: "JS_BUILTINS_OBJECT_TYPE",
183: "JS_GLOBAL_PROXY_TYPE",
184: "JS_ARRAY_TYPE",
185: "JS_ARRAY_BUFFER_TYPE",
186: "JS_TYPED_ARRAY_TYPE",
187: "JS_DATA_VIEW_TYPE",
174: "JS_PROXY_TYPE",
188: "JS_SET_TYPE",
189: "JS_MAP_TYPE",
190: "JS_WEAK_MAP_TYPE",
191: "JS_WEAK_SET_TYPE",
192: "JS_REGEXP_TYPE",
193: "JS_FUNCTION_TYPE",
173: "JS_FUNCTION_PROXY_TYPE",
168: "DEBUG_INFO_TYPE",
169: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
0x08081: (136, "ByteArrayMap"),
0x080a9: (129, "MetaMap"),
0x080d1: (131, "OddballMap"),
0x080f9: (4, "AsciiInternalizedStringMap"),
0x08121: (170, "FixedArrayMap"),
0x08149: (134, "HeapNumberMap"),
0x08171: (137, "FreeSpaceMap"),
0x08199: (148, "OnePointerFillerMap"),
0x081c1: (148, "TwoPointerFillerMap"),
0x081e9: (132, "CellMap"),
0x08211: (133, "GlobalPropertyCellMap"),
0x08239: (171, "SharedFunctionInfoMap"),
0x08261: (170, "NativeContextMap"),
0x08289: (130, "CodeMap"),
0x082b1: (170, "ScopeInfoMap"),
0x082d9: (170, "FixedCOWArrayMap"),
0x08301: (147, "FixedDoubleArrayMap"),
0x08329: (170, "HashTableMap"),
0x08351: (128, "SymbolMap"),
0x08379: (64, "StringMap"),
0x083a1: (68, "AsciiStringMap"),
0x083c9: (65, "ConsStringMap"),
0x083f1: (69, "ConsAsciiStringMap"),
0x08419: (67, "SlicedStringMap"),
0x08441: (71, "SlicedAsciiStringMap"),
0x08469: (66, "ExternalStringMap"),
0x08491: (74, "ExternalStringWithOneByteDataMap"),
0x084b9: (70, "ExternalAsciiStringMap"),
0x084e1: (82, "ShortExternalStringMap"),
0x08509: (90, "ShortExternalStringWithOneByteDataMap"),
0x08531: (0, "InternalizedStringMap"),
0x08559: (1, "ConsInternalizedStringMap"),
0x08581: (5, "ConsAsciiInternalizedStringMap"),
0x085a9: (2, "ExternalInternalizedStringMap"),
0x085d1: (10, "ExternalInternalizedStringWithOneByteDataMap"),
0x085f9: (6, "ExternalAsciiInternalizedStringMap"),
0x08621: (18, "ShortExternalInternalizedStringMap"),
0x08649: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
0x08671: (22, "ShortExternalAsciiInternalizedStringMap"),
0x08699: (86, "ShortExternalAsciiStringMap"),
0x086c1: (64, "UndetectableStringMap"),
0x086e9: (68, "UndetectableAsciiStringMap"),
0x08711: (138, "ExternalByteArrayMap"),
0x08739: (139, "ExternalUnsignedByteArrayMap"),
0x08761: (140, "ExternalShortArrayMap"),
0x08789: (141, "ExternalUnsignedShortArrayMap"),
0x087b1: (142, "ExternalIntArrayMap"),
0x087d9: (143, "ExternalUnsignedIntArrayMap"),
0x08801: (144, "ExternalFloatArrayMap"),
0x08829: (145, "ExternalDoubleArrayMap"),
0x08851: (146, "ExternalPixelArrayMap"),
0x08879: (170, "NonStrictArgumentsElementsMap"),
0x088a1: (170, "FunctionContextMap"),
0x088c9: (170, "CatchContextMap"),
0x088f1: (170, "WithContextMap"),
0x08919: (170, "BlockContextMap"),
0x08941: (170, "ModuleContextMap"),
0x08969: (170, "GlobalContextMap"),
0x08991: (172, "JSMessageObjectMap"),
0x089b9: (135, "ForeignMap"),
0x089e1: (177, "NeanderMap"),
0x08a09: (161, "AllocationMementoMap"),
0x08a31: (160, "AllocationSiteMap"),
0x08a59: (164, "PolymorphicCodeCacheMap"),
0x08a81: (162, "ScriptMap"),
0x08ad1: (177, "ExternalMap"),
0x08af9: (167, "BoxMap"),
0x08b21: (149, "DeclaredAccessorDescriptorMap"),
0x08b49: (150, "DeclaredAccessorInfoMap"),
0x08b71: (151, "ExecutableAccessorInfoMap"),
0x08b99: (152, "AccessorPairMap"),
0x08bc1: (153, "AccessCheckInfoMap"),
0x08be9: (154, "InterceptorInfoMap"),
0x08c11: (155, "CallHandlerInfoMap"),
0x08c39: (156, "FunctionTemplateInfoMap"),
0x08c61: (157, "ObjectTemplateInfoMap"),
0x08c89: (158, "SignatureInfoMap"),
0x08cb1: (159, "TypeSwitchInfoMap"),
0x08cd9: (163, "CodeCacheMap"),
0x08d01: (165, "TypeFeedbackInfoMap"),
0x08d29: (166, "AliasedArgumentsEntryMap"),
0x08d51: (168, "DebugInfoMap"),
0x08d79: (169, "BreakPointInfoMap"),
}
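# Example cross-reference between the two tables above: KNOWN_MAPS[0x080a9] is
# (129, "MetaMap"), and INSTANCE_TYPES[129] is "MAP_TYPE".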
# List of known V8 objects.
KNOWN_OBJECTS = {
("OLD_POINTER_SPACE", 0x08081): "NullValue",
("OLD_POINTER_SPACE", 0x08091): "UndefinedValue",
("OLD_POINTER_SPACE", 0x080a1): "TheHoleValue",
("OLD_POINTER_SPACE", 0x080b1): "TrueValue",
("OLD_POINTER_SPACE", 0x080c1): "FalseValue",
("OLD_POINTER_SPACE", 0x080d1): "UninitializedValue",
("OLD_POINTER_SPACE", 0x080e1): "NoInterceptorResultSentinel",
("OLD_POINTER_SPACE", 0x080f1): "ArgumentsMarker",
("OLD_POINTER_SPACE", 0x08101): "NumberStringCache",
("OLD_POINTER_SPACE", 0x08909): "SingleCharacterStringCache",
("OLD_POINTER_SPACE", 0x08d11): "StringSplitCache",
("OLD_POINTER_SPACE", 0x09119): "RegExpMultipleCache",
("OLD_POINTER_SPACE", 0x09521): "TerminationException",
("OLD_POINTER_SPACE", 0x09531): "MessageListeners",
("OLD_POINTER_SPACE", 0x0954d): "CodeStubs",
("OLD_POINTER_SPACE", 0x0a9d9): "NonMonomorphicCache",
("OLD_POINTER_SPACE", 0x0afed): "PolymorphicCodeCache",
("OLD_POINTER_SPACE", 0x0aff5): "NativesSourceCache",
("OLD_POINTER_SPACE", 0x0b035): "EmptyScript",
("OLD_POINTER_SPACE", 0x0b06d): "IntrinsicFunctionNames",
("OLD_POINTER_SPACE", 0x0e089): "ObservationState",
("OLD_POINTER_SPACE", 0x0e095): "FrozenSymbol",
("OLD_POINTER_SPACE", 0x0e0a1): "ElementsTransitionSymbol",
("OLD_POINTER_SPACE", 0x0e0ad): "EmptySlowElementDictionary",
("OLD_POINTER_SPACE", 0x0e249): "ObservedSymbol",
("OLD_POINTER_SPACE", 0x274e9): "StringTable",
("OLD_DATA_SPACE", 0x08099): "EmptyDescriptorArray",
("OLD_DATA_SPACE", 0x080a1): "EmptyFixedArray",
("OLD_DATA_SPACE", 0x080a9): "NanValue",
("OLD_DATA_SPACE", 0x08141): "EmptyByteArray",
("OLD_DATA_SPACE", 0x08269): "EmptyExternalByteArray",
("OLD_DATA_SPACE", 0x08275): "EmptyExternalUnsignedByteArray",
("OLD_DATA_SPACE", 0x08281): "EmptyExternalShortArray",
("OLD_DATA_SPACE", 0x0828d): "EmptyExternalUnsignedShortArray",
("OLD_DATA_SPACE", 0x08299): "EmptyExternalIntArray",
("OLD_DATA_SPACE", 0x082a5): "EmptyExternalUnsignedIntArray",
("OLD_DATA_SPACE", 0x082b1): "EmptyExternalFloatArray",
("OLD_DATA_SPACE", 0x082bd): "EmptyExternalDoubleArray",
("OLD_DATA_SPACE", 0x082c9): "EmptyExternalPixelArray",
("OLD_DATA_SPACE", 0x082d5): "InfinityValue",
("OLD_DATA_SPACE", 0x082e1): "MinusZeroValue",
("CODE_SPACE", 0x10d01): "JsConstructEntryCode",
("CODE_SPACE", 0x183c1): "JsEntryCode",
}
| apache-2.0 | -4,956,309,752,017,681,000 | 40.352941 | 72 | 0.708108 | false |
eliran-stratoscale/rackattack-api | py/rackattack/tests/test_allocation.py | 2 | 3633 | import mock
import unittest
from rackattack.tcp import publish
from rackattack.tcp import suicide
from rackattack.tcp import allocation
from rackattack.tests import mock_pika
from rackattack.tests import fake_subscribe
from rackattack.tests import one_threaded_publish
class FakeIPCClient:
def __init__(self, hostsByAllocations):
self.hostsByAllocations = hostsByAllocations
self.isAllocationDone = {allocationID: False for allocationID in hostsByAllocations.keys()}
self.allocationDeath = {allocationID: None for allocationID in hostsByAllocations.keys()}
def call(self, method, *args, **kwargs):
method = getattr(self, method)
return method(*args, **kwargs)
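    # For example, call('allocation__done', 1) dispatches to self.allocation__done(1).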
def allocation__inauguratorsIDs(self, id):
return self.hostsByAllocations[id]
def allocation__done(self, id):
return self.isAllocationDone[id]
def allocation__dead(self, id):
return self.allocationDeath[id]
class Suicide(Exception):
pass
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.allocationID = 1
cls.hostsByAllocations = {cls.allocationID: dict(nodeAlpha="serverAlpha",
nodeBravo="serverBravo",
nodeCharlie="serverCharlie",
nodeDelta="serverDelta")}
cls.requirements = dict(nodeAlpha=None, nodeBravo=None, nodeCharlie=None, nodeDelta=None)
cls.heartbeat = mock.Mock()
suicide.killSelf = mock.Mock(side_effect=Suicide())
@classmethod
def tearDownClass(cls):
mock_pika.disableMockedPika(modules=[publish])
def setUp(self):
mock_pika.enableMockedPika(modules=[publish])
self.publish = one_threaded_publish.OneThreadedPublish(mock_pika.DEFAULT_AMQP_URL)
self.ipcClient = FakeIPCClient(self.hostsByAllocations)
self.subscribe = fake_subscribe.SubscribeMock(amqpURL=mock_pika.DEFAULT_AMQP_URL)
self._continueWithServer()
self.tested = allocation.Allocation(self.allocationID,
self.requirements,
self.ipcClient,
self.subscribe,
self.heartbeat)
def test_RegisterForAllocation(self):
self.assertIn(self.allocationID, self.subscribe.allocationsCallbacks)
def test_ReceiveAllocationDoneMessage(self):
self.publish.allocationDone(self.allocationID)
self._continueWithServer()
self.tested.wait(timeout=0)
self.ipcClient.isAllocationDone[self.allocationID] = True
self.assertTrue(self.tested.done())
def test_AllocationProviderMessage(self):
self.publish.allocationProviderMessage(self.allocationID, "'sup")
self._continueWithServer()
def test_ReceiveAllocationDeathMessage(self):
self.publish.allocationDied(self.allocationID, reason="freed", message="hi")
self._continueWithServer()
self.ipcClient.allocationDeath[self.allocationID] = "freed"
self.assertRaises(Exception, self.tested.wait, timeout=0)
def test_ReceiveAllocationWithdrawlMessage(self):
self.publish.allocationDied(self.allocationID, reason="withdrawn", message="hi")
self.assertRaises(Suicide, self._continueWithServer)
def _continueWithServer(self):
self.publish.continueWithServer()
self.subscribe.continue_with_thread()
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 4,197,909,182,829,005,000 | 38.48913 | 99 | 0.652078 | false |
brev/nupic | src/nupic/datafiles/extra/secondOrder/makeDataset.py | 34 | 18958 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file_record_stream import FileRecordStream
def _generateModel0(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model0'. For this model, we generate the following
set of sequences:
1-2-3 (4X)
1-2-4 (1X)
5-2-3 (1X)
5-2-4 (4X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# ===============================================================
# Let's model the following:
# a-b-c (4X)
# a-b-d (1X)
# e-b-c (1X)
# e-b-d (4X)
# --------------------------------------------------------------------
# Initial probabilities, 'a' and 'e' equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[4] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 'a' and 'e' should lead to 'b'
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 4:
probs.fill(0)
probs[1] = 1.0 # lead only to b
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# a-b should lead to c 80% and d 20%
# e-b should lead to c 20% and d 80%
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,1]):
probs.fill(0)
probs[2] = 0.80 # 'ab' leads to 'c' 80% of the time
probs[3] = 0.20 # 'ab' leads to 'd' 20% of the time
elif key == str([4,1]):
probs.fill(0)
probs[2] = 0.20 # 'eb' leads to 'c' 20% of the time
probs[3] = 0.80 # 'eb' leads to 'd' 80% of the time
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3)
def _generateModel1(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model1'. For this model, we generate the following
set of sequences:
0-10-15 (1X)
0-11-16 (1X)
0-12-17 (1X)
0-13-18 (1X)
0-14-19 (1X)
1-10-20 (1X)
1-11-21 (1X)
1-12-22 (1X)
1-13-23 (1X)
1-14-24 (1X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# --------------------------------------------------------------------
# Initial probabilities, 0 and 1 equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[1] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 0 and 1 should lead to 10,11,12,13,14 with equal probability
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 1:
indices = numpy.array([10,11,12,13,14])
probs.fill(0)
      probs[indices] = 1.0 # lead only to categories 10..14
probs /= probs.sum()
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# 0-10 should lead to 15
# 0-11 to 16
# ...
# 1-10 should lead to 20
  # 1-11 should lead to 21
# ...
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,10]):
probs.fill(0)
probs[15] = 1
elif key == str([0,11]):
probs.fill(0)
probs[16] = 1
elif key == str([0,12]):
probs.fill(0)
probs[17] = 1
elif key == str([0,13]):
probs.fill(0)
probs[18] = 1
elif key == str([0,14]):
probs.fill(0)
probs[19] = 1
elif key == str([1,10]):
probs.fill(0)
probs[20] = 1
elif key == str([1,11]):
probs.fill(0)
probs[21] = 1
elif key == str([1,12]):
probs.fill(0)
probs[22] = 1
elif key == str([1,13]):
probs.fill(0)
probs[23] = 1
elif key == str([1,14]):
probs.fill(0)
probs[24] = 1
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3)
def _generateModel2(numCategories, alpha=0.25):
""" Generate the initial, first order, and second order transition
probabilities for 'model2'. For this model, we generate peaked random
transitions using dirichlet distributions.
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
alpha: Determines the peakedness of the transitions. Low alpha
values (alpha=0.01) place the entire weight on a single
                      transition. Large alpha values (alpha=10) distribute the weight
                      evenly among all transitions. Intermediate values (alpha=0.5)
                      give moderately peaked transitions.
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table. None means infinite
length.
Here is an example of some return values for an intermediate alpha value:
initProb: [0.33, 0.33, 0.33]
firstOrder: {'[0]': [0.2, 0.7, 0.1],
'[1]': [0.1, 0.1, 0.8],
'[2]': [0.1, 0.0, 0.9]}
secondOrder: {'[0,0]': [0.1, 0.0, 0.9],
'[0,1]': [0.0, 0.2, 0.8],
'[0,2]': [0.1, 0.8, 0.1],
...
'[2,2]': [0.8, 0.2, 0.0]}
"""
# --------------------------------------------------------------------
# All initial probabilities, are equally likely
initProb = numpy.ones(numCategories)/numCategories
def generatePeakedProbabilities(lastIdx,
numCategories=numCategories,
alpha=alpha):
probs = numpy.random.dirichlet(alpha=[alpha]*numCategories)
probs[lastIdx] = 0.0
probs /= probs.sum()
return probs
# --------------------------------------------------------------------
# 1st order transitions
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = generatePeakedProbabilities(catIdx)
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = generatePeakedProbabilities(secondIdx)
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, None)
def _generateFile(filename, numRecords, categoryList, initProb,
firstOrderProb, secondOrderProb, seqLen, numNoise=0, resetsEvery=None):
""" Generate a set of records reflecting a set of probabilities.
Parameters:
----------------------------------------------------------------
filename: name of .csv file to generate
numRecords: number of records to generate
categoryList: list of category names
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrderProb: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrderProb: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table. None means infinite
length.
numNoise: Number of noise elements to place between each
sequence. The noise elements are evenly distributed from
all categories.
resetsEvery: If not None, generate a reset every N records
Here is an example of some parameters:
categoryList: ['cat1', 'cat2', 'cat3']
initProb: [0.7, 0.2, 0.1]
firstOrderProb: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrderProb: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# Create the file
print "Creating %s..." % (filename)
fields = [('reset', 'int', 'R'), ('name', 'string', '')]
outFile = FileRecordStream(filename, write=True, fields=fields)
# --------------------------------------------------------------------
  # Convert the probability tables into cumulative probabilities
initCumProb = initProb.cumsum()
firstOrderCumProb = dict()
for (key,value) in firstOrderProb.iteritems():
firstOrderCumProb[key] = value.cumsum()
secondOrderCumProb = dict()
for (key,value) in secondOrderProb.iteritems():
secondOrderCumProb[key] = value.cumsum()
# --------------------------------------------------------------------
# Write out the sequences
elementsInSeq = []
numElementsSinceReset = 0
maxCatIdx = len(categoryList) - 1
for i in xrange(numRecords):
# Generate a reset?
if numElementsSinceReset == 0:
reset = 1
else:
reset = 0
    # Pick the next element, based on how far we are into the 2nd order
# sequence.
rand = numpy.random.rand()
if len(elementsInSeq) == 0:
catIdx = numpy.searchsorted(initCumProb, rand)
elif len(elementsInSeq) == 1:
catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)], rand)
elif (len(elementsInSeq) >=2) and \
(seqLen is None or len(elementsInSeq) < seqLen-numNoise):
catIdx = numpy.searchsorted(secondOrderCumProb[str(elementsInSeq[-2:])], rand)
else: # random "noise"
catIdx = numpy.random.randint(len(categoryList))
# Write out the record
catIdx = min(maxCatIdx, catIdx)
outFile.appendRecord([reset,categoryList[catIdx]])
#print categoryList[catIdx]
# ------------------------------------------------------------
# Increment counters
elementsInSeq.append(catIdx)
numElementsSinceReset += 1
# Generate another reset?
if resetsEvery is not None and numElementsSinceReset == resetsEvery:
numElementsSinceReset = 0
elementsInSeq = []
# Start another 2nd order sequence?
if seqLen is not None and (len(elementsInSeq) == seqLen+numNoise):
elementsInSeq = []
outFile.close()
def generate(model, filenameTrain, filenameTest, filenameCategory,
numCategories=178, numTrainingRecords=1000,
numTestingRecords=100, numNoise=5, resetsEvery=None):
numpy.random.seed(41)
# =====================================================================
# Create our categories and category file.
print "Creating %s..." % (filenameCategory)
categoryList = ['cat%d' % i for i in range(1, numCategories+1)]
categoryFile = open(filenameCategory, 'w')
for category in categoryList:
categoryFile.write(category+'\n')
categoryFile.close()
# ====================================================================
# Generate the model
if model == 'model0':
(initProb, firstOrderProb, secondOrderProb, seqLen) = \
_generateModel0(numCategories)
elif model == 'model1':
(initProb, firstOrderProb, secondOrderProb, seqLen) = \
_generateModel1(numCategories)
elif model == 'model2':
(initProb, firstOrderProb, secondOrderProb, seqLen) = \
_generateModel2(numCategories)
else:
raise RuntimeError("Unsupported model")
# ====================================================================
# Generate the training and testing files
_generateFile(filename=filenameTrain, numRecords=numTrainingRecords,
categoryList=categoryList, initProb=initProb,
firstOrderProb=firstOrderProb, secondOrderProb=secondOrderProb,
seqLen=seqLen, numNoise=numNoise, resetsEvery=resetsEvery)
_generateFile(filename=filenameTest, numRecords=numTestingRecords,
categoryList=categoryList, initProb=initProb,
firstOrderProb=firstOrderProb, secondOrderProb=secondOrderProb,
seqLen=seqLen, numNoise=numNoise, resetsEvery=resetsEvery)
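# Example usage sketch: the model name, output filenames and record counts below
# are illustrative assumptions only, not values taken from a specific experiment.
if __name__ == '__main__':
  generate(model='model1',
           filenameTrain='model1_train.csv',
           filenameTest='model1_test.csv',
           filenameCategory='model1_categories.txt',
           numCategories=25,
           numTrainingRecords=1000,
           numTestingRecords=100,
           numNoise=0,
           resetsEvery=None)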
| agpl-3.0 | 3,369,946,948,047,082,500 | 36.540594 | 84 | 0.514453 | false |
coinbox/coinbox-mod-stock | cbmod/stock/controllers/form.py | 1 | 2414 | import cbpos
from cbmod.stock.models import Product, Category
from cbmod.base.controllers import FormController
class CategoriesFormController(FormController):
cls = Category
def fields(self):
return {"name": (cbpos.tr.stock_("Name"), ""),
"parent": (cbpos.tr.stock_("Parent Category"), None),
"image": (cbpos.tr.stock_("Image"), None),
}
def items(self):
session = cbpos.database.session()
return session.query(Category)
def canDeleteItem(self, item):
session = cbpos.database.session()
# Check if it has a sub-category
category_count = session.query(Category).filter(Category.parent == item).count()
if category_count != 0:
return False
# Check if it has products
product_count = session.query(Product).filter(Product.category == item).count()
if product_count != 0:
return False
# If not we can delete it
return True
def canEditItem(self, item):
return True
def canAddItem(self):
return True
def getDataFromItem(self, field, item):
return getattr(item, field)
class ProductsFormController(FormController):
cls = Product
def fields(self):
import cbmod.currency.controllers as currency
return {"name": (cbpos.tr.stock_("Name"), ""),
"description": (cbpos.tr.stock_("Description"), ""),
"reference": (cbpos.tr.stock_("Reference"), ""),
"code": (cbpos.tr.stock_("Code"), ""),
"price": (cbpos.tr.stock_("Price"), 0),
"currency": (cbpos.tr.stock_("Currency"), currency.default),
"in_stock": (cbpos.tr.stock_("In Stock"), True),
"quantity": (cbpos.tr.stock_("Quantity"), 0),
"category": (cbpos.tr.stock_("Category"), None),
"image": (cbpos.tr.stock_("Image"), None),
}
def items(self):
session = cbpos.database.session()
return session.query(Product)
def canDeleteItem(self, item):
return True
def canEditItem(self, item):
return True
def canAddItem(self):
return True
def getDataFromItem(self, field, item):
return getattr(item, field)
| mit | 1,218,395,910,546,391,600 | 29.948718 | 88 | 0.55261 | false |
jsma/django-cms | cms/tests/test_rendering.py | 2 | 25139 | # -*- coding: utf-8 -*-
from django.core.cache import cache
from django.test.utils import override_settings
from sekizai.context import SekizaiContext
from cms import plugin_rendering
from cms.api import create_page, add_plugin
from cms.cache.placeholder import get_placeholder_cache
from cms.models import Page, Placeholder, CMSPlugin
from cms.plugin_rendering import PluginContext
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.testcases import CMSTestCase
from cms.toolbar.toolbar import CMSToolbar
from cms.views import details
TEMPLATE_NAME = 'tests/rendering/base.html'
def sample_plugin_processor(instance, placeholder, rendered_content, original_context):
original_context_var = original_context['original_context_var']
return '%s|test_plugin_processor_ok|%s|%s|%s' % (
rendered_content,
instance.body,
placeholder.slot,
original_context_var
)
def sample_plugin_context_processor(instance, placeholder, original_context):
content = 'test_plugin_context_processor_ok|' + instance.body + '|' + \
placeholder.slot + '|' + original_context['original_context_var']
return {
'test_plugin_context_processor': content,
}
@override_settings(
CMS_TEMPLATES=[(TEMPLATE_NAME, TEMPLATE_NAME), ('extra_context.html', 'extra_context.html')],
)
class RenderingTestCase(CMSTestCase):
def setUp(self):
super(RenderingTestCase, self).setUp()
self.test_user = self._create_user("test", True, True)
with self.login_user_context(self.test_user):
self.test_data = {
'title': u'RenderingTestCase-title',
'slug': u'renderingtestcase-slug',
'reverse_id': u'renderingtestcase-reverse-id',
'text_main': u'RenderingTestCase-main',
'text_sub': u'RenderingTestCase-sub',
}
self.test_data2 = {
'title': u'RenderingTestCase-title2',
'slug': u'RenderingTestCase-slug2',
'reverse_id': u'renderingtestcase-reverse-id2',
}
self.test_data3 = {
'title': u'RenderingTestCase-title3',
'slug': u'RenderingTestCase-slug3',
'reverse_id': u'renderingtestcase-reverse-id3',
'text_sub': u'RenderingTestCase-sub3',
}
self.test_data4 = {
'title': u'RenderingTestCase-title3',
'no_extra': u'no extra var!',
'placeholderconf': {'extra_context': {'extra_context': {'extra_var': 'found extra var'}}},
'extra': u'found extra var',
}
self.test_data5 = {
'title': u'RenderingTestCase-title5',
'slug': u'RenderingTestCase-slug5',
'reverse_id': u'renderingtestcase-reverse-id5',
'text_main': u'RenderingTestCase-main-page5',
'text_sub': u'RenderingTestCase-sub5',
}
self.test_data6 = {
'title': u'RenderingTestCase-title6',
'slug': u'RenderingTestCase-slug6',
'reverse_id': u'renderingtestcase-reverse-id6',
'text_sub': u'RenderingTestCase-sub6',
}
self.insert_test_content()
def insert_test_content(self):
# Insert a page
p = create_page(self.test_data['title'], TEMPLATE_NAME, 'en',
slug=self.test_data['slug'], created_by=self.test_user,
reverse_id=self.test_data['reverse_id'], published=True)
# Placeholders have been inserted on post_save signal:
self.test_placeholders = {}
for placeholder in p.placeholders.all():
self.test_placeholders[placeholder.slot] = placeholder
# Insert some test Text plugins
add_plugin(self.test_placeholders['main'], 'TextPlugin', 'en',
body=self.test_data['text_main'])
add_plugin(self.test_placeholders['sub'], 'TextPlugin', 'en',
body=self.test_data['text_sub'])
p.publish('en')
# Insert another page that is not the home page
p2 = create_page(self.test_data2['title'], TEMPLATE_NAME, 'en',
parent=p, slug=self.test_data2['slug'], published=True,
reverse_id=self.test_data2['reverse_id'])
p2.publish('en')
# Insert another page that is not the home page
p3 = create_page(self.test_data3['title'], TEMPLATE_NAME, 'en',
slug=self.test_data3['slug'], parent=p2,
reverse_id=self.test_data3['reverse_id'], published=True)
# Placeholders have been inserted on post_save signal:
self.test_placeholders3 = {}
for placeholder in p3.placeholders.all():
self.test_placeholders3[placeholder.slot] = placeholder
# # Insert some test Text plugins
add_plugin(self.test_placeholders3['sub'], 'TextPlugin', 'en',
body=self.test_data3['text_sub'])
p3.publish('en')
        # Insert another page that is not the home page
p4 = create_page(self.test_data4['title'], 'extra_context.html', 'en', parent=p)
# Placeholders have been inserted on post_save signal:
self.test_placeholders4 = {}
for placeholder in p4.placeholders.all():
self.test_placeholders4[placeholder.slot] = placeholder
# Insert some test plugins
add_plugin(self.test_placeholders4['extra_context'], 'ExtraContextPlugin', 'en')
p4.publish('en')
# Insert another page that is not the home page
p5 = create_page(self.test_data5['title'], TEMPLATE_NAME, 'en',
parent=p, slug=self.test_data5['slug'], published=True,
reverse_id=self.test_data5['reverse_id'])
# Placeholders have been inserted on post_save signal:
self.test_placeholders5 = {}
for placeholder in p5.placeholders.all():
self.test_placeholders5[placeholder.slot] = placeholder
# # Insert some test Text plugins
add_plugin(self.test_placeholders5['sub'], 'TextPlugin', 'en',
body=self.test_data5['text_sub'])
add_plugin(self.test_placeholders5['main'], 'TextPlugin', 'en',
body=self.test_data5['text_main'])
p5.publish('en')
# Insert another page that is not the home page
p6 = create_page(self.test_data6['title'], TEMPLATE_NAME, 'en',
slug=self.test_data6['slug'], parent=p5,
reverse_id=self.test_data6['reverse_id'], published=True)
# Placeholders have been inserted on post_save signal:
self.test_placeholders6 = {}
for placeholder in p6.placeholders.all():
self.test_placeholders6[placeholder.slot] = placeholder
# # Insert some test Text plugins
add_plugin(self.test_placeholders6['sub'], 'TextPlugin', 'en',
body=self.test_data6['text_sub'])
p6.publish('en')
# Reload test pages
self.test_page = self.reload(p.publisher_public)
self.test_page2 = self.reload(p2.publisher_public)
self.test_page3 = self.reload(p3.publisher_public)
self.test_page4 = self.reload(p4.publisher_public)
self.test_page5 = self.reload(p5.publisher_public)
self.test_page6 = self.reload(p6.publisher_public)
def strip_rendered(self, content):
return content.strip().replace(u"\n", u"")
@override_settings(CMS_TEMPLATES=[(TEMPLATE_NAME, '')])
def render(self, template, page, context_vars={}):
request = self.get_request(page=page)
output = self.render_template_obj(template, context_vars, request)
return self.strip_rendered(output)
@override_settings(CMS_TEMPLATES=[(TEMPLATE_NAME, '')])
def test_details_view(self):
"""
Tests that the `detail` view is working.
"""
response = details(self.get_request(page=self.test_page), '')
response.render()
r = self.strip_rendered(response.content.decode('utf8'))
self.assertEqual(r, u'|' + self.test_data['text_main'] + u'|' + self.test_data['text_sub'] + u'|')
@override_settings(
CMS_PLUGIN_PROCESSORS=('cms.tests.test_rendering.sample_plugin_processor',),
CMS_PLUGIN_CONTEXT_PROCESSORS=('cms.tests.test_rendering.sample_plugin_context_processor',),
)
def test_processors(self):
"""
Tests that default plugin context processors are working, that plugin processors and plugin context processors
can be defined in settings and are working and that extra plugin context processors can be passed to PluginContext.
"""
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from cms.plugin_pool import plugin_pool
instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0]
load_from_string = self.load_template_from_string
@plugin_pool.register_plugin
class ProcessorTestPlugin(TextPlugin):
name = "Test Plugin"
def get_render_template(self, context, instance, placeholder):
t = u'{% load cms_tags %}' + \
u'{{ plugin.counter }}|{{ plugin.instance.body }}|{{ test_passed_plugin_context_processor }}|' \
u'{{ test_plugin_context_processor }}'
return load_from_string(t)
def test_passed_plugin_context_processor(instance, placeholder, context):
return {'test_passed_plugin_context_processor': 'test_passed_plugin_context_processor_ok'}
instance.plugin_type = 'ProcessorTestPlugin'
instance._inst = instance
context = PluginContext({'original_context_var': 'original_context_var_ok'}, instance,
self.test_placeholders['main'], processors=(test_passed_plugin_context_processor,))
plugin_rendering._standard_processors = {}
content_renderer = self.get_content_renderer()
c = content_renderer.render_plugins([instance], context, self.test_placeholders['main'])
r = "".join(c)
expected = (
self.test_data['text_main'] + '|test_passed_plugin_context_processor_ok|test_plugin_context_processor_ok|' +
self.test_data['text_main'] + '|main|original_context_var_ok|test_plugin_processor_ok|' +
self.test_data['text_main'] + '|main|original_context_var_ok'
)
expected = u'1|' + expected
self.assertEqual(r, expected)
plugin_rendering._standard_processors = {}
def test_placeholder(self):
"""
Tests the {% placeholder %} templatetag.
"""
t = u'{% load cms_tags %}' + \
u'|{% placeholder "main" %}|{% placeholder "empty" %}'
r = self.render(t, self.test_page)
self.assertEqual(r, u'|' + self.test_data['text_main'] + '|')
def test_placeholder_extra_context(self):
t = u'{% load cms_tags %}{% placeholder "extra_context" %}'
r = self.render(t, self.test_page4)
self.assertEqual(r, self.test_data4['no_extra'])
cache.clear()
with self.settings(CMS_PLACEHOLDER_CONF=self.test_data4['placeholderconf']):
r = self.render(t, self.test_page4)
self.assertEqual(r, self.test_data4['extra'])
def test_placeholder_or(self):
"""
Tests the {% placeholder %} templatetag.
"""
t = u'{% load cms_tags %}' + \
u'|{% placeholder "empty" or %}No content{% endplaceholder %}'
r = self.render(t, self.test_page)
self.assertEqual(r, u'|No content')
def test_render_placeholder_tag(self):
"""
Tests the {% render_placeholder %} templatetag.
"""
render_placeholder_body = "I'm the render placeholder body"
ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3",
char_4="char_4")
ex1.save()
add_plugin(ex1.placeholder, u"TextPlugin", u"en", body=render_placeholder_body)
t = '''{% extends "base.html" %}
{% load cms_tags %}
{% block content %}
<h1>{% render_placeholder ex1.placeholder %}</h1>
<h2>{% render_placeholder ex1.placeholder as tempvar %}</h2>
<h3>{{ tempvar }}</h3>
{% endblock content %}
'''
r = self.render(t, self.test_page, {'ex1': ex1})
self.assertIn(
'<h1>%s</h1>' % render_placeholder_body,
r
)
self.assertIn(
'<h2></h2>',
r
)
self.assertIn(
'<h3>%s</h3>' % render_placeholder_body,
r
)
def test_render_uncached_placeholder_tag(self):
"""
Tests the {% render_uncached_placeholder %} templatetag.
"""
render_uncached_placeholder_body = "I'm the render uncached placeholder body"
ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3",
char_4="char_4")
ex1.save()
add_plugin(ex1.placeholder, u"TextPlugin", u"en", body=render_uncached_placeholder_body)
t = '''{% extends "base.html" %}
{% load cms_tags %}
{% block content %}
<h1>{% render_uncached_placeholder ex1.placeholder %}</h1>
<h2>{% render_uncached_placeholder ex1.placeholder as tempvar %}</h2>
<h3>{{ tempvar }}</h3>
{% endblock content %}
'''
r = self.render(t, self.test_page, {'ex1': ex1})
self.assertIn(
'<h1>%s</h1>' % render_uncached_placeholder_body,
r
)
self.assertIn(
'<h2></h2>',
r
)
self.assertIn(
'<h3>%s</h3>' % render_uncached_placeholder_body,
r
)
def test_render_uncached_placeholder_tag_no_use_cache(self):
"""
Tests that {% render_uncached_placeholder %} does not populate cache.
"""
render_uncached_placeholder_body = "I'm the render uncached placeholder body"
ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3",
char_4="char_4")
ex1.save()
request = self.get_request('/')
add_plugin(ex1.placeholder, u"TextPlugin", u"en", body=render_uncached_placeholder_body)
template = '{% load cms_tags %}<h1>{% render_uncached_placeholder ex1.placeholder %}</h1>'
cache_value_before = get_placeholder_cache(ex1.placeholder, 'en', 1, request)
self.render(template, self.test_page, {'ex1': ex1})
cache_value_after = get_placeholder_cache(ex1.placeholder, 'en', 1, request)
self.assertEqual(cache_value_before, cache_value_after)
self.assertIsNone(cache_value_after)
def test_render_placeholder_tag_use_cache(self):
"""
Tests that {% render_placeholder %} populates cache.
"""
render_placeholder_body = "I'm the render placeholder body"
ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3",
char_4="char_4")
ex1.save()
request = self.get_request('/')
add_plugin(ex1.placeholder, u"TextPlugin", u"en", body=render_placeholder_body)
template = '{% load cms_tags %}<h1>{% render_placeholder ex1.placeholder %}</h1>'
cache_value_before = get_placeholder_cache(ex1.placeholder, 'en', 1, request)
self.render(template, self.test_page, {'ex1': ex1})
cache_value_after = get_placeholder_cache(ex1.placeholder, 'en', 1, request)
self.assertNotEqual(cache_value_before, cache_value_after)
self.assertIsNone(cache_value_before)
self.assertIsNotNone(cache_value_after)
def test_show_placeholder(self):
"""
Tests the {% show_placeholder %} templatetag, using lookup by pk/dict/reverse_id and passing a Page object.
"""
t = u'{% load cms_tags %}' + \
u'|{% show_placeholder "main" ' + str(self.test_page.pk) + ' %}' + \
u'|{% show_placeholder "main" test_dict %}' + \
u'|{% show_placeholder "sub" "' + str(self.test_page.reverse_id) + '" %}' + \
u'|{% show_placeholder "sub" test_page %}'
r = self.render(t, self.test_page, {'test_page': self.test_page, 'test_dict': {'pk': self.test_page.pk}})
self.assertEqual(r, (u'|' + self.test_data['text_main']) * 2 + (u'|' + self.test_data['text_sub']) * 2)
def test_show_placeholder_extra_context(self):
t = u'{% load cms_tags %}{% show_uncached_placeholder "extra_context" ' + str(self.test_page4.pk) + ' %}'
r = self.render(t, self.test_page4)
self.assertEqual(r, self.test_data4['no_extra'])
cache.clear()
with self.settings(CMS_PLACEHOLDER_CONF=self.test_data4['placeholderconf']):
r = self.render(t, self.test_page4)
self.assertEqual(r, self.test_data4['extra'])
def test_show_uncached_placeholder_by_pk(self):
"""
Tests the {% show_uncached_placeholder %} templatetag, using lookup by pk.
"""
template = u'{%% load cms_tags %%}{%% show_uncached_placeholder "main" %s %%}' % self.test_page.pk
output = self.render(template, self.test_page)
self.assertEqual(output, self.test_data['text_main'])
def test_show_uncached_placeholder_by_lookup_dict(self):
template = u'{% load cms_tags %}{% show_uncached_placeholder "main" test_dict %}'
output = self.render(template, self.test_page, {'test_dict': {'pk': self.test_page.pk}})
self.assertEqual(output, self.test_data['text_main'])
def test_show_uncached_placeholder_by_reverse_id(self):
template = u'{%% load cms_tags %%}{%% show_uncached_placeholder "sub" "%s" %%}' % self.test_page.reverse_id
output = self.render(template, self.test_page)
self.assertEqual(output, self.test_data['text_sub'])
def test_show_uncached_placeholder_by_page(self):
template = u'{% load cms_tags %}{% show_uncached_placeholder "sub" test_page %}'
output = self.render(template, self.test_page, {'test_page': self.test_page})
self.assertEqual(output, self.test_data['text_sub'])
def test_show_uncached_placeholder_tag_no_use_cache(self):
"""
Tests that {% show_uncached_placeholder %} does not populate cache.
"""
template = '{% load cms_tags %}<h1>{% show_uncached_placeholder "sub" test_page %}</h1>'
placeholder = self.test_page.placeholders.get(slot='sub')
request = self.get_request(page=self.test_page)
cache_value_before = get_placeholder_cache(placeholder, 'en', 1, request)
output = self.render(template, self.test_page, {'test_page': self.test_page})
cache_value_after = get_placeholder_cache(placeholder, 'en', 1, request)
self.assertEqual(output, '<h1>%s</h1>' % self.test_data['text_sub'])
self.assertEqual(cache_value_before, cache_value_after)
self.assertIsNone(cache_value_after)
def test_page_url_by_pk(self):
template = u'{%% load cms_tags %%}{%% page_url %s %%}' % self.test_page2.pk
output = self.render(template, self.test_page)
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_dictionary(self):
template = u'{% load cms_tags %}{% page_url test_dict %}'
output = self.render(template, self.test_page, {'test_dict': {'pk': self.test_page2.pk}})
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_reverse_id(self):
template = u'{%% load cms_tags %%}{%% page_url "%s" %%}' % self.test_page2.reverse_id
output = self.render(template, self.test_page)
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_reverse_id_not_on_a_page(self):
template = u'{%% load cms_tags %%}{%% page_url "%s" %%}' % self.test_page2.reverse_id
output = self.render(template, None)
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_page(self):
template = u'{% load cms_tags %}{% page_url test_page %}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_page_as(self):
template = u'{% load cms_tags %}{% page_url test_page as test_url %}{{ test_url }}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, self.test_page2.get_absolute_url())
#
# To ensure compatible behaviour, test that page_url swallows any
# Page.DoesNotExist exceptions when NOT in DEBUG mode.
#
@override_settings(DEBUG=False)
def test_page_url_on_bogus_page(self):
template = u'{% load cms_tags %}{% page_url "bogus_page" %}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, '')
#
# To ensure compatible behaviour, test that page_url will raise a
# Page.DoesNotExist exception when the page argument does not eval to a
# valid page
#
@override_settings(DEBUG=True)
def test_page_url_on_bogus_page_in_debug(self):
template = u'{% load cms_tags %}{% page_url "bogus_page" %}'
self.assertRaises(
Page.DoesNotExist,
self.render,
template,
self.test_page,
{'test_page': self.test_page2}
)
#
# In the 'as varname' form, ensure that the tag will always swallow
# Page.DoesNotExist exceptions both when DEBUG is False and...
#
@override_settings(DEBUG=False)
def test_page_url_as_on_bogus_page(self):
template = u'{% load cms_tags %}{% page_url "bogus_page" as test_url %}{{ test_url }}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, '')
#
# ...when it is True.
#
@override_settings(DEBUG=True)
def test_page_url_as_on_bogus_page_in_debug(self):
template = u'{% load cms_tags %}{% page_url "bogus_page" as test_url %}{{ test_url }}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, '')
def test_page_attribute(self):
"""
Tests the {% page_attribute %} templatetag, using current page, lookup by pk/dict/reverse_id and passing a Page object.
"""
t = u'{% load cms_tags %}' + \
u'|{% page_attribute title %}' + \
u'{% page_attribute title as title %}' + \
u'|{{ title }}' + \
u'|{% page_attribute title ' + str(self.test_page2.pk) + ' %}' + \
u'{% page_attribute title ' + str(self.test_page2.pk) + ' as title %}' + \
u'|{{ title }}' + \
u'|{% page_attribute title test_dict %}' + \
u'{% page_attribute title test_dict as title %}' + \
u'|{{ title }}' + \
u'|{% page_attribute slug "' + str(self.test_page2.reverse_id) + '" %}' + \
u'{% page_attribute slug "' + str(self.test_page2.reverse_id) + '" as slug %}' + \
u'|{{ slug }}' + \
u'|{% page_attribute slug test_page %}' + \
u'{% page_attribute slug test_page as slug %}' + \
u'|{{ slug }}'
r = self.render(t, self.test_page, {'test_page': self.test_page2, 'test_dict': {'pk': self.test_page2.pk}})
self.assertEqual(r, (u'|' + self.test_data['title']) * 2 + (u'|' + self.test_data2['title']) * 4 + (
u'|' + self.test_data2['slug']) * 4)
def test_inherit_placeholder(self):
t = u'{% load cms_tags %}' + \
u'|{% placeholder "main" inherit %}|{% placeholder "sub" %}'
# a page whose parent has no 'main' placeholder inherits from ancestors
r = self.render(t, self.test_page3)
self.assertEqual(r, u'|' + self.test_data['text_main'] + '|' + self.test_data3['text_sub'])
# a page whose parent has 'main' placeholder inherits from the parent, not ancestors
r = self.render(t, self.test_page6)
self.assertEqual(r, u'|' + self.test_data5['text_main'] + '|' + self.test_data6['text_sub'])
def test_render_placeholder_toolbar(self):
placeholder = Placeholder()
placeholder.slot = 'test'
placeholder.pk = placeholder.id = 99
request = self.get_request(page=None)
request.toolbar = CMSToolbar(request)
content_renderer = self.get_content_renderer(request)
context = SekizaiContext()
context['request'] = request
context['cms_content_renderer'] = content_renderer
classes = [
"cms-placeholder-%s" % placeholder.pk,
'cms-placeholder',
]
output = content_renderer.render_editable_placeholder(placeholder, context, 'en')
for cls in classes:
self.assertTrue(cls in output, '%r is not in %r' % (cls, output))
| bsd-3-clause | -4,624,720,984,612,192,000 | 43.731317 | 127 | 0.594176 | false |
hocinebendou/bika.gsoc | bika/lims/content/storagelevel.py | 1 | 3787 | from AccessControl import ClassSecurityInfo
from bika.lims import bikaMessageFactory as _
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaFolderSchema
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.interfaces import IStorageLevel
from bika.lims.interfaces import IStorageLevelIsAssignable
from bika.lims.utils import t
from plone.app.folder.folder import ATFolder
from Products.Archetypes.public import *
from Products.CMFCore.permissions import View, ModifyPortalContent
from zope.interface import alsoProvides, noLongerProvides
from zope.interface import implements
from Acquisition import aq_chain
schema = BikaFolderSchema.copy() + BikaSchema.copy() + Schema((
ComputedField(
'ParentUID',
expression='context.aq_parent.UID()',
widget=ComputedWidget(
visible=False,
),
),
BooleanField(
'HasChildren',
default=False,
widget=BooleanWidget(visible=False),
),
IntegerField(
'NumberOfAvailableChildren',
default=0,
widget=IntegerWidget(visible=False)
),
ComputedField(
'StorageLevelID',
expression='context.getId()',
widget=ComputedWidget(visible=False),
),
BooleanField(
'IsOccupied',
default=0,
widget=BooleanWidget(visible=False),
),
StringField(
'StockItemID',
widget=StringWidget(visible=False),
),
ComputedField(
'Hierarchy',
expression='context.getHierarchy()',
widget=ComputedWidget(visible=False,),
),
))
schema['description'].schemata = 'default'
schema['description'].widget.visible = True
class StorageLevel(ATFolder):
implements(IStorageLevel)
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def at_post_create_script(self):
# Jira LIMS-1961
if hasattr(self.aq_parent, 'getNumberOfAvailableChildren'):
number = self.aq_parent.getNumberOfAvailableChildren()
self.aq_parent.setNumberOfAvailableChildren(number + 1)
alsoProvides(self.aq_parent, IStorageLevelIsAssignable)
self.aq_parent.reindexObject(idxs=['object_provides'])
if hasattr(self.aq_parent, 'HasChildren') and not \
self.aq_parent.HasChildren:
self.aq_parent.setHasChildren(True)
grand_parent = self.aq_parent.aq_parent
if hasattr(self.aq_parent, 'aq_parent') and \
hasattr(grand_parent, 'getNumberOfAvailableChildren'):
number = grand_parent.getNumberOfAvailableChildren()
grand_parent.setNumberOfAvailableChildren(number - 1)
if number <= 1:
noLongerProvides(grand_parent, IStorageLevelIsAssignable)
grand_parent.reindexObject(idxs=['object_provides'])
def getHierarchy(self):
ancestors = []
ancestor = self
        # Walk the acquisition chain rather than looping indefinitely (the old loop is kept below for reference)
for obj in ancestor.aq_chain:
ancestors.append(obj.Title())
if obj.portal_type == 'StorageUnit' or \
obj.portal_type == 'Plone Site':
break
'''
while(1):
ancestors.append(ancestor.Title())
if ancestor.portal_type == 'StorageUnit' or ancestor.portal_type == 'Plone Site':
break
ancestor = ancestor.aq_parent
'''
return ' > '.join(reversed(ancestors))
registerType(StorageLevel, PROJECTNAME)
| mit | -6,424,093,236,299,220,000 | 32.219298 | 93 | 0.649327 | false |
sowe9385/qiime | scripts/principal_coordinates.py | 15 | 4906 | #!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Antonio Gonzalez Pena"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski", "Rob Knight", "Antonio Gonzalez Pena",
"Catherine Lozupone", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Antonio Gonzalez Pena"
__email__ = "[email protected]"
from os.path import exists, isdir, splitext, join
from os import makedirs, listdir
from qiime.util import (parse_command_line_parameters, get_options_lookup,
make_option)
from qiime.principal_coordinates import pcoa
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = "Principal Coordinates Analysis (PCoA)"
script_info['script_description'] = ("Principal Coordinate Analysis (PCoA) is "
"commonly used to compare groups of "
"samples based on phylogenetic or "
"count-based distance metrics (see "
"section on beta_diversity.py).")
script_info['script_usage'] = [
("PCoA (Single File)",
"For this script, the user supplies a distance matrix (i.e. resulting "
"file from beta_diversity.py), along with the output filename (e.g. "
"beta_div_coords.txt), as follows:",
"%prog -i beta_div.txt -o beta_div_coords.txt"),
("PCoA (Multiple Files):",
"The script also functions in batch mode if a folder is supplied as input"
" (e.g. from beta_diversity.py run in batch). No other files should be "
"present in the input folder - only the distance matrix files to be "
"analyzed. This script operates on every distance matrix file in the "
"input directory and creates a corresponding principal coordinates "
"results file in the output directory, e.g.:",
"%prog -i beta_div_weighted_unifrac/ -o beta_div_weighted_pcoa_results/")
]
script_info['output_description'] = ("The resulting output file consists of "
"the principal coordinate (PC) axes "
"(columns) for each sample (rows). "
"Pairs of PCs can then be graphed to view"
" the relationships between samples. The "
"bottom of the output file contains the "
"eigenvalues and % variation explained "
"for each PC. For more information of the"
" file format, check the "
"OrdinationResults class in the "
"scikit-bio package "
"(http://scikit-bio.org/)")
script_info['required_options'] = [
make_option('-i', '--input_path', type='existing_path',
help='path to the input distance matrix file(s) (i.e., the '
'output from beta_diversity.py). Is a directory for '
'batch processing and a filename for a single file '
'operation.'),
make_option('-o', '--output_path', type='new_path',
help='output path. directory for batch processing, filename '
'for single file operation'),
]
script_info['optional_options'] = []
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
input_path = opts.input_path
output_path = opts.output_path
if isdir(input_path):
# Run PCoA on all distance matrices in the input dir
# Create the output directory if it does not exists
if not exists(output_path):
makedirs(output_path)
# Get all the filenames present in the input directory
file_names = [fname for fname in listdir(input_path)
                      if not (fname.startswith('.') or isdir(join(input_path, fname)))]
# Loop through all the input files
for fname in file_names:
# Get the path to the input distance matrix
infile = join(input_path, fname)
# Run PCoA on the input distance matrix
with open(infile, 'U') as lines:
pcoa_scores = pcoa(lines)
# Store the PCoA results on the output directory
base_fname, ext = splitext(fname)
out_file = join(output_path, 'pcoa_%s.txt' % base_fname)
pcoa_scores.write(out_file)
else:
# Run PCoA on the input distance matrix
with open(input_path, 'U') as f:
pcoa_scores = pcoa(f)
# Store the results in the output file
pcoa_scores.write(output_path)
if __name__ == "__main__":
main()
| gpl-2.0 | 640,386,690,097,262,700 | 43.6 | 79 | 0.570934 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/artifact.py | 2 | 3290 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class Artifact(Resource):
"""An artifact.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The identifier of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:ivar title: The artifact's title.
:vartype title: str
:ivar description: The artifact's description.
:vartype description: str
:ivar publisher: The artifact's publisher.
:vartype publisher: str
:ivar file_path: The file path to the artifact.
:vartype file_path: str
:ivar icon: The URI to the artifact icon.
:vartype icon: str
:ivar target_os_type: The artifact's target OS.
:vartype target_os_type: str
:ivar parameters: The artifact's parameters.
:vartype parameters: object
:ivar created_date: The artifact's creation date.
:vartype created_date: datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'title': {'readonly': True},
'description': {'readonly': True},
'publisher': {'readonly': True},
'file_path': {'readonly': True},
'icon': {'readonly': True},
'target_os_type': {'readonly': True},
'parameters': {'readonly': True},
'created_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'title': {'key': 'properties.title', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'publisher': {'key': 'properties.publisher', 'type': 'str'},
'file_path': {'key': 'properties.filePath', 'type': 'str'},
'icon': {'key': 'properties.icon', 'type': 'str'},
'target_os_type': {'key': 'properties.targetOsType', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': 'object'},
'created_date': {'key': 'properties.createdDate', 'type': 'iso-8601'},
}
def __init__(self, location=None, tags=None):
super(Artifact, self).__init__(location=location, tags=tags)
self.title = None
self.description = None
self.publisher = None
self.file_path = None
self.icon = None
self.target_os_type = None
self.parameters = None
self.created_date = None
| mit | -7,418,917,253,011,895,000 | 36.386364 | 78 | 0.571429 | false |
sambiak/pandaMorp | audio.py | 1 | 1961 | # This Python file uses the following encoding: utf-8
"""
3D tic-tac-toe (Morpion) with an AI using an implementation of the Minimax algorithm
Copyright (C) 2015 Guillaume Augustoni, Leo Henriot, Raphael Chevalier and Enzo cabezas
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
class AudioManager():
def __init__(self, loader):
self.loader = loader
self.Musiquewin = self.loader.loadMusic("GreekDance.mp3")
self.Musiquegame = self.loader.loadMusic("cannontube.ogg")
self.Musiquegame.setLoop(True)
        print("executed")
self.Musiquemenu = self.loader.loadMusic("Welcome.flac")
self.volumeSon = 1
self.Musiquemenu.setLoop(True)
self.Musiquemenu.play()
    def Arretermusiques(self):
        """Stop every music track and sound effect."""
self.Musiquewin.stop()
self.Musiquegame.stop()
self.buttonson.stop()
self.rollsound.stop()
self.resetsound.stop()
    def LancerMusiquevictoire(self):
        """Play the victory music and stop the in-game music."""
if self.Musiquewin.status() != self.Musiquewin.PLAYING:
self.Musiquewin.play()
self.Musiquegame.stop()
    def LancerMusiquemenu(self):
        """Play the menu music if it is not already playing."""
if self.Musiquemenu.status() != self.Musiquemenu.PLAYING:
self.Musiquemenu.play()
    def LancerMusiquegame(self):
        """Play the in-game music if it is not already playing."""
if self.Musiquegame.status() != self.Musiquegame.PLAYING:
self.Musiquegame.play()
def disableaudio(self):
self.disableAllAudio()
    def Actualiserson(self, Volume):
        """Apply the given volume to all music tracks."""
self.Musiquegame.setVolume(Volume)
self.Musiquewin.setVolume(Volume)
self.Musiquemenu.setVolume(Volume)
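# Example usage sketch (illustrative assumption): "base" stands for a running
# panda3d ShowBase instance, whose loader this class expects.
#     audio = AudioManager(base.loader)
#     audio.LancerMusiquegame()   # switch from the menu music to the game music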
| gpl-3.0 | -3,316,838,220,605,760,500 | 33.928571 | 87 | 0.765849 | false |
0x46616c6b/ansible | test/runner/lib/import_analysis.py | 23 | 5934 | """Analyze python import statements."""
from __future__ import absolute_import, print_function
import ast
import os
import uuid
from lib.util import (
display,
ApplicationError,
)
def get_python_module_utils_imports(compile_targets):
"""Return a dictionary of python file paths mapped to sets of module_utils names.
:type compile_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils_files = (os.path.splitext(filename) for filename in os.listdir('lib/ansible/module_utils'))
module_utils = sorted(name[0] for name in module_utils_files if name[0] != '__init__' and name[1] == '.py')
imports_by_target_path = {}
for target in compile_targets:
imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
def recurse_import(import_name, depth=0, seen=None):
"""Recursively expand module_utils imports from module_utils files.
:type import_name: str
:type depth: int
:type seen: set[str] | None
        :rtype: set[str]
"""
display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
if seen is None:
seen = set([import_name])
results = set([import_name])
import_path = os.path.join('lib/ansible/module_utils', '%s.py' % import_name)
for name in sorted(imports_by_target_path.get(import_path, set())):
if name in seen:
continue
seen.add(name)
matches = sorted(recurse_import(name, depth + 1, seen))
for result in matches:
results.add(result)
return results
for module_util in module_utils:
# recurse over module_utils imports while excluding self
module_util_imports = recurse_import(module_util)
module_util_imports.remove(module_util)
# add recursive imports to all path entries which import this module_util
for target_path in imports_by_target_path:
if module_util in imports_by_target_path[target_path]:
for module_util_import in sorted(module_util_imports):
if module_util_import not in imports_by_target_path[target_path]:
display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
imports_by_target_path[target_path].add(module_util_import)
imports = dict([(module_util, set()) for module_util in module_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not len(imports[module_util]):
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def extract_python_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
with open(path, 'r') as module_fd:
code = module_fd.read()
try:
tree = ast.parse(code)
except SyntaxError as ex:
# Setting the full path to the filename results in only the filename being given for str(ex).
# As a work-around, set the filename to a UUID and replace it in the final string output with the actual path.
ex.filename = str(uuid.uuid4())
error = str(ex).replace(ex.filename, path)
raise ApplicationError('AST parse error: %s' % error)
finder = ModuleUtilFinder(path, module_utils)
finder.visit(tree)
return finder.imports
class ModuleUtilFinder(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
"""
super(ModuleUtilFinder, self).__init__()
self.path = path
self.module_utils = module_utils
self.imports = set()
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node):
"""
:type node: ast.Import
"""
self.generic_visit(node)
for alias in node.names:
if alias.name.startswith('ansible.module_utils.'):
# import ansible.module_utils.MODULE[.MODULE]
self.add_import(alias.name.split('.')[2], node.lineno)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node):
"""
:type node: ast.ImportFrom
"""
self.generic_visit(node)
if not node.module:
return
if node.module == 'ansible.module_utils':
for alias in node.names:
# from ansible.module_utils import MODULE[, MODULE]
self.add_import(alias.name, node.lineno)
elif node.module.startswith('ansible.module_utils.'):
# from ansible.module_utils.MODULE[.MODULE]
self.add_import(node.module.split('.')[2], node.lineno)
def add_import(self, name, line_number):
"""
:type name: str
:type line_number: int
"""
if name in self.imports:
return # duplicate imports are ignored
if name not in self.module_utils:
if self.path.startswith('test/'):
return # invalid imports in tests are ignored
raise Exception('%s:%d Invalid module_util import: %s' % (self.path, line_number, name))
display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
self.imports.add(name)
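# Example usage sketch: the module path and module_utils names below are
# illustrative assumptions, not taken from an actual Ansible checkout.
if __name__ == '__main__':
    example_imports = extract_python_module_utils_imports(
        'lib/ansible/modules/files/copy.py',  # assumed path to a module file
        set(['basic', 'six', 'urls']),  # assumed module_utils names
    )
    print(sorted(example_imports))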
| gpl-3.0 | 4,384,890,752,725,790,700 | 34.532934 | 130 | 0.614257 | false |
lzw120/django | build/lib/django/contrib/localflavor/si/si_postalcodes.py | 89 | 13570 | # *-* coding: utf-8 *-*
SI_POSTALCODES = [
(1000, u'Ljubljana'),
(1215, u'Medvode'),
(1216, u'Smlednik'),
(1217, u'Vodice'),
(1218, u'Komenda'),
(1219, u'Laze v Tuhinju'),
(1221, u'Motnik'),
(1222, u'Trojane'),
(1223, u'Blagovica'),
(1225, u'Lukovica'),
(1230, u'Dom\u017eale'),
(1233, u'Dob'),
(1234, u'Menge\u0161'),
(1235, u'Radomlje'),
(1236, u'Trzin'),
(1241, u'Kamnik'),
(1242, u'Stahovica'),
(1251, u'Morav\u010de'),
(1252, u'Va\u010de'),
(1262, u'Dol pri Ljubljani'),
(1270, u'Litija'),
(1272, u'Pol\u0161nik'),
(1273, u'Dole pri Litiji'),
(1274, u'Gabrovka'),
(1275, u'\u0160martno pri Litiji'),
(1276, u'Primskovo'),
(1281, u'Kresnice'),
(1282, u'Sava'),
(1290, u'Grosuplje'),
(1291, u'\u0160kofljica'),
(1292, u'Ig'),
(1293, u'\u0160marje - Sap'),
(1294, u'Vi\u0161nja Gora'),
(1295, u'Ivan\u010dna Gorica'),
(1296, u'\u0160entvid pri Sti\u010dni'),
(1301, u'Krka'),
(1303, u'Zagradec'),
(1310, u'Ribnica'),
(1311, u'Turjak'),
(1312, u'Videm - Dobrepolje'),
(1313, u'Struge'),
(1314, u'Rob'),
(1315, u'Velike La\u0161\u010de'),
(1316, u'Ortnek'),
(1317, u'Sodra\u017eica'),
(1318, u'Lo\u0161ki Potok'),
(1319, u'Draga'),
(1330, u'Ko\u010devje'),
(1331, u'Dolenja vas'),
(1332, u'Stara Cerkev'),
(1336, u'Kostel'),
(1337, u'Osilnica'),
(1338, u'Ko\u010devska Reka'),
(1351, u'Brezovica pri Ljubljani'),
(1352, u'Preserje'),
(1353, u'Borovnica'),
(1354, u'Horjul'),
(1355, u'Polhov Gradec'),
(1356, u'Dobrova'),
(1357, u'Notranje Gorice'),
(1358, u'Log pri Brezovici'),
(1360, u'Vrhnika'),
(1370, u'Logatec'),
(1372, u'Hotedr\u0161ica'),
(1373, u'Rovte'),
(1380, u'Cerknica'),
(1381, u'Rakek'),
(1382, u'Begunje pri Cerknici'),
(1384, u'Grahovo'),
(1385, u'Nova vas'),
(1386, u'Stari trg pri Lo\u017eu'),
(1410, u'Zagorje ob Savi'),
(1411, u'Izlake'),
(1412, u'Kisovec'),
(1413, u'\u010cem\u0161enik'),
(1414, u'Podkum'),
(1420, u'Trbovlje'),
(1423, u'Dobovec'),
(1430, u'Hrastnik'),
(1431, u'Dol pri Hrastniku'),
(1432, u'Zidani Most'),
(1433, u'Rade\u010de'),
(1434, u'Loka pri Zidanem Mostu'),
(2000, u'Maribor'),
(2201, u'Zgornja Kungota'),
(2204, u'Miklav\u017e na Dravskem polju'),
(2205, u'Star\u0161e'),
(2206, u'Marjeta na Dravskem polju'),
(2208, u'Pohorje'),
(2211, u'Pesnica pri Mariboru'),
(2212, u'\u0160entilj v Slovenskih goricah'),
(2213, u'Zgornja Velka'),
(2214, u'Sladki vrh'),
(2215, u'Cer\u0161ak'),
(2221, u'Jarenina'),
(2222, u'Jakobski Dol'),
(2223, u'Jurovski Dol'),
(2229, u'Male\u010dnik'),
(2230, u'Lenart v Slovenskih goricah'),
(2231, u'Pernica'),
(2232, u'Voli\u010dina'),
(2233, u'Sveta Ana v Slovenskih goricah'),
(2234, u'Benedikt'),
(2235, u'Sveta Trojica v Slovenskih goricah'),
(2236, u'Cerkvenjak'),
(2241, u'Spodnji Duplek'),
(2242, u'Zgornja Korena'),
(2250, u'Ptuj'),
(2252, u'Dornava'),
(2253, u'Destrnik'),
(2254, u'Trnovska vas'),
(2255, u'Vitomarci'),
(2256, u'Jur\u0161inci'),
(2257, u'Polen\u0161ak'),
(2258, u'Sveti Toma\u017e'),
(2259, u'Ivanjkovci'),
(2270, u'Ormo\u017e'),
(2272, u'Gori\u0161nica'),
(2273, u'Podgorci'),
(2274, u'Velika Nedelja'),
(2275, u'Miklav\u017e pri Ormo\u017eu'),
(2276, u'Kog'),
(2277, u'Sredi\u0161\u010de ob Dravi'),
(2281, u'Markovci'),
(2282, u'Cirkulane'),
(2283, u'Zavr\u010d'),
(2284, u'Videm pri Ptuju'),
(2285, u'Zgornji Leskovec'),
(2286, u'Podlehnik'),
(2287, u'\u017detale'),
(2288, u'Hajdina'),
(2289, u'Stoperce'),
(2310, u'Slovenska Bistrica'),
(2311, u'Ho\u010de'),
(2312, u'Orehova vas'),
(2313, u'Fram'),
(2314, u'Zgornja Polskava'),
(2315, u'\u0160martno na Pohorju'),
(2316, u'Zgornja Lo\u017enica'),
(2317, u'Oplotnica'),
(2318, u'Laporje'),
(2319, u'Polj\u010dane'),
(2321, u'Makole'),
(2322, u'Maj\u0161perk'),
(2323, u'Ptujska Gora'),
(2324, u'Lovrenc na Dravskem polju'),
(2325, u'Kidri\u010devo'),
(2326, u'Cirkovce'),
(2327, u'Ra\u010de'),
(2331, u'Pragersko'),
(2341, u'Limbu\u0161'),
(2342, u'Ru\u0161e'),
(2343, u'Fala'),
(2344, u'Lovrenc na Pohorju'),
(2345, u'Bistrica ob Dravi'),
(2351, u'Kamnica'),
(2352, u'Selnica ob Dravi'),
(2353, u'Sv. Duh na Ostrem Vrhu'),
(2354, u'Bresternica'),
(2360, u'Radlje ob Dravi'),
(2361, u'O\u017ebalt'),
(2362, u'Kapla'),
(2363, u'Podvelka'),
(2364, u'Ribnica na Pohorju'),
(2365, u'Vuhred'),
(2366, u'Muta'),
(2367, u'Vuzenica'),
(2370, u'Dravograd'),
(2371, u'Trbonje'),
(2372, u'Libeli\u010de'),
(2373, u'\u0160entjan\u017e pri Dravogradu'),
(2380, u'Slovenj Gradec'),
(2381, u'Podgorje pri Slovenj Gradcu'),
(2382, u'Mislinja'),
(2383, u'\u0160martno pri Slovenj Gradcu'),
(2390, u'Ravne na Koro\u0161kem'),
(2391, u'Prevalje'),
(2392, u'Me\u017eica'),
(2393, u'\u010crna na Koro\u0161kem'),
(2394, u'Kotlje'),
(3000, u'Celje'),
(3201, u'\u0160martno v Ro\u017eni dolini'),
(3202, u'Ljube\u010dna'),
(3203, u'Nova Cerkev'),
(3204, u'Dobrna'),
(3205, u'Vitanje'),
(3206, u'Stranice'),
(3210, u'Slovenske Konjice'),
(3211, u'\u0160kofja vas'),
(3212, u'Vojnik'),
(3213, u'Frankolovo'),
(3214, u'Zre\u010de'),
(3215, u'Lo\u010de'),
(3220, u'\u0160tore'),
(3221, u'Teharje'),
(3222, u'Dramlje'),
(3223, u'Loka pri \u017dusmu'),
(3224, u'Dobje pri Planini'),
(3225, u'Planina pri Sevnici'),
(3230, u'\u0160entjur'),
(3231, u'Grobelno'),
(3232, u'Ponikva'),
(3233, u'Kalobje'),
(3240, u'\u0160marje pri Jel\u0161ah'),
(3241, u'Podplat'),
(3250, u'Roga\u0161ka Slatina'),
(3252, u'Rogatec'),
(3253, u'Pristava pri Mestinju'),
(3254, u'Pod\u010detrtek'),
(3255, u'Bu\u010de'),
(3256, u'Bistrica ob Sotli'),
(3257, u'Podsreda'),
(3260, u'Kozje'),
(3261, u'Lesi\u010dno'),
(3262, u'Prevorje'),
(3263, u'Gorica pri Slivnici'),
(3264, u'Sveti \u0160tefan'),
(3270, u'La\u0161ko'),
(3271, u'\u0160entrupert'),
(3272, u'Rimske Toplice'),
(3273, u'Jurklo\u0161ter'),
(3301, u'Petrov\u010de'),
(3302, u'Gri\u017ee'),
(3303, u'Gomilsko'),
(3304, u'Tabor'),
(3305, u'Vransko'),
(3310, u'\u017dalec'),
(3311, u'\u0160empeter v Savinjski dolini'),
(3312, u'Prebold'),
(3313, u'Polzela'),
(3314, u'Braslov\u010de'),
(3320, u'Velenje - dostava'),
(3322, u'Velenje - po\u0161tni predali'),
(3325, u'\u0160o\u0161tanj'),
(3326, u'Topol\u0161ica'),
(3327, u'\u0160martno ob Paki'),
(3330, u'Mozirje'),
(3331, u'Nazarje'),
(3332, u'Re\u010dica ob Savinji'),
(3333, u'Ljubno ob Savinji'),
(3334, u'Lu\u010de'),
(3335, u'Sol\u010dava'),
(3341, u'\u0160martno ob Dreti'),
(3342, u'Gornji Grad'),
(4000, u'Kranj'),
(4201, u'Zgornja Besnica'),
(4202, u'Naklo'),
(4203, u'Duplje'),
(4204, u'Golnik'),
(4205, u'Preddvor'),
(4206, u'Zgornje Jezersko'),
(4207, u'Cerklje na Gorenjskem'),
(4208, u'\u0160en\u010dur'),
(4209, u'\u017dabnica'),
(4210, u'Brnik - aerodrom'),
(4211, u'Mav\u010di\u010de'),
(4212, u'Visoko'),
(4220, u'\u0160kofja Loka'),
(4223, u'Poljane nad \u0160kofjo Loko'),
(4224, u'Gorenja vas'),
(4225, u'Sovodenj'),
(4226, u'\u017diri'),
(4227, u'Selca'),
(4228, u'\u017delezniki'),
(4229, u'Sorica'),
(4240, u'Radovljica'),
(4243, u'Brezje'),
(4244, u'Podnart'),
(4245, u'Kropa'),
(4246, u'Kamna Gorica'),
(4247, u'Zgornje Gorje'),
(4248, u'Lesce'),
(4260, u'Bled'),
(4263, u'Bohinjska Bela'),
(4264, u'Bohinjska Bistrica'),
(4265, u'Bohinjsko jezero'),
(4267, u'Srednja vas v Bohinju'),
(4270, u'Jesenice'),
(4273, u'Blejska Dobrava'),
(4274, u'\u017dirovnica'),
(4275, u'Begunje na Gorenjskem'),
(4276, u'Hru\u0161ica'),
(4280, u'Kranjska Gora'),
(4281, u'Mojstrana'),
(4282, u'Gozd Martuljek'),
(4283, u'Rate\u010de - Planica'),
(4290, u'Tr\u017ei\u010d'),
(4294, u'Kri\u017ee'),
(5000, u'Nova Gorica'),
(5210, u'Deskle'),
(5211, u'Kojsko'),
(5212, u'Dobrovo v Brdih'),
(5213, u'Kanal'),
(5214, u'Kal nad Kanalom'),
(5215, u'Ro\u010dinj'),
(5216, u'Most na So\u010di'),
(5220, u'Tolmin'),
(5222, u'Kobarid'),
(5223, u'Breginj'),
(5224, u'Srpenica'),
(5230, u'Bovec'),
(5231, u'Log pod Mangartom'),
(5232, u'So\u010da'),
(5242, u'Grahovo ob Ba\u010di'),
(5243, u'Podbrdo'),
(5250, u'Solkan'),
(5251, u'Grgar'),
(5252, u'Trnovo pri Gorici'),
(5253, u'\u010cepovan'),
(5261, u'\u0160empas'),
(5262, u'\u010crni\u010de'),
(5263, u'Dobravlje'),
(5270, u'Ajdov\u0161\u010dina'),
(5271, u'Vipava'),
(5272, u'Podnanos'),
(5273, u'Col'),
(5274, u'\u010crni Vrh nad Idrijo'),
(5275, u'Godovi\u010d'),
(5280, u'Idrija'),
(5281, u'Spodnja Idrija'),
(5282, u'Cerkno'),
(5283, u'Slap ob Idrijci'),
(5290, u'\u0160empeter pri Gorici'),
(5291, u'Miren'),
(5292, u'Ren\u010de'),
(5293, u'Vol\u010dja Draga'),
(5294, u'Dornberk'),
(5295, u'Branik'),
(5296, u'Kostanjevica na Krasu'),
(5297, u'Prva\u010dina'),
(6000, u'Koper'),
(6210, u'Se\u017eana'),
(6215, u'Diva\u010da'),
(6216, u'Podgorje'),
(6217, u'Vremski Britof'),
(6219, u'Lokev'),
(6221, u'Dutovlje'),
(6222, u'\u0160tanjel'),
(6223, u'Komen'),
(6224, u'Seno\u017ee\u010de'),
(6225, u'Hru\u0161evje'),
(6230, u'Postojna'),
(6232, u'Planina'),
(6240, u'Kozina'),
(6242, u'Materija'),
(6243, u'Obrov'),
(6244, u'Podgrad'),
(6250, u'Ilirska Bistrica'),
(6251, u'Ilirska Bistrica - Trnovo'),
(6253, u'Kne\u017eak'),
(6254, u'Jel\u0161ane'),
(6255, u'Prem'),
(6256, u'Ko\u0161ana'),
(6257, u'Pivka'),
(6258, u'Prestranek'),
(6271, u'Dekani'),
(6272, u'Gra\u010di\u0161\u010de'),
(6273, u'Marezige'),
(6274, u'\u0160marje'),
(6275, u'\u010crni Kal'),
(6276, u'Pobegi'),
(6280, u'Ankaran - Ancarano'),
(6281, u'\u0160kofije'),
(6310, u'Izola - Isola'),
(6320, u'Portoro\u017e - Portorose'),
(6330, u'Piran - Pirano'),
(6333, u'Se\u010dovlje - Sicciole'),
(8000, u'Novo mesto'),
(8210, u'Trebnje'),
(8211, u'Dobrni\u010d'),
(8212, u'Velika Loka'),
(8213, u'Veliki Gaber'),
(8216, u'Mirna Pe\u010d'),
(8220, u'\u0160marje\u0161ke Toplice'),
(8222, u'Oto\u010dec'),
(8230, u'Mokronog'),
(8231, u'Trebelno'),
(8232, u'\u0160entrupert'),
(8233, u'Mirna'),
(8250, u'Bre\u017eice'),
(8251, u'\u010cate\u017e ob Savi'),
(8253, u'Arti\u010de'),
(8254, u'Globoko'),
(8255, u'Pi\u0161ece'),
(8256, u'Sromlje'),
(8257, u'Dobova'),
(8258, u'Kapele'),
(8259, u'Bizeljsko'),
(8261, u'Jesenice na Dolenjskem'),
(8262, u'Kr\u0161ka vas'),
(8263, u'Cerklje ob Krki'),
(8270, u'Kr\u0161ko'),
(8272, u'Zdole'),
(8273, u'Leskovec pri Kr\u0161kem'),
(8274, u'Raka'),
(8275, u'\u0160kocjan'),
(8276, u'Bu\u010dka'),
(8280, u'Brestanica'),
(8281, u'Senovo'),
(8282, u'Koprivnica'),
(8283, u'Blanca'),
(8290, u'Sevnica'),
(8292, u'Zabukovje'),
(8293, u'Studenec'),
(8294, u'Bo\u0161tanj'),
(8295, u'Tr\u017ei\u0161\u010de'),
(8296, u'Krmelj'),
(8297, u'\u0160entjan\u017e'),
(8310, u'\u0160entjernej'),
(8311, u'Kostanjevica na Krki'),
(8312, u'Podbo\u010dje'),
(8321, u'Brusnice'),
(8322, u'Stopi\u010de'),
(8323, u'Ur\u0161na sela'),
(8330, u'Metlika'),
(8331, u'Suhor'),
(8332, u'Gradac'),
(8333, u'Semi\u010d'),
(8340, u'\u010crnomelj'),
(8341, u'Adle\u0161i\u010di'),
(8342, u'Stari trg ob Kolpi'),
(8343, u'Dragatu\u0161'),
(8344, u'Vinica pri \u010crnomlju'),
(8350, u'Dolenjske Toplice'),
(8351, u'Stra\u017ea'),
(8360, u'\u017du\u017eemberk'),
(8361, u'Dvor'),
(8362, u'Hinje'),
(9000, u'Murska Sobota'),
(9201, u'Puconci'),
(9202, u'Ma\u010dkovci'),
(9203, u'Petrovci'),
(9204, u'\u0160alovci'),
(9205, u'Hodo\u0161 - Hodos'),
(9206, u'Kri\u017eevci'),
(9207, u'Prosenjakovci - Partosfalva'),
(9208, u'Fokovci'),
(9220, u'Lendava - Lendva'),
(9221, u'Martjanci'),
(9222, u'Bogojina'),
(9223, u'Dobrovnik - Dobronak'),
(9224, u'Turni\u0161\u010de'),
(9225, u'Velika Polana'),
(9226, u'Moravske Toplice'),
(9227, u'Kobilje'),
(9231, u'Beltinci'),
(9232, u'\u010cren\u0161ovci'),
(9233, u'Odranci'),
(9240, u'Ljutomer'),
(9241, u'Ver\u017eej'),
(9242, u'Kri\u017eevci pri Ljutomeru'),
(9243, u'Mala Nedelja'),
(9244, u'Sveti Jurij ob \u0160\u010davnici'),
(9245, u'Spodnji Ivanjci'),
(9250, u'Gornja Radgona'),
(9251, u'Ti\u0161ina'),
(9252, u'Radenci'),
(9253, u'Apa\u010de'),
(9261, u'Cankova'),
(9262, u'Roga\u0161ovci'),
(9263, u'Kuzma'),
(9264, u'Grad'),
(9265, u'Bodonci'),
]
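# Choices are sorted by the place-name string (the second tuple element)
# rather than by the numeric postal code.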
SI_POSTALCODES_CHOICES = sorted(SI_POSTALCODES, key=lambda k: k[1])
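
# A minimal usage sketch (an assumption for illustration, not part of the
# original module): the (code, name) tuples are typically wired into a form
# field as its choices. ``SIPostalCodeSelect`` is a hypothetical name and the
# Django import is assumed to be available in the surrounding project.
#
#     from django import forms
#
#     class SIPostalCodeSelect(forms.Select):
#         """Select widget pre-populated with Slovenian postal codes."""
#         def __init__(self, attrs=None):
#             super(SIPostalCodeSelect, self).__init__(
#                 attrs, choices=SI_POSTALCODES_CHOICES)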
| bsd-3-clause | -2,700,544,115,968,707,000 | 27.933902 | 67 | 0.552837 | false |