repo_name (string, lengths 5-100) | path (string, lengths 4-375) | copies (categorical, 991 values) | size (string, lengths 4-7) | content (string, lengths 666-1M) | license (categorical, 15 values)
---|---|---|---|---|---|
minghuascode/pyj | library/pyjamas/media/Media.ie6.py | 1 | 1553 |
class Media(Widget):

    def setSrc(self, src):
        print "setSrc", src
        obj = self.getElement()
        DOM.setAttribute(obj, "URL", src)

    def setControls(self, controls):
        print "setControls", controls
        self.ctrlparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.ctrlparam, "name", "ShowControls")
        DOM.setBooleanAttribute(self.ctrlparam, "VALUE",
                                controls and "true" or "false")
        self.getElement().appendChild(self.ctrlparam)

#    def setStatusbar(self, statusbar):
#        print "setstatus", statusbar
#        self.statparam = DOM.createElement("PARAM")
#        DOM.setAttribute(self.statparam, "name", "ShowStatusBar")
#        DOM.setBooleanAttribute(self.statparam, "VALUE",
#                                statusbar and "true" or "false")
#        self.getElement().appendChild(self.statparam)

    def setLoop(self, autorewind):
        print "autorewind", autorewind
        self.loopparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.loopparam, "name", "autorewind")
        DOM.setBooleanAttribute(self.loopparam, "VALUE",
                                autorewind and "true" or "false")
        self.getElement().appendChild(self.loopparam)

    def setAutoplay(self, autostart):
        print "autoplay", autostart
        self.playparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.playparam, "name", "autostart")
        DOM.setBooleanAttribute(self.playparam, "VALUE",
                                autostart and "true" or "false")
        self.getElement().appendChild(self.playparam)
| apache-2.0 |
savoca/zerofltetmo | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

drop_log = {}
kallsyms = []

def get_kallsyms_table():
    global kallsyms

    try:
        f = open("/proc/kallsyms", "r")
    except:
        return

    for line in f:
        loc = int(line.split()[0], 16)
        name = line.split()[2]
        kallsyms.append((loc, name))
    kallsyms.sort()

def get_sym(sloc):
    loc = int(sloc)

    # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
    #            kallsyms[i][0] > loc for all end <= i < len(kallsyms)
    start, end = -1, len(kallsyms)
    while end != start + 1:
        pivot = (start + end) // 2
        if loc < kallsyms[pivot][0]:
            end = pivot
        else:
            start = pivot

    # Now (start == -1 or kallsyms[start][0] <= loc)
    # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
    if start >= 0:
        symloc, name = kallsyms[start]
        return (name, loc - symloc)
    else:
        return (None, 0)
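# Illustrative lookup (hypothetical, tiny symbol table): with
#   kallsyms = [(4096, "foo"), (8192, "bar")]
# get_sym("8208") returns ("bar", 16), i.e. the nearest symbol at or below the
# address plus the byte offset into it, while get_sym("100") returns (None, 0)
# because the address precedes every known symbol.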
def print_drop_table():
    print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym == None:
            sym = i
        print "%25s %25s %25s" % (sym, off, drop_log[i])

def trace_begin():
    print "Starting trace (Ctrl-C to dump results)"

def trace_end():
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()

# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, location, protocol):
    slocation = str(location)
    try:
        drop_log[slocation] = drop_log[slocation] + 1
    except:
        drop_log[slocation] = 1
| gpl-2.0 |
jjdmol/LOFAR | CEP/Calibration/ExpIon/src/parmdbmain.py | 1 | 2049 | # -*- coding: utf-8 -*-
# Copyright (C) 2007
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
import subprocess
def store_parms( pdbname, parms, create_new = False) :
    # Write a dictionary of parameters into a parmdb table by driving the
    # 'parmdbm' command-line tool through its stdin.
    FNULL = open( '/dev/null', 'w' )
    process = subprocess.Popen( ['parmdbm'], shell = False, stdin = subprocess.PIPE, stdout = FNULL, stderr = FNULL )
    if create_new :
        process.stdin.write( "create tablename='" + pdbname + "'\n" )
    else :
        process.stdin.write( "open tablename='" + pdbname + "'\n" )
    parmnames = parms.keys()
    for parmname in parmnames:
        v = parms[parmname]
        times = v['times']
        nt = len(times)
        freqs = v['freqs']
        nf = len(freqs)
        timewidths = v['timewidths']
        freqwidths = v['freqwidths']
        values = v['values']
        repr_values = '[' + ', '.join([repr(v1) for v1 in values.flat]) + ']'
        # the domain spans the full frequency/time extent, cell widths included
        freq_start = freqs[0]-freqwidths[0]/2
        freq_end = freqs[-1]+freqwidths[-1]/2
        time_start = times[0] - timewidths[0]/2
        time_end = times[-1] + timewidths[-1]/2
        domain = "[%s,%s,%s,%s]" % ( freq_start, freq_end, time_start, time_end )
        process.stdin.write("add %s shape=[%i,%i], values=%s, domain=%s\n" % (parmname, nf, nt, repr_values, domain))
    process.stdin.write('quit\n')
    process.wait()
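# Minimal usage sketch (hypothetical parameter name and values; assumes a
# numpy array for 'values', since repr_values iterates over values.flat):
#
#   import numpy as np
#   parms = {'Gain:0:0:Real:CS001': {
#       'freqs': [100e6], 'freqwidths': [1e6],
#       'times': [1.0, 2.0], 'timewidths': [1.0, 1.0],
#       'values': np.ones((1, 2))}}
#   store_parms('my.parmdb', parms, create_new=True)
#
# This would write one parameter with shape [nf=1, nt=2] into a new parmdb table.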
| gpl-3.0 |
django-extensions/django-extensions | tests/management/commands/test_describe_form.py | 1 | 2203 | # -*- coding: utf-8 -*-
from io import StringIO
from django.test import TestCase
from django.db import models
from django.core.management import CommandError, call_command
class DescribeFormExceptionsTests(TestCase):
"""Tests for describe_form command exceptions."""
def test_should_raise_CommandError_if_invalid_arg(self):
with self.assertRaisesRegex(CommandError, "Need application and model name in the form: appname.model"):
call_command('describe_form', 'testapp')
class DescribeFormTests(TestCase):
"""Tests for describe_form command."""
def setUp(self):
self.out = StringIO()
class BaseModel(models.Model):
title = models.CharField(max_length=50)
body = models.TextField()
class Meta:
app_label = 'testapp'
class NonEditableModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=50)
class Meta:
app_label = 'testapp'
def test_should_print_form_definition_for_TestModel(self):
expected_result = '''from django import forms
from testapp.models import BaseModel
class BaseModelForm(forms.Form):
title = forms.CharField(label='Title', max_length=50)
body = forms.CharField(label='Body')'''
call_command('describe_form', 'testapp.BaseModel', stdout=self.out)
self.assertIn(expected_result, self.out.getvalue())
def test_should_print_form_definition_for_TestModel_with_non_editable_field(self):
expected_result = '''from django import forms
from testapp.models import NonEditableModel
class NonEditableModelForm(forms.Form):
title = forms.CharField(label='Title', max_length=50)'''
call_command('describe_form', 'testapp.NonEditableModel', stdout=self.out)
self.assertIn(expected_result, self.out.getvalue())
def test_should_print_form_with_fields_for_TestModel(self):
not_expected = '''body = forms.CharField(label='Body')'''
call_command('describe_form', 'testapp.BaseModel', '--fields=title', stdout=self.out)
self.assertNotIn(not_expected, self.out.getvalue())
| mit |
shellydeforte/PDB | pdb/tests/test_data.py | 1 | 41341 | # -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals)
class PDBParseData:
ss_dis = {
'104L_B': {
'disorder': '--------------------------------------------------------------------------------------------------------------------------------------------------------------------XX',
'secstr': ' HHHHHHHHH SB EE TTS EE TTT SS HHHHHHHHHHHS S TTB HHHHHHHHHHHHHHHHHHHHT TTHHHHHHHS SSHHHHHHHHHHHH HHHHHH HHHHHHHHTT TTHHHHHHTSSHHHHHS HHHHHHHHHHHS SGGGG ',
'sequence': 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSAAELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},
'104L_A': {
'disorder': '--------------------------------------------------------------------------------------------------------------------------------------------------------------------XX',
'secstr': ' HHHHHHHHT SB EE TTS EEETTTEEEE TT HHHHHHHHHHHHTS TTB HHHHHHHHHHHHHHHHHHHTT TTTHHHHHHS HHHHHHHHHHHHHHHHHHHHT HHHHHHTTTT HHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGG ',
'sequence': 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSAAELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},
'11BG_A': {
'disorder': '----------------------------------------------------------------------------------------------------------------------------',
'secstr': ' HHHHHHHHHB SSTT GGGHHHHHHHHTT SSS SEEEEE S HHHHHGGGGSEEE TTS S EEE SS EEEEEEEE TT BTTB EEEEEEEE EEEEEETTTTEEEEEEEE ',
'sequence': 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV'},
'11BG_B': {
'disorder': '----------------------------------------------------------------------------------------------------------------------------',
'secstr': ' HHHHHHHHHB TT TT GGGHHHHHHHHTT SSSS SEEEEE S HHHHHGGGGSEEE SSS S EEE SS EEEEEEEE TT BTTB EEEEEEEE EEEEEETTTTEEEEEEEE ',
'sequence': 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV'},
'102L_A': {
'disorder': '-------------------------------------------------------------------------------------------------------------------------------------------------------------------XX',
'secstr': ' HHHHHHHHH EEEEEE TTS EEEETTEEEESSS TTTHHHHHHHHHHTS TTB HHHHHHHHHHHHHHHHHHHHH TTHHHHHHHS HHHHHHHHHHHHHHHHHHHHT HHHHHHHHTT HHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGG ',
'sequence': 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'}}
filter_pdb_chain_uniprot_input = {
'CHAIN': {
0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',
7: 'A', 8: 'B', 9: 'B', 10: 'A', 11: 'A',
12: 'A', 13: 'A', 14: 'A', 15: 'A', 16: 'A', 17: 'B', 18: 'C',
19: 'A', 20: 'A', 21: 'A', 22: 'A'},
'PDB_BEG': {
0: 1, 1: 41, 2: 1, 3: 41, 4: 1, 5: 1, 6: 1, 7: 45, 8: 1,
9: 45, 10: 1, 11: 1, 12: 0, 13: 0, 14: 1,
15: 0, 16: 1, 17: 1, 18: 1, 19: 22, 20: 343, 21: 22,
22: 391},
'SP_BEG': {
0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,
9: 45, 10: 2, 11: 2, 12: 1, 13: 1, 14: 1,
15: 1, 16: 2, 17: 2, 18: 2, 19: 22, 20: 126, 21: 22,
22: 126},
'SP_END': {
0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,
8: 44, 9: 164, 10: 155, 11: 154, 12: 3,
13: 154, 14: 164, 15: 154, 16: 210, 17: 210, 18: 210,
19: 342, 20: 200, 21: 390, 22: 200},
'RES_BEG': {
0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,
9: 47, 10: 1, 11: -1, 12: 1, 13: 1, 14: 1,
15: 1, 16: 1, 17: 1, 18: 1, 19: 5, 20: 326, 21: 5, 22: 374},
'PDB_END': {
0: 40, 1: 164, 2: 40, 3: 164, 4: 124, 5: 124, 6: 44, 7: 164,
8: 44, 9: 164, 10: 153, 11: 153,
12: 153, 13: 153, 14: 164, 15: 153, 16: 209, 17: 209,
18: 209, 19: 342, 20: 417, 21: 390, 22: 465},
'SP_PRIMARY': {
0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',
4: 'P00669', 5: 'P00669', 6: 'P00720',
7: 'P00720', 8: 'P00720', 9: 'P00720', 10: 'P02185',
11: 'P02185', 12: 'P02185', 13: 'P02185',
14: 'P00720', 15: 'P02185', 16: 'P09211', 17: 'P09211',
18: 'P09212', 19: 'B3DIN1', 20: 'Q4G1L2',
21: 'B3DIN1', 22: 'Q4G1L2'},
'RES_END': {
0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,
8: 44, 9: 166, 10: 153, 11: 153, 12: 3,
13: 154, 14: 164, 15: 154, 16: 209, 17: 209, 18: 209,
19: 325, 20: 400, 21: 373, 22: 448},
'PDB': {
0: '102l', 1: '102l', 2: '103l', 3: '103l', 4: '11bg',
5: '11bg', 6: '104l', 7: '104l', 8: '104l',
9: '104l', 10: '104m', 11: '105m', 12: '106m', 13: '108m',
14: '109l', 15: '109m', 16: '10gs',
17: '10gs', 18: '10gs', 19: '3v44', 20: '3v44', 21: '3v47',
22: '3v47'}
}
filter_pdb_chain_uniprot_expected = {
'CHAIN': {
0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',
7: 'A', 8: 'B', 9: 'B', 19: 'A', 20: 'A', 21: 'A', 22: 'A'},
'SP_BEG': {
0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,
9: 45, 19: 22, 20: 126, 21: 22, 22: 126},
'PDB': {
0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',
5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',
19: '3V44', 20: '3V44', 21: '3V47', 22: '3V47'},
'RES_BEG': {
0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,
9: 47, 19: 5, 20: 326, 21: 5, 22: 374},
'SP_PRIMARY': {
0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',
4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',
8: 'P00720', 9: 'P00720', 19: 'B3DIN1', 20: 'Q4G1L2',
21: 'B3DIN1', 22: 'Q4G1L2'},
'RES_END': {
0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,
8: 44, 9: 166, 19: 325, 20: 400, 21: 373, 22: 448},
'SP_END': {
0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,
8: 44, 9: 164, 19: 342, 20: 200, 21: 390, 22: 200}
}
add_pdbseq_to_df_input = {
'CHAIN': {
0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',
7: 'A', 8: 'B', 9: 'B', 19: 'A', 20: 'A', 21: 'A', 22: 'A'},
'SP_BEG': {
0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,
9: 45, 19: 22, 20: 126, 21: 22, 22: 126},
'PDB': {
0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',
5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',
19: '3V44', 20: '3V44', 21: '3V47', 22: '3V47'},
'RES_BEG': {
0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,
9: 47, 19: 5, 20: 326, 21: 5, 22: 374},
'SP_PRIMARY': {
0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',
4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',
8: 'P00720', 9: 'P00720', 19: 'B3DIN1', 20: 'Q4G1L2',
21: 'B3DIN1', 22: 'Q4G1L2'},
'RES_END': {
0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,
8: 44, 9: 166, 19: 325, 20: 400, 21: 373, 22: 448},
'SP_END': {
0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,
8: 44, 9: 164, 19: 342, 20: 200, 21: 390, 22: 200}
}
add_pdbseq_to_df_expected = {
'PDB_SEQ': {
0: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN',
1: 'AAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',
2: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',
3: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',
4: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',
5: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',
6: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',
7: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},
'CHAIN': {
0: 'A', 1: 'A', 2: 'A', 3: 'B', 4: 'A', 5: 'A', 6: 'B',
7: 'B'},
'SP_BEG': {0: 1, 1: 41, 2: 27, 3: 27, 4: 1, 5: 45, 6: 1, 7: 45},
'SP_END': {0: 40, 1: 164, 2: 150, 3: 150, 4: 44, 5: 164, 6: 44, 7: 164},
'RES_BEG': {0: 1, 1: 42, 2: 1, 3: 1, 4: 1, 5: 47, 6: 1, 7: 47},
'SP_PRIMARY': {
0: 'P00720', 1: 'P00720', 2: 'P00669', 3: 'P00669',
4: 'P00720', 5: 'P00720', 6: 'P00720', 7: 'P00720'},
'RES_END': {
0: 40, 1: 165, 2: 124, 3: 124, 4: 44, 5: 166, 6: 44,
7: 166},
'PDB': {
0: '102L', 1: '102L', 2: '11BG', 3: '11BG', 4: '104L',
5: '104L', 6: '104L', 7: '104L'}
}
filter_single_pdb_chain_sep_input = {
'CHAIN': {
0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',
7: 'A', 8: 'B', 9: 'B', 10: 'A', 11: 'A', 12: 'A', 13: 'A',
14: 'A'},
'SP_BEG': {
0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,
9: 45, 10: 22, 11: 126, 12: 22, 13: 126, 14: 1},
'SP_END': {
0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,
8: 44, 9: 164, 10: 342, 11: 200, 12: 390, 13: 200, 14: 185},
'RES_BEG': {
0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,
9: 47, 10: 5, 11: 326, 12: 5, 13: 374, 14: 1},
'SP_PRIMARY': {
0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',
4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',
8: 'P00720', 9: 'P00720', 10: 'B3DIN1', 11: 'Q4G1L2',
12: 'B3DIN1', 13: 'Q4G1L2', 14: 'P00718'},
'RES_END': {
0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,
8: 44, 9: 166, 10: 325, 11: 400, 12: 373, 13: 448, 14: 185},
'PDB': {
0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',
5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',
10: '3V44', 11: '3V44', 12: '3V47', 13: '3V47', 14: '154L'}
}
filter_single_pdb_chain_sep_expected = {
'CHAIN': {
0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',
7: 'A', 8: 'B', 9: 'B', 10: 'A', 11: 'A', 12: 'A', 13: 'A'},
'SP_BEG': {
0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,
9: 45, 10: 22, 11: 126, 12: 22, 13: 126},
'SP_END': {
0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,
8: 44, 9: 164, 10: 342, 11: 200, 12: 390, 13: 200},
'RES_BEG': {
0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,
9: 47, 10: 5, 11: 326, 12: 5, 13: 374},
'SP_PRIMARY': {
0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',
4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',
8: 'P00720', 9: 'P00720', 10: 'B3DIN1', 11: 'Q4G1L2',
12: 'B3DIN1', 13: 'Q4G1L2'},
'RES_END': {
0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,
8: 44, 9: 166, 10: 325, 11: 400, 12: 373, 13: 448},
'PDB': {
0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',
5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',
10: '3V44', 11: '3V44', 12: '3V47', 13: '3V47'}
}
filter_single_pdb_chain_input = {
'SP_PRIMARY': {0: 'P00720', 1: 'P00720', 2: 'P00669', 3: 'P00720'},
'SEC_STRUCT': {
0: 'PPHHHHHHHHTPPPSBPEEPTTSPEEETTTEEEEPPTTPPHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHTTPTTTHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHTTTTPHHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',
1: 'PPHHHHHHHHHPPPSBPEEPTTSPEEPTTTPPPPPPSSPPHHHHHHHHHSPSPPTTBPPHHHHHHHHHHHHHHHHHHHHTPTTHHHHHHHSPSSHHHHHHHHHHHHPHHHHHHPHHHHHHHHTTPTTHHHHHHTSSHHHHHSPHHHHHHHHHHHSPSGGGGPXX',
2: '-XXXPPPHHHHHHHHHBPPPSSTTPGGGHHHHHHHHTTPPSSSPPSEEEEEPSPHHHHHGGGGSEEEPPTTSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTXXXXXXXXP',
3: 'PPHHHHHHHHHPPEEEEEEPTTSPEEEETTEEEESSSPTTHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHHHPTTHHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHHHTTPHHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX'},
'PDB_CHAIN': {0: '104L_A', 1: '104L_B', 2: '11BG_A', 3: '102L_A'}
}
filter_single_pdb_chain_expected = {
'SP_PRIMARY': {0: 'P00720', 1: 'P00720', 2: 'P00720'}, 'SEC_STRUCT': {
0: 'PPHHHHHHHHTPPPSBPEEPTTSPEEETTTEEEEPPTTPPHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHTTPTTTHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHTTTTPHHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',
1: 'PPHHHHHHHHHPPPSBPEEPTTSPEEPTTTPPPPPPSSPPHHHHHHHHHSPSPPTTBPPHHHHHHHHHHHHHHHHHHHHTPTTHHHHHHHSPSSHHHHHHHHHHHHPHHHHHHPHHHHHHHHTTPTTHHHHHHTSSHHHHHSPHHHHHHHHHHHSPSGGGGPXX',
2: 'PPHHHHHHHHHPPEEEEEEPTTSPEEEETTEEEESSSPTTHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHHHPTTHHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHHHTTPHHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX'},
'PDB_CHAIN': {0: '104L_A', 1: '104L_B', 2: '102L_A'}
}
compare_to_uni_input = {
'PDB_SEQ': {0: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN',
1: 'AAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',
2: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',
3: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',
4: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',
5: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',
6: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',
7: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},
'CHAIN': {
0: 'A', 1: 'A', 2: 'A', 3: 'B', 4: 'A', 5: 'A', 6: 'B', 7: 'B'},
'SP_BEG': {0: 1, 1: 41, 2: 27, 3: 27, 4: 1, 5: 45, 6: 1, 7: 45},
'SP_END': {
0: 40, 1: 164, 2: 150, 3: 150, 4: 44, 5: 164, 6: 44, 7: 164},
'RES_BEG': {0: 1, 1: 42, 2: 1, 3: 1, 4: 1, 5: 47, 6: 1, 7: 47},
'SP_PRIMARY': {
0: 'P00720', 1: 'P00720', 2: 'P00669', 3: 'P00669',
4: 'P00720', 5: 'P00720', 6: 'P00720', 7: 'P00720'},
'RES_END': {
0: 40, 1: 165, 2: 124, 3: 124, 4: 44, 5: 166, 6: 44,
7: 166},
'PDB': {
0: '102L', 1: '102L', 2: '11BG', 3: '11BG', 4: '104L',
5: '104L', 6: '104L', 7: '104L'}
}
compare_to_uni_expected = {
'CHAIN': {0: 'A', 1: 'B'},
'SP_BEG': {0: 27, 1: 27},
'SP_END': {0: 150, 1: 150},
'RES_BEG': {0: 1, 1: 1},
'SP_PRIMARY': {0: 'P00669', 1: 'P00669'},
'RES_END': {0: 124, 1: 124},
'PDB': {0: '11BG', 1: '11BG'}
}
read_pdb_chain_uniprot_uniIDs_input = {
'CHAIN': {
0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',
7: 'A', 8: 'B', 9: 'B', 10: 'A', 11: 'A', 12: 'A', 13: 'A',
14: 'A'},
'SP_BEG': {
0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,
9: 45, 10: 22, 11: 126, 12: 22, 13: 126, 14: 1},
'SP_END': {
0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,
8: 44, 9: 164, 10: 342, 11: 200, 12: 390, 13: 200, 14: 185},
'RES_BEG': {
0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,
9: 47, 10: 5, 11: 326, 12: 5, 13: 374, 14: 1},
'SP_PRIMARY': {
0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',
4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',
8: 'P00720', 9: 'P00720', 10: 'B3DIN1', 11: 'Q4G1L2',
12: 'B3DIN1', 13: 'Q4G1L2', 14: 'P00718'},
'RES_END': {
0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,
8: 44, 9: 166, 10: 325, 11: 400, 12: 373, 13: 448, 14: 185},
'PDB': {
0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',
5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',
10: '3V44', 11: '3V44', 12: '3V47', 13: '3V47', 14: '154L'}
}
create_pdb_composite_input = {
'PDB_SEQ': {
0: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN',
1: 'AAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',
2: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',
3: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',
4: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',
5: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',
6: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',
7: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},
'CHAIN': {
0: 'A', 1: 'A', 2: 'A', 3: 'B',
4: 'A', 5: 'A', 6: 'B', 7: 'B'},
'SP_BEG': {0: 1, 1: 41, 2: 27, 3: 27, 4: 1, 5: 45, 6: 1, 7: 45},
'SP_END': {
0: 40, 1: 164, 2: 150, 3: 150, 4: 44,
5: 164, 6: 44, 7: 164},
'RES_BEG': {0: 1, 1: 42, 2: 1, 3: 1, 4: 1, 5: 47, 6: 1, 7: 47},
'SP_PRIMARY': {
0: 'P00720', 1: 'P00720', 2: 'P00669', 3: 'P00669',
4: 'P00720', 5: 'P00720', 6: 'P00720', 7: 'P00720'},
'RES_END': {
0: 40, 1: 165, 2: 124, 3: 124, 4: 44, 5: 166,
6: 44, 7: 166},
'PDB': {
0: '102L', 1: '102L', 2: '11BG', 3: '11BG', 4: '104L',
5: '104L', 6: '104L', 7: '104L'}
}
create_pdb_composite_expected = {
'SP_PRIMARY': {
0: 'P00720', 1: 'P00669', 2: 'P00720', 3: 'P00720',
4: 'P00669'},
'SEC_STRUCT': {
0: 'PPHHHHHHHHHPPPSBPEEPTTSPEEPTTTPPPPPPSSPPHHHHHHHHHSPSPPTTBPPHHHHHHHHHHHHHHHHHHHHTPTTHHHHHHHSPSSHHHHHHHHHHHHPHHHHHHPHHHHHHHHTTPTTHHHHHHTSSHHHHHSPHHHHHHHHHHHSPSGGGGPXX',
1: '--------------------------PPPHHHHHHHHHBPTTPPTTPGGGHHHHHHHHTTPSSSSPPSEEEEEPSPHHHHHGGGGSEEEPPSSSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTEEEEEEEEP',
2: 'PPHHHHHHHHHPPEEEEEEPTTSPEEEETTEEEESSSPTTHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHHHPTTHHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHHHTTPHHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',
3: 'PPHHHHHHHHTPPPSBPEEPTTSPEEETTTEEEEPPTTPPHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHTTPTTTHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHTTTTPHHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',
4: '--------------------------PPPHHHHHHHHHBPPPSSTTPGGGHHHHHHHHTTPPSSSPPSEEEEEPSPHHHHHGGGGSEEEPPTTSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTEEEEEEEEP'},
'PDB_CHAIN': {
0: '104L_B', 1: '11BG_B', 2: '102L_A', 3: '104L_A',
4: '11BG_A'}
}
create_uni_struct_input = {
'SP_PRIMARY': {
0: 'P00720', 1: 'P00669', 2: 'P00720',
3: 'P00720', 4: 'P00669'},
'SEC_STRUCT': {
0: 'PPHHHHHHHHHPPPSBPEEPTTSPEEPTTTPPPPPPSSPPHHHHHHHHHSPSPPTTBPPHHHHHHHHHHHHHHHHHHHHTPTTHHHHHHHSPSSHHHHHHHHHHHHPHHHHHHPHHHHHHHHTTPTTHHHHHHTSSHHHHHSPHHHHHHHHHHHSPSGGGGPXX',
1: '--------------------------PPPHHHHHHHHHBPTTPPTTPGGGHHHHHHHHTTPSSSSPPSEEEEEPSPHHHHHGGGGSEEEPPSSSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTEEEEEEEEP',
2: 'PPHHHHHHHHHPPEEEEEEPTTSPEEEETTEEEESSSPTTHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHHHPTTHHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHHHTTPHHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',
3: 'PPHHHHHHHHTPPPSBPEEPTTSPEEETTTEEEEPPTTPPHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHTTPTTTHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHTTTTPHHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',
4: '--------------------------PPPHHHHHHHHHBPPPSSTTPGGGHHHHHHHHTTPPSSSPPSEEEEEPSPHHHHHGGGGSEEEPPTTSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTEEEEEEEEP'},
'Unnamed: 0': {0: 0, 1: 1, 2: 2, 3: 3, 4: 4},
'PDB_CHAIN': {
0: '104L_B', 1: '11BG_B', 2: '102L_A',
3: '104L_A', 4: '11BG_A'}
}
create_uni_struct_expected = {
'SP_PRIMARY': {0: 'P00720', 1: 'P00669'},
'STRUCT': {
0: 'OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXX',
1: '--------------------------OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO'}
}
create_intervals_pdb_df = {
'SP_PRIMARY': {
0: 'Q5SLQ0', 1: 'Q5SLQ0', 2: 'Q805F6', 3: 'Q5SLQ0',
4: 'Q5SLQ0', 5: 'Q5SLQ0', 6: 'Q5SLQ0', 7: 'Q5SLQ0',
8: 'Q5SLQ0', 9: 'Q5SLQ0', 10: 'Q5SLQ0', 11: 'Q5SLQ0',
12: 'Q5SLQ0', 13: 'Q5SLQ0', 14: 'Q805F6', 15: 'Q5SLQ0',
16: 'Q5SLQ0', 17: 'Q5SLQ0', 18: 'Q5SLQ0', 19: 'Q5SLQ0',
20: 'Q5SLQ0', 21: 'Q5SLQ0', 22: 'Q5SLQ0', 23: 'Q5SLQ0',
24: 'Q5SLQ0', 25: 'Q5SLQ0', 26: 'Q5SLQ0', 27: 'Q5SLQ0',
28: 'Q5SLQ0', 29: 'Q5SLQ0', 30: 'Q5SLQ0', 31: 'Q5SLQ0',
32: 'Q5SLQ0', 33: 'Q5SLQ0', 34: 'Q5SLQ0', 35: 'Q5SLQ0',
36: 'Q5SLQ0', 37: 'Q5SLQ0', 38: 'Q5SLQ0', 39: 'Q5SLQ0',
40: 'Q5SLQ0', 41: 'Q5SLQ0', 42: 'Q5SLQ0', 43: 'Q5SLQ0',
44: 'Q5SLQ0', 45: 'Q5SLQ0', 46: 'Q5SLQ0', 47: 'Q5SLQ0',
48: 'Q5SLQ0', 49: 'Q5SLQ0'},
'SEC_STRUCT': {
0: 'XXXXXXXXXXXXXXXXXXPPPTTTSSPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
1: 'XXXXXXXXXXXXXXXPPPSPPHHHHSPSEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPBPPP',
2: '----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------XXXPPTTEETTTTEEPTTPSPSSSTTEETTEEPPTTPEEEPPSSSSPPEEPPSSPSSPPPPPXX-',
3: '-XXXXXXXXXXXXXXPPPSPPTTTSSPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
4: '------------------PPPSTTTSPSPPSSPTTPHHHHHHTBPTTPPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
5: 'XXXXXXXXXXXXXXXXXPPPPTTTTPPSEETTPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
6: 'XXXXXXXXXXXXXXXXXXPPPGGGSPPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
7: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPPEETTPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
8: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
9: '-XXXXXXXXXXXXXXPPPSPPTTTSSPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
10: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPSEESSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
11: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEESSPSSPPTTTGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
12: 'XXXXXXXXXXXXXXXPPPSPPHHHHSPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',
13: 'XXXXXXXXXXXXXXXXXXPPPSTTSSPPEESSPSSPHHHHTTSSPSSSPPPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
14: '----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------XXXPPTTEETTTTEEPTTPSPSSSTTEETTEEPPTTPEEEPPPTTPPPEEPPSSPSSPPXXXXX-',
15: 'XXXXXXXXXXXXXXXXXXPPPTTTSSPSPBSSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
16: 'XXXXXXXXXXXXXXXPPPSPPHHHHSSSEESSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
17: 'XXXXXXXXXXXXXXXPPPSPPTTTTSPSEESSPSSPHHHHHTTBPSSSPBPPHHHHTPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',
18: 'XXXXXXXXXXXXXXXXXXPPPTTSSSPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
19: 'XXXXXXXXXXXXXXXPPPSSPGGGGPPSPPTTPSSPHHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
20: '------------------PPPTTTTPSSEETTPTTPHHHHGGGBPTTPPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
21: 'XXXXXXXXXXXXXXXXXXPPPSTTSSPPEESSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',
22: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
23: '-XXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
24: '-XXXXXXXXXXXXXXPPPSPPTTTSSPPPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
25: '------------------PPPHHHHPPPEETTPSSPHHHHGGGBPTTPPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPPBPP',
26: 'XXXXXXXXXXXXXXXXXXPPPSGGGPPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
27: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSPPSSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
28: 'XXXXXXXXXXXXXXXPPPSPPHHHHSPSEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
29: '-XXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHHTTBPSSSPBPPHHHHPPPTTHHHHHHHHHHHHHHTTSSPSPPPEEPP',
30: 'XXXXXXXXXXXXXXXXXXPPPSTTSPPSPPSSPTTPHHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
31: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEETTPTTPHHHHGGGBPSSSPBPPHHHHTPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
32: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSPPSSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
33: 'XXXXXXXXXXXXXXXXXXPPPTTTSSPSEESSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
34: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',
35: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPSSPPTTGGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
36: '------------------PPPTTTTPPPEETTPSSPHHHHGGGBPTTSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
37: 'XXXXXXXXXXXXXXXXXXPPPSTTSPPSPPSSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
38: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPSPBTTPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
39: '-XXXXXXXXXXXXXXPPPSPPTTTSSPSEETTPTTPHHHHGGGBPSSSSBPPHHHHPPPTTHHHHHHHHHHHHHHHTSSPSPPPBPPP',
40: 'XXXXXXXXXXXXXXXXXXPPPTTTSSPPPBTTPTTPHHHHGGGSPSSSPPPPTTTSPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
41: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
42: 'XXXXXXXXXXXXXXXPPPSPPHHHHPPSEETTPTTPHHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',
43: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',
44: 'XXXXXXXXXXXXXXXXXXPPPTTTTSPPPBTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPBPPP',
45: 'XXXXXXXXXXXXXXXXXXPPPGGGSPPSPPSSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
46: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',
47: 'XXXXXXXXXXXXXXXPPPSSPHHHHSPSPPSSPSSPHHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',
48: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEETTPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',
49: 'XXXXXXXXXXXXXXXXXPPPPHHHHPPSEETTPSSPPHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP'},
'PDB_CHAIN': {
0: '4DV6_R', 1: '4LFB_R', 2: '3C05_B', 3: '2UU9_R',
4: '4K0K_R', 5: '4DR6_R', 6: '4DR3_R', 7: '4NXM_R',
8: '4LF7_R', 9: '2UUB_R', 10: '4LF5_R', 11: '4JI4_R',
12: '4LF9_R', 13: '4JI8_R', 14: '3C05_D', 15: '4JI3_R',
16: '4DR4_R', 17: '4LFC_R', 18: '4JI6_R', 19: '4JI1_R',
20: '4JYA_R', 21: '4DV1_R', 22: '4OX9_R', 23: '2UUA_R',
24: '2UUC_R', 25: '4JV5_R', 26: '4JI2_R', 27: '4DV4_R',
28: '4DR5_R', 29: '2UXB_R', 30: '4DUZ_R', 31: '4DUY_R',
32: '4DV7_R', 33: '4DV5_R', 34: '4DV3_R', 35: '4LF6_R',
36: '4KHP_R', 37: '4DV0_R', 38: '4LF4_R', 39: '2UXD_R',
40: '4JI5_R', 41: '4LF8_R', 42: '4DR2_R', 43: '4DV2_R',
44: '4JI7_R', 45: '4DR1_R', 46: '4JI0_R', 47: '4LFA_R',
48: '4NXN_R', 49: '4DR7_R'}
}
create_intervals_uni_df = {
'SP_PRIMARY': {0: 'Q5SLQ0', 1: 'Q805F6'},
'STRUCT': {
0: 'XXXXXXXXXXXXXXXXXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO',
1: '----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------XXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXXXXX-'}
}
create_intervals_expected = {
'SP_PRIMARY': {15401: 'Q805F6', 10068: 'Q5SLQ0'},
'STRUCT': {
15401: '----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------XXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXXXXX-',
10068: 'XXXXXXXXXXXXXXXXXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO'},
'MISSING': {
15401: [['conserved', (418, 421)], ['contained', (477, 482)]],
10068: [['contained', (0, 18)]]}
}
class ScoresData:
uni_df = {
'SP_PRIMARY': {
0: 'P30615',
139: 'P62805',
102: 'Q8KRK5'
},
'STRUCT': {
0: 'XOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO',
139: 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXXXXXXXXXXX',
102: '----------XXXXXXXXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO'
},
'MISSING': {
0: [
['conserved', (0, 1)],
['conflict', (95, 97)]
],
139: [
['overlap', (0, 28)],
['conflict', (92, 103)]
],
102: [
['conserved', (10, 19)]
]
}
}
create_scores_dict_expected = {
'iup_short': [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1],
'disordp_rna': [
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'esp_xray': [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0],
'disordp_dna': [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'dynamine': [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1],
'anchor_def': [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0],
'disordp_pro': [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
],
'morfpred': [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0
]
}
test_predictions_expected = {
'anchor_def': {
'-': 0.0,
'contained': 0.0,
'overlap': 0.0,
'discarded': 0.0,
'conserved': 0.0,
'O': 0.0,
'X': 0.0,
'conflict': 0.0},
'disordp_rna': {
'-': 0.0,
'contained': 0.0,
'overlap': 18.0,
'discarded': 0.0,
'conserved': 0.0,
'O': 41.0,
'X': 20.0,
'conflict': 2.0},
'esp_xray': {
'-': 10.0,
'contained': 0.0,
'overlap': 20.0,
'discarded': 0.0,
'conserved': 9.0,
'O': 10.0,
'X': 29.0,
'conflict': 0.0},
'morfpred': {
'-': 0.0,
'contained': 0.0,
'overlap': 3.0,
'discarded': 0.0,
'conserved': 0.0,
'O': 3.0,
'X': 3.0,
'conflict': 0.0},
'iup_short': {
'-': 10.0,
'contained': 0.0,
'overlap': 28.0,
'discarded': 0.0,
'conserved': 9.0,
'O': 6.0,
'X': 41.0,
'conflict': 4.0},
'disordp_dna': {
'-': 0.0,
'contained': 0.0,
'overlap': 27.0,
'discarded': 0.0,
'conserved': 1.0,
'O': 9.0,
'X': 28.0,
'conflict': 0.0},
'total': {
'-': 10.0,
'contained': 0.0,
'overlap': 28.0,
'discarded': 0.0,
'conserved': 9.0,
'O': 106.0,
'X': 48.0,
'conflict': 11.0},
'disordp_pro': {
'-': 0.0,
'contained': 0.0,
'overlap': 0.0,
'discarded': 0.0,
'conserved': 0.0,
'O': 0.0,
'X': 0.0,
'conflict': 0.0},
'dynamine': {
'-': 10.0,
'contained': 0.0,
'overlap': 17.0,
'discarded': 0.0,
'conserved': 9.0,
'O': 9.0,
'X': 32.0,
'conflict': 6.0}
}
test_fill_data_expected = {
'esp_xray-iup_short': {
'conserved': 1.0,
'contained': 0.0,
'conflict': 0.63636363636363635,
'overlap': 0.7142857142857143},
'dynamine-esp_xray': {
'conserved': 1.0,
'contained': 0.0,
'conflict': 0.45454545454545453,
'overlap': 0.8928571428571429},
'iup_short-dynamine': {
'conserved': 1.0,
'contained': 0.0,
'conflict': 0.81818181818181823,
'overlap': 0.6071428571428571}
}
class UniData:
P00720 = """\
>sp|P00720|ENLYS_BPT4 Endolysin OS=Enterobacteria phage T4 GN=E PE=1 SV=2
MNIFEMLRIDERLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSELDKAIGRNCNGVITK
DEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRCALINMVFQMGETGVAGFTNSLRM
LQQKRWDEAAVNLAKSIWYNQTPNRAKRVITTFRTGTWDAYKNL
"""
P02185 = """\
>sp|P02185|MYG_PHYCD Myoglobin OS=Physeter catodon GN=MB PE=1 SV=2
MVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRFKHLKTEAEMKASE
DLKKHGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRH
PGDFGADAQGAMNKALELFRKDIAAKYKELGYQG
"""
class TsvData:
pdb_seq_tsv_valid = """\
\tPDB\tCHAIN\tSP_PRIMARY\tRES_BEG\tRES_END\tSP_BEG\tSP_END\tPDB_SEQ
0\t101M\tA\tP02185\t1\t154\t1\t154\tMVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRVKHLKTEAEMKASEDLKKHGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRHPGNFGADAQGAMNKALELFRKDIAAKYKELGYQG
1\t102L\tA\tP00720\t1\t40\t1\t40\tMNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN
2\t102L\tA\tP00720\t42\t165\t41\t164\tAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL
3\t102M\tA\tP02185\t1\t154\t1\t154\tMVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRFKHLKTEAEMKASEDLKKAGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRHPGNFGADAQGAMNKALELFRKDIAAKYKELGYQG
4\t103L\tA\tP00720\t1\t40\t1\t40\tMNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN
"""
# github #25 Need one or two examples of obsolete proteins in pdb_seq.tsv
# TODO: Replace entries 5 and 6 below with real ones.
pdb_seq_tsv_with_obs = """\
\tPDB\tCHAIN\tSP_PRIMARY\tRES_BEG\tRES_END\tSP_BEG\tSP_END\tPDB_SEQ
0\t101M\tA\tP02185\t1\t154\t1\t154\tMVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRVKHLKTEAEMKASEDLKKHGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRHPGNFGADAQGAMNKALELFRKDIAAKYKELGYQG
1\t102L\tA\tP00720\t1\t40\t1\t40\tMNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN
2\t102L\tA\tP00720\t42\t165\t41\t164\tAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL
3\t102M\tA\tP02185\t1\t154\t1\t154\tMVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRFKHLKTEAEMKASEDLKKAGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRHPGNFGADAQGAMNKALELFRKDIAAKYKELGYQG
4\t103L\tA\tP00720\t1\t40\t1\t40\tMNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN
5\t104M\tA\tP45678\t1\t40\t1\t50\tNOTAVALIDPROTEINORSEQUENCE
6\t104L\tA\tP45678\t1\t40\t1\t60\tNOTAVALIDPROTEINORSEQUENCE
"""
| mit |
buqing2009/MissionPlanner | Lib/site-packages/scipy/ndimage/tests/test_filters.py | 55 | 1118 | ''' Some tests for filters '''
import numpy as np
from numpy.testing import assert_equal, assert_raises
import scipy.ndimage as sndi
def test_ticket_701():
    # Test generic filter sizes
    arr = np.arange(4).reshape((2,2))
    func = lambda x: np.min(x)
    res = sndi.generic_filter(arr, func, size=(1,1))
    # The following raises an error unless ticket 701 is fixed
    res2 = sndi.generic_filter(arr, func, size=1)
    assert_equal(res, res2)

def test_orders_gauss():
    # Check order inputs to Gaussians
    arr = np.zeros((1,))
    yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=0)
    yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=3)
    yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, -1
    yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, 4
    yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0)
    yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3)
    yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1
    yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, 4
| gpl-3.0 |
baloo/libnl | python/netlink/route/links/vlan.py | 22 | 1948 | #
# Copyright (c) 2011 Thomas Graf <[email protected]>
#
"""VLAN network link
"""
from __future__ import absolute_import
from ... import core as netlink
from .. import capi as capi
class VLANLink(object):
    def __init__(self, link):
        self._link = link

    @property
    @netlink.nlattr(type=int)
    def id(self):
        """vlan identifier"""
        return capi.rtnl_link_vlan_get_id(self._link)

    @id.setter
    def id(self, value):
        capi.rtnl_link_vlan_set_id(self._link, int(value))

    @property
    @netlink.nlattr(type=str)
    def flags(self):
        """ VLAN flags
        Setting this property will *Not* reset flags to value you supply in
        Examples:
        link.flags = '+xxx' # add xxx flag
        link.flags = 'xxx' # exactly the same
        link.flags = '-xxx' # remove xxx flag
        link.flags = [ '+xxx', '-yyy' ] # list operation
        """
        flags = capi.rtnl_link_vlan_get_flags(self._link)
        return capi.rtnl_link_vlan_flags2str(flags, 256)[0].split(',')

    def _set_flag(self, flag):
        if flag.startswith('-'):
            i = capi.rtnl_link_vlan_str2flags(flag[1:])
            capi.rtnl_link_vlan_unset_flags(self._link, i)
        elif flag.startswith('+'):
            i = capi.rtnl_link_vlan_str2flags(flag[1:])
            capi.rtnl_link_vlan_set_flags(self._link, i)
        else:
            i = capi.rtnl_link_vlan_str2flags(flag)
            capi.rtnl_link_vlan_set_flags(self._link, i)

    @flags.setter
    def flags(self, value):
        if type(value) is list:
            for flag in value:
                self._set_flag(flag)
        else:
            self._set_flag(value)

    ###################################################################
    # TODO:
    #   - ingress map
    #   - egress map

    def brief(self):
        return 'vlan-id {0}'.format(self.id)

def init(link):
    link.vlan = VLANLink(link._rtnl_link)
    return link.vlan
| lgpl-2.1 |
jjscarafia/odoo | addons/portal_project_issue/__openerp__.py | 375 | 1713 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Issue',
'version': '0.1',
'category': 'Tools',
'complexity': 'easy',
'description': """
This module adds issue menu and features to your portal if project_issue and portal are installed.
==================================================================================================
""",
'author': 'OpenERP SA',
'depends': ['project_issue','portal'],
'data': [
'security/portal_security.xml',
'security/ir.model.access.csv',
'portal_project_issue_view.xml',
'views/portal_project_issue.xml',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CCLab/Raw-Salad | scripts/db/budget/fundtr.py | 1 | 10946 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
import:
FINANCIAL PLANS OF STATE SPECIAL-PURPOSE FUNDS, TRADITIONAL LAYOUT
(PLANY FINANSOWE PANSTWOWYCH FUNDUSZY CELOWYCH W UKLADIE TRADYCIJNEM)
flat structure (each data unit is a separate doc in the collection)
parenting is achieved through the 'parent' key
the script also inserts a doc into the schema collection
(warning! if there's already a schema for the budget collection, it should first
be removed manually from the collection data_zz_schema)
the files needed to upload the budget:
- this file (fundtr.py)
- data file CSV, produced from XLS (for example, fundtr.csv)
- schema file JSON (for example, fundtr-schema.json)
type python fundtr.py -h for instructions
"""
import getpass
import os
import optparse
import csv
import pymongo
import simplejson as json
from ConfigParser import ConfigParser
#-----------------------------
def get_db_connect(fullpath, dbtype):
connect_dict= {}
defaults= {
'basedir': fullpath
}
cfg= ConfigParser(defaults)
cfg.read(fullpath)
connect_dict['host']= cfg.get(dbtype,'host')
connect_dict['port']= cfg.getint(dbtype,'port')
connect_dict['database']= cfg.get(dbtype,'database')
connect_dict['username']= cfg.get(dbtype,'username')
try:
connect_dict['password']= cfg.get(dbtype,'password')
except:
connect_dict['password']= None
return connect_dict
#-----------------------------
def sort_format(src):
"""
format 1-2-3... to 001-002-003...
src should be convertable to int
"""
src_list= src.split('-')
res_list= []
for elm in src_list:
try:
res_list.append('%03d' % int(elm))
except:
res_list.append(elm)
res= '-'.join(res_list)
return res
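# For example, sort_format('1-2-10') returns '001-002-010'; parts that cannot
# be converted to int (e.g. '1-x') are kept unchanged ('001-x').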
#-----------------------------
def db_insert(data_bulk, db, collname, clean_first=False):
collect= db[collname]
if clean_first:
collect.remove()
collect.insert(data_bulk)
return collect.find().count()
#-----------------------------
def clean_info(lst, info_idefs, cut_prefix):
fin_list= lst[:]
print "...move defined elements to the info key of their parents: %s" % info_idefs
for info_idef in info_idefs:
parent_idef= info_idef.rsplit("-",1)[0]
index_parent, index_info= -1, -1
i= 0
print "...looking for idefs: info %s; parent %s" % (info_idef, parent_idef)
for curr_doc in fin_list:
if cut_prefix:
curr_idef= curr_doc["idef"].split("-",1)[1]
else:
curr_idef= curr_doc["idef"]
if curr_idef == parent_idef:
index_parent= i
parent_dict= curr_doc
if curr_idef == info_idef:
index_info= i
info_dict= curr_doc
if index_parent > 0 and index_info > 0: # ok, we've got them both
break
i += 1
if index_parent < 0 and index_info < 0:
print "ERROR: can't move elements to the info key - impossible to find them and/or their parents!"
else:
if parent_dict["info"] is None:
parent_dict["info"]= []
print "...setting up info key for element %s" % parent_dict["idef"]
del info_dict["info"]
parent_dict["info"].append(info_dict)
del fin_list[index_info]
# filling leaves
print '-- correcting leaves'
fund_data_children= fin_list[:]
for fund_data_row in fin_list:
for fund_data_child in fund_data_children:
if fund_data_row['idef'] == fund_data_child['parent']:
fund_data_row['leaf']= False
break
return fin_list
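# For instance (with cut_prefix=False, as used below), info_idefs == ['1-2']
# removes the doc whose idef is '1-2' from the flat list and appends it to the
# 'info' list of its parent doc '1' (the parent idef is everything before the
# last '-').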
#-----------------------------
def fill_docs(fund_data):
print '-- filling out missing information'
level_label= ['a','b','c','d','e','f','g']
deepest_level= 0
total_frst_popr= 0
total_plan_nast= 0
for row_doc in fund_data:
row_doc['idef_sort']= sort_format(row_doc['idef'])
row_doc['leaf']= True # for a while, will fill correctly later
curr_level= row_doc['idef'].count('-') # count of '-' in idef tells about the level
row_doc['level']= level_label[curr_level]
if curr_level > deepest_level:
deepest_level= curr_level
if row_doc['frst_popr'] is None: # numeric values shouldn't be null
row_doc['frst_popr']= 0
if row_doc['plan_nast'] is None:
row_doc['plan_nast']= 0
if curr_level > 0: # fill parent
row_doc['parent']= row_doc['idef'].rpartition('-')[0]
row_doc['parent_sort']= sort_format(row_doc['parent'])
else:
row_doc['parent']= None # no parent at the level a
row_doc['parent_sort']= None
total_frst_popr += row_doc['frst_popr'] # calculating totals
total_plan_nast += row_doc['plan_nast']
# cleaning names
row_doc['name']= row_doc['name'].replace('\n', ' ')
row_doc['name']= row_doc['name'].replace('Ŝ', 'ż')
print '-- !!! deepest_level is', level_label[deepest_level]
print '-- filling out totals'
total_doc= {}
total_doc['idef']= '999999'
total_doc['idef_sort']= '999999'
total_doc['parent']= None
total_doc['parent_sort']= None
total_doc['level']= 'a'
total_doc['leaf']= True
total_doc['type']= 'Total'
total_doc['name']= 'Ogółem'
total_doc['paragrafy']= None
total_doc['info']= None
total_doc['frst_popr']= total_frst_popr
total_doc['plan_nast']= total_plan_nast
fund_data.append(total_doc)
return fund_data
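# For instance, a row with idef '1-2-3' ends up with idef_sort '001-002-003',
# level 'c' (two dashes), parent '1-2' and parent_sort '001-002'; top-level
# rows (no dash) get level 'a' and parent None.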
#-----------------------------
def csv_parse(csv_read, schema):
print '-- parsing csv file'
out= []
dbkey_alias= schema["alias"] # dict of aliases -> document keys in db
dbval_types= schema["type"] # dict of types -> values types in db
for row in csv_read:
keys= tuple(row)
keys_len= len(keys)
row= iter(row)
for row in csv_read:
i= 0
dict_row= {} # this holds the data of the current row
for field in row:
new_key= [v for k, v in dbkey_alias.iteritems() if i == int(k)][0]
new_type= None
if new_key in dbval_types:
new_type= dbval_types[new_key]
if new_type == "string":
dict_row[new_key] = str(field)
elif new_type == "int":
if field == '':
dict_row[new_key] = None
else:
dict_row[new_key] = int(field)
elif new_type == "float":
if ',' in field:
field= field.replace(',', '.')
dict_row[new_key]= float(field)
elif new_type == None:
try:
dict_row[new_key]= float(field) # then if it is a number
if dict_row[new_key].is_integer(): # it can be integer
dict_row[new_key] = int(field)
except:
dict_row[new_key] = field # no, it is a string
i += 1
dict_row['info']= None # filling only for those who have info key
out.append(dict_row)
return out
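# Sketch of the schema structure csv_parse() expects (hypothetical field
# names): "alias" maps CSV column positions (string keys) to document keys,
# and "type" optionally pins the stored type of a key:
#   {"alias": {"0": "idef", "1": "name", "2": "plan_nast"},
#    "type": {"idef": "string", "plan_nast": "float"}}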
#-----------------------------
if __name__ == "__main__":
# process command line options
cmdparser = optparse.OptionParser(usage="usage: python %prog [Options] source_file.csv source_schema.json")
cmdparser.add_option("-f", "--conf", action="store", dest="conf_filename", help="configuration file")
cmdparser.add_option("-l", "--collect", action="store",dest='collection_name',help="collection name")
cmdparser.add_option("-c", action="store_true",dest='dbact',help="clean db before insert (ignored if db is not updated)")
opts, args = cmdparser.parse_args()
conf_filename= opts.conf_filename
if conf_filename is None:
print 'No configuration file specified!'
exit()
try:
f_temp= open(conf_filename, 'rb')
except Exception as e:
print 'Cannot open .conf file:\n %s\n' % e
exit()
clean_db= opts.dbact # False - insert() data, True - remove() and then insert()
# get connection details
conn= get_db_connect(conf_filename, 'mongodb')
conn_host= conn['host']
conn_port= conn['port']
conn_db= conn['database']
try:
connection= pymongo.Connection(conn_host, conn_port)
db= connection[conn_db]
print '...connected to the database', db
except Exception as e:
print 'Unable to connect to the mongodb database:\n %s\n' % e
exit()
# authentication
conn_username= conn['username']
conn_password= conn['password']
if conn_password is None:
conn_password = getpass.getpass()
if db.authenticate(conn_username, conn_password) != 1:
print 'Cannot authenticate to db, exiting now'
exit()
# data collection
if opts.collection_name is None:
print 'Collection name not given - the name dd_xxxxyyyy_xx will be used'
collectname= 'dd_xxxxyyyy_xx'
else:
collectname= opts.collection_name
# CSV file
try:
src_file= open(args[0], 'rb')
except IOError as e:
print 'Unable to open file:\n %s\n' % e
exit()
csv_delim= ';'
csv_quote= '"'
try:
csv_read= csv.reader(src_file, delimiter= csv_delim, quotechar= csv_quote)
except Exception as e:
print 'Unable to read CSV file:\n %s\n' % e
exit()
# schema file
try:
filename_schema= args[1]
except:
filename_schema= None
if filename_schema is None:
filename_schema= args[0].rstrip('.csv')+'-schema.json'
try: #deserialize it into the object
sch_src= open(filename_schema, 'rb')
schema= json.load(sch_src, encoding='utf-8') # schema file
except Exception as e:
print 'Error in processing schema file:\n %s\n' % e
exit()
# create temporary dict
obj_parsed= csv_parse(csv_read, schema)
# fill it out with real data
obj_rep= fill_docs(obj_parsed) # processing and inserting the data
# for ii in obj_rep:
# print "%-15s %-20s %-15s %-20s %5s %-7s %-10s %-50s %10d %10d" % (
# ii['idef'], ii['idef_sort'], ii['parent'], ii['parent_sort'], ii['level'], ii['leaf'], ii['type'], ii['name'], ii['plan_nast'], ii['frst_popr']
# )
# WARNING! pecularity: move defined elements to the info key of their parents
info_idef_list= []
try:
info_idef_list= schema['info']
except:
pass
if len(info_idef_list) > 0:
obj_rep= clean_info(obj_rep, info_idef_list, cut_prefix=False)
print '-- inserting into the db'
print '-- ', db_insert(obj_rep, db, collectname, clean_db), 'records inserted'
print 'Done'
| bsd-3-clause |
cesargtz/YecoraOdoo | addons/account_sequence/__init__.py | 433 | 1104 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_sequence
import account_sequence_installer
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hanselke/erpnext-1 | erpnext/accounts/report/financial_statements.py | 42 | 7666 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, _dict
from frappe.utils import (flt, getdate, get_first_day, get_last_day,
add_months, add_days, formatdate)
def get_period_list(fiscal_year, periodicity, from_beginning=False):
"""Get a list of dict {"to_date": to_date, "key": key, "label": label}
		Periodicity can be (Yearly, Half-yearly, Quarterly, Monthly)"""
fy_start_end_date = frappe.db.get_value("Fiscal Year", fiscal_year, ["year_start_date", "year_end_date"])
if not fy_start_end_date:
frappe.throw(_("Fiscal Year {0} not found.").format(fiscal_year))
start_date = getdate(fy_start_end_date[0])
end_date = getdate(fy_start_end_date[1])
if periodicity == "Yearly":
period_list = [_dict({"to_date": end_date, "key": fiscal_year, "label": fiscal_year})]
else:
months_to_add = {
"Half-yearly": 6,
"Quarterly": 3,
"Monthly": 1
}[periodicity]
period_list = []
# start with first day, so as to avoid year to_dates like 2-April if ever they occur
to_date = get_first_day(start_date)
for i in xrange(12 / months_to_add):
to_date = add_months(to_date, months_to_add)
if to_date == get_first_day(to_date):
# if to_date is the first day, get the last day of previous month
to_date = add_days(to_date, -1)
else:
# to_date should be the last day of the new to_date's month
to_date = get_last_day(to_date)
if to_date <= end_date:
# the normal case
period_list.append(_dict({ "to_date": to_date }))
# if it ends before a full year
if to_date == end_date:
break
else:
# if a fiscal year ends before a 12 month period
period_list.append(_dict({ "to_date": end_date }))
break
# common processing
for opts in period_list:
key = opts["to_date"].strftime("%b_%Y").lower()
label = formatdate(opts["to_date"], "MMM YYYY")
opts.update({
"key": key.replace(" ", "_").replace("-", "_"),
"label": label,
"year_start_date": start_date,
"year_end_date": end_date
})
if from_beginning:
# set start date as None for all fiscal periods, used in case of Balance Sheet
opts["from_date"] = None
else:
opts["from_date"] = start_date
return period_list
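# Illustrative sketch (stdlib only, hypothetical fiscal year values): shows the
# quarter-end stepping and key format produced by get_period_list above without
# needing a Fiscal Year record in the database.
def _example_quarterly_period_keys():
    from datetime import date, timedelta
    start = date(2014, 4, 1)
    month, year, keys = start.month, start.year, []
    for _quarter in range(4):
        month += 3
        if month > 12:
            month, year = month - 12, year + 1
        # last day before the next quarter starts, e.g. 2014-06-30
        to_date = date(year, month, 1) - timedelta(days=1)
        keys.append(to_date.strftime("%b_%Y").lower())
    return keys  # ['jun_2014', 'sep_2014', 'dec_2014', 'mar_2015']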
def get_data(company, root_type, balance_must_be, period_list, ignore_closing_entries=False):
accounts = get_accounts(company, root_type)
if not accounts:
return None
accounts, accounts_by_name = filter_accounts(accounts)
gl_entries_by_account = get_gl_entries(company, period_list[0]["from_date"], period_list[-1]["to_date"],
accounts[0].lft, accounts[0].rgt, ignore_closing_entries=ignore_closing_entries)
calculate_values(accounts_by_name, gl_entries_by_account, period_list)
accumulate_values_into_parents(accounts, accounts_by_name, period_list)
out = prepare_data(accounts, balance_must_be, period_list)
if out:
add_total_row(out, balance_must_be, period_list)
return out
def calculate_values(accounts_by_name, gl_entries_by_account, period_list):
for entries in gl_entries_by_account.values():
for entry in entries:
d = accounts_by_name.get(entry.account)
for period in period_list:
# check if posting date is within the period
if entry.posting_date <= period.to_date:
d[period.key] = d.get(period.key, 0.0) + flt(entry.debit) - flt(entry.credit)
def accumulate_values_into_parents(accounts, accounts_by_name, period_list):
"""accumulate children's values in parent accounts"""
for d in reversed(accounts):
if d.parent_account:
for period in period_list:
accounts_by_name[d.parent_account][period.key] = accounts_by_name[d.parent_account].get(period.key, 0.0) + \
d.get(period.key, 0.0)
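# Illustrative sketch (hypothetical account names): child balances roll up into
# their parents, which is what accumulate_values_into_parents does for the chart
# of accounts before report rows are prepared.
def _example_accumulate_values():
    period = _dict({"key": "jun_2014"})
    parent = _dict({"name": "Debtors", "parent_account": None})
    child = _dict({"name": "Debtors - EU", "parent_account": "Debtors", "jun_2014": 150.0})
    accounts = [parent, child]
    accounts_by_name = dict((d.name, d) for d in accounts)
    accumulate_values_into_parents(accounts, accounts_by_name, [period])
    return accounts_by_name["Debtors"].get("jun_2014")  # 150.0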
def prepare_data(accounts, balance_must_be, period_list):
out = []
year_start_date = period_list[0]["year_start_date"].strftime("%Y-%m-%d")
year_end_date = period_list[-1]["year_end_date"].strftime("%Y-%m-%d")
for d in accounts:
# add to output
has_value = False
row = {
"account_name": d.account_name,
"account": d.name,
"parent_account": d.parent_account,
"indent": flt(d.indent),
"from_date": year_start_date,
"to_date": year_end_date
}
for period in period_list:
if d.get(period.key):
# change sign based on Debit or Credit, since calculation is done using (debit - credit)
d[period.key] *= (1 if balance_must_be=="Debit" else -1)
row[period.key] = flt(d.get(period.key, 0.0), 3)
if abs(row[period.key]) >= 0.005:
# ignore zero values
has_value = True
if has_value:
out.append(row)
return out
def add_total_row(out, balance_must_be, period_list):
row = {
"account_name": "'" + _("Total ({0})").format(balance_must_be) + "'",
"account": None
}
for period in period_list:
row[period.key] = out[0].get(period.key, 0.0)
out[0][period.key] = ""
out.append(row)
# blank row after Total
out.append({})
def get_accounts(company, root_type):
return frappe.db.sql("""select name, parent_account, lft, rgt, root_type, report_type, account_name from `tabAccount`
where company=%s and root_type=%s order by lft""", (company, root_type), as_dict=True)
def filter_accounts(accounts, depth=10):
parent_children_map = {}
accounts_by_name = {}
for d in accounts:
accounts_by_name[d.name] = d
parent_children_map.setdefault(d.parent_account or None, []).append(d)
filtered_accounts = []
def add_to_list(parent, level):
if level < depth:
children = parent_children_map.get(parent) or []
if parent == None:
sort_root_accounts(children)
for child in children:
child.indent = level
filtered_accounts.append(child)
add_to_list(child.name, level + 1)
add_to_list(None, 0)
return filtered_accounts, accounts_by_name
def sort_root_accounts(roots):
"""Sort root types as Asset, Liability, Equity, Income, Expense"""
def compare_roots(a, b):
if a.report_type != b.report_type and a.report_type == "Balance Sheet":
return -1
if a.root_type != b.root_type and a.root_type == "Asset":
return -1
if a.root_type == "Liability" and b.root_type == "Equity":
return -1
if a.root_type == "Income" and b.root_type == "Expense":
return -1
return 1
roots.sort(compare_roots)
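# Illustrative sketch (hypothetical root accounts): demonstrates the ordering
# produced by sort_root_accounts above.
def _example_sort_root_accounts():
    roots = [
        _dict({"name": "Income", "root_type": "Income", "report_type": "Profit and Loss"}),
        _dict({"name": "Asset", "root_type": "Asset", "report_type": "Balance Sheet"}),
        _dict({"name": "Expense", "root_type": "Expense", "report_type": "Profit and Loss"}),
    ]
    sort_root_accounts(roots)
    return [d.name for d in roots]  # ['Asset', 'Income', 'Expense']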
def get_gl_entries(company, from_date, to_date, root_lft, root_rgt, ignore_closing_entries=False):
"""Returns a dict like { "account": [gl entries], ... }"""
additional_conditions = []
if ignore_closing_entries:
additional_conditions.append("and ifnull(voucher_type, '')!='Period Closing Voucher'")
if from_date:
additional_conditions.append("and posting_date >= %(from_date)s")
gl_entries = frappe.db.sql("""select posting_date, account, debit, credit, is_opening from `tabGL Entry`
where company=%(company)s
{additional_conditions}
and posting_date <= %(to_date)s
and account in (select name from `tabAccount`
where lft >= %(lft)s and rgt <= %(rgt)s)
order by account, posting_date""".format(additional_conditions="\n".join(additional_conditions)),
{
"company": company,
"from_date": from_date,
"to_date": to_date,
"lft": root_lft,
"rgt": root_rgt
},
as_dict=True)
gl_entries_by_account = {}
for entry in gl_entries:
gl_entries_by_account.setdefault(entry.account, []).append(entry)
return gl_entries_by_account
def get_columns(period_list):
columns = [{
"fieldname": "account",
"label": _("Account"),
"fieldtype": "Link",
"options": "Account",
"width": 300
}]
for period in period_list:
columns.append({
"fieldname": period.key,
"label": period.label,
"fieldtype": "Currency",
"width": 150
})
return columns
| agpl-3.0 |
StevenBlack/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/urlfetcher_mock.py | 165 | 2053 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def make_fetcher_cls(urls):
"""UrlFetcher factory routine that simulates network access
using a dict of URLs -> contents."""
class MockFetcher(object):
def __init__(self, filesystem):
self._filesystem = filesystem
def fetch(self, url):
return urls[url]
def fetch_into_file(self, url):
f, fn = self._filesystem.open_binary_tempfile('mockfetcher')
f.write(self.fetch(url))
f.close()
return fn
return MockFetcher
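# Illustrative usage sketch (hypothetical URL and payload): the returned class is
# instantiated with a filesystem object, which is only needed by fetch_into_file().
def _example_fetch():
    fetcher_cls = make_fetcher_cls({'http://example.com/build.log': 'log contents'})
    fetcher = fetcher_cls(filesystem=None)
    return fetcher.fetch('http://example.com/build.log')  # 'log contents'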
| bsd-3-clause |
Gitlab11/odoo | addons/portal_stock/__openerp__.py | 437 | 1600 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Stock',
'version': '0.1',
'complexity': 'easy',
'description': """
This module adds access rules to your portal if stock and portal are installed.
==========================================================================================
""",
'author': 'OpenERP SA',
'depends': ['sale_stock','portal'],
'data': [
'security/portal_security.xml',
'security/ir.model.access.csv',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
marc-sensenich/ansible | lib/ansible/module_utils/vmware.py | 7 | 45357 | # -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import atexit
import os
import re
import ssl
import time
import traceback
from random import randint
REQUESTS_IMP_ERR = None
try:
# requests is required for exception handling of the ConnectionError
import requests
HAS_REQUESTS = True
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
HAS_REQUESTS = False
PYVMOMI_IMP_ERR = None
try:
from pyVim import connect
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
PYVMOMI_IMP_ERR = traceback.format_exc()
HAS_PYVMOMI = False
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.six import integer_types, iteritems, string_types, raise_from
from ansible.module_utils.basic import env_fallback, missing_required_lib
class TaskError(Exception):
def __init__(self, *args, **kwargs):
super(TaskError, self).__init__(*args, **kwargs)
def wait_for_task(task, max_backoff=64, timeout=3600):
"""Wait for given task using exponential back-off algorithm.
Args:
task: VMware task object
max_backoff: Maximum amount of sleep time in seconds
timeout: Timeout for the given task in seconds
Returns: Tuple with True and result for successful task
Raises: TaskError on failure
"""
failure_counter = 0
start_time = time.time()
while True:
if time.time() - start_time >= timeout:
raise TaskError("Timeout")
if task.info.state == vim.TaskInfo.State.success:
return True, task.info.result
if task.info.state == vim.TaskInfo.State.error:
error_msg = task.info.error
host_thumbprint = None
try:
error_msg = error_msg.msg
if hasattr(task.info.error, 'thumbprint'):
host_thumbprint = task.info.error.thumbprint
except AttributeError:
pass
finally:
raise_from(TaskError(error_msg, host_thumbprint), task.info.error)
if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
sleep_time = min(2 ** failure_counter + randint(1, 1000) / 1000, max_backoff)
time.sleep(sleep_time)
failure_counter += 1
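# Illustrative sketch: the sleep schedule wait_for_task uses while a task is still
# running - exponential growth plus a small random jitter, capped at max_backoff.
def _example_backoff_schedule(tries=6, max_backoff=64):
    schedule = []
    for failure_counter in range(tries):
        schedule.append(min(2 ** failure_counter + randint(1, 1000) / 1000, max_backoff))
    return schedule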
def wait_for_vm_ip(content, vm, timeout=300):
facts = dict()
interval = 15
while timeout > 0:
_facts = gather_vm_facts(content, vm)
if _facts['ipv4'] or _facts['ipv6']:
facts = _facts
break
time.sleep(interval)
timeout -= interval
return facts
def find_obj(content, vimtype, name, first=True, folder=None):
container = content.viewManager.CreateContainerView(folder or content.rootFolder, recursive=True, type=vimtype)
# Get all objects matching type (and name if given)
obj_list = [obj for obj in container.view if not name or to_text(obj.name) == to_text(name)]
container.Destroy()
# Return first match or None
if first:
if obj_list:
return obj_list[0]
return None
# Return all matching objects or empty list
return obj_list
def find_dvspg_by_name(dv_switch, portgroup_name):
portgroups = dv_switch.portgroup
for pg in portgroups:
if pg.name == portgroup_name:
return pg
return None
def find_object_by_name(content, name, obj_type, folder=None, recurse=True):
if not isinstance(obj_type, list):
obj_type = [obj_type]
objects = get_all_objs(content, obj_type, folder=folder, recurse=recurse)
for obj in objects:
if obj.name == name:
return obj
return None
def find_cluster_by_name(content, cluster_name, datacenter=None):
if datacenter:
folder = datacenter.hostFolder
else:
folder = content.rootFolder
return find_object_by_name(content, cluster_name, [vim.ClusterComputeResource], folder=folder)
def find_datacenter_by_name(content, datacenter_name):
return find_object_by_name(content, datacenter_name, [vim.Datacenter])
def get_parent_datacenter(obj):
""" Walk the parent tree to find the objects datacenter """
if isinstance(obj, vim.Datacenter):
return obj
datacenter = None
while True:
if not hasattr(obj, 'parent'):
break
obj = obj.parent
if isinstance(obj, vim.Datacenter):
datacenter = obj
break
return datacenter
def find_datastore_by_name(content, datastore_name):
return find_object_by_name(content, datastore_name, [vim.Datastore])
def find_dvs_by_name(content, switch_name):
return find_object_by_name(content, switch_name, [vim.DistributedVirtualSwitch])
def find_hostsystem_by_name(content, hostname):
return find_object_by_name(content, hostname, [vim.HostSystem])
def find_resource_pool_by_name(content, resource_pool_name):
return find_object_by_name(content, resource_pool_name, [vim.ResourcePool])
def find_network_by_name(content, network_name):
return find_object_by_name(content, network_name, [vim.Network])
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, cluster=None, folder=None, match_first=False):
""" UUID is unique to a VM, every other id returns the first match. """
si = content.searchIndex
vm = None
if vm_id_type == 'dns_name':
vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
elif vm_id_type == 'uuid':
# Search By BIOS UUID rather than instance UUID
vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True)
elif vm_id_type == 'ip':
vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
elif vm_id_type == 'vm_name':
folder = None
if cluster:
folder = cluster
elif datacenter:
folder = datacenter.hostFolder
vm = find_vm_by_name(content, vm_id, folder)
elif vm_id_type == 'inventory_path':
searchpath = folder
# get all objects for this path
f_obj = si.FindByInventoryPath(searchpath)
if f_obj:
if isinstance(f_obj, vim.Datacenter):
f_obj = f_obj.vmFolder
for c_obj in f_obj.childEntity:
if not isinstance(c_obj, vim.VirtualMachine):
continue
if c_obj.name == vm_id:
vm = c_obj
if match_first:
break
return vm
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
return find_object_by_name(content, vm_name, [vim.VirtualMachine], folder=folder, recurse=recurse)
def find_host_portgroup_by_name(host, portgroup_name):
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return None
def compile_folder_path_for_object(vobj):
""" make a /vm/foo/bar/baz like folder path for an object """
paths = []
if isinstance(vobj, vim.Folder):
paths.append(vobj.name)
thisobj = vobj
while hasattr(thisobj, 'parent'):
thisobj = thisobj.parent
try:
moid = thisobj._moId
except AttributeError:
moid = None
if moid in ['group-d1', 'ha-folder-root']:
break
if isinstance(thisobj, vim.Folder):
paths.append(thisobj.name)
paths.reverse()
return '/' + '/'.join(paths)
def _get_vm_prop(vm, attributes):
"""Safely get a property or return None"""
result = vm
for attribute in attributes:
try:
result = getattr(result, attribute)
except (AttributeError, IndexError):
return None
return result
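# Illustrative sketch (hypothetical stand-in objects): _get_vm_prop walks a chain of
# attributes and returns None instead of raising when any link is missing.
def _example_get_vm_prop():
    class _Guest(object):
        toolsRunningStatus = 'guestToolsRunning'
    class _FakeVM(object):
        guest = _Guest()
    running = _get_vm_prop(_FakeVM(), ('guest', 'toolsRunningStatus'))  # 'guestToolsRunning'
    missing = _get_vm_prop(_FakeVM(), ('guest', 'net', 'ipAddress'))    # None
    return running, missing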
def gather_vm_facts(content, vm):
""" Gather facts from vim.VirtualMachine object. """
facts = {
'module_hw': True,
'hw_name': vm.config.name,
'hw_power_status': vm.summary.runtime.powerState,
'hw_guest_full_name': vm.summary.guest.guestFullName,
'hw_guest_id': vm.summary.guest.guestId,
'hw_product_uuid': vm.config.uuid,
'hw_processor_count': vm.config.hardware.numCPU,
'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket,
'hw_memtotal_mb': vm.config.hardware.memoryMB,
'hw_interfaces': [],
'hw_datastores': [],
'hw_files': [],
'hw_esxi_host': None,
'hw_guest_ha_state': None,
'hw_is_template': vm.config.template,
'hw_folder': None,
'hw_version': vm.config.version,
'instance_uuid': vm.config.instanceUuid,
'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
'guest_question': vm.summary.runtime.question,
'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded,
'ipv4': None,
'ipv6': None,
'annotation': vm.config.annotation,
'customvalues': {},
'snapshots': [],
'current_snapshot': None,
'vnc': {},
}
# facts that may or may not exist
if vm.summary.runtime.host:
try:
host = vm.summary.runtime.host
facts['hw_esxi_host'] = host.summary.config.name
except vim.fault.NoPermission:
# User does not have read permission for the host system,
# proceed without this value. This value does not contribute or hamper
# provisioning or power management operations.
pass
if vm.summary.runtime.dasVmProtection:
facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected
datastores = vm.datastore
for ds in datastores:
facts['hw_datastores'].append(ds.info.name)
try:
files = vm.config.files
layout = vm.layout
if files:
facts['hw_files'] = [files.vmPathName]
for item in layout.snapshot:
for snap in item.snapshotFile:
if 'vmsn' in snap:
facts['hw_files'].append(snap)
for item in layout.configFile:
facts['hw_files'].append(os.path.join(os.path.dirname(files.vmPathName), item))
for item in vm.layout.logFile:
facts['hw_files'].append(os.path.join(files.logDirectory, item))
for item in vm.layout.disk:
for disk in item.diskFile:
facts['hw_files'].append(disk)
except Exception:
pass
facts['hw_folder'] = PyVmomi.get_vm_path(content, vm)
cfm = content.customFieldsManager
# Resolve custom values
for value_obj in vm.summary.customValue:
kn = value_obj.key
if cfm is not None and cfm.field:
for f in cfm.field:
if f.key == value_obj.key:
kn = f.name
# Exit the loop immediately, we found it
break
facts['customvalues'][kn] = value_obj.value
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = list(device.ipAddress)
if vm.guest.ipAddress:
if ':' in vm.guest.ipAddress:
facts['ipv6'] = vm.guest.ipAddress
else:
facts['ipv4'] = vm.guest.ipAddress
ethernet_idx = 0
for entry in vm.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
if entry.macAddress:
mac_addr = entry.macAddress
mac_addr_dash = mac_addr.replace(':', '-')
else:
mac_addr = mac_addr_dash = None
if (hasattr(entry, 'backing') and hasattr(entry.backing, 'port') and
hasattr(entry.backing.port, 'portKey') and hasattr(entry.backing.port, 'portgroupKey')):
port_group_key = entry.backing.port.portgroupKey
port_key = entry.backing.port.portKey
else:
port_group_key = None
port_key = None
factname = 'hw_eth' + str(ethernet_idx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': mac_addr,
'ipaddresses': net_dict.get(entry.macAddress, None),
'macaddress_dash': mac_addr_dash,
'summary': entry.deviceInfo.summary,
'portgroup_portkey': port_key,
'portgroup_key': port_group_key,
}
facts['hw_interfaces'].append('eth' + str(ethernet_idx))
ethernet_idx += 1
snapshot_facts = list_snapshots(vm)
if 'snapshots' in snapshot_facts:
facts['snapshots'] = snapshot_facts['snapshots']
facts['current_snapshot'] = snapshot_facts['current_snapshot']
facts['vnc'] = get_vnc_extraconfig(vm)
return facts
def deserialize_snapshot_obj(obj):
return {'id': obj.id,
'name': obj.name,
'description': obj.description,
'creation_time': obj.createTime,
'state': obj.state}
def list_snapshots_recursively(snapshots):
snapshot_data = []
for snapshot in snapshots:
snapshot_data.append(deserialize_snapshot_obj(snapshot))
snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList)
return snapshot_data
def get_current_snap_obj(snapshots, snapob):
snap_obj = []
for snapshot in snapshots:
if snapshot.snapshot == snapob:
snap_obj.append(snapshot)
snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob)
return snap_obj
def list_snapshots(vm):
result = {}
snapshot = _get_vm_prop(vm, ('snapshot',))
if not snapshot:
return result
if vm.snapshot is None:
return result
result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList)
current_snapref = vm.snapshot.currentSnapshot
current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
if current_snap_obj:
result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0])
else:
result['current_snapshot'] = dict()
return result
def get_vnc_extraconfig(vm):
result = {}
for opts in vm.config.extraConfig:
for optkeyname in ['enabled', 'ip', 'port', 'password']:
if opts.key.lower() == "remotedisplay.vnc." + optkeyname:
result[optkeyname] = opts.value
return result
def vmware_argument_spec():
return dict(
hostname=dict(type='str',
required=False,
fallback=(env_fallback, ['VMWARE_HOST']),
),
username=dict(type='str',
aliases=['user', 'admin'],
required=False,
fallback=(env_fallback, ['VMWARE_USER'])),
password=dict(type='str',
aliases=['pass', 'pwd'],
required=False,
no_log=True,
fallback=(env_fallback, ['VMWARE_PASSWORD'])),
port=dict(type='int',
default=443,
fallback=(env_fallback, ['VMWARE_PORT'])),
validate_certs=dict(type='bool',
required=False,
default=True,
fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS'])),
)
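# Illustrative sketch: a module would typically extend the common spec before
# creating its AnsibleModule; the extra 'datacenter' option here is hypothetical.
def _example_extended_argument_spec():
    argument_spec = vmware_argument_spec()
    argument_spec.update(datacenter=dict(type='str', required=False))
    return argument_spec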
def connect_to_api(module, disconnect_atexit=True):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
port = module.params.get('port', 443)
validate_certs = module.params['validate_certs']
if not hostname:
module.fail_json(msg="Hostname parameter is missing."
" Please specify this parameter in task or"
" export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'")
if not username:
module.fail_json(msg="Username parameter is missing."
" Please specify this parameter in task or"
" export environment variable like 'export VMWARE_USER=ESXI_USERNAME'")
if not password:
module.fail_json(msg="Password parameter is missing."
" Please specify this parameter in task or"
" export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'")
if validate_certs and not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
'python or use validate_certs=false.')
ssl_context = None
if not validate_certs and hasattr(ssl, 'SSLContext'):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.verify_mode = ssl.CERT_NONE
service_instance = None
try:
connect_args = dict(
host=hostname,
user=username,
pwd=password,
port=port,
)
if ssl_context:
connect_args.update(sslContext=ssl_context)
service_instance = connect.SmartConnect(**connect_args)
except vim.fault.InvalidLogin as invalid_login:
module.fail_json(msg="Unable to log on to vCenter or ESXi API at %s:%s as %s: %s" % (hostname, port, username, invalid_login.msg))
except vim.fault.NoPermission as no_permission:
module.fail_json(msg="User %s does not have required permission"
" to log on to vCenter or ESXi API at %s:%s : %s" % (username, hostname, port, no_permission.msg))
except (requests.ConnectionError, ssl.SSLError) as generic_req_exc:
module.fail_json(msg="Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (hostname, port, generic_req_exc))
except vmodl.fault.InvalidRequest as invalid_request:
# Request is malformed
module.fail_json(msg="Failed to get a response from server %s:%s as "
"request is malformed: %s" % (hostname, port, invalid_request.msg))
except Exception as generic_exc:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s:%s : %s" % (hostname, port, generic_exc))
if service_instance is None:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port))
# Disabling atexit should be used in special cases only.
# Such as IP change of the ESXi host which removes the connection anyway.
# Also removal significantly speeds up the return of the module
if disconnect_atexit:
atexit.register(connect.Disconnect, service_instance)
return service_instance.RetrieveContent()
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if (tools_status == 'toolsNotInstalled' or
tools_status == 'toolsNotRunning'):
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
try:
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
pm = content.guestOperationsManager.processManager
# https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
ps = vim.vm.guest.ProcessManager.ProgramSpec(
# programPath=program,
# arguments=args
programPath=program_path,
arguments=program_args,
workingDirectory=program_cwd,
)
res = pm.StartProgramInGuest(vm, creds, ps)
result['pid'] = res
pdata = pm.ListProcessesInGuest(vm, creds, [res])
# wait for pid to finish
while not pdata[0].endTime:
time.sleep(1)
pdata = pm.ListProcessesInGuest(vm, creds, [res])
result['owner'] = pdata[0].owner
result['startTime'] = pdata[0].startTime.isoformat()
result['endTime'] = pdata[0].endTime.isoformat()
result['exitCode'] = pdata[0].exitCode
if result['exitCode'] != 0:
result['failed'] = True
result['msg'] = "program exited non-zero"
else:
result['msg'] = "program completed successfully"
except Exception as e:
result['msg'] = str(e)
result['failed'] = True
return result
def serialize_spec(clonespec):
"""Serialize a clonespec or a relocation spec"""
data = {}
attrs = dir(clonespec)
attrs = [x for x in attrs if not x.startswith('_')]
for x in attrs:
xo = getattr(clonespec, x)
if callable(xo):
continue
xt = type(xo)
if xo is None:
data[x] = None
elif isinstance(xo, vim.vm.ConfigSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.RelocateSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDisk):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDeviceSpec.FileOperation):
data[x] = to_text(xo)
elif isinstance(xo, vim.Description):
data[x] = {
'dynamicProperty': serialize_spec(xo.dynamicProperty),
'dynamicType': serialize_spec(xo.dynamicType),
'label': serialize_spec(xo.label),
'summary': serialize_spec(xo.summary),
}
elif hasattr(xo, 'name'):
data[x] = to_text(xo) + ':' + to_text(xo.name)
elif isinstance(xo, vim.vm.ProfileSpec):
pass
elif issubclass(xt, list):
data[x] = []
for xe in xo:
data[x].append(serialize_spec(xe))
elif issubclass(xt, string_types + integer_types + (float, bool)):
if issubclass(xt, integer_types):
data[x] = int(xo)
else:
data[x] = to_text(xo)
elif issubclass(xt, bool):
data[x] = xo
elif issubclass(xt, dict):
data[to_text(x)] = {}
for k, v in xo.items():
k = to_text(k)
data[x][k] = serialize_spec(v)
else:
data[x] = str(xt)
return data
def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name):
dc = find_datacenter_by_name(content, datacenter_name)
if dc is None:
module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name)
cluster = find_cluster_by_name(content, cluster_name, datacenter=dc)
if cluster is None:
module.fail_json(msg="Unable to find cluster with name %s" % cluster_name)
for host in cluster.host:
if host.name == host_name:
return host, cluster
return None, cluster
def set_vm_power_state(content, vm, state, force, timeout=0):
"""
    Set the power state of a VM based on the current and requested states.
    force allows changing state from unexpected intermediate states (e.g. suspended).
"""
facts = gather_vm_facts(content, vm)
expected_state = state.replace('_', '').replace('-', '').lower()
current_state = facts['hw_power_status'].lower()
result = dict(
changed=False,
failed=False,
)
# Need Force
if not force and current_state not in ['poweredon', 'poweredoff']:
result['failed'] = True
result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state
return result
# State is not already true
if current_state != expected_state:
task = None
try:
if expected_state == 'poweredoff':
task = vm.PowerOff()
elif expected_state == 'poweredon':
task = vm.PowerOn()
elif expected_state == 'restarted':
if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
task = vm.Reset()
else:
result['failed'] = True
result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state
elif expected_state == 'suspended':
if current_state in ('poweredon', 'poweringon'):
task = vm.Suspend()
else:
result['failed'] = True
result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state
elif expected_state in ['shutdownguest', 'rebootguest']:
if current_state == 'poweredon':
if vm.guest.toolsRunningStatus == 'guestToolsRunning':
if expected_state == 'shutdownguest':
task = vm.ShutdownGuest()
if timeout > 0:
result.update(wait_for_poweroff(vm, timeout))
else:
task = vm.RebootGuest()
# Set result['changed'] immediately because
# shutdown and reboot return None.
result['changed'] = True
else:
result['failed'] = True
result['msg'] = "VMware tools should be installed for guest shutdown/reboot"
else:
result['failed'] = True
result['msg'] = "Virtual machine %s must be in poweredon state for guest shutdown/reboot" % vm.name
else:
result['failed'] = True
result['msg'] = "Unsupported expected state provided: %s" % expected_state
except Exception as e:
result['failed'] = True
result['msg'] = to_text(e)
if task:
wait_for_task(task)
if task.info.state == 'error':
result['failed'] = True
result['msg'] = task.info.error.msg
else:
result['changed'] = True
# need to get new metadata if changed
result['instance'] = gather_vm_facts(content, vm)
return result
def wait_for_poweroff(vm, timeout=300):
result = dict()
interval = 15
while timeout > 0:
if vm.runtime.powerState.lower() == 'poweredoff':
break
time.sleep(interval)
timeout -= interval
else:
result['failed'] = True
result['msg'] = 'Timeout while waiting for VM power off.'
return result
class PyVmomi(object):
def __init__(self, module):
"""
Constructor
"""
if not HAS_REQUESTS:
module.fail_json(msg=missing_required_lib('requests'),
exception=REQUESTS_IMP_ERR)
if not HAS_PYVMOMI:
module.fail_json(msg=missing_required_lib('PyVmomi'),
exception=PYVMOMI_IMP_ERR)
self.module = module
self.params = module.params
self.si = None
self.current_vm_obj = None
self.content = connect_to_api(self.module)
def is_vcenter(self):
"""
Check if given hostname is vCenter or ESXi host
Returns: True if given connection is with vCenter server
False if given connection is with ESXi server
"""
api_type = None
try:
api_type = self.content.about.apiType
except (vmodl.RuntimeFault, vim.fault.VimFault) as exc:
self.module.fail_json(msg="Failed to get status of vCenter server : %s" % exc.msg)
if api_type == 'VirtualCenter':
return True
elif api_type == 'HostAgent':
return False
def get_managed_objects_properties(self, vim_type, properties=None):
"""
Function to look up a Managed Object Reference in vCenter / ESXi Environment
:param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
:param properties: List of properties related to vim object e.g. Name
        :return: list of object contents retrieved by the property collector
"""
# Get Root Folder
root_folder = self.content.rootFolder
if properties is None:
properties = ['name']
# Create Container View with default root folder
mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True)
# Create Traversal spec
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
name="traversal_spec",
path='view',
skip=False,
type=vim.view.ContainerView
)
# Create Property Spec
property_spec = vmodl.query.PropertyCollector.PropertySpec(
type=vim_type, # Type of object to retrieved
all=False,
pathSet=properties
)
# Create Object Spec
object_spec = vmodl.query.PropertyCollector.ObjectSpec(
obj=mor,
skip=True,
selectSet=[traversal_spec]
)
# Create Filter Spec
filter_spec = vmodl.query.PropertyCollector.FilterSpec(
objectSet=[object_spec],
propSet=[property_spec],
reportMissingObjectsInResults=False
)
return self.content.propertyCollector.RetrieveContents([filter_spec])
# Virtual Machine related functions
def get_vm(self):
"""
Function to find unique virtual machine either by UUID or Name.
Returns: virtual machine object if found, else None.
"""
vm_obj = None
user_desired_path = None
if self.params['uuid']:
vm_obj = find_vm_by_id(self.content, vm_id=self.params['uuid'], vm_id_type="uuid")
elif self.params['name']:
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
vms = []
for temp_vm_object in objects:
if len(temp_vm_object.propSet) != 1:
continue
for temp_vm_object_property in temp_vm_object.propSet:
if temp_vm_object_property.val == self.params['name']:
vms.append(temp_vm_object.obj)
break
# get_managed_objects_properties may return multiple virtual machine,
# following code tries to find user desired one depending upon the folder specified.
if len(vms) > 1:
# We have found multiple virtual machines, decide depending upon folder value
if self.params['folder'] is None:
self.module.fail_json(msg="Multiple virtual machines with same name [%s] found, "
"Folder value is a required parameter to find uniqueness "
"of the virtual machine" % self.params['name'],
details="Please see documentation of the vmware_guest module "
"for folder parameter.")
# Get folder path where virtual machine is located
# User provided folder where user thinks virtual machine is present
user_folder = self.params['folder']
# User defined datacenter
user_defined_dc = self.params['datacenter']
# User defined datacenter's object
datacenter_obj = find_datacenter_by_name(self.content, self.params['datacenter'])
# Get Path for Datacenter
dcpath = compile_folder_path_for_object(vobj=datacenter_obj)
# Nested folder does not return trailing /
if not dcpath.endswith('/'):
dcpath += '/'
if user_folder in [None, '', '/']:
# User provided blank value or
# User provided only root value, we fail
self.module.fail_json(msg="vmware_guest found multiple virtual machines with same "
"name [%s], please specify folder path other than blank "
"or '/'" % self.params['name'])
elif user_folder.startswith('/vm/'):
# User provided nested folder under VMware default vm folder i.e. folder = /vm/india/finance
user_desired_path = "%s%s%s" % (dcpath, user_defined_dc, user_folder)
else:
# User defined datacenter is not nested i.e. dcpath = '/' , or
# User defined datacenter is nested i.e. dcpath = '/F0/DC0' or
# User provided folder starts with / and datacenter i.e. folder = /ha-datacenter/ or
# User defined folder starts with datacenter without '/' i.e.
# folder = DC0/vm/india/finance or
# folder = DC0/vm
user_desired_path = user_folder
for vm in vms:
# Check if user has provided same path as virtual machine
actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vm)
if not actual_vm_folder_path.startswith("%s%s" % (dcpath, user_defined_dc)):
continue
if user_desired_path in actual_vm_folder_path:
vm_obj = vm
break
elif vms:
# Unique virtual machine found.
vm_obj = vms[0]
if vm_obj:
self.current_vm_obj = vm_obj
return vm_obj
def gather_facts(self, vm):
"""
Function to gather facts of virtual machine.
Args:
vm: Name of virtual machine.
Returns: Facts dictionary of the given virtual machine.
"""
return gather_vm_facts(self.content, vm)
@staticmethod
def get_vm_path(content, vm_name):
"""
Function to find the path of virtual machine.
Args:
content: VMware content object
vm_name: virtual machine managed object
Returns: Folder of virtual machine if exists, else None
"""
folder_name = None
folder = vm_name.parent
if folder:
folder_name = folder.name
fp = folder.parent
# climb back up the tree to find our path, stop before the root folder
while fp is not None and fp.name is not None and fp != content.rootFolder:
folder_name = fp.name + '/' + folder_name
try:
fp = fp.parent
except Exception:
break
folder_name = '/' + folder_name
return folder_name
def get_vm_or_template(self, template_name=None):
"""
Find the virtual machine or virtual machine template using name
used for cloning purpose.
Args:
template_name: Name of virtual machine or virtual machine template
Returns: virtual machine or virtual machine template object
"""
template_obj = None
if not template_name:
return template_obj
if "/" in template_name:
vm_obj_path = os.path.dirname(template_name)
vm_obj_name = os.path.basename(template_name)
template_obj = find_vm_by_id(self.content, vm_obj_name, vm_id_type="inventory_path", folder=vm_obj_path)
if template_obj:
return template_obj
else:
template_obj = find_vm_by_id(self.content, vm_id=template_name, vm_id_type="uuid")
if template_obj:
return template_obj
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
templates = []
for temp_vm_object in objects:
if len(temp_vm_object.propSet) != 1:
continue
for temp_vm_object_property in temp_vm_object.propSet:
if temp_vm_object_property.val == template_name:
templates.append(temp_vm_object.obj)
break
if len(templates) > 1:
# We have found multiple virtual machine templates
self.module.fail_json(msg="Multiple virtual machines or templates with same name [%s] found." % template_name)
elif templates:
template_obj = templates[0]
return template_obj
# Cluster related functions
def find_cluster_by_name(self, cluster_name, datacenter_name=None):
"""
Find Cluster by name in given datacenter
Args:
            cluster_name: Name of the cluster to find
            datacenter_name: (optional) Name of datacenter
        Returns: cluster managed object if found, else None
"""
return find_cluster_by_name(self.content, cluster_name, datacenter=datacenter_name)
def get_all_hosts_by_cluster(self, cluster_name):
"""
Get all hosts from cluster by cluster name
Args:
cluster_name: Name of cluster
Returns: List of hosts
"""
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
return [host for host in cluster_obj.host]
else:
return []
# Hosts related functions
def find_hostsystem_by_name(self, host_name):
"""
Find Host by name
Args:
host_name: Name of ESXi host
        Returns: host system managed object if found, else None
"""
return find_hostsystem_by_name(self.content, hostname=host_name)
def get_all_host_objs(self, cluster_name=None, esxi_host_name=None):
"""
        Function to get all host system managed objects
Args:
cluster_name: Name of Cluster
esxi_host_name: Name of ESXi server
Returns: A list of all host system managed objects, else empty list
"""
host_obj_list = []
if not self.is_vcenter():
hosts = get_all_objs(self.content, [vim.HostSystem]).keys()
if hosts:
host_obj_list.append(list(hosts)[0])
else:
if cluster_name:
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
host_obj_list = [host for host in cluster_obj.host]
else:
self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
elif esxi_host_name:
if isinstance(esxi_host_name, str):
esxi_host_name = [esxi_host_name]
for host in esxi_host_name:
esxi_host_obj = self.find_hostsystem_by_name(host_name=host)
if esxi_host_obj:
host_obj_list = [esxi_host_obj]
else:
self.module.fail_json(changed=False, msg="ESXi '%s' not found" % host)
return host_obj_list
# Network related functions
@staticmethod
def find_host_portgroup_by_name(host, portgroup_name):
"""
Find Portgroup on given host
Args:
host: Host config object
portgroup_name: Name of portgroup
        Returns: portgroup object if found, else False
"""
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return False
def get_all_port_groups_by_host(self, host_system):
"""
        Function to get all Port Groups for a host
Args:
host_system: Name of Host System
Returns: List of Port Group Spec
"""
pgs_list = []
for pg in host_system.config.network.portgroup:
pgs_list.append(pg)
return pgs_list
# Datacenter
def find_datacenter_by_name(self, datacenter_name):
"""
Function to get datacenter managed object by name
Args:
datacenter_name: Name of datacenter
Returns: datacenter managed object if found else None
"""
return find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
def find_datastore_by_name(self, datastore_name):
"""
Function to get datastore managed object by name
Args:
datastore_name: Name of datastore
Returns: datastore managed object if found else None
"""
return find_datastore_by_name(self.content, datastore_name=datastore_name)
# Datastore cluster
def find_datastore_cluster_by_name(self, datastore_cluster_name):
"""
Function to get datastore cluster managed object by name
Args:
datastore_cluster_name: Name of datastore cluster
Returns: Datastore cluster managed object if found else None
"""
data_store_clusters = get_all_objs(self.content, [vim.StoragePod])
for dsc in data_store_clusters:
if dsc.name == datastore_cluster_name:
return dsc
return None
# VMDK stuff
def vmdk_disk_path_split(self, vmdk_path):
"""
Takes a string in the format
[datastore_name] path/to/vm_name.vmdk
Returns a tuple with multiple strings:
1. datastore_name: The name of the datastore (without brackets)
2. vmdk_fullpath: The "path/to/vm_name.vmdk" portion
3. vmdk_filename: The "vm_name.vmdk" portion of the string (os.path.basename equivalent)
4. vmdk_folder: The "path/to/" portion of the string (os.path.dirname equivalent)
"""
try:
datastore_name = re.match(r'^\[(.*?)\]', vmdk_path, re.DOTALL).groups()[0]
vmdk_fullpath = re.match(r'\[.*?\] (.*)$', vmdk_path).groups()[0]
vmdk_filename = os.path.basename(vmdk_fullpath)
vmdk_folder = os.path.dirname(vmdk_fullpath)
return datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder
except (IndexError, AttributeError) as e:
self.module.fail_json(msg="Bad path '%s' for filename disk vmdk image: %s" % (vmdk_path, to_native(e)))
def find_vmdk_file(self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folder):
"""
Return vSphere file object or fail_json
Args:
datastore_obj: Managed object of datastore
vmdk_fullpath: Path of VMDK file e.g., path/to/vm/vmdk_filename.vmdk
vmdk_filename: Name of vmdk e.g., VM0001_1.vmdk
vmdk_folder: Base dir of VMDK e.g, path/to/vm
"""
browser = datastore_obj.browser
datastore_name = datastore_obj.name
datastore_name_sq = "[" + datastore_name + "]"
if browser is None:
self.module.fail_json(msg="Unable to access browser for datastore %s" % datastore_name)
detail_query = vim.host.DatastoreBrowser.FileInfo.Details(
fileOwner=True,
fileSize=True,
fileType=True,
modification=True
)
search_spec = vim.host.DatastoreBrowser.SearchSpec(
details=detail_query,
matchPattern=[vmdk_filename],
searchCaseInsensitive=True,
)
search_res = browser.SearchSubFolders(
datastorePath=datastore_name_sq,
searchSpec=search_spec
)
changed = False
vmdk_path = datastore_name_sq + " " + vmdk_fullpath
try:
changed, result = wait_for_task(search_res)
except TaskError as task_e:
self.module.fail_json(msg=to_native(task_e))
if not changed:
self.module.fail_json(msg="No valid disk vmdk image found for path %s" % vmdk_path)
target_folder_path = datastore_name_sq + " " + vmdk_folder + '/'
for file_result in search_res.info.result:
for f in getattr(file_result, 'file'):
if f.path == vmdk_filename and file_result.folderPath == target_folder_path:
return f
self.module.fail_json(msg="No vmdk file found for path specified [%s]" % vmdk_path)
| gpl-3.0 |
ylcolala/ShadowsocksFork | shadowsocks/encrypt.py | 990 | 5180 | #!/usr/bin/env python
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
return os.urandom(length)
cached_keys = {}
def try_cipher(key, method=None):
Encryptor(key, method)
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
cached_key = '%s-%d-%d' % (password, key_len, iv_len)
r = cached_keys.get(cached_key, None)
if r:
return r
m = []
i = 0
while len(b''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = b''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[cached_key] = (key, iv)
return key, iv
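# Illustrative sketch: deriving the 32-byte key and 16-byte IV used by
# aes-256-cfb from a passphrase, the same way the ciphers below do.
def _example_evp_bytes_to_key():
    key, iv = EVP_BytesToKey(b'secret-passphrase', 32, 16)
    assert len(key) == 32 and len(iv) == 16
    return key, iv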
class Encryptor(object):
def __init__(self, key, method):
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = b''
self.decipher = None
method = method.lower()
self._method_info = self.get_method_info(method)
if self._method_info:
self.cipher = self.get_cipher(key, method, 1,
random_string(self._method_info[1]))
else:
logging.error('method %s not supported' % method)
sys.exit(1)
def get_method_info(self, method):
method = method.lower()
m = method_supported.get(method)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv):
password = common.to_bytes(password)
m = self._method_info
if m[0] > 0:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
else:
# key_length == 0 indicates we should use the key directly
key, iv = password, b''
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
return m[2](method, key, iv, op)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if self.decipher is None:
decipher_iv_len = self._method_info[1]
decipher_iv = buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = buf[decipher_iv_len:]
if len(buf) == 0:
return buf
return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if key_len > 0:
key, _ = EVP_BytesToKey(password, key_len, iv_len)
else:
key = password
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return b''.join(result)
CIPHERS_TO_TEST = [
'aes-128-cfb',
'aes-256-cfb',
'rc4-md5',
'salsa20',
'chacha20',
'table',
]
def test_encryptor():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
encryptor = Encryptor(b'key', method)
decryptor = Encryptor(b'key', method)
cipher = encryptor.encrypt(plain)
plain2 = decryptor.decrypt(cipher)
assert plain == plain2
def test_encrypt_all():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
cipher = encrypt_all(b'key', method, 1, plain)
plain2 = encrypt_all(b'key', method, 0, cipher)
assert plain == plain2
if __name__ == '__main__':
test_encrypt_all()
test_encryptor()
| apache-2.0 |
mongodb/mongo-python-driver | test/test_legacy_api.py | 2 | 38815 | # Copyright 2015-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test various legacy / deprecated API features."""
import sys
sys.path[0:0] = [""]
from bson.son import SON
from pymongo import ASCENDING, GEOHAYSTACK
from pymongo.common import partition_node
from pymongo.errors import (BulkWriteError,
ConfigurationError,
InvalidDocument,
InvalidOperation,
OperationFailure)
from pymongo.operations import IndexModel
from test import client_context, unittest, SkipTest
from test.test_client import IntegrationTest
from test.test_bulk import BulkTestBase, BulkAuthorizationTestBase
from test.utils import (DeprecationFilter,
oid_generated_on_process,
rs_or_single_client_noauth,
single_client,
wait_until)
class TestDeprecations(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestDeprecations, cls).setUpClass()
cls.deprecation_filter = DeprecationFilter("error")
@classmethod
def tearDownClass(cls):
cls.deprecation_filter.stop()
def test_geoHaystack_deprecation(self):
self.addCleanup(self.db.test.drop)
keys = [("pos", GEOHAYSTACK), ("type", ASCENDING)]
self.assertRaises(
DeprecationWarning, self.db.test.create_index, keys, bucketSize=1)
indexes = [IndexModel(keys, bucketSize=1)]
self.assertRaises(
DeprecationWarning, self.db.test.create_indexes, indexes)
class TestLegacy(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestLegacy, cls).setUpClass()
cls.w = client_context.w
cls.deprecation_filter = DeprecationFilter()
@classmethod
def tearDownClass(cls):
cls.deprecation_filter.stop()
class TestLegacyBulk(BulkTestBase):
@classmethod
def setUpClass(cls):
super(TestLegacyBulk, cls).setUpClass()
cls.deprecation_filter = DeprecationFilter()
@classmethod
def tearDownClass(cls):
cls.deprecation_filter.stop()
def test_empty(self):
bulk = self.coll.initialize_ordered_bulk_op()
self.assertRaises(InvalidOperation, bulk.execute)
def test_find(self):
# find() requires a selector.
bulk = self.coll.initialize_ordered_bulk_op()
self.assertRaises(TypeError, bulk.find)
self.assertRaises(TypeError, bulk.find, 'foo')
# No error.
bulk.find({})
def test_insert(self):
expected = {
'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 1,
'nRemoved': 0,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []
}
bulk = self.coll.initialize_ordered_bulk_op()
self.assertRaises(TypeError, bulk.insert, 1)
# find() before insert() is prohibited.
self.assertRaises(AttributeError, lambda: bulk.find({}).insert({}))
# We don't allow multiple documents per call.
self.assertRaises(TypeError, bulk.insert, [{}, {}])
self.assertRaises(TypeError, bulk.insert, ({} for _ in range(2)))
bulk.insert({})
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(1, self.coll.count())
doc = self.coll.find_one()
self.assertTrue(oid_generated_on_process(doc['_id']))
bulk = self.coll.initialize_unordered_bulk_op()
bulk.insert({})
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(2, self.coll.count())
def test_update(self):
expected = {
'nMatched': 2,
'nModified': 2,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 0,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []
}
self.coll.insert_many([{}, {}])
bulk = self.coll.initialize_ordered_bulk_op()
# update() requires find() first.
self.assertRaises(
AttributeError,
lambda: bulk.update({'$set': {'x': 1}}))
self.assertRaises(TypeError, bulk.find({}).update, 1)
self.assertRaises(ValueError, bulk.find({}).update, {})
# All fields must be $-operators.
self.assertRaises(ValueError, bulk.find({}).update, {'foo': 'bar'})
bulk.find({}).update({'$set': {'foo': 'bar'}})
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 2)
# All fields must be $-operators -- validated server-side.
bulk = self.coll.initialize_ordered_bulk_op()
updates = SON([('$set', {'x': 1}), ('y', 1)])
bulk.find({}).update(updates)
self.assertRaises(BulkWriteError, bulk.execute)
self.coll.delete_many({})
self.coll.insert_many([{}, {}])
bulk = self.coll.initialize_unordered_bulk_op()
bulk.find({}).update({'$set': {'bim': 'baz'}})
result = bulk.execute()
self.assertEqualResponse(
{'nMatched': 2,
'nModified': 2,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 0,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []},
result)
self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 2)
self.coll.insert_one({'x': 1})
bulk = self.coll.initialize_unordered_bulk_op()
bulk.find({'x': 1}).update({'$set': {'x': 42}})
result = bulk.execute()
self.assertEqualResponse(
{'nMatched': 1,
'nModified': 1,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 0,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []},
result)
self.assertEqual(1, self.coll.find({'x': 42}).count())
# Second time, x is already 42 so nModified is 0.
bulk = self.coll.initialize_unordered_bulk_op()
bulk.find({'x': 42}).update({'$set': {'x': 42}})
result = bulk.execute()
self.assertEqualResponse(
{'nMatched': 1,
'nModified': 0,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 0,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []},
result)
def test_update_one(self):
expected = {
'nMatched': 1,
'nModified': 1,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 0,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []
}
self.coll.insert_many([{}, {}])
bulk = self.coll.initialize_ordered_bulk_op()
# update_one() requires find() first.
self.assertRaises(
AttributeError,
lambda: bulk.update_one({'$set': {'x': 1}}))
self.assertRaises(TypeError, bulk.find({}).update_one, 1)
self.assertRaises(ValueError, bulk.find({}).update_one, {})
self.assertRaises(ValueError, bulk.find({}).update_one, {'foo': 'bar'})
bulk.find({}).update_one({'$set': {'foo': 'bar'}})
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 1)
self.coll.delete_many({})
self.coll.insert_many([{}, {}])
bulk = self.coll.initialize_unordered_bulk_op()
bulk.find({}).update_one({'$set': {'bim': 'baz'}})
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1)
# All fields must be $-operators -- validated server-side.
bulk = self.coll.initialize_ordered_bulk_op()
updates = SON([('$set', {'x': 1}), ('y', 1)])
bulk.find({}).update_one(updates)
self.assertRaises(BulkWriteError, bulk.execute)
def test_replace_one(self):
expected = {
'nMatched': 1,
'nModified': 1,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 0,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []
}
self.coll.insert_many([{}, {}])
bulk = self.coll.initialize_ordered_bulk_op()
self.assertRaises(TypeError, bulk.find({}).replace_one, 1)
self.assertRaises(ValueError,
bulk.find({}).replace_one, {'$set': {'foo': 'bar'}})
bulk.find({}).replace_one({'foo': 'bar'})
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 1)
self.coll.delete_many({})
self.coll.insert_many([{}, {}])
bulk = self.coll.initialize_unordered_bulk_op()
bulk.find({}).replace_one({'bim': 'baz'})
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1)
def test_remove(self):
# Test removing all documents, ordered.
expected = {
'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 2,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []
}
self.coll.insert_many([{}, {}])
bulk = self.coll.initialize_ordered_bulk_op()
# remove() must be preceded by find().
self.assertRaises(AttributeError, lambda: bulk.remove())
bulk.find({}).remove()
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(self.coll.count(), 0)
# Test removing some documents, ordered.
self.coll.insert_many([{}, {'x': 1}, {}, {'x': 1}])
bulk = self.coll.initialize_ordered_bulk_op()
bulk.find({'x': 1}).remove()
result = bulk.execute()
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 2,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []},
result)
self.assertEqual(self.coll.count(), 2)
self.coll.delete_many({})
# Test removing all documents, unordered.
self.coll.insert_many([{}, {}])
bulk = self.coll.initialize_unordered_bulk_op()
bulk.find({}).remove()
result = bulk.execute()
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 2,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []},
result)
# Test removing some documents, unordered.
self.assertEqual(self.coll.count(), 0)
self.coll.insert_many([{}, {'x': 1}, {}, {'x': 1}])
bulk = self.coll.initialize_unordered_bulk_op()
bulk.find({'x': 1}).remove()
result = bulk.execute()
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 2,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []},
result)
self.assertEqual(self.coll.count(), 2)
self.coll.delete_many({})
def test_remove_one(self):
bulk = self.coll.initialize_ordered_bulk_op()
# remove_one() must be preceded by find().
self.assertRaises(AttributeError, lambda: bulk.remove_one())
# Test removing one document, empty selector.
# First ordered, then unordered.
self.coll.insert_many([{}, {}])
expected = {
'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 1,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []
}
bulk.find({}).remove_one()
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(self.coll.count(), 1)
self.coll.insert_one({})
bulk = self.coll.initialize_unordered_bulk_op()
bulk.find({}).remove_one()
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual(self.coll.count(), 1)
# Test removing one document, with a selector.
# First ordered, then unordered.
self.coll.insert_one({'x': 1})
bulk = self.coll.initialize_ordered_bulk_op()
bulk.find({'x': 1}).remove_one()
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual([{}], list(self.coll.find({}, {'_id': False})))
self.coll.insert_one({'x': 1})
bulk = self.coll.initialize_unordered_bulk_op()
bulk.find({'x': 1}).remove_one()
result = bulk.execute()
self.assertEqualResponse(expected, result)
self.assertEqual([{}], list(self.coll.find({}, {'_id': False})))
def test_upsert(self):
bulk = self.coll.initialize_ordered_bulk_op()
# upsert() requires find() first.
self.assertRaises(
AttributeError,
lambda: bulk.upsert())
expected = {
'nMatched': 0,
'nModified': 0,
'nUpserted': 1,
'nInserted': 0,
'nRemoved': 0,
'upserted': [{'index': 0, '_id': '...'}]
}
bulk.find({}).upsert().replace_one({'foo': 'bar'})
result = bulk.execute()
self.assertEqualResponse(expected, result)
bulk = self.coll.initialize_ordered_bulk_op()
bulk.find({}).upsert().update_one({'$set': {'bim': 'baz'}})
result = bulk.execute()
self.assertEqualResponse(
{'nMatched': 1,
'nModified': 1,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 0,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []},
result)
self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1)
bulk = self.coll.initialize_ordered_bulk_op()
bulk.find({}).upsert().update({'$set': {'bim': 'bop'}})
# Non-upsert, no matches.
bulk.find({'x': 1}).update({'$set': {'x': 2}})
result = bulk.execute()
self.assertEqualResponse(
{'nMatched': 1,
'nModified': 1,
'nUpserted': 0,
'nInserted': 0,
'nRemoved': 0,
'upserted': [],
'writeErrors': [],
'writeConcernErrors': []},
result)
self.assertEqual(self.coll.find({'bim': 'bop'}).count(), 1)
self.assertEqual(self.coll.find({'x': 2}).count(), 0)
def test_upsert_large(self):
big = 'a' * (client_context.client.max_bson_size - 37)
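        # A sketch of the size math (assumption, not spelled out in the test):
        # the 37-byte allowance presumably leaves room for the remaining keys
        # of the upsert document so the command stays under max_bson_size.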
bulk = self.coll.initialize_ordered_bulk_op()
bulk.find({'x': 1}).upsert().update({'$set': {'s': big}})
result = bulk.execute()
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 1,
'nInserted': 0,
'nRemoved': 0,
'upserted': [{'index': 0, '_id': '...'}]},
result)
self.assertEqual(1, self.coll.find({'x': 1}).count())
def test_client_generated_upsert_id(self):
batch = self.coll.initialize_ordered_bulk_op()
batch.find({'_id': 0}).upsert().update_one({'$set': {'a': 0}})
batch.find({'a': 1}).upsert().replace_one({'_id': 1})
# This is just here to make the counts right in all cases.
batch.find({'_id': 2}).upsert().replace_one({'_id': 2})
result = batch.execute()
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 3,
'nInserted': 0,
'nRemoved': 0,
'upserted': [{'index': 0, '_id': 0},
{'index': 1, '_id': 1},
{'index': 2, '_id': 2}]},
result)
def test_single_ordered_batch(self):
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'a': 1})
batch.find({'a': 1}).update_one({'$set': {'b': 1}})
batch.find({'a': 2}).upsert().update_one({'$set': {'b': 2}})
batch.insert({'a': 3})
batch.find({'a': 3}).remove()
result = batch.execute()
self.assertEqualResponse(
{'nMatched': 1,
'nModified': 1,
'nUpserted': 1,
'nInserted': 2,
'nRemoved': 1,
'upserted': [{'index': 2, '_id': '...'}]},
result)
def test_single_error_ordered_batch(self):
self.coll.create_index('a', unique=True)
self.addCleanup(self.coll.drop_index, [('a', 1)])
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'b': 1, 'a': 1})
batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}})
batch.insert({'b': 3, 'a': 2})
try:
batch.execute()
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 1,
'nRemoved': 0,
'upserted': [],
'writeConcernErrors': [],
'writeErrors': [
{'index': 1,
'code': 11000,
'errmsg': '...',
'op': {'q': {'b': 2},
'u': {'$set': {'a': 1}},
'multi': False,
'upsert': True}}]},
result)
def test_multiple_error_ordered_batch(self):
self.coll.create_index('a', unique=True)
self.addCleanup(self.coll.drop_index, [('a', 1)])
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'b': 1, 'a': 1})
batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}})
batch.find({'b': 3}).upsert().update_one({'$set': {'a': 2}})
batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}})
batch.insert({'b': 4, 'a': 3})
batch.insert({'b': 5, 'a': 1})
try:
batch.execute()
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 1,
'nRemoved': 0,
'upserted': [],
'writeConcernErrors': [],
'writeErrors': [
{'index': 1,
'code': 11000,
'errmsg': '...',
'op': {'q': {'b': 2},
'u': {'$set': {'a': 1}},
'multi': False,
'upsert': True}}]},
result)
def test_single_unordered_batch(self):
batch = self.coll.initialize_unordered_bulk_op()
batch.insert({'a': 1})
batch.find({'a': 1}).update_one({'$set': {'b': 1}})
batch.find({'a': 2}).upsert().update_one({'$set': {'b': 2}})
batch.insert({'a': 3})
batch.find({'a': 3}).remove()
result = batch.execute()
self.assertEqualResponse(
{'nMatched': 1,
'nModified': 1,
'nUpserted': 1,
'nInserted': 2,
'nRemoved': 1,
'upserted': [{'index': 2, '_id': '...'}],
'writeErrors': [],
'writeConcernErrors': []},
result)
def test_single_error_unordered_batch(self):
self.coll.create_index('a', unique=True)
self.addCleanup(self.coll.drop_index, [('a', 1)])
batch = self.coll.initialize_unordered_bulk_op()
batch.insert({'b': 1, 'a': 1})
batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}})
batch.insert({'b': 3, 'a': 2})
try:
batch.execute()
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 2,
'nRemoved': 0,
'upserted': [],
'writeConcernErrors': [],
'writeErrors': [
{'index': 1,
'code': 11000,
'errmsg': '...',
'op': {'q': {'b': 2},
'u': {'$set': {'a': 1}},
'multi': False,
'upsert': True}}]},
result)
def test_multiple_error_unordered_batch(self):
self.coll.create_index('a', unique=True)
self.addCleanup(self.coll.drop_index, [('a', 1)])
batch = self.coll.initialize_unordered_bulk_op()
batch.insert({'b': 1, 'a': 1})
batch.find({'b': 2}).upsert().update_one({'$set': {'a': 3}})
batch.find({'b': 3}).upsert().update_one({'$set': {'a': 4}})
batch.find({'b': 4}).upsert().update_one({'$set': {'a': 3}})
batch.insert({'b': 5, 'a': 2})
batch.insert({'b': 6, 'a': 1})
try:
batch.execute()
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
# Assume the update at index 1 runs before the update at index 3,
# although the spec does not require it. Same for inserts.
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 2,
'nInserted': 2,
'nRemoved': 0,
'upserted': [
{'index': 1, '_id': '...'},
{'index': 2, '_id': '...'}],
'writeConcernErrors': [],
'writeErrors': [
{'index': 3,
'code': 11000,
'errmsg': '...',
'op': {'q': {'b': 4},
'u': {'$set': {'a': 3}},
'multi': False,
'upsert': True}},
{'index': 5,
'code': 11000,
'errmsg': '...',
'op': {'_id': '...', 'b': 6, 'a': 1}}]},
result)
@client_context.require_version_max(4, 8) # PYTHON-2436
def test_large_inserts_ordered(self):
big = 'x' * self.coll.database.client.max_bson_size
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'b': 1, 'a': 1})
batch.insert({'big': big})
batch.insert({'b': 2, 'a': 2})
try:
batch.execute()
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqual(1, result['nInserted'])
self.coll.delete_many({})
big = 'x' * (1024 * 1024 * 4)
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'a': 1, 'big': big})
batch.insert({'a': 2, 'big': big})
batch.insert({'a': 3, 'big': big})
batch.insert({'a': 4, 'big': big})
batch.insert({'a': 5, 'big': big})
batch.insert({'a': 6, 'big': big})
result = batch.execute()
self.assertEqual(6, result['nInserted'])
self.assertEqual(6, self.coll.count())
def test_large_inserts_unordered(self):
big = 'x' * self.coll.database.client.max_bson_size
batch = self.coll.initialize_unordered_bulk_op()
batch.insert({'b': 1, 'a': 1})
batch.insert({'big': big})
batch.insert({'b': 2, 'a': 2})
try:
batch.execute()
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqual(2, result['nInserted'])
self.coll.delete_many({})
big = 'x' * (1024 * 1024 * 4)
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'a': 1, 'big': big})
batch.insert({'a': 2, 'big': big})
batch.insert({'a': 3, 'big': big})
batch.insert({'a': 4, 'big': big})
batch.insert({'a': 5, 'big': big})
batch.insert({'a': 6, 'big': big})
result = batch.execute()
self.assertEqual(6, result['nInserted'])
self.assertEqual(6, self.coll.count())
def test_numerous_inserts(self):
# Ensure we don't exceed server's 1000-document batch size limit.
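        # With 2100 documents and a 1000-document ceiling, the driver has to
        # split the work into at least three batches behind the scenes while
        # still reporting a single combined result.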
n_docs = 2100
batch = self.coll.initialize_unordered_bulk_op()
for _ in range(n_docs):
batch.insert({})
result = batch.execute()
self.assertEqual(n_docs, result['nInserted'])
self.assertEqual(n_docs, self.coll.count())
# Same with ordered bulk.
self.coll.delete_many({})
batch = self.coll.initialize_ordered_bulk_op()
for _ in range(n_docs):
batch.insert({})
result = batch.execute()
self.assertEqual(n_docs, result['nInserted'])
self.assertEqual(n_docs, self.coll.count())
def test_multiple_execution(self):
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({})
batch.execute()
self.assertRaises(InvalidOperation, batch.execute)
def test_generator_insert(self):
def gen():
yield {'a': 1, 'b': 1}
yield {'a': 1, 'b': 2}
yield {'a': 2, 'b': 3}
yield {'a': 3, 'b': 5}
yield {'a': 5, 'b': 8}
result = self.coll.insert_many(gen())
self.assertEqual(5, len(result.inserted_ids))
class TestLegacyBulkNoResults(BulkTestBase):
@classmethod
def setUpClass(cls):
super(TestLegacyBulkNoResults, cls).setUpClass()
cls.deprecation_filter = DeprecationFilter()
@classmethod
def tearDownClass(cls):
cls.deprecation_filter.stop()
def tearDown(self):
self.coll.delete_many({})
def test_no_results_ordered_success(self):
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'_id': 1})
batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}})
batch.insert({'_id': 2})
batch.find({'_id': 1}).remove_one()
self.assertTrue(batch.execute({'w': 0}) is None)
wait_until(lambda: 2 == self.coll.count(),
'insert 2 documents')
wait_until(lambda: self.coll.find_one({'_id': 1}) is None,
'removed {"_id": 1}')
def test_no_results_ordered_failure(self):
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'_id': 1})
batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}})
batch.insert({'_id': 2})
# Fails with duplicate key error.
batch.insert({'_id': 1})
# Should not be executed since the batch is ordered.
batch.find({'_id': 1}).remove_one()
self.assertTrue(batch.execute({'w': 0}) is None)
wait_until(lambda: 3 == self.coll.count(),
'insert 3 documents')
self.assertEqual({'_id': 1}, self.coll.find_one({'_id': 1}))
def test_no_results_unordered_success(self):
batch = self.coll.initialize_unordered_bulk_op()
batch.insert({'_id': 1})
batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}})
batch.insert({'_id': 2})
batch.find({'_id': 1}).remove_one()
self.assertTrue(batch.execute({'w': 0}) is None)
wait_until(lambda: 2 == self.coll.count(),
'insert 2 documents')
wait_until(lambda: self.coll.find_one({'_id': 1}) is None,
'removed {"_id": 1}')
def test_no_results_unordered_failure(self):
batch = self.coll.initialize_unordered_bulk_op()
batch.insert({'_id': 1})
batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}})
batch.insert({'_id': 2})
# Fails with duplicate key error.
batch.insert({'_id': 1})
# Should be executed since the batch is unordered.
batch.find({'_id': 1}).remove_one()
self.assertTrue(batch.execute({'w': 0}) is None)
wait_until(lambda: 2 == self.coll.count(),
'insert 2 documents')
wait_until(lambda: self.coll.find_one({'_id': 1}) is None,
'removed {"_id": 1}')
class TestLegacyBulkWriteConcern(BulkTestBase):
@classmethod
def setUpClass(cls):
super(TestLegacyBulkWriteConcern, cls).setUpClass()
cls.w = client_context.w
cls.secondary = None
if cls.w > 1:
for member in client_context.ismaster['hosts']:
if member != client_context.ismaster['primary']:
cls.secondary = single_client(*partition_node(member))
break
# We tested wtimeout errors by specifying a write concern greater than
# the number of members, but in MongoDB 2.7.8+ this causes a different
# sort of error, "Not enough data-bearing nodes". In recent servers we
# use a failpoint to pause replication on a secondary.
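        # The failpoint approach is visible in cause_wtimeout() below: turning
        # on 'rsSyncApplyStop' on a secondary stalls replication, so a write
        # with w=self.w and wtimeout=1 reliably times out.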
cls.need_replication_stopped = client_context.version.at_least(2, 7, 8)
cls.deprecation_filter = DeprecationFilter()
@classmethod
def tearDownClass(cls):
cls.deprecation_filter.stop()
if cls.secondary:
cls.secondary.close()
def cause_wtimeout(self, batch):
if self.need_replication_stopped:
if not client_context.test_commands_enabled:
raise SkipTest("Test commands must be enabled.")
self.secondary.admin.command('configureFailPoint',
'rsSyncApplyStop',
mode='alwaysOn')
try:
return batch.execute({'w': self.w, 'wtimeout': 1})
finally:
self.secondary.admin.command('configureFailPoint',
'rsSyncApplyStop',
mode='off')
else:
return batch.execute({'w': self.w + 1, 'wtimeout': 1})
def test_fsync_and_j(self):
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'a': 1})
self.assertRaises(
ConfigurationError,
batch.execute, {'fsync': True, 'j': True})
@client_context.require_replica_set
def test_write_concern_failure_ordered(self):
# Ensure we don't raise on wnote.
batch = self.coll.initialize_ordered_bulk_op()
batch.find({"something": "that does no exist"}).remove()
self.assertTrue(batch.execute({"w": self.w}))
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'a': 1})
batch.insert({'a': 2})
# Replication wtimeout is a 'soft' error.
# It shouldn't stop batch processing.
try:
self.cause_wtimeout(batch)
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 0,
'nInserted': 2,
'nRemoved': 0,
'upserted': [],
'writeErrors': []},
result)
# When talking to legacy servers there will be a
# write concern error for each operation.
self.assertTrue(len(result['writeConcernErrors']) > 0)
failed = result['writeConcernErrors'][0]
self.assertEqual(64, failed['code'])
self.assertTrue(isinstance(failed['errmsg'], str))
self.coll.delete_many({})
self.coll.create_index('a', unique=True)
self.addCleanup(self.coll.drop_index, [('a', 1)])
# Fail due to write concern support as well
# as duplicate key error on ordered batch.
batch = self.coll.initialize_ordered_bulk_op()
batch.insert({'a': 1})
batch.find({'a': 3}).upsert().replace_one({'b': 1})
batch.insert({'a': 1})
batch.insert({'a': 2})
try:
self.cause_wtimeout(batch)
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqualResponse(
{'nMatched': 0,
'nModified': 0,
'nUpserted': 1,
'nInserted': 1,
'nRemoved': 0,
'upserted': [{'index': 1, '_id': '...'}],
'writeErrors': [
{'index': 2,
'code': 11000,
'errmsg': '...',
'op': {'_id': '...', 'a': 1}}]},
result)
self.assertTrue(len(result['writeConcernErrors']) > 1)
failed = result['writeErrors'][0]
self.assertTrue("duplicate" in failed['errmsg'])
@client_context.require_replica_set
def test_write_concern_failure_unordered(self):
# Ensure we don't raise on wnote.
batch = self.coll.initialize_unordered_bulk_op()
batch.find({"something": "that does no exist"}).remove()
self.assertTrue(batch.execute({"w": self.w}))
batch = self.coll.initialize_unordered_bulk_op()
batch.insert({'a': 1})
batch.find({'a': 3}).upsert().update_one({'$set': {'a': 3, 'b': 1}})
batch.insert({'a': 2})
# Replication wtimeout is a 'soft' error.
# It shouldn't stop batch processing.
try:
self.cause_wtimeout(batch)
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqual(2, result['nInserted'])
self.assertEqual(1, result['nUpserted'])
self.assertEqual(0, len(result['writeErrors']))
# When talking to legacy servers there will be a
# write concern error for each operation.
self.assertTrue(len(result['writeConcernErrors']) > 1)
self.coll.delete_many({})
self.coll.create_index('a', unique=True)
self.addCleanup(self.coll.drop_index, [('a', 1)])
# Fail due to write concern support as well
# as duplicate key error on unordered batch.
batch = self.coll.initialize_unordered_bulk_op()
batch.insert({'a': 1})
batch.find({'a': 3}).upsert().update_one({'$set': {'a': 3,
'b': 1}})
batch.insert({'a': 1})
batch.insert({'a': 2})
try:
self.cause_wtimeout(batch)
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqual(2, result['nInserted'])
self.assertEqual(1, result['nUpserted'])
self.assertEqual(1, len(result['writeErrors']))
# When talking to legacy servers there will be a
# write concern error for each operation.
self.assertTrue(len(result['writeConcernErrors']) > 1)
failed = result['writeErrors'][0]
self.assertEqual(2, failed['index'])
self.assertEqual(11000, failed['code'])
self.assertTrue(isinstance(failed['errmsg'], str))
self.assertEqual(1, failed['op']['a'])
failed = result['writeConcernErrors'][0]
self.assertEqual(64, failed['code'])
self.assertTrue(isinstance(failed['errmsg'], str))
upserts = result['upserted']
self.assertEqual(1, len(upserts))
self.assertEqual(1, upserts[0]['index'])
self.assertTrue(upserts[0].get('_id'))
class TestLegacyBulkAuthorization(BulkAuthorizationTestBase):
@classmethod
def setUpClass(cls):
super(TestLegacyBulkAuthorization, cls).setUpClass()
cls.deprecation_filter = DeprecationFilter()
@classmethod
def tearDownClass(cls):
cls.deprecation_filter.stop()
def test_readonly(self):
# We test that an authorization failure aborts the batch and is raised
# as OperationFailure.
cli = rs_or_single_client_noauth(
username='readonly', password='pw', authSource='pymongo_test')
coll = cli.pymongo_test.test
bulk = coll.initialize_ordered_bulk_op()
bulk.insert({'x': 1})
self.assertRaises(OperationFailure, bulk.execute)
def test_no_remove(self):
# We test that an authorization failure aborts the batch and is raised
# as OperationFailure.
cli = rs_or_single_client_noauth(
username='noremove', password='pw', authSource='pymongo_test')
coll = cli.pymongo_test.test
bulk = coll.initialize_ordered_bulk_op()
bulk.insert({'x': 1})
bulk.find({'x': 2}).upsert().replace_one({'x': 2})
bulk.find({}).remove() # Prohibited.
bulk.insert({'x': 3}) # Never attempted.
self.assertRaises(OperationFailure, bulk.execute)
self.assertEqual(set([1, 2]), set(self.coll.distinct('x')))
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
KerkhoffTechnologies/shinken | shinken/objects/notificationway.py | 17 | 10841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from item import Item, Items
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp
from shinken.log import logger
_special_properties = ('service_notification_commands', 'host_notification_commands',
'service_notification_period', 'host_notification_period')
class NotificationWay(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'notificationway'
properties = Item.properties.copy()
properties.update({
'notificationway_name':
StringProp(fill_brok=['full_status']),
'host_notifications_enabled':
BoolProp(default=True, fill_brok=['full_status']),
'service_notifications_enabled':
BoolProp(default=True, fill_brok=['full_status']),
'host_notification_period':
StringProp(fill_brok=['full_status']),
'service_notification_period':
StringProp(fill_brok=['full_status']),
'host_notification_options':
ListProp(default=[''], fill_brok=['full_status'], split_on_coma=True),
'service_notification_options':
ListProp(default=[''], fill_brok=['full_status'], split_on_coma=True),
'host_notification_commands':
ListProp(fill_brok=['full_status']),
'service_notification_commands':
ListProp(fill_brok=['full_status']),
'min_business_impact':
IntegerProp(default=0, fill_brok=['full_status']),
})
running_properties = Item.running_properties.copy()
    # This table is used to transform old parameter names into new ones,
    # i.e. from the Nagios2 format to the Nagios3 one,
    # or from deprecated Shinken names like criticity.
old_properties = {
'min_criticity': 'min_business_impact',
}
macros = {}
# For debugging purpose only (nice name)
def get_name(self):
return self.notificationway_name
    # Check the service notification_options against the given state and
    # whether t falls within service_notification_period
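    # In short: the answer is False when notifications are disabled, when the
    # command is not one of ours, when the business impact is too low or when
    # the options contain 'n'; otherwise t must fall inside the notification
    # period and the state/type must map to an enabled option letter, e.g. a
    # way with service_notification_options = ['w', 'c'] is only notified of
    # WARNING and CRITICAL problems.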
def want_service_notification(self, t, state, type, business_impact, cmd=None):
if not self.service_notifications_enabled:
return False
        # Maybe the command we were asked about is not ours but belongs to
        # another notification way on the same contact. If so, bail out
if cmd and cmd not in self.service_notification_commands:
return False
# If the business_impact is not high enough, we bail out
if business_impact < self.min_business_impact:
return False
b = self.service_notification_period.is_time_valid(t)
if 'n' in self.service_notification_options:
return False
t = {'WARNING': 'w', 'UNKNOWN': 'u', 'CRITICAL': 'c',
'RECOVERY': 'r', 'FLAPPING': 'f', 'DOWNTIME': 's'}
if type == 'PROBLEM':
if state in t:
return b and t[state] in self.service_notification_options
elif type == 'RECOVERY':
if type in t:
return b and t[type] in self.service_notification_options
elif type == 'ACKNOWLEDGEMENT':
return b
elif type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
return b and 'f' in self.service_notification_options
elif type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED'):
# No notification when a downtime was cancelled. Is that true??
# According to the documentation we need to look at _host_ options
return b and 's' in self.host_notification_options
return False
    # Check the host notification_options against the given state and
    # whether t falls within host_notification_period
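    # Same short-circuit logic as want_service_notification, with host states
    # (DOWN, UNREACHABLE, ...) mapped to the d/u/r/f/s option letters.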
def want_host_notification(self, t, state, type, business_impact, cmd=None):
if not self.host_notifications_enabled:
return False
# If the business_impact is not high enough, we bail out
if business_impact < self.min_business_impact:
return False
        # Maybe the command we were asked about is not ours but belongs to
        # another notification way on the same contact. If so, bail out
if cmd and cmd not in self.host_notification_commands:
return False
b = self.host_notification_period.is_time_valid(t)
if 'n' in self.host_notification_options:
return False
t = {'DOWN': 'd', 'UNREACHABLE': 'u', 'RECOVERY': 'r',
'FLAPPING': 'f', 'DOWNTIME': 's'}
if type == 'PROBLEM':
if state in t:
return b and t[state] in self.host_notification_options
elif type == 'RECOVERY':
if type in t:
return b and t[type] in self.host_notification_options
elif type == 'ACKNOWLEDGEMENT':
return b
elif type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
return b and 'f' in self.host_notification_options
elif type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED'):
return b and 's' in self.host_notification_options
return False
# Call to get our commands to launch a Notification
def get_notification_commands(self, type):
# service_notification_commands for service
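        # e.g. get_notification_commands('host') returns the list stored in
        # self.host_notification_commands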
notif_commands_prop = type + '_notification_commands'
notif_commands = getattr(self, notif_commands_prop)
return notif_commands
    # Check that the required properties are set:
    # contacts OR contactgroups is needed
def is_correct(self):
state = True
cls = self.__class__
        # Raise all previously seen errors, like unknown commands or timeperiods
if self.configuration_errors != []:
state = False
for err in self.configuration_errors:
logger.error("[item::%s] %s", self.get_name(), err)
        # A null notif way is a notif way that will do nothing (service = n, host = n)
is_null_notifway = False
if (hasattr(self, 'service_notification_options') and
self.service_notification_options == ['n']):
if (hasattr(self, 'host_notification_options') and
self.host_notification_options == ['n']):
is_null_notifway = True
return True
for prop, entry in cls.properties.items():
if prop not in _special_properties:
if not hasattr(self, prop) and entry.required:
logger.warning("[notificationway::%s] %s property not set",
self.get_name(), prop)
state = False # Bad boy...
# Ok now we manage special cases...
# Service part
if not hasattr(self, 'service_notification_commands'):
logger.warning("[notificationway::%s] do not have any "
"service_notification_commands defined", self.get_name())
state = False
else:
for cmd in self.service_notification_commands:
if cmd is None:
logger.warning("[notificationway::%s] a "
"service_notification_command is missing", self.get_name())
state = False
                elif not cmd.is_valid():
logger.warning("[notificationway::%s] a "
"service_notification_command is invalid", self.get_name())
state = False
if getattr(self, 'service_notification_period', None) is None:
logger.warning("[notificationway::%s] the "
"service_notification_period is invalid", self.get_name())
state = False
# Now host part
if not hasattr(self, 'host_notification_commands'):
logger.warning("[notificationway::%s] do not have any "
"host_notification_commands defined", self.get_name())
state = False
else:
for cmd in self.host_notification_commands:
if cmd is None:
logger.warning("[notificationway::%s] a "
"host_notification_command is missing", self.get_name())
state = False
                elif not cmd.is_valid():
logger.warning("[notificationway::%s] a host_notification_command "
"is invalid (%s)", cmd.get_name(), str(cmd.__dict__))
state = False
if getattr(self, 'host_notification_period', None) is None:
logger.warning("[notificationway::%s] the host_notification_period "
"is invalid", self.get_name())
state = False
return state
# In the scheduler we need to relink the commandCall with
# the real commands
def late_linkify_nw_by_commands(self, commands):
props = ['service_notification_commands', 'host_notification_commands']
for prop in props:
for cc in getattr(self, prop, []):
cc.late_linkify_with_command(commands)
class NotificationWays(Items):
name_property = "notificationway_name"
inner_class = NotificationWay
def linkify(self, timeperiods, commands):
self.linkify_with_timeperiods(timeperiods, 'service_notification_period')
self.linkify_with_timeperiods(timeperiods, 'host_notification_period')
self.linkify_command_list_with_commands(commands, 'service_notification_commands')
self.linkify_command_list_with_commands(commands, 'host_notification_commands')
def new_inner_member(self, name=None, params={}):
if name is None:
name = NotificationWay.id
params['notificationway_name'] = name
# print "Asking a new inner notificationway from name %s with params %s" % (name, params)
nw = NotificationWay(params)
self.add_item(nw)
| agpl-3.0 |
cloudbau/glance | glance/tests/functional/test_cache_middleware.py | 1 | 32705 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests a Glance API server which uses the caching middleware that
uses the default SQLite cache driver. We use the filesystem store,
but that is really not relevant, as the image cache is transparent
to the backend store.
"""
import hashlib
import json
import os
import shutil
import sys
import time
import httplib2
from glance.tests import functional
from glance.tests.utils import (skip_if_disabled,
execute,
xattr_writes_supported,
minimal_headers)
from glance.tests.functional.store_utils import (setup_http,
get_http_uri)
FIVE_KB = 5 * 1024
class BaseCacheMiddlewareTest(object):
@skip_if_disabled
def test_cache_middleware_transparent_v1(self):
"""
We test that putting the cache middleware into the
application pipeline gives us transparent image caching
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# Add an image and verify a 200 OK is returned
image_data = "*" * FIVE_KB
headers = minimal_headers('Image1')
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201)
data = json.loads(content)
self.assertEqual(data['image']['checksum'],
hashlib.md5(image_data).hexdigest())
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], "Image1")
self.assertEqual(data['image']['is_public'], True)
image_id = data['image']['id']
# Verify image not in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
self.assertFalse(os.path.exists(image_cached_path))
# Grab the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image now in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
# You might wonder why the heck this is here... well, it's here
# because it took me forever to figure out that the disk write
# cache in Linux was causing random failures of the os.path.exists
# assert directly below this. Basically, since the cache is writing
# the image file to disk in a different process, the write buffers
# don't flush the cache file during an os.rename() properly, resulting
# in a false negative on the file existence check below. This little
# loop pauses the execution of this process for no more than 1.5
# seconds. If after that time the cached image file still doesn't
# appear on disk, something really is wrong, and the assert should
# trigger...
i = 0
while not os.path.exists(image_cached_path) and i < 30:
time.sleep(0.05)
i = i + 1
self.assertTrue(os.path.exists(image_cached_path))
# Now, we delete the image from the server and verify that
# the image cache no longer contains the deleted image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
self.assertFalse(os.path.exists(image_cached_path))
self.stop_servers()
@skip_if_disabled
def test_cache_middleware_transparent_v2(self):
"""Ensure the v2 API image transfer calls trigger caching"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# Add an image and verify success
path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
headers = {'content-type': 'application/json'}
image_entity = {
'name': 'Image1',
'visibility': 'public',
'container_format': 'bare',
'disk_format': 'raw',
}
response, content = http.request(path, 'POST',
headers=headers,
body=json.dumps(image_entity))
self.assertEqual(response.status, 201)
data = json.loads(content)
image_id = data['id']
path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port,
image_id)
headers = {'content-type': 'application/octet-stream'}
image_data = "*" * FIVE_KB
response, content = http.request(path, 'PUT',
headers=headers,
body=image_data)
self.assertEqual(response.status, 204)
# Verify image not in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
self.assertFalse(os.path.exists(image_cached_path))
# Grab the image
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image now in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
# Now, we delete the image from the server and verify that
# the image cache no longer contains the deleted image
path = "http://%s:%d/v2/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 204)
self.assertFalse(os.path.exists(image_cached_path))
self.stop_servers()
@skip_if_disabled
def test_cache_remote_image(self):
"""
We test that caching is no longer broken for remote images
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
# Add a remote image and verify a 201 Created is returned
remote_uri = get_http_uri(self, '2')
headers = {'X-Image-Meta-Name': 'Image2',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Image-Meta-Location': remote_uri}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201)
data = json.loads(content)
self.assertEqual(data['image']['size'], FIVE_KB)
image_id = data['image']['id']
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
# Grab the image
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Grab the image again to ensure it can be served out from
# cache with the correct size
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(int(response['content-length']), FIVE_KB)
self.stop_servers()
@skip_if_disabled
def test_cache_middleware_trans_v1_without_download_image_policy(self):
"""
Ensure the image v1 API image transfer applied 'download_image'
policy enforcement.
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# Add an image and verify a 200 OK is returned
image_data = "*" * FIVE_KB
headers = minimal_headers('Image1')
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201)
data = json.loads(content)
self.assertEqual(data['image']['checksum'],
hashlib.md5(image_data).hexdigest())
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], "Image1")
self.assertEqual(data['image']['is_public'], True)
image_id = data['image']['id']
# Verify image not in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
self.assertFalse(os.path.exists(image_cached_path))
rules = {"context_is_admin": "role:admin", "default": "",
"download_image": "!"}
self.set_policy_rules(rules)
# Grab the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 403)
# Now, we delete the image from the server and verify that
# the image cache no longer contains the deleted image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
self.assertFalse(os.path.exists(image_cached_path))
self.stop_servers()
@skip_if_disabled
def test_cache_middleware_trans_v2_without_download_image_policy(self):
"""
Ensure the image v2 API image transfer applied 'download_image'
policy enforcement.
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# Add an image and verify success
path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
headers = {'content-type': 'application/json'}
image_entity = {
'name': 'Image1',
'visibility': 'public',
'container_format': 'bare',
'disk_format': 'raw',
}
response, content = http.request(path, 'POST',
headers=headers,
body=json.dumps(image_entity))
self.assertEqual(response.status, 201)
data = json.loads(content)
image_id = data['id']
path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port,
image_id)
headers = {'content-type': 'application/octet-stream'}
image_data = "*" * FIVE_KB
response, content = http.request(path, 'PUT',
headers=headers,
body=image_data)
self.assertEqual(response.status, 204)
# Verify image not in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
self.assertFalse(os.path.exists(image_cached_path))
rules = {"context_is_admin": "role:admin", "default": "",
"download_image": "!"}
self.set_policy_rules(rules)
# Grab the image
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 403)
# Now, we delete the image from the server and verify that
# the image cache no longer contains the deleted image
path = "http://%s:%d/v2/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 204)
self.assertFalse(os.path.exists(image_cached_path))
self.stop_servers()
class BaseCacheManageMiddlewareTest(object):
"""Base test class for testing cache management middleware"""
def verify_no_images(self):
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('images' in data)
self.assertEqual(0, len(data['images']))
def add_image(self, name):
"""
Adds an image and returns the newly-added image
identifier
"""
image_data = "*" * FIVE_KB
headers = minimal_headers('%s' % name)
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201)
data = json.loads(content)
self.assertEqual(data['image']['checksum'],
hashlib.md5(image_data).hexdigest())
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], name)
self.assertEqual(data['image']['is_public'], True)
return data['image']['id']
def verify_no_cached_images(self):
"""
Verify no images in the image cache
"""
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
self.assertEqual(data['cached_images'], [])
@skip_if_disabled
def test_user_not_authorized(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
self.verify_no_images()
image_id1 = self.add_image("Image1")
image_id2 = self.add_image("Image2")
# Verify image does not yet show up in cache (we haven't "hit"
# it yet using a GET /images/1 ...
self.verify_no_cached_images()
# Grab the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id1)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image now in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(1, len(cached_images))
self.assertEqual(image_id1, cached_images[0]['image_id'])
# Set policy to disallow access to cache management
rules = {"manage_image_cache": '!'}
self.set_policy_rules(rules)
# Verify an unprivileged user cannot see cached images
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 403)
# Verify an unprivileged user cannot delete images from the cache
path = "http://%s:%d/v1/cached_images/%s" % ("127.0.0.1",
self.api_port, image_id1)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 403)
# Verify an unprivileged user cannot delete all cached images
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 403)
# Verify an unprivileged user cannot queue an image
path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1",
self.api_port, image_id2)
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(response.status, 403)
self.stop_servers()
@skip_if_disabled
def test_cache_manage_get_cached_images(self):
"""
Tests that cached images are queryable
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
self.verify_no_images()
image_id = self.add_image("Image1")
# Verify image does not yet show up in cache (we haven't "hit"
# it yet using a GET /images/1 ...
self.verify_no_cached_images()
# Grab the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image now in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
# Verify the last_modified/last_accessed values are valid floats
for cached_image in data['cached_images']:
for time_key in ('last_modified', 'last_accessed'):
time_val = cached_image[time_key]
try:
float(time_val)
except ValueError:
self.fail('%s time %s for cached image %s not a valid '
'float' % (time_key, time_val,
cached_image['image_id']))
cached_images = data['cached_images']
self.assertEqual(1, len(cached_images))
self.assertEqual(image_id, cached_images[0]['image_id'])
self.assertEqual(0, cached_images[0]['hits'])
# Hit the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image hits increased in output of manage GET
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(1, len(cached_images))
self.assertEqual(image_id, cached_images[0]['image_id'])
self.assertEqual(1, cached_images[0]['hits'])
self.stop_servers()
@skip_if_disabled
def test_cache_manage_delete_cached_images(self):
"""
Tests that cached images may be deleted
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
self.verify_no_images()
ids = {}
# Add a bunch of images...
for x in xrange(4):
ids[x] = self.add_image("Image%s" % str(x))
# Verify no images in cached_images because no image has been hit
# yet using a GET /images/<IMAGE_ID> ...
self.verify_no_cached_images()
# Grab the images, essentially caching them...
for x in xrange(4):
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
ids[x])
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200,
"Failed to find image %s" % ids[x])
# Verify images now in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(4, len(cached_images))
for x in xrange(4, 0): # Cached images returned last modified order
self.assertEqual(ids[x], cached_images[x]['image_id'])
self.assertEqual(0, cached_images[x]['hits'])
        # Delete the third cached image and verify it is no longer in the cache
path = "http://%s:%d/v1/cached_images/%s" % ("127.0.0.1",
self.api_port, ids[2])
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(3, len(cached_images))
self.assertTrue(ids[2] not in [x['image_id'] for x in cached_images])
# Delete all cached images and verify nothing in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(0, len(cached_images))
self.stop_servers()
@skip_if_disabled
def test_cache_manage_delete_queued_images(self):
"""
Tests that all queued images may be deleted at once
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
self.verify_no_images()
ids = {}
NUM_IMAGES = 4
# Add and then queue some images
for x in xrange(NUM_IMAGES):
ids[x] = self.add_image("Image%s" % str(x))
path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1",
self.api_port, ids[x])
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(response.status, 200)
# Delete all queued images
path = "http://%s:%d/v1/queued_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
data = json.loads(content)
num_deleted = data['num_deleted']
self.assertEqual(NUM_IMAGES, num_deleted)
# Verify a second delete now returns num_deleted=0
path = "http://%s:%d/v1/queued_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
data = json.loads(content)
num_deleted = data['num_deleted']
self.assertEqual(0, num_deleted)
self.stop_servers()
@skip_if_disabled
def test_queue_and_prefetch(self):
"""
Tests that images may be queued and prefetched
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
cache_config_filepath = os.path.join(self.test_dir, 'etc',
'glance-cache.conf')
cache_file_options = {
'image_cache_dir': self.api_server.image_cache_dir,
'image_cache_driver': self.image_cache_driver,
'registry_port': self.registry_server.bind_port,
'log_file': os.path.join(self.test_dir, 'cache.log'),
'metadata_encryption_key': "012345678901234567890123456789ab"
}
with open(cache_config_filepath, 'w') as cache_file:
cache_file.write("""[DEFAULT]
debug = True
verbose = True
image_cache_dir = %(image_cache_dir)s
image_cache_driver = %(image_cache_driver)s
registry_host = 127.0.0.1
registry_port = %(registry_port)s
metadata_encryption_key = %(metadata_encryption_key)s
log_file = %(log_file)s
""" % cache_file_options)
self.verify_no_images()
ids = {}
# Add a bunch of images...
for x in xrange(4):
ids[x] = self.add_image("Image%s" % str(x))
        # Queue the first image, verify no images are in the cache after
        # queueing, then run the prefetcher and verify that the image is
        # then in the cache
path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1",
self.api_port, ids[0])
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(response.status, 200)
self.verify_no_cached_images()
cmd = ("%s -m glance.cmd.cache_prefetcher --config-file %s" %
(sys.executable, cache_config_filepath))
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('', out.strip(), out)
# Verify first image now in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(1, len(cached_images))
self.assertTrue(ids[0] in [r['image_id']
for r in data['cached_images']])
self.stop_servers()
class TestImageCacheXattr(functional.FunctionalTest,
BaseCacheMiddlewareTest):
"""Functional tests that exercise the image cache using the xattr driver"""
def setUp(self):
"""
Test to see if the pre-requisites for the image cache
are working (python-xattr installed and xattr support on the
filesystem)
"""
if getattr(self, 'disabled', False):
return
if not getattr(self, 'inited', False):
try:
import xattr
except ImportError:
self.inited = True
self.disabled = True
self.disabled_message = ("python-xattr not installed.")
return
self.inited = True
self.disabled = False
self.image_cache_driver = "xattr"
super(TestImageCacheXattr, self).setUp()
self.api_server.deployment_flavor = "caching"
if not xattr_writes_supported(self.test_dir):
self.inited = True
self.disabled = True
self.disabled_message = ("filesystem does not support xattr")
return
def tearDown(self):
super(TestImageCacheXattr, self).tearDown()
if os.path.exists(self.api_server.image_cache_dir):
shutil.rmtree(self.api_server.image_cache_dir)
class TestImageCacheManageXattr(functional.FunctionalTest,
BaseCacheManageMiddlewareTest):
"""
Functional tests that exercise the image cache management
with the Xattr cache driver
"""
def setUp(self):
"""
Test to see if the pre-requisites for the image cache
are working (python-xattr installed and xattr support on the
filesystem)
"""
if getattr(self, 'disabled', False):
return
if not getattr(self, 'inited', False):
try:
import xattr
except ImportError:
self.inited = True
self.disabled = True
self.disabled_message = ("python-xattr not installed.")
return
self.inited = True
self.disabled = False
self.image_cache_driver = "xattr"
super(TestImageCacheManageXattr, self).setUp()
self.api_server.deployment_flavor = "cachemanagement"
if not xattr_writes_supported(self.test_dir):
self.inited = True
self.disabled = True
self.disabled_message = ("filesystem does not support xattr")
return
def tearDown(self):
super(TestImageCacheManageXattr, self).tearDown()
if os.path.exists(self.api_server.image_cache_dir):
shutil.rmtree(self.api_server.image_cache_dir)
class TestImageCacheSqlite(functional.FunctionalTest,
BaseCacheMiddlewareTest):
"""
Functional tests that exercise the image cache using the
SQLite driver
"""
def setUp(self):
"""
        Test to see if the pre-requisites for the image cache
        are working (python-sqlite3 installed)
"""
if getattr(self, 'disabled', False):
return
if not getattr(self, 'inited', False):
try:
import sqlite3
except ImportError:
self.inited = True
self.disabled = True
self.disabled_message = ("python-sqlite3 not installed.")
return
self.inited = True
self.disabled = False
super(TestImageCacheSqlite, self).setUp()
self.api_server.deployment_flavor = "caching"
def tearDown(self):
super(TestImageCacheSqlite, self).tearDown()
if os.path.exists(self.api_server.image_cache_dir):
shutil.rmtree(self.api_server.image_cache_dir)
class TestImageCacheManageSqlite(functional.FunctionalTest,
BaseCacheManageMiddlewareTest):
"""
Functional tests that exercise the image cache management using the
SQLite driver
"""
def setUp(self):
"""
        Test to see if the pre-requisites for the image cache
        are working (python-sqlite3 installed)
"""
if getattr(self, 'disabled', False):
return
if not getattr(self, 'inited', False):
try:
import sqlite3
except ImportError:
self.inited = True
self.disabled = True
self.disabled_message = ("python-sqlite3 not installed.")
return
self.inited = True
self.disabled = False
self.image_cache_driver = "sqlite"
super(TestImageCacheManageSqlite, self).setUp()
self.api_server.deployment_flavor = "cachemanagement"
def tearDown(self):
super(TestImageCacheManageSqlite, self).tearDown()
if os.path.exists(self.api_server.image_cache_dir):
shutil.rmtree(self.api_server.image_cache_dir)
| apache-2.0 |
carljm/django | tests/custom_managers/tests.py | 16 | 24154 | from __future__ import unicode_literals
from django.db import models
from django.test import TestCase
from django.utils import six
from .models import (
Book, Car, CustomManager, CustomQuerySet, DeconstructibleCustomManager,
FastCarAsBase, FastCarAsDefault, FunPerson, OneToOneRestrictedModel,
Person, PersonFromAbstract, PersonManager, PublishedBookManager,
RelatedModel, RestrictedModel,
)
class CustomManagerTests(TestCase):
custom_manager_names = [
'custom_queryset_default_manager',
'custom_queryset_custom_manager',
]
@classmethod
def setUpTestData(cls):
cls.b1 = Book.published_objects.create(
title="How to program", author="Rodney Dangerfield", is_published=True)
cls.b2 = Book.published_objects.create(
title="How to be smart", author="Albert Einstein", is_published=False)
cls.p1 = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
cls.droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
def test_custom_manager_basic(self):
"""
Test a custom Manager method.
"""
self.assertQuerysetEqual(
Person.objects.get_fun_people(), [
"Bugs Bunny"
],
six.text_type
)
def test_queryset_copied_to_default(self):
"""
The methods of a custom QuerySet are properly copied onto the
default Manager.
"""
for manager_name in self.custom_manager_names:
manager = getattr(Person, manager_name)
# Public methods are copied
manager.public_method()
# Private methods are not copied
with self.assertRaises(AttributeError):
manager._private_method()
def test_manager_honors_queryset_only(self):
for manager_name in self.custom_manager_names:
manager = getattr(Person, manager_name)
# Methods with queryset_only=False are copied even if they are private.
manager._optin_private_method()
# Methods with queryset_only=True aren't copied even if they are public.
with self.assertRaises(AttributeError):
manager.optout_public_method()
def test_manager_use_queryset_methods(self):
"""
Custom manager will use the queryset methods
"""
for manager_name in self.custom_manager_names:
manager = getattr(Person, manager_name)
queryset = manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], six.text_type)
self.assertIs(queryset._filter_CustomQuerySet, True)
# Test that specialized querysets inherit from our custom queryset.
queryset = manager.values_list('first_name', flat=True).filter()
self.assertEqual(list(queryset), [six.text_type("Bugs")])
self.assertIs(queryset._filter_CustomQuerySet, True)
self.assertIsInstance(queryset.values(), CustomQuerySet)
self.assertIsInstance(queryset.values().values(), CustomQuerySet)
self.assertIsInstance(queryset.values_list().values(), CustomQuerySet)
def test_init_args(self):
"""
The custom manager __init__() argument has been set.
"""
self.assertEqual(Person.custom_queryset_custom_manager.init_arg, 'hello')
def test_manager_attributes(self):
"""
Custom manager method is only available on the manager and not on
querysets.
"""
Person.custom_queryset_custom_manager.manager_only()
with self.assertRaises(AttributeError):
Person.custom_queryset_custom_manager.all().manager_only()
def test_queryset_and_manager(self):
"""
Queryset method doesn't override the custom manager method.
"""
queryset = Person.custom_queryset_custom_manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], six.text_type)
self.assertIs(queryset._filter_CustomManager, True)
def test_related_manager(self):
"""
The related managers extend the default manager.
"""
self.assertIsInstance(self.droopy.books, PublishedBookManager)
self.assertIsInstance(self.b2.authors, PersonManager)
def test_no_objects(self):
"""
The default manager, "objects", doesn't exist, because a custom one
was provided.
"""
with self.assertRaises(AttributeError):
Book.objects
def test_filtering(self):
"""
Custom managers respond to usual filtering methods
"""
self.assertQuerysetEqual(
Book.published_objects.all(), [
"How to program",
],
lambda b: b.title
)
def test_fk_related_manager(self):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
self.assertQuerysetEqual(
self.b1.favorite_books.order_by('first_name').all(), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_people_favorite_books.all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='fun_people').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
def test_gfk_related_manager(self):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
self.assertQuerysetEqual(
self.b1.favorite_things.all(), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_people_favorite_things.all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='fun_people').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
def test_m2m_related_manager(self):
bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.authors.add(bugs)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.authors.add(droopy)
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.fun_authors.add(bugs)
droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.fun_authors.add(droopy)
self.assertQuerysetEqual(
self.b1.authors.order_by('first_name').all(), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_authors.order_by('first_name').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.authors(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.authors(manager='fun_people').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
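    # The removal tests below each run twice: once with bulk=True (the related
    # manager issues a single bulk UPDATE/DELETE) and once with bulk=False via
    # the test_slow_* wrappers (objects are saved/deleted one at a time).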
def test_removal_through_default_fk_related_manager(self, bulk=True):
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
self.b1.fun_people_favorite_books.remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.b1.fun_people_favorite_books.remove(bugs, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
bugs.favorite_book = self.b1
bugs.save()
self.b1.fun_people_favorite_books.clear(bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_default_fk_related_manager(self):
self.test_removal_through_default_fk_related_manager(bulk=False)
def test_removal_through_specified_fk_related_manager(self, bulk=True):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
# Check that the fun manager DOESN'T remove boring people.
self.b1.favorite_books(manager='fun_people').remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
# Check that the boring manager DOES remove boring people.
self.b1.favorite_books(manager='boring_people').remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='boring_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
droopy.favorite_book = self.b1
droopy.save()
# Check that the fun manager ONLY clears fun people.
self.b1.favorite_books(manager='fun_people').clear(bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='fun_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_specified_fk_related_manager(self):
self.test_removal_through_specified_fk_related_manager(bulk=False)
def test_removal_through_default_gfk_related_manager(self, bulk=True):
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
self.b1.fun_people_favorite_things.remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.order_by('first_name').filter(favorite_thing_id=self.b1.pk), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.b1.fun_people_favorite_things.remove(bugs, bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.order_by('first_name').filter(favorite_thing_id=self.b1.pk), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
        bugs.favorite_thing = self.b1
bugs.save()
self.b1.fun_people_favorite_things.clear(bulk=bulk)
self.assertQuerysetEqual(
FunPerson._base_manager.order_by('first_name').filter(favorite_thing_id=self.b1.pk), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_default_gfk_related_manager(self):
self.test_removal_through_default_gfk_related_manager(bulk=False)
def test_removal_through_specified_gfk_related_manager(self, bulk=True):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
# Check that the fun manager DOESN'T remove boring people.
self.b1.favorite_things(manager='fun_people').remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
# Check that the boring manager DOES remove boring people.
self.b1.favorite_things(manager='boring_people').remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='boring_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
droopy.favorite_thing = self.b1
droopy.save()
# Check that the fun manager ONLY clears fun people.
self.b1.favorite_things(manager='fun_people').clear(bulk=bulk)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='fun_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
def test_slow_removal_through_specified_gfk_related_manager(self):
self.test_removal_through_specified_gfk_related_manager(bulk=False)
def test_removal_through_default_m2m_related_manager(self):
bugs = FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.fun_authors.add(bugs)
droopy = FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.fun_authors.add(droopy)
self.b1.fun_authors.remove(droopy)
self.assertQuerysetEqual(
self.b1.fun_authors.through._default_manager.all(), [
"Bugs",
"Droopy",
],
lambda c: c.funperson.first_name,
ordered=False,
)
self.b1.fun_authors.remove(bugs)
self.assertQuerysetEqual(
self.b1.fun_authors.through._default_manager.all(), [
"Droopy",
],
lambda c: c.funperson.first_name,
ordered=False,
)
self.b1.fun_authors.add(bugs)
self.b1.fun_authors.clear()
self.assertQuerysetEqual(
self.b1.fun_authors.through._default_manager.all(), [
"Droopy",
],
lambda c: c.funperson.first_name,
ordered=False,
)
def test_removal_through_specified_m2m_related_manager(self):
bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.authors.add(bugs)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.authors.add(droopy)
# Check that the fun manager DOESN'T remove boring people.
self.b1.authors(manager='fun_people').remove(droopy)
self.assertQuerysetEqual(
self.b1.authors(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
# Check that the boring manager DOES remove boring people.
self.b1.authors(manager='boring_people').remove(droopy)
self.assertQuerysetEqual(
self.b1.authors(manager='boring_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
self.b1.authors.add(droopy)
# Check that the fun manager ONLY clears fun people.
self.b1.authors(manager='fun_people').clear()
self.assertQuerysetEqual(
self.b1.authors(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.authors(manager='fun_people').all(), [
],
lambda c: c.first_name,
ordered=False,
)
def test_deconstruct_default(self):
mgr = models.Manager()
as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
self.assertFalse(as_manager)
self.assertEqual(mgr_path, 'django.db.models.manager.Manager')
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
def test_deconstruct_as_manager(self):
mgr = CustomQuerySet.as_manager()
as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
self.assertTrue(as_manager)
self.assertEqual(qs_path, 'custom_managers.models.CustomQuerySet')
def test_deconstruct_from_queryset(self):
mgr = DeconstructibleCustomManager('a', 'b')
as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
self.assertFalse(as_manager)
self.assertEqual(mgr_path, 'custom_managers.models.DeconstructibleCustomManager')
self.assertEqual(args, ('a', 'b',))
self.assertEqual(kwargs, {})
mgr = DeconstructibleCustomManager('x', 'y', c=3, d=4)
as_manager, mgr_path, qs_path, args, kwargs = mgr.deconstruct()
self.assertFalse(as_manager)
self.assertEqual(mgr_path, 'custom_managers.models.DeconstructibleCustomManager')
self.assertEqual(args, ('x', 'y',))
self.assertEqual(kwargs, {'c': 3, 'd': 4})
def test_deconstruct_from_queryset_failing(self):
mgr = CustomManager('arg')
msg = ("Could not find manager BaseCustomManagerFromCustomQuerySet in "
"django.db.models.manager.\n"
"Please note that you need to inherit from managers you "
"dynamically generated with 'from_queryset()'.")
with self.assertRaisesMessage(ValueError, msg):
mgr.deconstruct()
def test_abstract_model_with_custom_manager_name(self):
"""
A custom manager may be defined on an abstract model.
It will be inherited by the abstract model's children.
"""
PersonFromAbstract.abstract_persons.create(objects='Test')
self.assertQuerysetEqual(
PersonFromAbstract.abstract_persons.all(), ["Test"],
lambda c: c.objects,
)
class TestCars(TestCase):
def test_managers(self):
# Each model class gets a "_default_manager" attribute, which is a
# reference to the first manager defined in the class.
Car.cars.create(name="Corvette", mileage=21, top_speed=180)
Car.cars.create(name="Neon", mileage=31, top_speed=100)
self.assertQuerysetEqual(
Car._default_manager.order_by("name"), [
"Corvette",
"Neon",
],
lambda c: c.name
)
self.assertQuerysetEqual(
Car.cars.order_by("name"), [
"Corvette",
"Neon",
],
lambda c: c.name
)
# alternate manager
self.assertQuerysetEqual(
Car.fast_cars.all(), [
"Corvette",
],
lambda c: c.name
)
# explicit default manager
self.assertQuerysetEqual(
FastCarAsDefault.cars.order_by("name"), [
"Corvette",
"Neon",
],
lambda c: c.name
)
self.assertQuerysetEqual(
FastCarAsDefault._default_manager.all(), [
"Corvette",
],
lambda c: c.name
)
# explicit base manager
self.assertQuerysetEqual(
FastCarAsBase.cars.order_by("name"), [
"Corvette",
"Neon",
],
lambda c: c.name
)
self.assertQuerysetEqual(
FastCarAsBase._base_manager.all(), [
"Corvette",
],
lambda c: c.name
)
class CustomManagersRegressTestCase(TestCase):
def test_filtered_default_manager(self):
"""Even though the default manager filters out some records,
we must still be able to save (particularly, save by updating
existing records) those filtered instances. This is a
regression test for #8990, #9527"""
related = RelatedModel.objects.create(name="xyzzy")
obj = RestrictedModel.objects.create(name="hidden", related=related)
obj.name = "still hidden"
obj.save()
# If the hidden object wasn't seen during the save process,
# there would now be two objects in the database.
self.assertEqual(RestrictedModel.plain_manager.count(), 1)
def test_delete_related_on_filtered_manager(self):
"""Deleting related objects should also not be distracted by a
restricted manager on the related object. This is a regression
test for #2698."""
related = RelatedModel.objects.create(name="xyzzy")
for name, public in (('one', True), ('two', False), ('three', False)):
RestrictedModel.objects.create(name=name, is_public=public, related=related)
obj = RelatedModel.objects.get(name="xyzzy")
obj.delete()
# All of the RestrictedModel instances should have been
# deleted, since they *all* pointed to the RelatedModel. If
# the default manager is used, only the public one will be
# deleted.
self.assertEqual(len(RestrictedModel.plain_manager.all()), 0)
def test_delete_one_to_one_manager(self):
# The same test case as the last one, but for one-to-one
# models, which are implemented slightly different internally,
# so it's a different code path.
obj = RelatedModel.objects.create(name="xyzzy")
OneToOneRestrictedModel.objects.create(name="foo", is_public=False, related=obj)
obj = RelatedModel.objects.get(name="xyzzy")
obj.delete()
self.assertEqual(len(OneToOneRestrictedModel.plain_manager.all()), 0)
def test_queryset_with_custom_init(self):
"""
BaseManager.get_queryset() should use kwargs rather than args to allow
custom kwargs (#24911).
"""
qs_custom = Person.custom_init_queryset_manager.all()
qs_default = Person.objects.all()
self.assertQuerysetEqual(qs_custom, qs_default)
| bsd-3-clause |
ii0/pybrain | docs/code2tut.py | 27 | 2275 | #!/bin/env python
""" Utility script to convert Python source code into tutorials.
Synopsis:
code2tut.py basename
Output:
Will convert tutorials/basename.py into sphinx/basename.txt
Conventions:
1. All textual comments must be enclosed in triple quotation marks.
2. First line of file is ignored, second line of file shall contain title in "",
the following lines starting with # are ignored.
3. Lines following paragraph-level markup (e.g. .. seealso::) must be indented.
Paragraph ends with a blank line.
4. If the code after a comment starts with a higher indentation level, you have
to manually edit the resulting file, e.g. by inserting " ..." at the
beginning of these sections.
See tutorials/fnn.py for example.
"""
__author__ = "Martin Felder, [email protected]"
__version__ = "$Id$"
import sys
import os
f_in = file(os.path.join("tutorials",sys.argv[1])+".py")
f_out = file(os.path.join("sphinx",sys.argv[1])+".txt", "w+")
# write the header
f_out.write(".. _"+sys.argv[1]+":\n\n")
f_in.readline() # ######################
line = f_in.readline()
line= line.split('"')[1] # # PyBrain Tutorial "Classification ..."
f_out.write(line+"\n")
f_out.write("="*len(line)+'\n\n')
linecomment = False
comment = 0
begin = True
inblock = False
# the following is an ugly hack - don't look at it!
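# State used by the conversion loop below:
#   comment     -- 1 while inside a """ ... """ text block, 0 while in code
#   linecomment -- True when a line both opens and closes a """ block
#   inblock     -- True while inside an indented paragraph-level directive (e.g. .. seealso::)
#   begin       -- True until the first non-# line, so leading # comments are skipped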
for line in f_in:
linecomment = False
# crop #-comments at start of file
if line.startswith('#'):
if begin:
continue
elif begin:
begin = False
if '"""' in line:
for i in range(line.count('"""')):
comment = 1 - comment
if line.count('"""')==2:
linecomment = True
line = line.replace('"""','')
if comment==0:
line += '::'
if not inblock:
line = line.strip()
elif comment==0 and line!='\n':
line = " "+line
if line.startswith('..'):
inblock = True
elif line=="\n":
inblock = False
if (comment or linecomment) and not inblock:
line = line.strip()+"\n"
if line.endswith("::"):
line +='\n\n'
elif line.endswith("::\n"):
line +='\n'
f_out.write(line)
f_in.close()
f_out.close()
| bsd-3-clause |
lpakula/django-oscar-paypal | paypal/payflow/facade.py | 3 | 6767 | """
Bridging module between Oscar and the gateway module (which is Oscar agnostic)
"""
from oscar.apps.payment import exceptions
from paypal.payflow import codes, gateway, models
def authorize(order_number, amt, bankcard, billing_address=None):
"""
Make an *authorisation* request
This holds the money on the customer's bank account but does not mark the
transaction for settlement. This is the most common method to use for
fulfilling goods that require shipping. When the goods are ready to be
shipped, the transaction can be marked for settlement by calling the
delayed_capture method.
If successful, return nothing ("silence is golden") - if unsuccessful raise
an exception which can be caught and handled within view code.
:order_number: Order number for request
:amt: Amount for transaction
:bankcard: Instance of Oscar's Bankcard class (which is just a dumb wrapper
around the pertinent bankcard attributes).
:billing_address: A dict of billing address information (which can
come from the `cleaned_data` of a billing address form).
"""
return _submit_payment_details(
gateway.authorize, order_number, amt, bankcard, billing_address)
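# Illustrative usage (a sketch only -- the ``order``, ``bankcard`` and
# ``billing_form`` names below are assumptions, not part of this module):
#
#   authorize(order.number, order.total_incl_tax, bankcard,
#             billing_address=billing_form.cleaned_data)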
def sale(order_number, amt, bankcard, billing_address=None):
"""
Make a *sale* request
This holds the money on the customer's bank account and marks the
transaction for settlement that night. This is appropriate method to use
for products that can be immediately fulfilled - such as digital products.
If successful, return nothing ("silence is golden") - if unsuccessful raise
an exception which can be caught and handled within view code.
:order_number: Order number for request
:amt: Amount for transaction
:bankcard: Instance of Oscar's Bankcard class (which is just a dumb wrapper
around the pertinent bankcard attributes).
:billing_address: A dict of billing address information (which can come from
the `cleaned_data` of a billing address form.
"""
return _submit_payment_details(gateway.sale, order_number, amt, bankcard,
billing_address)
def _submit_payment_details(
gateway_fn, order_number, amt, bankcard, billing_address=None):
# Remap address fields if set.
address_fields = {}
if billing_address:
address_fields.update({
'first_name': billing_address['first_name'],
'last_name': billing_address['last_name'],
'street': billing_address['line1'],
'city': billing_address['line4'],
'state': billing_address['state'],
'zip': billing_address['postcode'].strip(' ')
})
txn = gateway_fn(
order_number,
card_number=bankcard.number,
cvv=bankcard.cvv,
expiry_date=bankcard.expiry_month("%m%y"),
amt=amt,
**address_fields)
if not txn.is_approved:
raise exceptions.UnableToTakePayment(txn.respmsg)
return txn
def delayed_capture(order_number, pnref=None, amt=None):
"""
Capture funds that have been previously authorized.
Notes:
* It's possible to capture a lower amount than the original auth
transaction - however...
* ...only one delayed capture is allowed for a given PNREF...
* ...If multiple captures are required, a 'reference transaction' needs to be
used.
* It's safe to retry captures if the first one fails or errors
:order_number: Order number
:pnref: The PNREF of the authorization transaction to use. If not
specified, the order number is used to retrieve the appropriate transaction.
:amt: A custom amount to capture.
"""
if pnref is None:
# No PNREF specified, look-up the auth transaction for this order number
# to get the PNREF from there.
try:
auth_txn = models.PayflowTransaction.objects.get(
comment1=order_number, trxtype=codes.AUTHORIZATION)
except models.PayflowTransaction.DoesNotExist:
raise exceptions.UnableToTakePayment(
"No authorization transaction found with PNREF=%s" % pnref)
pnref = auth_txn
txn = gateway.delayed_capture(order_number, pnref, amt)
if not txn.is_approved:
raise exceptions.UnableToTakePayment(txn.respmsg)
return txn
def referenced_sale(order_number, pnref, amt):
"""
Capture funds using the bank/address details of a previous transaction
This is equivalent to a *sale* transaction but without the user having to
enter their payment details.
There are two main uses for this:
1. This allows customers to checkout without having to re-enter their
payment details.
2. It allows an initial authorisation to be settled in multiple parts. The
first settle should use delayed_capture but any subsequent ones should
use this method.
:order_number: Order number.
:pnref: PNREF of a previous transaction to use.
:amt: The amount to settle for.
"""
txn = gateway.reference_transaction(
order_number, pnref, amt)
if not txn.is_approved:
raise exceptions.UnableToTakePayment(txn.respmsg)
return txn
def void(order_number, pnref):
"""
Void an authorisation transaction to prevent it from being settled
:order_number: Order number
:pnref: The PNREF of the transaction to void.
"""
txn = gateway.void(order_number, pnref)
if not txn.is_approved:
raise exceptions.PaymentError(txn.respmsg)
return txn
def credit(order_number, pnref=None, amt=None):
"""
Return funds that have been previously settled.
:order_number: Order number
:pnref: The PNREF of the authorization transaction to use. If not
specified, the order number is used to retrieve the appropriate transaction.
    :amt: A custom amount to refund. If not specified, the entire transaction
        is refunded.
"""
if pnref is None:
# No PNREF specified, look-up the auth/sale transaction for this order number
# to get the PNREF from there.
try:
auth_txn = models.PayflowTransaction.objects.get(
comment1=order_number, trxtype__in=(codes.AUTHORIZATION,
codes.SALE))
except models.PayflowTransaction.DoesNotExist:
raise exceptions.UnableToTakePayment(
"No authorization transaction found with PNREF=%s" % pnref)
pnref = auth_txn
txn = gateway.credit(order_number, pnref, amt)
if not txn.is_approved:
raise exceptions.PaymentError(txn.respmsg)
return txn
| bsd-3-clause |
v0i0/lammps | tools/moltemplate/examples/coarse_grained/chromosome_metaphase_Naumova2013/moltemplate_files/interpolate_coords.py | 14 | 1944 | #!/usr/bin/env python
err_msg = """
Usage:
interpolate_coords.py Ndesired [scale] < coords_orig.raw > coords.raw
Example:
interpolate_coords.py 30118 3.0 < coords_orig.raw > coords.raw
# (Note: 30117 ~= 128000/4.25, but using 30118 makes interpolation cleaner.
# See the supplemental section of Naumova et al Science 2013, p 18.)
"""
import sys
from math import floor
# Parse the argument list:
if len(sys.argv) <= 1:
sys.stderr.write("Error:\n\nTypical Usage:\n\n"+err_msg+"\n")
exit(1)
n_new = int(sys.argv[1])
if len(sys.argv) > 2:
scale = float(sys.argv[2])
else:
scale = 1.0
coords_orig = []
lines = sys.stdin.readlines()
for line in lines:
tokens = line.split()
if (len(tokens) > 0):
coords_orig.append(list(map(float, tokens)))
g_dim = len(tokens)
n_orig = len(coords_orig)
if n_orig < 2:
sys.stderr.write("Error:\n\nInput file contains less than two lines of coordinates\n")
exit(1)
if n_new < 2:
sys.stderr.write("Error:\n\nOutput file will contain less than two lines of coordinates\n")
exit(1)
coords_new = [[0.0 for d in range(0, g_dim)] for i in range(0, n_new)]
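# Each new index i_new maps to the fractional original index
#   I_orig = i_new * (n_orig - 1) / (n_new - 1)
# and the new point is a linear interpolation between the two original points
# that bracket I_orig; the final point is copied directly so we never index
# past the end of the original list.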
for i_new in range(0, n_new):
I_orig = (i_new) * (float(n_orig-1) / float(n_new-1))
i_orig = int(floor(I_orig))
i_remainder = I_orig - i_orig
if (i_new < n_new-1):
for d in range(0, g_dim):
coords_new[i_new][d] = scale*(coords_orig[i_orig][d]
+
i_remainder*(coords_orig[i_orig+1][d]-
coords_orig[i_orig][d]))
else:
for d in range(0, g_dim):
coords_new[i_new][d] = scale*coords_orig[n_orig-1][d]
# print the coordates
for d in range(0, g_dim-1):
sys.stdout.write(str(coords_new[i_new][d]) + ' ')
sys.stdout.write(str(coords_new[i_new][g_dim-1]) + "\n")
| gpl-2.0 |
tracierenea/gnuradio | gr-digital/python/digital/ofdm_sync_pnac.py | 40 | 6080 | #!/usr/bin/env python
#
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from numpy import fft
from gnuradio import gr
try:
from gnuradio import filter
except ImportError:
import filter_swig as filter
try:
from gnuradio import blocks
except ImportError:
import blocks_swig as blocks
class ofdm_sync_pnac(gr.hier_block2):
def __init__(self, fft_length, cp_length, kstime, logging=False):
"""
OFDM synchronization using PN Correlation and initial cross-correlation:
F. Tufvesson, O. Edfors, and M. Faulkner, "Time and Frequency Synchronization for OFDM using
        PN-Sequence Preambles," IEEE Proc. VTC, 1999, pp. 2203-2207.
This implementation is meant to be a more robust version of the Schmidl and Cox receiver design.
By correlating against the preamble and using that as the input to the time-delayed correlation,
this circuit produces a very clean timing signal at the end of the preamble. The timing is
more accurate and does not have the problem associated with determining the timing from the
plateau structure in the Schmidl and Cox.
This implementation appears to require that the signal is received with a normalized power or signal
scaling factor to reduce ambiguities introduced from partial correlation of the cyclic prefix and
the peak detection. A better peak detection block might fix this.
Also, the cross-correlation falls apart as the frequency offset gets larger and completely fails
when an integer offset is introduced. Another thing to look at.
"""
gr.hier_block2.__init__(self, "ofdm_sync_pnac",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature2(2, 2, gr.sizeof_float, gr.sizeof_char)) # Output signature
self.input = blocks.add_const_cc(0)
symbol_length = fft_length + cp_length
# PN Sync with cross-correlation input
# cross-correlate with the known symbol
kstime = [k.conjugate() for k in kstime[0:fft_length//2]]
kstime.reverse()
self.crosscorr_filter = filter.fir_filter_ccc(1, kstime)
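        # (Conjugating and time-reversing the known symbol makes this FIR
        #  filter a matched filter for the first fft_length/2 samples of the
        #  preamble, i.e. it computes the cross-correlation with the input.)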
# Create a delay line
self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length/2)
# Correlation from ML Sync
self.conjg = blocks.conjugate_cc();
self.corr = blocks.multiply_cc();
# Create a moving sum filter for the input
self.mag = blocks.complex_to_mag_squared()
self.power = filter.fir_filter_fff(1, [1.0] * int(fft_length))
# Get magnitude (peaks) and angle (phase/freq error)
self.c2mag = blocks.complex_to_mag_squared()
self.angle = blocks.complex_to_arg()
self.compare = blocks.sub_ff()
self.sample_and_hold = blocks.sample_and_hold_ff()
#ML measurements input to sampler block and detect
self.threshold = blocks.threshold_ff(0,0,0) # threshold detection might need to be tweaked
self.peaks = blocks.float_to_char()
self.connect(self, self.input)
# Cross-correlate input signal with known preamble
self.connect(self.input, self.crosscorr_filter)
# use the output of the cross-correlation as input time-shifted correlation
self.connect(self.crosscorr_filter, self.delay)
self.connect(self.crosscorr_filter, (self.corr,0))
self.connect(self.delay, self.conjg)
self.connect(self.conjg, (self.corr,1))
self.connect(self.corr, self.c2mag)
self.connect(self.corr, self.angle)
self.connect(self.angle, (self.sample_and_hold,0))
# Get the power of the input signal to compare against the correlation
self.connect(self.crosscorr_filter, self.mag, self.power)
# Compare the power to the correlator output to determine timing peak
# When the peak occurs, it peaks above zero, so the thresholder detects this
self.connect(self.c2mag, (self.compare,0))
self.connect(self.power, (self.compare,1))
self.connect(self.compare, self.threshold)
self.connect(self.threshold, self.peaks, (self.sample_and_hold,1))
# Set output signals
# Output 0: fine frequency correction value
# Output 1: timing signal
self.connect(self.sample_and_hold, (self,0))
self.connect(self.peaks, (self,1))
if logging:
self.connect(self.compare, blocks.file_sink(gr.sizeof_float, "ofdm_sync_pnac-compare_f.dat"))
self.connect(self.c2mag, blocks.file_sink(gr.sizeof_float, "ofdm_sync_pnac-theta_f.dat"))
self.connect(self.power, blocks.file_sink(gr.sizeof_float, "ofdm_sync_pnac-inputpower_f.dat"))
self.connect(self.angle, blocks.file_sink(gr.sizeof_float, "ofdm_sync_pnac-epsilon_f.dat"))
self.connect(self.threshold, blocks.file_sink(gr.sizeof_float, "ofdm_sync_pnac-threshold_f.dat"))
self.connect(self.peaks, blocks.file_sink(gr.sizeof_char, "ofdm_sync_pnac-peaks_b.dat"))
self.connect(self.sample_and_hold, blocks.file_sink(gr.sizeof_float, "ofdm_sync_pnac-sample_and_hold_f.dat"))
self.connect(self.input, blocks.file_sink(gr.sizeof_gr_complex, "ofdm_sync_pnac-input_c.dat"))
| gpl-3.0 |
alxnov/ansible-modules-core | cloud/amazon/ec2_snapshot.py | 22 | 9974 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
- instance that has the required volume to snapshot mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
choices: ['yes', 'no']
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
- whether to add or create a snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
last_snapshot_min_age:
description:
- If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "2.0"
author: "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
last_snapshot_min_age: 60
'''
import time
import datetime
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
"""
Gets the most recently created snapshot and optionally filters the result
if the snapshot is too old
:param snapshots: list of snapshots to search
:param max_snapshot_age_secs: filter the result if its older than this
:param now: simulate time -- used for unit testing
:return:
"""
if len(snapshots) == 0:
return None
if not now:
now = datetime.datetime.utcnow()
    # "Most recent" means the largest start time, hence max() rather than min().
    youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
# See if the snapshot is younger that the given max age
snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
snapshot_age = now - snapshot_start
if max_snapshot_age_secs is not None:
if snapshot_age.total_seconds() > max_snapshot_age_secs:
return None
return youngest_snapshot
def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
"""
Wait for the snapshot to be created
:param snapshot:
:param wait_timeout_secs: fail this step after this many seconds
:param sleep_func:
:return:
"""
time_waited = 0
snapshot.update()
while snapshot.status != 'completed':
sleep_func(3)
snapshot.update()
time_waited += 3
if wait_timeout_secs and time_waited > wait_timeout_secs:
return False
return True
def create_snapshot(module, ec2, state=None, description=None, wait=None,
wait_timeout=None, volume_id=None, instance_id=None,
snapshot_id=None, device_name=None, snapshot_tags=None,
last_snapshot_min_age=None):
snapshot = None
changed = False
required = [volume_id, snapshot_id, instance_id]
if required.count(None) != len(required) - 1: # only 1 must be set
module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
if instance_id and not device_name or device_name and not instance_id:
module.fail_json(msg='Instance ID and device name must both be specified')
if instance_id:
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
volume_id = volumes[0].id
if state == 'absent':
if not snapshot_id:
module.fail_json(msg = 'snapshot_id must be set when state is absent')
try:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError, e:
# exception is raised if snapshot does not exist
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# successful delete
module.exit_json(changed=True)
if last_snapshot_min_age > 0:
try:
current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
except boto.exception.BotoServerError, e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
snapshot = _get_most_recent_snapshot(current_snapshots,
max_snapshot_age_secs=last_snapshot_min_age)
try:
# Create a new snapshot if we didn't find an existing one to use
if snapshot is None:
snapshot = ec2.create_snapshot(volume_id, description=description)
changed = True
if wait:
if not _create_with_wait(snapshot, wait_timeout):
module.fail_json(msg='Timed out while creating snapshot.')
if snapshot_tags:
for k, v in snapshot_tags.items():
snapshot.add_tag(k, v)
except boto.exception.BotoServerError, e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
module.exit_json(changed=changed,
snapshot_id=snapshot.id,
volume_id=snapshot.volume_id,
volume_size=snapshot.volume_size,
tags=snapshot.tags.copy())
def create_snapshot_ansible_module():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id = dict(),
description = dict(),
instance_id = dict(),
snapshot_id = dict(),
device_name = dict(),
wait = dict(type='bool', default=True),
wait_timeout = dict(type='int', default=0),
last_snapshot_min_age = dict(type='int', default=0),
snapshot_tags = dict(type='dict', default=dict()),
state = dict(choices=['absent','present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
return module
def main():
module = create_snapshot_ansible_module()
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
volume_id = module.params.get('volume_id')
snapshot_id = module.params.get('snapshot_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
last_snapshot_min_age = module.params.get('last_snapshot_min_age')
snapshot_tags = module.params.get('snapshot_tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
create_snapshot(
module=module,
state=state,
description=description,
wait=wait,
wait_timeout=wait_timeout,
ec2=ec2,
volume_id=volume_id,
instance_id=instance_id,
snapshot_id=snapshot_id,
device_name=device_name,
snapshot_tags=snapshot_tags,
last_snapshot_min_age=last_snapshot_min_age
)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
sudheesh001/oh-mainline | vendor/packages/docutils/test/test_transforms/test___init__.py | 19 | 1136 | #! /usr/bin/env python
# $Id: test___init__.py 5174 2007-05-31 00:01:52Z wiemann $
# Author: Lea Wiemann <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Test module for transforms/__init__.py.
"""
from __init__ import DocutilsTestSupport # must be imported before docutils
from docutils import transforms, utils
import unittest
class TestTransform(transforms.Transform):
default_priority = 100
applied = 0
def apply(self, **kwargs):
self.applied += 1
assert kwargs == {'foo': 42}
class KwargsTestCase(unittest.TestCase):
def test_kwargs(self):
transformer = transforms.Transformer(utils.new_document('test data'))
transformer.add_transform(TestTransform, foo=42)
transformer.apply_transforms()
self.assertEqual(len(transformer.applied), 1)
self.assertEqual(len(transformer.applied[0]), 4)
transform_record = transformer.applied[0]
self.assertEqual(transform_record[1], TestTransform)
self.assertEqual(transform_record[3], {'foo': 42})
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
mikhaelharswanto/ryu | ryu/tests/integrated/test_flow_monitor_v14.py | 1 | 6717 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import time
import logging
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_4
from ryu.tests.integrated import tester
LOG = logging.getLogger(__name__)
class RunTest(tester.TestFlowBase):
""" Test case for Request-Reply messages.
Some tests need attached port to switch.
If use the OVS, can do it with the following commands.
# ip link add <port> type dummy
# ovs-vsctl add-port <bridge> <port>
"""
OFP_VERSIONS = [ofproto_v1_4.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(RunTest, self).__init__(*args, **kwargs)
self._verify = None
self.n_tables = ofproto_v1_4.OFPTT_MAX
def start_next_test(self, dp):
self._verify = None
self.delete_all_flows(dp)
dp.send_barrier()
if len(self.pending):
t = self.pending.pop()
if self.is_supported(t):
LOG.info(tester.LOG_TEST_START, t)
self.current = t
getattr(self, t)(dp)
else:
self.results[t] = 'SKIP (unsupported)'
self.unclear -= 1
self.start_next_test(dp)
else:
self.print_results()
def run_verify(self, ev):
msg = ev.msg
dp = msg.datapath
verify_func = self.verify_default
v = "verify" + self.current[4:]
if v in dir(self):
verify_func = getattr(self, v)
result = verify_func(dp, msg)
if result is True:
self.unclear -= 1
self.results[self.current] = result
self.start_next_test(dp)
def verify_default(self, dp, msg):
type_ = self._verify
if msg.msg_type == dp.ofproto.OFPT_STATS_REPLY:
return self.verify_stats(dp, msg.body, type_)
elif msg.msg_type == type_:
return True
else:
return 'Reply msg_type %s expected %s' \
% (msg.msg_type, type_)
def verify_stats(self, dp, stats, type_):
stats_types = dp.ofproto_parser.OFPStatsReply._STATS_TYPES
expect = stats_types.get(type_).__name__
if isinstance(stats, list):
for s in stats:
if expect == s.__class__.__name__:
return True
else:
if expect == stats.__class__.__name__:
return True
return 'Reply msg has not \'%s\' class.\n%s' % (expect, stats)
def mod_flow(self, dp, cookie=0, cookie_mask=0, table_id=0,
command=None, idle_timeout=0, hard_timeout=0,
priority=0xff, buffer_id=0xffffffff, match=None,
actions=None, inst_type=None, out_port=None,
out_group=None, flags=0, inst=None):
if command is None:
command = dp.ofproto.OFPFC_ADD
if inst is None:
if inst_type is None:
inst_type = dp.ofproto.OFPIT_APPLY_ACTIONS
inst = []
if actions is not None:
inst = [dp.ofproto_parser.OFPInstructionActions(
inst_type, actions)]
if match is None:
match = dp.ofproto_parser.OFPMatch()
if out_port is None:
out_port = dp.ofproto.OFPP_ANY
if out_group is None:
out_group = dp.ofproto.OFPG_ANY
m = dp.ofproto_parser.OFPFlowMod(dp, cookie, cookie_mask,
table_id, command,
idle_timeout, hard_timeout,
priority, buffer_id,
out_port, out_group,
flags, match, inst)
dp.send_msg(m)
def get_port(self, dp):
for port_no, port in dp.ports.items():
if port_no != dp.ofproto.OFPP_LOCAL:
return port
return None
# Test for Reply message type
def test_flow_monitor_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
monitor_flags = [ofp.OFPFMF_INITIAL, ofp.OFPFMF_ONLY_OWN]
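        # OFPFMF_INITIAL requests an immediate dump of the flows that already
        # match, and OFPFMF_ONLY_OWN excludes changes made by other controllers.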
match = ofp_parser.OFPMatch(in_port=1)
req = ofp_parser.OFPFlowMonitorRequest(datapath, 0, 10000,
ofp.OFPP_ANY, ofp.OFPG_ANY,
monitor_flags,
ofp.OFPTT_ALL,
ofp.OFPFMC_ADD, match)
datapath.send_msg(req)
# handler
@set_ev_cls(ofp_event.EventOFPFlowMonitorReply, MAIN_DISPATCHER)
def flow_monitor_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
flow_updates = []
for update in msg.body:
update_str = 'length=%d event=%d' % (update.length, update.event)
if (update.event == ofp.OFPFME_INITIAL or
update.event == ofp.OFPFME_ADDED or
update.event == ofp.OFPFME_REMOVED or
update.event == ofp.OFPFME_MODIFIED):
                update_str += ' table_id=%d reason=%d idle_timeout=%d hard_timeout=%d priority=%d cookie=%d match=%s instructions=%s' % (update.table_id, update.reason, update.idle_timeout, update.hard_timeout, update.priority, update.cookie, update.match, update.instructions)
elif update.event == ofp.OFPFME_ABBREV:
                update_str += ' xid=%d' % (update.xid)
flow_updates.append(update_str)
self.logger.debug('FlowUpdates: %s', flow_updates)
def error_handler(self, ev):
if self.current.find('error') > 0:
self.run_verify(ev)
def is_supported(self, t):
unsupported = [
]
for u in unsupported:
if t.find(u) != -1:
return False
return True
| apache-2.0 |
Workday/OpenFrame | tools/cr/cr/commands/build.py | 113 | 2455 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the build commands."""
import cr
class BuildCommand(cr.Command):
"""The implementation of the build command.
This is a thin shell over the Builder.Build method of the selected builder.
"""
def __init__(self):
super(BuildCommand, self).__init__()
self.help = 'Build a target'
self.description = ("""
Uses the specified builder for the platform to bring the target
up to date.
""")
def AddArguments(self, subparsers):
parser = super(BuildCommand, self).AddArguments(subparsers)
cr.Builder.AddArguments(self, parser)
cr.Target.AddArguments(self, parser, allow_multiple=True)
self.ConsumeArgs(parser, 'the builder')
return parser
def Run(self):
return cr.Builder.Build(
cr.Target.GetTargets(), cr.context.remains)
class CleanCommand(cr.Command):
"""The implementation of the clean command.
This is a thin shell over the Builder.Clean method of the selected builder.
"""
def __init__(self):
super(CleanCommand, self).__init__()
self.help = 'Clean a target'
self.description = (
'Uses the specified builder to clean out built files for the target.')
def AddArguments(self, subparsers):
parser = super(CleanCommand, self).AddArguments(subparsers)
cr.Builder.AddArguments(self, parser)
cr.Target.AddArguments(self, parser, allow_multiple=True)
self.ConsumeArgs(parser, 'the builder')
return parser
def Run(self):
return cr.Builder.Clean(
cr.Target.GetTargets(), cr.context.remains)
class RebuildCommand(cr.Command):
"""The implementation of the rebuild command.
This is a thin shell over the Builder.Rebuild method of the selected builder.
"""
def __init__(self):
super(RebuildCommand, self).__init__()
self.help = 'Rebuild a target'
self.description = (
'Uses the specified builder for the platform to rebuild a target.')
def AddArguments(self, subparsers):
parser = super(RebuildCommand, self).AddArguments(subparsers)
cr.Builder.AddArguments(self, parser)
cr.Target.AddArguments(self, parser, allow_multiple=True)
self.ConsumeArgs(parser, 'the builder')
return parser
def Run(self):
return cr.Builder.Rebuild(
cr.Target.GetTargets(), cr.context.remains)
| bsd-3-clause |
codl/forget | migrations/versions/583cdac8eba1_add_three_way_media_policy.py | 1 | 1796 | """add three-way media policy
Revision ID: 583cdac8eba1
Revises: 7e255d4ea34d
Create Date: 2017-12-28 00:46:56.023649
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '583cdac8eba1'
down_revision = '7e255d4ea34d'
branch_labels = None
depends_on = None
transitional = sa.table('accounts',
sa.column('policy_keep_media'),
sa.column('old_policy_keep_media'))
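# (sa.table()/sa.column() give a lightweight, metadata-free description of the
#  accounts table so the data-migration UPDATE statements below can be emitted
#  without importing the application's models.)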
def upgrade():
ThreeWayPolicyEnum = sa.Enum('keeponly', 'deleteonly', 'none',
name='enum_3way_policy')
op.execute("""
CREATE TYPE enum_3way_policy AS ENUM ('keeponly', 'deleteonly', 'none')
""")
op.alter_column('accounts', 'policy_keep_media',
new_column_name='old_policy_keep_media')
op.add_column(
'accounts',
sa.Column('policy_keep_media', ThreeWayPolicyEnum,
nullable=False, server_default='none'))
op.execute(transitional.update()
.where(transitional.c.old_policy_keep_media)
.values(policy_keep_media=op.inline_literal('keeponly')))
op.drop_column('accounts', 'old_policy_keep_media')
def downgrade():
op.alter_column('accounts', 'policy_keep_media',
new_column_name='old_policy_keep_media')
op.add_column(
'accounts',
sa.Column('policy_keep_media', sa.Boolean(),
nullable=False, server_default='f'))
op.execute(transitional.update()
.where(transitional.c.old_policy_keep_media == op.inline_literal('keeponly'))
.values(policy_keep_media=op.inline_literal('t')))
op.drop_column('accounts', 'old_policy_keep_media')
op.execute("""
DROP TYPE enum_3way_policy
""")
| isc |
slingcoin/sling-market | test/functional/rpcbind_test.py | 23 | 4773 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running bitcoind with the -rpcbind and -rpcallowip options."""
import socket
import sys
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import *
from test_framework.netutil import *
class RPCBindTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
pass
def setup_nodes(self):
pass
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, [base_args + binds], connect_to)
pid = self.bitcoind_processes[0].pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, [base_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
raise SkipTest("This test requires at least one non-loopback IPv4 interface.")
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("::1",1))
            s.close()
except OSError:
raise SkipTest("This test requires IPv6 support.")
self.log.info("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
assert_raises_jsonrpc(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
if __name__ == '__main__':
RPCBindTest().main()
| mit |
adler-j/lie_grp_diffeo | examples/diffemorphism_2d_shepp_regularized.py | 1 | 3610 | import lie_group_diffeo as lgd
import odl
import numpy as np
action_type = 'geometric'
transform_type = 'affine'
space = odl.uniform_discr([-1, -1], [1, 1], [101, 101], interp='linear')
coord_space = odl.uniform_discr([-1, -1], [1, 1], [101, 101], interp='linear').tangent_bundle
# Select deformation type of the target
if transform_type == 'affine':
transform = odl.deform.LinDeformFixedDisp(
space.tangent_bundle.element([lambda x: x[0] * 0.1 + x[1] * 0.1,
lambda x: x[0] * 0.03 + x[1] * 0.1]) * 0.5)
elif transform_type == 'rotate':
theta = 0.2
transform = odl.deform.LinDeformFixedDisp(
space.tangent_bundle.element([lambda x: (np.cos(theta) - 1) * x[0] + np.sin(theta) * x[1],
lambda x: -np.sin(theta) * x[0] + (np.cos(theta) - 1) * x[1]]))
else:
assert False
# Create template and target
template = odl.phantom.shepp_logan(space, modified=True)
#template = odl.phantom.derenzo_sources(space)
target = transform(template)
# template, target = target, template
# Define data matching functional
data_matching = odl.solvers.L2Norm(space).translated(target)
lie_grp = lgd.Diff(space, coord_space=coord_space)
geometric_deform_action = lgd.GeometricDeformationAction(lie_grp, space)
scale_action = lgd.JacobianDeterminantScalingAction(lie_grp, space)
if action_type == 'mass_preserving':
deform_action = lgd.ComposedAction(geometric_deform_action, scale_action)
elif action_type == 'geometric':
deform_action = geometric_deform_action
else:
assert False
w = space.one()
grid = space.element(lambda x: np.cos(x[0] * np.pi * 5)**20 + np.cos(x[1] * np.pi * 5)**20)
# Create regularizing functional
regularizer = 1 * odl.solvers.KullbackLeibler(space, prior=w)
#regularizer = 2 * odl.solvers.L2NormSquared(space).translated(w)
# Create action
regularizer_action = lgd.JacobianDeterminantScalingAction(lie_grp, space)
# Initial guess
g = lie_grp.identity
# Combine action and functional into single object.
action = lgd.ProductSpaceAction(deform_action, regularizer_action, geometric_deform_action)
x = action.domain.element([template, w, grid]).copy()
f = odl.solvers.SeparableSum(data_matching, regularizer, odl.solvers.ZeroFunctional(space))
# Show some results, reuse the plot
template.show('template')
target.show('target')
# Create callback that displays the current iterate and prints the function
# value
callback = odl.solvers.CallbackShow('diffemorphic matching', step=20)
callback &= odl.solvers.CallbackPrint(f)
# Smoothing
filter_width = 1.0 # standard deviation of the Gaussian filter
ft = odl.trafos.FourierTransform(space)
c = filter_width ** 2 / 4.0 ** 2
gaussian = ft.range.element(lambda x: np.exp(-np.sqrt((x[0] ** 2 + x[1] ** 2) * c)))
convolution = ft.inverse * gaussian * ft
class AinvClass(odl.Operator):
def _call(self, x):
return [convolution(di) for di in x.data]
Ainv = AinvClass(domain=lie_grp.associated_algebra, range=lie_grp.associated_algebra, linear=True)
# Step length method
def steplen(itern):
# print(5e-2 / np.log(2 + itern))
return 1e-2 / np.log(10 + itern)
line_search = odl.solvers.PredefinedLineSearch(steplen)
# line_search = 3e-4
# Solve via gradient flow
result = lgd.gradient_flow_solver(x, f, g, action, Ainv=Ainv,
niter=2000, line_search=line_search,
callback=callback)
result.data.show('Resulting diffeo')
(result.data - lie_grp.identity.data).show('translations')
(result.data_inv - lie_grp.identity.data).show('translations inverse')
| gpl-3.0 |
fengyc/flasky | migrations/versions/cb9983b1a2b6_init.py | 1 | 1175 | """'init'
Revision ID: cb9983b1a2b6
Revises: None
Create Date: 2016-01-28 21:20:28.690752
"""
# revision identifiers, used by Alembic.
revision = 'cb9983b1a2b6'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_table('users')
op.drop_table('roles')
### end Alembic commands ###
| mit |
bramwalet/Subliminal.bundle | Contents/Libraries/Shared/requests/packages/urllib3/util/ssl_.py | 305 | 4235 | from binascii import hexlify, unhexlify
from hashlib import md5, sha1
from ..exceptions import SSLError
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, rest = divmod(len(fingerprint), 2)
if rest or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
    # We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
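# Minimal illustrative sketch (not part of the original module): the helper only
# hashes the raw bytes it is given, so any byte string works for a quick check.
#
#   from hashlib import sha1
#   cert = b'dummy certificate bytes'
#   fp = ':'.join('%02x' % b for b in bytearray(sha1(cert).digest()))
#   assert_fingerprint(cert, fp)   # silent on a match, raises SSLError otherwise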
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
like resolve_cert_reqs
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
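# Illustrative examples (assumed behaviour, mirroring the docstrings above):
#
#   resolve_cert_reqs(None)             # -> ssl.CERT_NONE
#   resolve_cert_reqs('REQUIRED')       # -> ssl.CERT_REQUIRED
#   resolve_cert_reqs('CERT_REQUIRED')  # -> ssl.CERT_REQUIRED
#   resolve_ssl_version('TLSv1')        # -> ssl.PROTOCOL_TLSv1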
if SSLContext is not None: # Python 3.2+
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
"""
All arguments except `server_hostname` have the same meaning as for
:func:`ssl.wrap_socket`
:param server_hostname:
Hostname of the expected certificate
"""
context = SSLContext(ssl_version)
context.verify_mode = cert_reqs
        # Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
context.options |= OP_NO_COMPRESSION
if ca_certs:
try:
context.load_verify_locations(ca_certs)
# Py32 raises IOError
# Py33 raises FileNotFoundError
except Exception as e: # Reraise as SSLError
raise SSLError(e)
if certfile:
# FIXME: This block needs a test.
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
else: # Python 3.1 and earlier
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs, cert_reqs=cert_reqs,
ssl_version=ssl_version)
| mit |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Twitter/FriendsAndFollowers/FriendshipsShow.py | 5 | 5218 | # -*- coding: utf-8 -*-
###############################################################################
#
# FriendshipsShow
# Returns detailed information about the relationship between two users.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FriendshipsShow(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the FriendshipsShow Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(FriendshipsShow, self).__init__(temboo_session, '/Library/Twitter/FriendsAndFollowers/FriendshipsShow')
def new_input_set(self):
return FriendshipsShowInputSet()
def _make_result_set(self, result, path):
return FriendshipsShowResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return FriendshipsShowChoreographyExecution(session, exec_id, path)
class FriendshipsShowInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the FriendshipsShow
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.)
"""
super(FriendshipsShowInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.)
"""
super(FriendshipsShowInputSet, self)._set_input('AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.)
"""
super(FriendshipsShowInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.)
"""
super(FriendshipsShowInputSet, self)._set_input('ConsumerSecret', value)
def set_SourceScreenName(self, value):
"""
Set the value of the SourceScreenName input for this Choreo. ((conditional, string) The screen_name of the subject user. Required unless specifying the SourceUserID instead.)
"""
super(FriendshipsShowInputSet, self)._set_input('SourceScreenName', value)
def set_SourceUserID(self, value):
"""
Set the value of the SourceUserID input for this Choreo. ((conditional, string) The ID of the subject user. Required unless specifying the SourceScreenName instead.)
"""
super(FriendshipsShowInputSet, self)._set_input('SourceUserID', value)
def set_TargetScreenName(self, value):
"""
Set the value of the TargetScreenName input for this Choreo. ((conditional, string) The screen_name of the target user. Required unless specifying the TargetUserID instead.)
"""
super(FriendshipsShowInputSet, self)._set_input('TargetScreenName', value)
def set_TargetUserID(self, value):
"""
Set the value of the TargetUserID input for this Choreo. ((conditional, string) The ID of the target user. Required unless specifying the TargetScreenName instead.)
"""
super(FriendshipsShowInputSet, self)._set_input('TargetUserID', value)
class FriendshipsShowResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the FriendshipsShow Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.)
"""
return self._output.get('Response', None)
class FriendshipsShowChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return FriendshipsShowResultSet(response, path)
| gpl-2.0 |
mcs07/mongodb-chemistry | mchem/screening.py | 1 | 3656 | # -*- coding: utf-8 -*-
"""
mchem.screening
~~~~~~~~~~~~~~~
Functions for analysing screening methods for chemical searches in MongoDB.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import logging
from math import ceil
import numpy as np
from .similarity import similarity_search
log = logging.getLogger(__name__)
def screen(mol, fingerprinter, fp_collection, threshold=0.8, count_collection=None, reqbits=True, counts=True):
"""Return the number of molecules remaining after screening."""
qfp = fingerprinter.generate(mol)
qn = len(qfp) # Number of bits in query fingerprint
qmin = int(ceil(qn * threshold)) # Minimum number of bits in results fingerprints
qmax = int(qn / threshold) # Maximum number of bits in results fingerprints
ncommon = qn - qmin + 1 # Number of fingerprint bits in which at least one must be in common
query = {}
if reqbits:
if count_collection:
# Use the count_collection to specifically get the rarest required bits
reqbits = [count['_id'] for count in count_collection.find({'_id': {'$in': qfp}}).sort('count', 1).limit(ncommon)]
else:
# Just randomly choose the required bits
reqbits = qfp[:ncommon]
query['bits'] = {'$in': reqbits}
if counts:
query['count'] = {'$gte': qmin, '$lte': qmax}
remaining = fp_collection.find(query).count()
return remaining
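# Hypothetical usage sketch (the database and collection names are assumptions,
# not part of this module):
#
#   from pymongo import MongoClient
#   db = MongoClient().chem
#   remaining = screen(qmol, fingerprinter, db.morgan_fp, threshold=0.8,
#                      count_collection=db.morgan_fp_counts)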
def test_screening(mols, fingerprinter, fp_collection, result_collection, threshold=0.8, count_collection=None, reqbits=True, counts=True):
"""Test how different screening methods can constrain the molecule collection."""
log.info('Testing screening: fp: %s Threshold: %s' % (fingerprinter.name, threshold))
log.info('Methods: counts: %s reqbits: %s rarest: %s' % (counts, reqbits, count_collection is not None))
result = {
'fp': fingerprinter.name,
'threshold': threshold,
'remaining': [],
'total': fp_collection.count(),
'reqbits': reqbits,
'counts': counts,
'rarest': count_collection is not None
}
for i, qmol in enumerate(mols):
remain = screen(qmol, fingerprinter, fp_collection, threshold, count_collection, reqbits, counts)
log.debug('Query molecule %s of %s: %s remaining' % (i+1, len(mols), remain))
result['remaining'].append(remain)
result['median_remaining'] = np.median(result['remaining'])
result['mean_remaining'] = np.mean(result['remaining'])
result_collection.insert(result)
def test_ideal(mols, fingerprinter, fp_collection, result_collection, threshold=0.8, count_collection=None):
"""Run exact similarity search to find the ideal theoretical maximum screening ability."""
log.info('Testing ideal screening: fp: %s Threshold: %s' % (fingerprinter.name, threshold))
result = {
'fp': fingerprinter.name,
'threshold': threshold,
'remaining': [],
'total': fp_collection.count(),
'ideal': True
}
for i, qmol in enumerate(mols):
remain = len(similarity_search(qmol, fingerprinter, fp_collection, threshold, count_collection))
log.debug('Query molecule %s of %s: %s remaining' % (i+1, len(mols), remain))
result['remaining'].append(remain)
result['median_remaining'] = np.median(result['remaining'])
result['mean_remaining'] = np.mean(result['remaining'])
result_collection.insert(result)
| mit |
anthonydillon/horizon | openstack_dashboard/test/integration_tests/pages/project/compute/access_and_security/keypairspage.py | 49 | 2716 | # Copyright 2014 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class KeypairsPage(basepage.BaseNavigationPage):
_key_pairs_table_locator = (by.By.ID, 'keypairs')
KEY_PAIRS_TABLE_ACTIONS = ("create_key_pair", "import_key_pair",
"delete_key_pair")
KEY_PAIRS_TABLE_ROW_ACTION = "delete_key_pair"
KEY_PAIRS_TABLE_NAME_COLUMN_INDEX = 0
CREATE_KEY_PAIR_FORM_FIELDS = ('name',)
def __init__(self, driver, conf):
super(KeypairsPage, self).__init__(driver, conf)
self._page_title = "Access & Security"
def _get_row_with_keypair_name(self, name):
return self.keypairs_table.get_row(
self.KEY_PAIRS_TABLE_NAME_COLUMN_INDEX, name)
@property
def keypairs_table(self):
src_elem = self._get_element(*self._key_pairs_table_locator)
return tables.SimpleActionsTableRegion(self.driver, self.conf,
src_elem,
self.KEY_PAIRS_TABLE_ACTIONS,
self.KEY_PAIRS_TABLE_ROW_ACTION)
@property
def create_keypair_form(self):
return forms.FormRegion(self.driver, self.conf, None,
self.CREATE_KEY_PAIR_FORM_FIELDS)
@property
def delete_keypair_form(self):
return forms.BaseFormRegion(self.driver, self.conf, None)
def is_keypair_present(self, name):
return bool(self._get_row_with_keypair_name(name))
def create_keypair(self, keypair_name):
self.keypairs_table.create_key_pair.click()
self.create_keypair_form.name.text = keypair_name
self.create_keypair_form.submit.click()
def delete_keypair(self, name):
self._get_row_with_keypair_name(name).delete_key_pair.click()
self.delete_keypair_form.submit.click()
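# Illustrative usage sketch (the driver and conf fixtures come from the
# surrounding integration test harness and are assumptions here):
#
#   page = KeypairsPage(driver, conf)
#   page.create_keypair('test-keypair')
#   assert page.is_keypair_present('test-keypair')
#   page.delete_keypair('test-keypair')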
| apache-2.0 |
XiaominZhang/Impala | tests/comparison/funcs.py | 19 | 24787 | # Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from itertools import ifilter
from tests.comparison.common import ValExpr
from tests.comparison.types import (
Boolean,
Char,
DataType,
Decimal,
Float,
Int,
Number,
Timestamp,
TYPES)
AGG_FUNCS = list() # All aggregate functions will be added
ANALYTIC_FUNCS = list() # All analytic functions will be added
FUNCS = list() # All non-aggregate/analytic functions will be added
class Arg(object):
'''Represents an argument in a function signature.
data_type may be either a DataType or a list of DataTypes. A list is used to
represent a subquery.
If can_be_null is False, a NULL value should never be passed into the function
during execution. This is used to maintain consistency across databases. For example
if Impala and Postgresql both implement function foo but the results differ when
the args to foo are NULL, then this flag can be used to prevent NULL values.
If can_be_null_literal is False, the literal value NULL should never be an argument
to the function. This is provided to workaround problems involving function signature
resolution during execution. An alternative would be to CAST(NULL AS INT).
determines_signature is used to signify that this arg is used to determine the
signature during execution. This implies that the function has multiple signatures
with the same number of arguments and at least one of the "determines_signature"
arguments must be non-NULL in order to determine which signature to use during
execution. An example is "SELECT GREATEST(NULL, NULL)" would result in an error
during execution in Postgresql because the resulting data type could not be
determined. An alternative would be to ensure that each modeled function contains
the full set of possible signatures, then see if "foo(NULL)" would be ambiguous
and if so use "foo(CAST(NULL AS INT))" instead.
'''
def __init__(self,
data_type,
require_constant=False,
min_value=None,
can_be_null=True,
can_be_null_literal=True,
determines_signature=False):
self.type = data_type
self.require_constant = require_constant
self.min_value = min_value
self.can_be_null = can_be_null
self.can_be_null_literal = can_be_null_literal
self.determines_signature = determines_signature
@property
def is_subquery(self):
return isinstance(self.type, list)
def validate(self, expr, skip_nulls=False):
if not issubclass(expr.type, self.type):
raise Exception('Expr type is %s but expected %s' % (expr.type, self.type))
if self.require_constant and not expr.is_constant:
raise Exception('A constant is required')
if self.min_value is not None and expr.val < self.min_value:
      raise Exception('Minimum value not met')
if skip_nulls and expr.is_constant and expr.val is None:
return
if expr.is_constant and expr.val is None and not self.can_be_null_literal:
raise Exception('A NULL literal is not allowed')
def __repr__(self):
_repr = 'Arg<type: '
if self.is_subquery:
_repr += 'subquery[' + ', '.join([type_.__name__ for type_ in self.type]) + ']'
else:
_repr += self.type.__name__
if self.require_constant:
_repr += ', constant: True'
if self.min_value:
_repr += ', min: %s' % self.min_value
_repr += '>'
return _repr
class Signature(object):
def __init__(self, func, return_type, *args):
self.func = func
self.return_type = return_type
self.args = list(args)
@property
def input_types(self):
return self.args[1:]
class Func(ValExpr):
'''Base class for functions'''
_NAME = None # Helper for the classmethod name()
_SIGNATURES = list() # Helper for the classmethod signatures()
@classmethod
def name(cls):
'''Returns the name of the function. Multiple functions may have the same name.
For example, COUNT will have a separate Func class for the analytic and aggregate
versions but both will have the same value of name().
'''
return cls.__name__ if cls._NAME is None else cls._NAME
@classmethod
def signatures(cls):
'''Returns the available signatures for the function. Varargs are not supported, a
subset of possible signatures must be chosen.
'''
return cls._SIGNATURES
@classmethod
def create_from_args(cls, *val_exprs):
'''Constructor for instantiating from values. The return types of the exprs will be
inspected and used to find the function signature. If no signature can be found
an error will be raised.
'''
for signature in cls.signatures():
if len(signature.args) != len(val_exprs):
continue
for idx, arg in enumerate(val_exprs):
if not issubclass(arg.type, signature.args[idx].type):
break
else:
break
else:
raise Exception('No signature matches the given arguments: %s' % (val_exprs, ))
return cls(signature, *val_exprs)
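  # Illustrative note (hypothetical usage, not part of the original file): once
  # create_func('Plus', ...) further below has registered its signatures, an
  # expression node can be built from existing val exprs with something like
  #   Plus.create_from_args(int_expr_a, int_expr_b)
  # which resolves the matching (Int, Int) signature or raises if none matches.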
def __init__(self, signature, *val_exprs):
'''"signature" should be one of the available signatures at the class level and
signifies which function call this instance is intended to represent.
'''
if signature not in self.signatures():
raise Exception('Unknown signature: %s' % (signature, ))
self.signature = signature
if val_exprs:
self.args = list(val_exprs)
else:
self.args = list()
for arg in signature.args:
if arg.is_subquery:
self.args.append([subtype(None) for subtype in arg.type])
else:
self.args.append(arg.type(arg.min_value))
@property
def exact_type(self):
return self.signature.return_type
def validate(self, skip_nulls=False):
if not len(self.args) == len(self.signature.args):
raise Exception('Signature length mismatch')
for idx, signature_arg in enumerate(self.signature.args):
signature_arg.validate(self.args[idx], skip_nulls=skip_nulls)
def contains_subquery(self):
for signature_arg in self.signature.args:
if signature_arg.is_subquery:
return True
return any(self.iter_exprs(lambda expr: expr.is_func and expr.contains_subquery))
def iter_exprs(self, filter=None):
'''Returns an iterator over all val_exprs including those nested within this
function's args.
'''
for arg in self.args:
if not isinstance(arg, ValExpr):
continue
if not filter or filter(arg):
yield arg
for expr in arg.iter_exprs(filter=filter):
yield expr
def __hash__(self):
return hash(type(self)) + hash(self.signature) + hash(tuple(self.args))
def __eq__(self, other):
if self is other:
return True
if not type(other) == type(self):
return False
return self.signature == other.signature and self.args == other.args
class AggFunc(Func):
def __init__(self, *args):
Func.__init__(self, *args)
self.distinct = False
def validate(self, skip_nulls=False):
super(AggFunc, self).validate(skip_nulls=skip_nulls)
for arg in self.args:
if arg.contains_agg:
raise Exception('Aggregate functions may not contain other aggregates')
if self.contains_analytic:
raise Exception('Aggregate functions may not contain analytics')
class AnalyticFunc(Func):
HAS_IMPLICIT_WINDOW = False
SUPPORTS_WINDOWING = True
REQUIRES_ORDER_BY = False
def __init__(self, *args):
Func.__init__(self, *args)
self.partition_by_clause = None
self.order_by_clause = None
self.window_clause = None
def validate(self, skip_nulls=False):
super(AnalyticFunc, self).validate(skip_nulls=skip_nulls)
for arg in self.args:
if arg.contains_analytic:
raise Exception('Analytic functions may not contain other analytics')
class PartitionByClause(object):
def __init__(self, val_exprs):
self.val_exprs = val_exprs
class WindowClause(object):
def __init__(self, range_or_rows, start_boundary, end_boundary=None):
self.range_or_rows = range_or_rows
self.start_boundary = start_boundary
self.end_boundary = end_boundary
class WindowBoundary(object):
UNBOUNDED_PRECEDING = 'UNBOUNDED PRECEDING'
PRECEDING = 'PRECEDING'
CURRENT_ROW = 'CURRENT ROW'
FOLLOWING = 'FOLLOWING'
UNBOUNDED_FOLLOWING = 'UNBOUNDED FOLLOWING'
def __init__(self, boundary_type, val_expr=None):
self.boundary_type = boundary_type
self.val_expr = val_expr
# It's a lot of work to support this but it should be less error prone than explicitly
# listing each signature.
def create_func(name, returns=None, accepts=[], signatures=[], base_type=Func):
'''Convenience function for creating a function class. The class is put into the
global namespace just as though the class had been declared using the "class"
keyword.
The name of the class is "name". "base_type" can be used to specify the base class.
The signature(s) of the class can be defined in one of three ways. "returns" and
"accepts" can be used together but not in combination with "signatures".
1) "signatures" should be a list of lists. Each entry corresponds to a single
signature. Each item in the signature can be either an Arg or a DataType or
a list of the preceding two types. The first entry in the list is the return
type, the remainder are the input types. DataType is considered a placeholder
for all other base types (Char, Number, Boolean, Timestamp). If a signature
          contains DataType, the entire signature will be replaced with multiple
signatures, one for each base type. Number is also considered a placeholder
but the replacements will be the cross-product of (Int, Float, and Decimal) *
the number of Number's used, except that the return type is the maximum of
the input types. A function that accepts a subquery is represented by a list of
Arg or DataType.
Ex signatures:
[Int, Double]: Could be a signature for FLOOR
[Int, DataType]: Could be a signature for COUNT
=== [Int, Char] + [Int, Number] + [Int, Boolean] + ...
[Number, Number, Number]: Could be a signature for Multiply
=== ... + [Float, Int, Float] + ... (but not [Int, Float, Float])
[Boolean, DataType, [DataType]]: Could be a signature for In with a subquery
2) "returns" and "accepts" is equivalent to
signatures=[[returns, accepts[0], accepts[1], ..., accepts[n]]]
3) "accepts" is equivalent to
signatures=[[accepts[0], accepts[0], accepts[1], ..., accepts[n]]]
'''
if (returns or accepts) and signatures:
raise Exception('Cannot mix signature specification arguments')
type_name = base_type.__name__.replace('Func', '') + name
func = type(type_name, (base_type, ), {'_NAME': name, '_SIGNATURES': []})
globals()[type_name] = func
if signatures:
signatures = deepcopy(signatures)
if base_type == Func:
FUNCS.append(func)
if returns:
signatures = [Signature(func, returns)]
elif accepts:
signatures = [Signature(func, accepts[0])]
if accepts:
signatures[0].args.extend(accepts)
# Replace convenience inputs with proper types
for idx, signature in enumerate(signatures):
if not isinstance(signature, Signature):
signature = Signature(func, signature[0], *signature[1:])
signatures[idx] = signature
if isinstance(signature.return_type, Arg):
signature.return_type = signature.return_type.type
for arg_idx, arg in enumerate(signature.args):
if not isinstance(arg, Arg):
signature.args[arg_idx] = Arg(arg)
# Replace "DataType" args with actual types
non_wildcard_signatures = list()
for replacement_type in TYPES:
for signature_idx, signature in enumerate(signatures):
replacement_signature = None
for arg_idx, arg in enumerate(signature.args):
if arg.is_subquery:
for sub_idx, subtype in enumerate(arg.type):
if subtype == DataType:
if not replacement_signature:
replacement_signature = deepcopy(signature)
replacement_signature.args[arg_idx].type[sub_idx] = replacement_type
elif arg.type == DataType:
replacement_arg = deepcopy(arg)
replacement_arg.type = replacement_type
if not replacement_signature:
replacement_signature = deepcopy(signature)
replacement_signature.args[arg_idx] = replacement_arg
if signature.return_type == DataType:
if not replacement_signature:
raise Exception('Wildcard return type requires at least one wildcard input arg')
replacement_signature.return_type = replacement_type
if replacement_signature:
non_wildcard_signatures.append(replacement_signature)
else:
non_wildcard_signatures.append(signature)
# This signature did not contain any "DataType" args, remove it from the list
# so it isn't processed again.
del signatures[signature_idx]
# Replace "Number" args... Number wildcards work differently than DataType wildcards.
# foo(DataType, DataType) expands to foo(Boolean, Boolean), foo(Char, Char), etc
# but foo(Number, Number) expands to foo(Decimal, Decimal), foo(Decimal, Int), etc
# In other words, a cross product needs to be done for Number wildcards. If the return
# type is also "Number", then it will be replaced with the largest type of the input
# replacements. Ex, foo(Decimal, Int) would return Decimal.
# Find wildcard signatures
signatures = non_wildcard_signatures
wildcard_signatures = list()
for signature_idx, signature in enumerate(signatures):
is_wildcard = False
for arg_idx, arg in enumerate(signature.args):
if arg.is_subquery:
for subtype in arg.type:
if subtype == Number:
is_wildcard = True
break
elif arg.type == Number:
is_wildcard = True
if is_wildcard:
if signature.return_type == Number:
signature.return_type = (Number, Int)
wildcard_signatures.append(signature)
del signatures[signature_idx]
break
# Helper function to reduce code duplication
def update_return_type_and_append(
replacement_type,
replacement_signature,
wildcard_signatures):
if isinstance(replacement_signature.return_type, tuple):
replacement_signature.return_type = \
(Number, max(replacement_type, replacement_signature.return_type[1]))
wildcard_signatures.append(replacement_signature)
# Fully replace each wildcard one at a time so that a cross product is created
while wildcard_signatures:
signature = wildcard_signatures.pop()
is_wildcard = False
for arg_idx, arg in enumerate(signature.args):
replacement_signature = None
if arg.is_subquery:
if any(ifilter(lambda type_: type_ == Number, arg.type)):
raise Exception('Number not accepted in subquery signatures')
elif arg.type == Number:
for replacement_type in [Decimal, Int, Float]:
replacement_signature = deepcopy(signature)
replacement_signature.args[arg_idx].type = replacement_type
is_wildcard = True
update_return_type_and_append(
replacement_type, replacement_signature, wildcard_signatures)
if is_wildcard:
break
if not is_wildcard:
if isinstance(signature.return_type, tuple):
signature.return_type = signature.return_type[1]
signatures.append(signature)
func._SIGNATURES = signatures
return func
def create_agg(name, returns=None, accepts=[], signatures=[]):
func = create_func(name, returns, accepts, signatures, AggFunc)
AGG_FUNCS.append(func)
return func
def create_analytic(
name,
returns=None,
accepts=[],
signatures=[],
require_order=False,
supports_window=True):
func = create_func(name, returns, accepts, signatures, AnalyticFunc)
func.REQUIRES_ORDER_BY = require_order
func.SUPPORTS_WINDOWING = supports_window
ANALYTIC_FUNCS.append(func)
return func
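# Illustrative note on the wildcard expansion above (hypothetical, not executed):
# create_func('Foo', signatures=[[Number, Number, Number]]) would register the
# Int/Decimal/Float cross product, e.g. Foo(Int, Int) -> Int and
# Foo(Decimal, Int) -> Decimal, as described in the create_func docstring.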
create_func('IsNull', returns=Boolean, accepts=[DataType])
create_func('IsNotNull', returns=Boolean, accepts=[DataType])
create_func('And', returns=Boolean, accepts=[Boolean, Boolean])
create_func('Or', returns=Boolean, accepts=[Boolean, Boolean])
create_func('Exists', returns=Boolean, accepts=[[DataType]])
create_func('NotExists', returns=Boolean, accepts=[[DataType]])
for func_name in ['In', 'NotIn']:
# Avoid equality comparison on FLOATs
create_func(func_name, signatures=[
[Boolean, Boolean, [Boolean]],
[Boolean, Boolean, Boolean, Boolean],
[Boolean, Char, [Char]],
[Boolean, Char, Char, Char],
[Boolean, Decimal, [Decimal]],
[Boolean, Decimal, [Int]],
[Boolean, Decimal, Decimal, Decimal],
[Boolean, Decimal, Decimal, Int],
[Boolean, Decimal, Int, Decimal],
[Boolean, Int, [Decimal]],
[Boolean, Int, [Int]],
[Boolean, Int, Int, Int],
[Boolean, Int, Decimal, Int],
[Boolean, Int, Int, Decimal],
[Boolean, Timestamp, [Timestamp]],
[Boolean, Timestamp, Timestamp, Timestamp]])
for comparator in ['GreaterThan', 'LessThan']:
create_func(comparator, signatures=[
[Boolean, Number, Number],
[Boolean, Timestamp, Timestamp]])
for comparator in ['GreaterThanOrEquals', 'LessThanOrEquals']:
# Avoid equality comparison on FLOATs
create_func(comparator, signatures=[
[Boolean, Decimal, Decimal],
[Boolean, Decimal, Int],
[Boolean, Int, Decimal],
[Boolean, Int, Int],
[Boolean, Timestamp, Timestamp]])
for comparator in ['Equals', 'NotEquals']:
# Avoid equality comparison on FLOATs
create_func(comparator, signatures=[
[Boolean, Boolean, Boolean],
[Boolean, Char, Char],
[Boolean, Decimal, Decimal],
[Boolean, Decimal, Int],
[Boolean, Int, Decimal],
[Boolean, Int, Int],
[Boolean, Timestamp, Timestamp]])
create_func('If', returns=DataType,
accepts=[Boolean, Arg(DataType, determines_signature=True), DataType])
# Don't allow + or - when using floats/doubles. This is done to avoid something like
# (10000.00919 - 10000) * 10000 which would lead to random values.
for operator in ['Plus', 'Minus']:
create_func(operator, signatures=[
[Decimal,
Arg(Decimal, determines_signature=True),
Arg(Decimal, determines_signature=True)],
[Decimal,
Arg(Decimal, determines_signature=True),
Arg(Int, determines_signature=True)],
[Decimal,
Arg(Int, determines_signature=True),
Arg(Decimal, determines_signature=True)],
[Int,
Arg(Int, determines_signature=True),
Arg(Int, determines_signature=True)]])
create_func('Multiply', signatures=[
[Number,
Arg(Number, determines_signature=True),
Arg(Number, determines_signature=True)]])
# Don't allow INT / INT, Postgresql results in an INT, but a FLOAT in most other databases
create_func('Divide', signatures=[
[Decimal,
Arg(Decimal, determines_signature=True),
Arg(Decimal, determines_signature=True)],
[Decimal,
Arg(Decimal, determines_signature=True),
Arg(Int, determines_signature=True)],
[Decimal,
Arg(Int, determines_signature=True),
Arg(Decimal, determines_signature=True)],
[Float,
Arg(Decimal, determines_signature=True),
Arg(Float, determines_signature=True)],
[Float,
Arg(Float, determines_signature=True),
Arg(Decimal, determines_signature=True)],
[Float,
Arg(Float, determines_signature=True),
Arg(Float, determines_signature=True)],
[Float,
Arg(Float, determines_signature=True),
Arg(Int, determines_signature=True)],
[Float,
Arg(Int, determines_signature=True),
Arg(Float, determines_signature=True)]])
create_func('Abs', signatures=[[Number, Arg(Number, determines_signature=True)]])
# Don't allow FLOAT/DOUBLE to become an INT (ie, an approximation to be treated as a
# precise value).
create_func('Floor', signatures=[[Decimal, Decimal], [Float, Float]])
create_func('Ceil', signatures=[[Decimal, Decimal], [Float, Float]])
# NULL handling in CONCAT differs between Impala and Postgresql
create_func('Concat',
accepts=[Arg(Char, can_be_null=False), Arg(Char, can_be_null=False)])
create_func('Trim', accepts=[Char])
create_func('Length', returns=Int, accepts=[Char])
for interval in ['Year', 'Month', 'Day', 'Hour', 'Minute', 'Second']:
create_func('Extract' + interval,
returns=Int, accepts=[Arg(Timestamp, can_be_null_literal=False)])
create_func(
'DateAdd' + interval,
returns=Timestamp,
# Determines signature in Postgresql
accepts=[Arg(Timestamp, determines_signature=True), Int])
create_func('Greatest', signatures=[
[Number,
Arg(Number, can_be_null=False, determines_signature=True),
Arg(Number, can_be_null=False, determines_signature=True)],
[Timestamp,
Arg(Timestamp, can_be_null=False, determines_signature=True),
Arg(Timestamp, can_be_null=False, determines_signature=True)]])
create_func('Least', signatures=[
[Number,
Arg(Number, can_be_null=False, determines_signature=True),
Arg(Number, can_be_null=False, determines_signature=True)],
[Timestamp,
Arg(Timestamp, can_be_null=False, determines_signature=True),
Arg(Timestamp, can_be_null=False, determines_signature=True)]])
create_func('Coalesce', signatures=[
[DataType,
Arg(DataType, determines_signature=True),
Arg(DataType, determines_signature=True)],
[DataType,
Arg(DataType, determines_signature=True),
Arg(DataType, determines_signature=True),
Arg(DataType, determines_signature=True)]])
# This is added so that query generation can assume that any return type can be
# produced by an aggregate or analytic with only one level of nesting.
# Ex: CAST(SUM(...) AS STRING)
create_func('CastAsChar', signatures=[[Char, Int]])
create_agg('Count', returns=Int, accepts=[Number])
create_agg('Max', signatures=[
[Number, Arg(Number, determines_signature=True)],
[Timestamp, Arg(Timestamp, determines_signature=True)]])
create_agg('Min', signatures=[
[Number, Arg(Number, determines_signature=True)],
[Timestamp, Arg(Timestamp, determines_signature=True)]])
create_agg('Sum', signatures=[
# FLOATs not allowed. See comment about Plus/Minus for info.
[Int, Arg(Int, determines_signature=True)],
[Decimal, Arg(Decimal, determines_signature=True)]])
create_agg('Avg', signatures=[
[Float, Arg(Int, determines_signature=True)],
[Decimal, Arg(Decimal, determines_signature=True)]])
create_analytic('Rank', require_order=True, supports_window=False, returns=Int)
create_analytic('DenseRank', require_order=True, supports_window=False, returns=Int)
create_analytic('RowNumber', require_order=True, supports_window=False, returns=Int)
create_analytic('Lead', require_order=True, supports_window=False, signatures=[
[DataType, Arg(DataType, determines_signature=True)],
[DataType,
Arg(DataType, determines_signature=True),
Arg(Int, require_constant=True, min_value=1)]])
create_analytic('Lag', require_order=True, supports_window=False, signatures=[
[DataType, Arg(DataType, determines_signature=True)],
[DataType,
Arg(DataType, determines_signature=True),
Arg(Int, require_constant=True, min_value=1)]])
create_analytic('FirstValue', require_order=True, signatures=[
[DataType, Arg(DataType, determines_signature=True)]])
create_analytic('LastValue', require_order=True, signatures=[
[DataType, Arg(DataType, determines_signature=True)]])
create_analytic('Max', signatures=[
[Number, Arg(Number, determines_signature=True)],
[Timestamp, Arg(Timestamp, determines_signature=True)]])
create_analytic('Min', signatures=[[Number, Number], [Timestamp, Timestamp]])
create_analytic('Sum', signatures=[[Int, Int], [Decimal, Decimal]]) # FLOATs not allowed
create_analytic('Count', returns=Int, accepts=[Number])
create_analytic('Avg', returns=Float, accepts=[Number])
| apache-2.0 |
davidevinavil/kernel_s500_cm10 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
vicnet/weboob | weboob/tools/config/dbmconfig.py | 2 | 2248 | # -*- coding: utf-8 -*-
# Copyright(C) 2016-2019 Edouard Lefebvre du Prey, Laurent Bachelier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import yaml
from .iconfig import ConfigError, IConfig
from .yamlconfig import WeboobDumper
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
try:
import anydbm as dbm
except ImportError:
import dbm
__all__ = ['DBMConfig']
class DBMConfig(IConfig):
def __init__(self, path):
self.path = path
def load(self, default={}):
self.storage = dbm.open(self.path, 'c')
def save(self):
if hasattr(self.storage, 'sync'):
self.storage.sync()
def get(self, *args, **kwargs):
key = '.'.join(args)
try:
value = self.storage[key]
value = yaml.load(value, Loader=Loader)
except KeyError:
if 'default' in kwargs:
value = kwargs.get('default')
else:
raise ConfigError()
except TypeError:
raise ConfigError()
return value
def set(self, *args):
key = '.'.join(args[:-1])
value = args[-1]
try:
self.storage[key] = yaml.dump(value, None, Dumper=WeboobDumper, default_flow_style=False)
except KeyError:
raise ConfigError()
except TypeError:
raise ConfigError()
def delete(self, *args):
key = '.'.join(args)
try:
del self.storage[key]
except KeyError:
raise ConfigError()
except TypeError:
raise ConfigError()
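# Illustrative usage sketch (the path and keys are assumptions):
#
#   config = DBMConfig('/tmp/backend.db')
#   config.load()
#   config.set('backend', 'login', 'john')
#   config.get('backend', 'login')   # -> 'john'
#   config.save()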
| lgpl-3.0 |
Ikke/ktane | modules/memory.py | 1 | 2079 | from window import Window
import curses
import logging
class Memory(Window):
def new(self):
self.stages = []
self.pos = 0
self.part = "pos"
self.title = "Memory"
def _event(self, ev, c):
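        # Key handling (descriptive note): 'p'/'l' choose whether the next digit
        # is stored as the stage's position or label, digits 1-4 record the value,
        # Enter advances to the next stage and 'r' resets the module.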
if c == "p":
self.part = "pos"
elif c == "l":
self.part = "lbl"
elif c in ['1', '2', '3', '4']:
if len(self.stages) == self.pos:
self.stages.append(Stage(**{self.part: c}))
else:
setattr(self.stages[self.pos], self.part, c)
self.part = "pos" if self.part == "lbl" else "lbl"
elif ev in [curses.KEY_ENTER, 10]:
self.pos = (self.pos +1) % 5
            self.part = "pos"
elif c == "r":
self.new()
def _update(self, win):
win.erase()
win.addstr("Memory\n", curses.A_BOLD)
win.addstr(table)
instruction = instructions[self.pos]
for nr, line in enumerate(instruction.split("\n")):
win.addstr(2 + nr, 14, line)
for index, stage in enumerate(self.stages):
logging.debug("{} {!s}".format(index, stage.pos))
win.addstr(5 + index * 2, 4, stage.pos)
win.addstr(5 + index * 2, 8, stage.lbl)
if self.part == "pos":
win.addstr(3,4, "P", curses.A_BOLD)
if self.part == "lbl":
win.addstr(3,8, "L", curses.A_BOLD)
win.addstr(5 + self.pos * 2, 0, str(self.pos + 1), curses.A_BOLD)
class Stage():
def __init__(self, pos="", lbl=""):
self.pos = pos
self.lbl = lbl
instructions = [
"""
1 -> 2nd
2 -> 2nd
3 -> 3rd
    4 -> 4th""",
"""
1 -> lbl 4
2 -> pos stg 1
3 -> 1st
4 -> pos stg 1""",
"""
1 -> lbl stg 2
2 -> lbl stg 1
3 -> 3rd
4 -> lbl 4""",
"""
1 -> pos stg 1
2 -> 1st
3 -> pos stg 2
4 -> pos stg 2""",
"""
1 -> lbl stg 1
2 -> lbl stg 2
3 -> lbl stg 4
4 -> lbl stg 3"""
]
table = """
+---+---+
| P | L |
+---+---+
1 | | |
+---+---+
2 | | |
+---+---+
3 | | |
+---+---+
4 | | |
+---+---+"""
| mit |
HunterConnelly/firecares | firecares/firestation/migrations/0023_auto_20160322_1340.py | 3 | 1464 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('firestation', '0022_firedepartment_twitter_handle'),
]
operations = [
migrations.RenameField(
model_name='staffing',
old_name='firefighter',
new_name='personnel',
),
migrations.RemoveField(
model_name='staffing',
name='chief_officer',
),
migrations.RemoveField(
model_name='staffing',
name='ems_emt',
),
migrations.RemoveField(
model_name='staffing',
name='ems_paramedic',
),
migrations.RemoveField(
model_name='staffing',
name='ems_supervisor',
),
migrations.RemoveField(
model_name='staffing',
name='firefighter_emt',
),
migrations.RemoveField(
model_name='staffing',
name='firefighter_paramedic',
),
migrations.RemoveField(
model_name='staffing',
name='officer',
),
migrations.RemoveField(
model_name='staffing',
name='officer_paramedic',
),
migrations.AddField(
model_name='staffing',
name='als',
field=models.BooleanField(default=False),
),
]
| mit |
cberry777/dd-agent | utils/service_discovery/config_stores.py | 6 | 2051 | # (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# project
from utils.service_discovery.abstract_config_store import AbstractConfigStore
from utils.service_discovery.abstract_config_store import CONFIG_FROM_AUTOCONF, CONFIG_FROM_FILE, CONFIG_FROM_TEMPLATE, TRACE_CONFIG # noqa imported somewhere else
from utils.service_discovery.etcd_config_store import EtcdStore
from utils.service_discovery.consul_config_store import ConsulStore
SD_CONFIG_BACKENDS = ['etcd', 'consul'] # noqa: used somewhere else
SD_TEMPLATE_DIR = '/datadog/check_configs'
def get_config_store(agentConfig):
if agentConfig.get('sd_config_backend') == 'etcd':
return EtcdStore(agentConfig)
elif agentConfig.get('sd_config_backend') == 'consul':
return ConsulStore(agentConfig)
else:
return StubStore(agentConfig)
def extract_sd_config(config):
"""Extract configuration about service discovery for the agent"""
sd_config = {}
if config.has_option('Main', 'sd_config_backend'):
sd_config['sd_config_backend'] = config.get('Main', 'sd_config_backend')
else:
sd_config['sd_config_backend'] = None
if config.has_option('Main', 'sd_template_dir'):
sd_config['sd_template_dir'] = config.get(
'Main', 'sd_template_dir')
else:
sd_config['sd_template_dir'] = SD_TEMPLATE_DIR
if config.has_option('Main', 'sd_backend_host'):
sd_config['sd_backend_host'] = config.get(
'Main', 'sd_backend_host')
if config.has_option('Main', 'sd_backend_port'):
sd_config['sd_backend_port'] = config.get(
'Main', 'sd_backend_port')
return sd_config
class StubStore(AbstractConfigStore):
"""Used when no valid config store was found. Allow to use auto_config."""
def _extract_settings(self, config):
pass
def get_client(self):
pass
def crawl_config_template(self):
# There is no user provided templates in auto_config mode
return False
| bsd-3-clause |
ramondelafuente/ansible | lib/ansible/cli/doc.py | 7 | 12586 | # (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import traceback
import textwrap
from ansible.compat.six import iteritems
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader, action_loader
from ansible.cli import CLI
from ansible.utils import module_docs
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
""" Vault command line class """
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.module_list = []
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [options] [module...]',
epilog='Show Ansible module documentation',
module_opts=True,
)
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available modules')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified module(s)')
self.options, self.args = self.parser.parse_args(self.args[1:])
display.verbosity = self.options.verbosity
def run(self):
super(DocCLI, self).run()
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
# list modules
if self.options.list_dir:
paths = module_loader._get_paths()
for path in paths:
self.find_modules(path)
self.pager(self.get_module_list_text())
return 0
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line module list
text = ''
for module in self.args:
try:
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
continue
if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
except:
display.vvv(traceback.print_exc())
display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
continue
if doc is not None:
# is there corresponding action plugin?
if module in action_loader:
doc['action'] = True
else:
doc['action'] = False
all_keys = []
for (k,v) in iteritems(doc['options']):
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = filename
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
if self.options.show_snippet:
text += self.get_snippet_text(doc)
else:
text += self.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception as e:
display.vvv(traceback.print_exc())
raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
self.pager(text)
return 0
def find_modules(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self.find_modules(module)
elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
continue
elif module.startswith('__'):
continue
elif module in C.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = os.path.splitext(module)[0] # removes the extension
self.module_list.append(module)
def get_module_list_text(self):
columns = display.columns
displace = max(len(x) for x in self.module_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for module in sorted(set(self.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
desc = self.tty_ify(doc.get('short_description', '?')).strip()
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if module.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
except:
raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def get_snippet_text(self, doc):
text = []
desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" action: %s" % (doc['module']))
pad = 31
subdent = ''.join([" " for a in xrange(pad)])
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
desc = CLI.tty_ify(" ".join(opt['description']))
required = opt.get('required', False)
if not isinstance(required, bool):
                raise AnsibleError("Incorrect value for 'Required', a boolean is needed: %s" % required)
if required:
s = o + "="
else:
s = o
text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent)))
text.append('')
return "\n".join(text)
def get_man_text(self, doc):
opt_indent=" "
text = []
text.append("> %s\n" % doc['module'].upper())
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
if isinstance(doc['description'], list):
desc = " ".join(doc['description'])
else:
desc = doc['description']
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" "))
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n%s\n" % doc['deprecated'])
if 'action' in doc and doc['action']:
text.append(" * note: %s\n" % "This module has a corresponding action plugin.")
if 'option_keys' in doc and len(doc['option_keys']) > 0:
text.append("Options (= is mandatory):\n")
for o in sorted(doc['option_keys']):
opt = doc['options'][o]
required = opt.get('required', False)
if not isinstance(required, bool):
                    raise AnsibleError("Incorrect value for 'Required', a boolean is needed: %s" % required)
if required:
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
if isinstance(opt['description'], list):
desc = " ".join(opt['description'])
else:
desc = opt['description']
if 'choices' in opt:
choices = ", ".join(str(i) for i in opt['choices'])
desc = desc + " (Choices: " + choices + ")"
if 'default' in opt or not required:
default = str(opt.get('default', '(null)'))
desc = desc + " [Default: " + default + "]"
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
notes = " ".join(doc['notes'])
text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), limit-6, initial_indent=" ", subsequent_indent=opt_indent))
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc['requirements'])
text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent))
if 'examples' in doc and len(doc['examples']) > 0:
text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
for ex in doc['examples']:
text.append("%s\n" % (ex['code']))
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
text.append(doc['plainexamples'])
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:")
text.append(doc['returndocs'])
text.append('')
maintainers = set()
if 'author' in doc:
if isinstance(doc['author'], basestring):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
if 'maintainers' in doc:
if isinstance(doc['maintainers'], basestring):
                maintainers.add(doc['maintainers'])
            else:
                maintainers.update(doc['maintainers'])
text.append('MAINTAINERS: ' + ', '.join(maintainers))
text.append('')
return "\n".join(text)
| gpl-3.0 |
nghia-huynh/gem5-stable | src/python/m5/debug.py | 51 | 3475 | # Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from UserDict import DictMixin
import internal
from internal.debug import SimpleFlag, CompoundFlag
from internal.debug import schedBreak, setRemoteGDBPort
from m5.util import printList
def help():
print "Base Flags:"
for name in sorted(flags):
if name == 'All':
continue
flag = flags[name]
children = [c for c in flag.kids() ]
if not children:
print " %s: %s" % (name, flag.desc())
print
print "Compound Flags:"
for name in sorted(flags):
if name == 'All':
continue
flag = flags[name]
children = [c for c in flag.kids() ]
if children:
print " %s: %s" % (name, flag.desc())
printList([ c.name() for c in children ], indent=8)
print
class AllFlags(DictMixin):
def __init__(self):
self._version = -1
self._dict = {}
def _update(self):
current_version = internal.debug.getAllFlagsVersion()
if self._version == current_version:
return
self._dict.clear()
for flag in internal.debug.getAllFlags():
self._dict[flag.name()] = flag
self._version = current_version
def __contains__(self, item):
self._update()
return item in self._dict
def __getitem__(self, item):
self._update()
return self._dict[item]
def keys(self):
self._update()
return self._dict.keys()
def values(self):
self._update()
return self._dict.values()
def items(self):
self._update()
return self._dict.items()
def iterkeys(self):
self._update()
return self._dict.iterkeys()
def itervalues(self):
self._update()
return self._dict.itervalues()
def iteritems(self):
self._update()
return self._dict.iteritems()
flags = AllFlags()
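# Illustrative usage (not part of the original module): 'flags' behaves like a
# read-only dict of debug flags, so a hypothetical flag named 'Exec' could be
# inspected with:
#     if 'Exec' in flags:
#         print flags['Exec'].desc()
# Call help() above to list the flags that are actually compiled in.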
| bsd-3-clause |
XiaodunServerGroup/ddyedx | common/djangoapps/course_groups/views.py | 12 | 6668 | from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_POST
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.http import HttpResponse
import json
import logging
import re
from courseware.courses import get_course_with_access
from edxmako.shortcuts import render_to_response
from . import cohorts
log = logging.getLogger(__name__)
def json_http_response(data):
"""
Return an HttpResponse with the data json-serialized and the right content
type header.
"""
return HttpResponse(json.dumps(data), content_type="application/json")
def split_by_comma_and_whitespace(s):
"""
    Split a string both by commas and whitespace. Returns a list.
"""
return re.split(r'[\s,]+', s)
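# Illustrative example (not part of the original source):
# split_by_comma_and_whitespace("alice, bob carol") returns ['alice', 'bob', 'carol'].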
@ensure_csrf_cookie
def list_cohorts(request, course_id):
"""
Return json dump of dict:
{'success': True,
'cohorts': [{'name': name, 'id': id}, ...]}
"""
get_course_with_access(request.user, course_id, 'staff')
all_cohorts = [{'name': c.name, 'id': c.id}
for c in cohorts.get_course_cohorts(course_id)]
return json_http_response({'success': True,
'cohorts': all_cohorts})
@ensure_csrf_cookie
@require_POST
def add_cohort(request, course_id):
"""
Return json of dict:
{'success': True,
'cohort': {'id': id,
'name': name}}
or
{'success': False,
'msg': error_msg} if there's an error
"""
get_course_with_access(request.user, course_id, 'staff')
name = request.POST.get("name")
if not name:
return json_http_response({'success': False,
'msg': "No name specified"})
try:
cohort = cohorts.add_cohort(course_id, name)
except ValueError as err:
return json_http_response({'success': False,
'msg': str(err)})
    return json_http_response({'success': True,
'cohort': {
'id': cohort.id,
'name': cohort.name
}})
@ensure_csrf_cookie
def users_in_cohort(request, course_id, cohort_id):
"""
Return users in the cohort. Show up to 100 per page, and page
using the 'page' GET attribute in the call. Format:
Returns:
Json dump of dictionary in the following format:
{'success': True,
'page': page,
'num_pages': paginator.num_pages,
'users': [{'username': ..., 'email': ..., 'name': ...}]
}
"""
get_course_with_access(request.user, course_id, 'staff')
# this will error if called with a non-int cohort_id. That's ok--it
    # shouldn't happen for valid clients.
cohort = cohorts.get_cohort_by_id(course_id, int(cohort_id))
paginator = Paginator(cohort.users.all(), 100)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
# return the first page
page = 1
users = paginator.page(page)
except EmptyPage:
# Page is out of range. Return last page
page = paginator.num_pages
        users = paginator.page(page)
user_info = [{'username': u.username,
'email': u.email,
'name': '{0} {1}'.format(u.first_name, u.last_name)}
for u in users]
return json_http_response({'success': True,
'page': page,
'num_pages': paginator.num_pages,
'users': user_info})
@ensure_csrf_cookie
@require_POST
def add_users_to_cohort(request, course_id, cohort_id):
"""
Return json dict of:
{'success': True,
'added': [{'username': username,
'name': name,
'email': email}, ...],
'conflict': [{'username_or_email': ...,
'msg': ...}], # in another cohort
'present': [str1, str2, ...], # already there
'unknown': [str1, str2, ...]}
"""
get_course_with_access(request.user, course_id, 'staff')
cohort = cohorts.get_cohort_by_id(course_id, cohort_id)
users = request.POST.get('users', '')
added = []
present = []
conflict = []
unknown = []
for username_or_email in split_by_comma_and_whitespace(users):
try:
user = cohorts.add_user_to_cohort(cohort, username_or_email)
added.append({'username': user.username,
'name': "{0} {1}".format(user.first_name, user.last_name),
'email': user.email,
})
except ValueError:
present.append(username_or_email)
except User.DoesNotExist:
unknown.append(username_or_email)
except cohorts.CohortConflict as err:
conflict.append({'username_or_email': username_or_email,
'msg': str(err)})
return json_http_response({'success': True,
'added': added,
'present': present,
'conflict': conflict,
'unknown': unknown})
@ensure_csrf_cookie
@require_POST
def remove_user_from_cohort(request, course_id, cohort_id):
"""
Expects 'username': username in POST data.
Return json dict of:
{'success': True} or
{'success': False,
'msg': error_msg}
"""
get_course_with_access(request.user, course_id, 'staff')
username = request.POST.get('username')
if username is None:
return json_http_response({'success': False,
'msg': 'No username specified'})
cohort = cohorts.get_cohort_by_id(course_id, cohort_id)
try:
user = User.objects.get(username=username)
cohort.users.remove(user)
return json_http_response({'success': True})
except User.DoesNotExist:
log.debug('no user')
return json_http_response({'success': False,
'msg': "No user '{0}'".format(username)})
def debug_cohort_mgmt(request, course_id):
"""
Debugging view for dev.
"""
# add staff check to make sure it's safe if it's accidentally deployed.
get_course_with_access(request.user, course_id, 'staff')
context = {'cohorts_ajax_url': reverse('cohorts',
kwargs={'course_id': course_id})}
return render_to_response('/course_groups/debug.html', context)
| agpl-3.0 |
mcaleavya/bcc-scripts | statsnoop.py | 1 | 4709 | #!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# statsnoop Trace stat() syscalls.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: statsnoop [-h] [-t] [-x] [-p PID]
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 08-Feb-2016 Brendan Gregg Created this.
# 17-Feb-2016 Allan McAleavy updated for BPF_PERF_OUTPUT
from __future__ import print_function
from bcc import BPF
import argparse
import ctypes as ct
# arguments
examples = """examples:
./statsnoop # trace all stat() syscalls
./statsnoop -t # include timestamps
./statsnoop -x # only show failed stats
./statsnoop -p 181 # only trace PID 181
"""
parser = argparse.ArgumentParser(
description="Trace stat() syscalls",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-x", "--failed", action="store_true",
help="only show failed stats")
parser.add_argument("-p", "--pid",
help="trace this PID only")
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <uapi/linux/limits.h>
#include <linux/sched.h>
struct val_t {
u32 pid;
u64 ts;
char comm[TASK_COMM_LEN];
const char *fname;
};
struct data_t {
u32 pid;
u64 ts;
u64 delta;
int ret;
char comm[TASK_COMM_LEN];
char fname[NAME_MAX];
};
BPF_HASH(args_filename, u32, const char *);
BPF_HASH(infotmp, u32, struct val_t);
BPF_PERF_OUTPUT(events);
int trace_entry(struct pt_regs *ctx, const char __user *filename)
{
struct val_t val = {};
u32 pid = bpf_get_current_pid_tgid();
FILTER
if (bpf_get_current_comm(&val.comm, sizeof(val.comm)) == 0) {
val.pid = bpf_get_current_pid_tgid();
val.ts = bpf_ktime_get_ns();
val.fname = filename;
infotmp.update(&pid, &val);
}
return 0;
};
int trace_return(struct pt_regs *ctx)
{
u32 pid = bpf_get_current_pid_tgid();
struct val_t *valp;
struct data_t data = {};
u64 tsp = bpf_ktime_get_ns();
valp = infotmp.lookup(&pid);
if (valp == 0) {
// missed entry
return 0;
}
    bpf_probe_read(&data.comm, sizeof(data.comm), valp->comm);
    bpf_probe_read(&data.fname, sizeof(data.fname), (void *)valp->fname);
    data.pid = valp->pid;
    data.delta = tsp - valp->ts;
    data.ts = tsp / 1000;
    data.ret = ctx->ax;
    events.perf_submit(ctx, &data, sizeof(data));
infotmp.delete(&pid);
args_filename.delete(&pid);
return 0;
}
"""
if args.pid:
bpf_text = bpf_text.replace('FILTER',
'if (pid != %s) { return 0; }' % args.pid)
else:
bpf_text = bpf_text.replace('FILTER', '')
if debug:
print(bpf_text)
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="sys_stat", fn_name="trace_entry")
b.attach_kprobe(event="sys_statfs", fn_name="trace_entry")
b.attach_kprobe(event="sys_newstat", fn_name="trace_entry")
b.attach_kretprobe(event="sys_stat", fn_name="trace_return")
b.attach_kretprobe(event="sys_statfs", fn_name="trace_return")
b.attach_kretprobe(event="sys_newstat", fn_name="trace_return")
TASK_COMM_LEN = 16 # linux/sched.h
NAME_MAX = 255 # linux/limits.h
class Data(ct.Structure):
_fields_ = [
("pid", ct.c_ulonglong),
("ts", ct.c_ulonglong),
("delta", ct.c_ulonglong),
("ret", ct.c_int),
("comm", ct.c_char * TASK_COMM_LEN),
("fname", ct.c_char * NAME_MAX)
]
start_ts = 0
prev_ts = 0
delta = 0
# header
if args.timestamp:
print("%-14s" % ("TIME(s)"), end="")
print("%-6s %-16s %4s %3s %s" % ("PID", "COMM", "FD", "ERR", "PATH"))
# process event
def print_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data)).contents
global start_ts
global prev_ts
global delta
global cont
cont = event.ret
# split return value into FD and errno columns
if event.ret >= 0:
fd_s = event.ret
err = 0
else:
fd_s = -1
err = - event.ret
if start_ts == 0:
prev_ts = start_ts
if start_ts == 1:
delta = float(delta) + (event.ts - prev_ts)
if (args.failed and (event.ret >= 0)):
start_ts = 1
prev_ts = event.ts
return
if args.timestamp:
print("%-14.9f" % (delta / 1000000), end="")
print("%-6d %-16s %4d %3d %s" % (event.pid, event.comm,
fd_s, err, event.fname))
prev_ts = event.ts
start_ts = 1
# loop with callback to print_event
b["events"].open_perf_buffer(print_event)
while 1:
b.kprobe_poll()
| apache-2.0 |
denny820909/builder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/conch/ssh/userauth.py | 16 | 30509 | # -*- test-case-name: twisted.conch.test.test_userauth -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of the ssh-userauth service.
Currently implemented authentication types are public-key and password.
Maintainer: Paul Swartz
"""
import struct, warnings
from twisted.conch import error, interfaces
from twisted.conch.ssh import keys, transport, service
from twisted.conch.ssh.common import NS, getNS
from twisted.cred import credentials
from twisted.cred.error import UnauthorizedLogin
from twisted.internet import defer, reactor
from twisted.python import failure, log
class SSHUserAuthServer(service.SSHService):
"""
A service implementing the server side of the 'ssh-userauth' service. It
is used to authenticate the user on the other side as being able to access
this server.
@ivar name: the name of this service: 'ssh-userauth'
@type name: C{str}
@ivar authenticatedWith: a list of authentication methods that have
already been used.
@type authenticatedWith: C{list}
@ivar loginTimeout: the number of seconds we wait before disconnecting
the user for taking too long to authenticate
@type loginTimeout: C{int}
@ivar attemptsBeforeDisconnect: the number of failed login attempts we
allow before disconnecting.
@type attemptsBeforeDisconnect: C{int}
@ivar loginAttempts: the number of login attempts that have been made
@type loginAttempts: C{int}
@ivar passwordDelay: the number of seconds to delay when the user gives
an incorrect password
@type passwordDelay: C{int}
@ivar interfaceToMethod: a C{dict} mapping credential interfaces to
authentication methods. The server checks to see which of the
cred interfaces have checkers and tells the client that those methods
are valid for authentication.
@type interfaceToMethod: C{dict}
@ivar supportedAuthentications: A list of the supported authentication
methods.
@type supportedAuthentications: C{list} of C{str}
@ivar user: the last username the client tried to authenticate with
@type user: C{str}
@ivar method: the current authentication method
@type method: C{str}
@ivar nextService: the service the user wants started after authentication
has been completed.
@type nextService: C{str}
@ivar portal: the L{twisted.cred.portal.Portal} we are using for
authentication
@type portal: L{twisted.cred.portal.Portal}
@ivar clock: an object with a callLater method. Stubbed out for testing.
"""
name = 'ssh-userauth'
loginTimeout = 10 * 60 * 60
    # 10 hours before we disconnect them
attemptsBeforeDisconnect = 20
# 20 login attempts before a disconnect
passwordDelay = 1 # number of seconds to delay on a failed password
clock = reactor
interfaceToMethod = {
credentials.ISSHPrivateKey : 'publickey',
credentials.IUsernamePassword : 'password',
credentials.IPluggableAuthenticationModules : 'keyboard-interactive',
}
def serviceStarted(self):
"""
Called when the userauth service is started. Set up instance
variables, check if we should allow password/keyboard-interactive
authentication (only allow if the outgoing connection is encrypted) and
set up a login timeout.
"""
self.authenticatedWith = []
self.loginAttempts = 0
self.user = None
self.nextService = None
self._pamDeferred = None
self.portal = self.transport.factory.portal
self.supportedAuthentications = []
for i in self.portal.listCredentialsInterfaces():
if i in self.interfaceToMethod:
self.supportedAuthentications.append(self.interfaceToMethod[i])
if not self.transport.isEncrypted('in'):
# don't let us transport password in plaintext
if 'password' in self.supportedAuthentications:
self.supportedAuthentications.remove('password')
if 'keyboard-interactive' in self.supportedAuthentications:
self.supportedAuthentications.remove('keyboard-interactive')
self._cancelLoginTimeout = self.clock.callLater(
self.loginTimeout,
self.timeoutAuthentication)
def serviceStopped(self):
"""
Called when the userauth service is stopped. Cancel the login timeout
if it's still going.
"""
if self._cancelLoginTimeout:
self._cancelLoginTimeout.cancel()
self._cancelLoginTimeout = None
def timeoutAuthentication(self):
"""
Called when the user has timed out on authentication. Disconnect
with a DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE message.
"""
self._cancelLoginTimeout = None
self.transport.sendDisconnect(
transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
'you took too long')
def tryAuth(self, kind, user, data):
"""
        Try to authenticate the user with the given method. Dispatches to an
auth_* method.
@param kind: the authentication method to try.
@type kind: C{str}
@param user: the username the client is authenticating with.
@type user: C{str}
@param data: authentication specific data sent by the client.
@type data: C{str}
@return: A Deferred called back if the method succeeded, or erred back
if it failed.
@rtype: C{defer.Deferred}
"""
log.msg('%s trying auth %s' % (user, kind))
if kind not in self.supportedAuthentications:
return defer.fail(
error.ConchError('unsupported authentication, failing'))
kind = kind.replace('-', '_')
f = getattr(self,'auth_%s'%kind, None)
if f:
ret = f(data)
if not ret:
return defer.fail(
                    error.ConchError('%s returned None instead of a Deferred'
% kind))
else:
return ret
return defer.fail(error.ConchError('bad auth type: %s' % kind))
def ssh_USERAUTH_REQUEST(self, packet):
"""
The client has requested authentication. Payload::
string user
string next service
string method
<authentication specific data>
@type packet: C{str}
"""
user, nextService, method, rest = getNS(packet, 3)
if user != self.user or nextService != self.nextService:
self.authenticatedWith = [] # clear auth state
self.user = user
self.nextService = nextService
self.method = method
d = self.tryAuth(method, user, rest)
if not d:
self._ebBadAuth(
failure.Failure(error.ConchError('auth returned none')))
return
d.addCallback(self._cbFinishedAuth)
d.addErrback(self._ebMaybeBadAuth)
d.addErrback(self._ebBadAuth)
return d
def _cbFinishedAuth(self, (interface, avatar, logout)):
"""
The callback when user has successfully been authenticated. For a
description of the arguments, see L{twisted.cred.portal.Portal.login}.
We start the service requested by the user.
"""
self.transport.avatar = avatar
self.transport.logoutFunction = logout
service = self.transport.factory.getService(self.transport,
self.nextService)
if not service:
raise error.ConchError('could not get next service: %s'
% self.nextService)
log.msg('%s authenticated with %s' % (self.user, self.method))
self.transport.sendPacket(MSG_USERAUTH_SUCCESS, '')
self.transport.setService(service())
def _ebMaybeBadAuth(self, reason):
"""
An intermediate errback. If the reason is
error.NotEnoughAuthentication, we send a MSG_USERAUTH_FAILURE, but
with the partial success indicator set.
@type reason: L{twisted.python.failure.Failure}
"""
reason.trap(error.NotEnoughAuthentication)
self.transport.sendPacket(MSG_USERAUTH_FAILURE,
NS(','.join(self.supportedAuthentications)) + '\xff')
def _ebBadAuth(self, reason):
"""
The final errback in the authentication chain. If the reason is
error.IgnoreAuthentication, we simply return; the authentication
method has sent its own response. Otherwise, send a failure message
and (if the method is not 'none') increment the number of login
attempts.
@type reason: L{twisted.python.failure.Failure}
"""
if reason.check(error.IgnoreAuthentication):
return
if self.method != 'none':
log.msg('%s failed auth %s' % (self.user, self.method))
if reason.check(UnauthorizedLogin):
log.msg('unauthorized login: %s' % reason.getErrorMessage())
elif reason.check(error.ConchError):
log.msg('reason: %s' % reason.getErrorMessage())
else:
log.msg(reason.getTraceback())
self.loginAttempts += 1
if self.loginAttempts > self.attemptsBeforeDisconnect:
self.transport.sendDisconnect(
transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
'too many bad auths')
return
self.transport.sendPacket(
MSG_USERAUTH_FAILURE,
NS(','.join(self.supportedAuthentications)) + '\x00')
def auth_publickey(self, packet):
"""
Public key authentication. Payload::
byte has signature
string algorithm name
string key blob
[string signature] (if has signature is True)
Create a SSHPublicKey credential and verify it using our portal.
"""
hasSig = ord(packet[0])
algName, blob, rest = getNS(packet[1:], 2)
pubKey = keys.Key.fromString(blob)
signature = hasSig and getNS(rest)[0] or None
if hasSig:
b = (NS(self.transport.sessionID) + chr(MSG_USERAUTH_REQUEST) +
NS(self.user) + NS(self.nextService) + NS('publickey') +
chr(hasSig) + NS(pubKey.sshType()) + NS(blob))
c = credentials.SSHPrivateKey(self.user, algName, blob, b,
signature)
return self.portal.login(c, None, interfaces.IConchUser)
else:
c = credentials.SSHPrivateKey(self.user, algName, blob, None, None)
return self.portal.login(c, None,
interfaces.IConchUser).addErrback(self._ebCheckKey,
packet[1:])
def _ebCheckKey(self, reason, packet):
"""
        Called back if the user did not send a signature. If reason is
error.ValidPublicKey then this key is valid for the user to
authenticate with. Send MSG_USERAUTH_PK_OK.
"""
reason.trap(error.ValidPublicKey)
# if we make it here, it means that the publickey is valid
self.transport.sendPacket(MSG_USERAUTH_PK_OK, packet)
return failure.Failure(error.IgnoreAuthentication())
def auth_password(self, packet):
"""
Password authentication. Payload::
string password
Make a UsernamePassword credential and verify it with our portal.
"""
password = getNS(packet[1:])[0]
c = credentials.UsernamePassword(self.user, password)
return self.portal.login(c, None, interfaces.IConchUser).addErrback(
self._ebPassword)
def _ebPassword(self, f):
"""
If the password is invalid, wait before sending the failure in order
to delay brute-force password guessing.
"""
d = defer.Deferred()
self.clock.callLater(self.passwordDelay, d.callback, f)
return d
def auth_keyboard_interactive(self, packet):
"""
Keyboard interactive authentication. No payload. We create a
PluggableAuthenticationModules credential and authenticate with our
portal.
"""
if self._pamDeferred is not None:
self.transport.sendDisconnect(
transport.DISCONNECT_PROTOCOL_ERROR,
"only one keyboard interactive attempt at a time")
return defer.fail(error.IgnoreAuthentication())
c = credentials.PluggableAuthenticationModules(self.user,
self._pamConv)
return self.portal.login(c, None, interfaces.IConchUser)
def _pamConv(self, items):
"""
Convert a list of PAM authentication questions into a
MSG_USERAUTH_INFO_REQUEST. Returns a Deferred that will be called
back when the user has responses to the questions.
@param items: a list of 2-tuples (message, kind). We only care about
kinds 1 (password) and 2 (text).
@type items: C{list}
@rtype: L{defer.Deferred}
"""
resp = []
for message, kind in items:
if kind == 1: # password
resp.append((message, 0))
elif kind == 2: # text
resp.append((message, 1))
elif kind in (3, 4):
return defer.fail(error.ConchError(
'cannot handle PAM 3 or 4 messages'))
else:
return defer.fail(error.ConchError(
'bad PAM auth kind %i' % kind))
packet = NS('') + NS('') + NS('')
packet += struct.pack('>L', len(resp))
for prompt, echo in resp:
packet += NS(prompt)
packet += chr(echo)
self.transport.sendPacket(MSG_USERAUTH_INFO_REQUEST, packet)
self._pamDeferred = defer.Deferred()
return self._pamDeferred
def ssh_USERAUTH_INFO_RESPONSE(self, packet):
"""
        The user has responded with answers to PAM's authentication questions.
Parse the packet into a PAM response and callback self._pamDeferred.
Payload::
            uint32 number of responses
string response 1
...
string response n
"""
d, self._pamDeferred = self._pamDeferred, None
try:
resp = []
numResps = struct.unpack('>L', packet[:4])[0]
packet = packet[4:]
while len(resp) < numResps:
response, packet = getNS(packet)
resp.append((response, 0))
if packet:
raise error.ConchError("%i bytes of extra data" % len(packet))
except:
d.errback(failure.Failure())
else:
d.callback(resp)
class SSHUserAuthClient(service.SSHService):
"""
A service implementing the client side of 'ssh-userauth'.
@ivar name: the name of this service: 'ssh-userauth'
@type name: C{str}
@ivar preferredOrder: a list of authentication methods we support, in
order of preference. The client will try authentication methods in
this order, making callbacks for information when necessary.
@type preferredOrder: C{list}
@ivar user: the name of the user to authenticate as
@type user: C{str}
@ivar instance: the service to start after authentication has finished
@type instance: L{service.SSHService}
@ivar authenticatedWith: a list of strings of authentication methods we've tried
@type authenticatedWith: C{list} of C{str}
@ivar triedPublicKeys: a list of public key objects that we've tried to
authenticate with
@type triedPublicKeys: C{list} of L{Key}
@ivar lastPublicKey: the last public key object we've tried to authenticate
with
@type lastPublicKey: L{Key}
"""
name = 'ssh-userauth'
preferredOrder = ['publickey', 'password', 'keyboard-interactive']
def __init__(self, user, instance):
self.user = user
self.instance = instance
def serviceStarted(self):
self.authenticatedWith = []
self.triedPublicKeys = []
self.lastPublicKey = None
self.askForAuth('none', '')
def askForAuth(self, kind, extraData):
"""
Send a MSG_USERAUTH_REQUEST.
@param kind: the authentication method to try.
@type kind: C{str}
@param extraData: method-specific data to go in the packet
@type extraData: C{str}
"""
self.lastAuth = kind
self.transport.sendPacket(MSG_USERAUTH_REQUEST, NS(self.user) +
NS(self.instance.name) + NS(kind) + extraData)
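    # Illustrative wire layout (not part of the original source): askForAuth('none', '')
    # sends a MSG_USERAUTH_REQUEST whose payload is NS(user) + NS(service) + NS('none'),
    # where each NS() is a length-prefixed SSH string.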
def tryAuth(self, kind):
"""
Dispatch to an authentication method.
@param kind: the authentication method
@type kind: C{str}
"""
kind = kind.replace('-', '_')
log.msg('trying to auth with %s' % (kind,))
f = getattr(self,'auth_%s' % (kind,), None)
if f:
return f()
def _ebAuth(self, ignored, *args):
"""
Generic callback for a failed authentication attempt. Respond by
asking for the list of accepted methods (the 'none' method)
"""
self.askForAuth('none', '')
def ssh_USERAUTH_SUCCESS(self, packet):
"""
We received a MSG_USERAUTH_SUCCESS. The server has accepted our
authentication, so start the next service.
"""
self.transport.setService(self.instance)
def ssh_USERAUTH_FAILURE(self, packet):
"""
We received a MSG_USERAUTH_FAILURE. Payload::
string methods
byte partial success
If partial success is C{True}, then the previous method succeeded but is
        not sufficient for authentication. C{methods} is a comma-separated list
of accepted authentication methods.
We sort the list of methods by their position in C{self.preferredOrder},
removing methods that have already succeeded. We then call
C{self.tryAuth} with the most preferred method.
@param packet: the L{MSG_USERAUTH_FAILURE} payload.
@type packet: C{str}
@return: a L{defer.Deferred} that will be callbacked with C{None} as
soon as all authentication methods have been tried, or C{None} if no
more authentication methods are available.
@rtype: C{defer.Deferred} or C{None}
"""
canContinue, partial = getNS(packet)
partial = ord(partial)
if partial:
self.authenticatedWith.append(self.lastAuth)
def orderByPreference(meth):
"""
Invoked once per authentication method in order to extract a
comparison key which is then used for sorting.
@param meth: the authentication method.
@type meth: C{str}
@return: the comparison key for C{meth}.
@rtype: C{int}
"""
if meth in self.preferredOrder:
return self.preferredOrder.index(meth)
else:
# put the element at the end of the list.
return len(self.preferredOrder)
canContinue = sorted([meth for meth in canContinue.split(',')
if meth not in self.authenticatedWith],
key=orderByPreference)
log.msg('can continue with: %s' % canContinue)
return self._cbUserauthFailure(None, iter(canContinue))
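    # Illustrative ordering (not part of the original source): if the server returns
    # "keyboard-interactive,password,publickey" and nothing has succeeded yet,
    # canContinue is sorted to ['publickey', 'password', 'keyboard-interactive'] and
    # _cbUserauthFailure below tries each method in that order.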
def _cbUserauthFailure(self, result, iterator):
if result:
return
try:
method = iterator.next()
except StopIteration:
self.transport.sendDisconnect(
transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
'no more authentication methods available')
else:
d = defer.maybeDeferred(self.tryAuth, method)
d.addCallback(self._cbUserauthFailure, iterator)
return d
def ssh_USERAUTH_PK_OK(self, packet):
"""
This message (number 60) can mean several different messages depending
on the current authentication type. We dispatch to individual methods
in order to handle this request.
"""
func = getattr(self, 'ssh_USERAUTH_PK_OK_%s' %
self.lastAuth.replace('-', '_'), None)
if func is not None:
return func(packet)
else:
self.askForAuth('none', '')
def ssh_USERAUTH_PK_OK_publickey(self, packet):
"""
        This is MSG_USERAUTH_PK_OK. Our public key is valid, so we create a
signature and try to authenticate with it.
"""
publicKey = self.lastPublicKey
b = (NS(self.transport.sessionID) + chr(MSG_USERAUTH_REQUEST) +
NS(self.user) + NS(self.instance.name) + NS('publickey') +
'\x01' + NS(publicKey.sshType()) + NS(publicKey.blob()))
d = self.signData(publicKey, b)
if not d:
self.askForAuth('none', '')
# this will fail, we'll move on
return
d.addCallback(self._cbSignedData)
d.addErrback(self._ebAuth)
def ssh_USERAUTH_PK_OK_password(self, packet):
"""
This is MSG_USERAUTH_PASSWD_CHANGEREQ. The password given has expired.
We ask for an old password and a new password, then send both back to
the server.
"""
prompt, language, rest = getNS(packet, 2)
self._oldPass = self._newPass = None
d = self.getPassword('Old Password: ')
d = d.addCallbacks(self._setOldPass, self._ebAuth)
d.addCallback(lambda ignored: self.getPassword(prompt))
d.addCallbacks(self._setNewPass, self._ebAuth)
def ssh_USERAUTH_PK_OK_keyboard_interactive(self, packet):
"""
        This is MSG_USERAUTH_INFO_REQUEST. The server has sent us the
        questions it wants us to answer, so we ask the user and send the
responses.
"""
name, instruction, lang, data = getNS(packet, 3)
numPrompts = struct.unpack('!L', data[:4])[0]
data = data[4:]
prompts = []
for i in range(numPrompts):
prompt, data = getNS(data)
echo = bool(ord(data[0]))
data = data[1:]
prompts.append((prompt, echo))
d = self.getGenericAnswers(name, instruction, prompts)
d.addCallback(self._cbGenericAnswers)
d.addErrback(self._ebAuth)
def _cbSignedData(self, signedData):
"""
Called back out of self.signData with the signed data. Send the
authentication request with the signature.
@param signedData: the data signed by the user's private key.
@type signedData: C{str}
"""
publicKey = self.lastPublicKey
self.askForAuth('publickey', '\x01' + NS(publicKey.sshType()) +
NS(publicKey.blob()) + NS(signedData))
def _setOldPass(self, op):
"""
Called back when we are choosing a new password. Simply store the old
password for now.
@param op: the old password as entered by the user
@type op: C{str}
"""
self._oldPass = op
def _setNewPass(self, np):
"""
Called back when we are choosing a new password. Get the old password
and send the authentication message with both.
@param np: the new password as entered by the user
@type np: C{str}
"""
op = self._oldPass
self._oldPass = None
self.askForAuth('password', '\xff' + NS(op) + NS(np))
def _cbGenericAnswers(self, responses):
"""
Called back when we are finished answering keyboard-interactive
questions. Send the info back to the server in a
MSG_USERAUTH_INFO_RESPONSE.
@param responses: a list of C{str} responses
@type responses: C{list}
"""
data = struct.pack('!L', len(responses))
for r in responses:
data += NS(r.encode('UTF8'))
self.transport.sendPacket(MSG_USERAUTH_INFO_RESPONSE, data)
def auth_publickey(self):
"""
Try to authenticate with a public key. Ask the user for a public key;
if the user has one, send the request to the server and return True.
Otherwise, return False.
@rtype: C{bool}
"""
d = defer.maybeDeferred(self.getPublicKey)
d.addBoth(self._cbGetPublicKey)
return d
def _cbGetPublicKey(self, publicKey):
if isinstance(publicKey, str):
warnings.warn("Returning a string from "
"SSHUserAuthClient.getPublicKey() is deprecated "
"since Twisted 9.0. Return a keys.Key() instead.",
DeprecationWarning)
publicKey = keys.Key.fromString(publicKey)
if not isinstance(publicKey, keys.Key): # failure or None
publicKey = None
if publicKey is not None:
self.lastPublicKey = publicKey
self.triedPublicKeys.append(publicKey)
log.msg('using key of type %s' % publicKey.type())
self.askForAuth('publickey', '\x00' + NS(publicKey.sshType()) +
NS(publicKey.blob()))
return True
else:
return False
def auth_password(self):
"""
Try to authenticate with a password. Ask the user for a password.
If the user will return a password, return True. Otherwise, return
False.
@rtype: C{bool}
"""
d = self.getPassword()
if d:
d.addCallbacks(self._cbPassword, self._ebAuth)
return True
else: # returned None, don't do password auth
return False
def auth_keyboard_interactive(self):
"""
Try to authenticate with keyboard-interactive authentication. Send
the request to the server and return True.
@rtype: C{bool}
"""
log.msg('authing with keyboard-interactive')
self.askForAuth('keyboard-interactive', NS('') + NS(''))
return True
def _cbPassword(self, password):
"""
Called back when the user gives a password. Send the request to the
server.
@param password: the password the user entered
@type password: C{str}
"""
self.askForAuth('password', '\x00' + NS(password))
def signData(self, publicKey, signData):
"""
Sign the given data with the given public key.
By default, this will call getPrivateKey to get the private key,
then sign the data using Key.sign().
This method is factored out so that it can be overridden to use
alternate methods, such as a key agent.
@param publicKey: The public key object returned from L{getPublicKey}
@type publicKey: L{keys.Key}
@param signData: the data to be signed by the private key.
@type signData: C{str}
@return: a Deferred that's called back with the signature
@rtype: L{defer.Deferred}
"""
key = self.getPrivateKey()
if not key:
return
return key.addCallback(self._cbSignData, signData)
def _cbSignData(self, privateKey, signData):
"""
Called back when the private key is returned. Sign the data and
return the signature.
@param privateKey: the private key object
        @type privateKey: L{keys.Key}
@param signData: the data to be signed by the private key.
@type signData: C{str}
@return: the signature
@rtype: C{str}
"""
if not isinstance(privateKey, keys.Key):
warnings.warn("Returning a PyCrypto key object from "
"SSHUserAuthClient.getPrivateKey() is deprecated "
"since Twisted 9.0. Return a keys.Key() instead.",
DeprecationWarning)
privateKey = keys.Key(privateKey)
return privateKey.sign(signData)
def getPublicKey(self):
"""
Return a public key for the user. If no more public keys are
available, return C{None}.
This implementation always returns C{None}. Override it in a
subclass to actually find and return a public key object.
@rtype: L{Key} or L{NoneType}
"""
return None
def getPrivateKey(self):
"""
Return a L{Deferred} that will be called back with the private key
object corresponding to the last public key from getPublicKey().
If the private key is not available, errback on the Deferred.
@rtype: L{Deferred} called back with L{Key}
"""
return defer.fail(NotImplementedError())
def getPassword(self, prompt = None):
"""
Return a L{Deferred} that will be called back with a password.
prompt is a string to display for the password, or None for a generic
'user@hostname's password: '.
@type prompt: C{str}/C{None}
@rtype: L{defer.Deferred}
"""
return defer.fail(NotImplementedError())
def getGenericAnswers(self, name, instruction, prompts):
"""
        Returns a L{Deferred} with the responses to the prompts.
@param name: The name of the authentication currently in progress.
@param instruction: Describes what the authentication wants.
@param prompts: A list of (prompt, echo) pairs, where prompt is a
string to display and echo is a boolean indicating whether the
user's response should be echoed as they type it.
"""
return defer.fail(NotImplementedError())
MSG_USERAUTH_REQUEST = 50
MSG_USERAUTH_FAILURE = 51
MSG_USERAUTH_SUCCESS = 52
MSG_USERAUTH_BANNER = 53
MSG_USERAUTH_INFO_RESPONSE = 61
MSG_USERAUTH_PK_OK = 60
messages = {}
for k, v in locals().items():
if k[:4]=='MSG_':
messages[v] = k
SSHUserAuthServer.protocolMessages = messages
SSHUserAuthClient.protocolMessages = messages
del messages
del v
# Doubles, not included in the protocols' mappings
MSG_USERAUTH_PASSWD_CHANGEREQ = 60
MSG_USERAUTH_INFO_REQUEST = 60
| mit |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.8.36-1/roles/lib_utils/library/oo_ec2_group.py | 26 | 37438 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: skip-file
# flake8: noqa
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_group
author: "Andrew de Quincey (@adq)"
version_added: "1.3"
requirements: [ boto3 ]
short_description: maintain an ec2 VPC security group.
description:
- maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the security group.
- One of and only one of I(name) or I(group_id) is required.
- Required if I(state=present).
required: false
group_id:
description:
- Id of group to delete (works only with absent).
- One of and only one of I(name) or I(group_id) is required.
required: false
version_added: "2.4"
description:
description:
- Description of the security group. Required when C(state) is C(present).
required: false
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see example). If none are supplied,
no inbound rules will be enabled. Rules list may include its own name in `group_name`.
This allows idempotent loopback additions (e.g. allow group to access itself).
Rule sources list support was added in version 2.4. This allows to define multiple sources per
source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed.
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example). If none are supplied,
a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
Rule Egress sources list support was added in version 2.4.
required: false
version_added: "1.6"
state:
version_added: "1.4"
description:
- Create or delete a security group
required: false
default: 'present'
choices: [ "present", "absent" ]
aliases: []
purge_rules:
version_added: "1.8"
description:
- Purge existing rules on security group that are not found in rules
required: false
default: 'true'
aliases: []
purge_rules_egress:
version_added: "1.8"
description:
- Purge existing rules_egress on security group that are not found in rules_egress
required: false
default: 'true'
aliases: []
tags:
version_added: "2.4"
description:
- A dictionary of one or more tags to assign to the security group.
required: false
purge_tags:
version_added: "2.4"
description:
- If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
tags will not be modified.
required: false
default: yes
choices: [ 'yes', 'no' ]
extends_documentation_fragment:
- aws
- ec2
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
'''
EXAMPLES = '''
- name: example ec2 group
ec2_group:
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: tcp
from_port: 443
to_port: 443
group_id: amazon-elb/sg-87654321/amazon-elb-sg
- proto: tcp
from_port: 3306
to_port: 3306
group_id: 123412341234/sg-87654321/exact-name-of-sg
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: icmp
from_port: 8 # icmp type, -1 = any type
to_port: -1 # icmp subtype, -1 = any subtype
cidr_ip: 10.0.0.0/8
- proto: all
# the containing group name may be specified here
group_name: example
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
cidr_ipv6: 64:ff9b::/96
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
- name: example2 ec2 group
ec2_group:
name: example2
description: an example2 EC2 group
vpc_id: 12345
region: eu-west-1
rules:
# 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
- proto: tcp
ports: 22
group_name: example-vpn
- proto: tcp
ports:
- 80
- 443
- 8080-8099
cidr_ip: 0.0.0.0/0
# Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
- proto: tcp
ports:
- 6379
- 26379
group_name:
- example-vpn
- example-redis
- proto: tcp
ports: 5665
group_name: example-vpn
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
cidr_ipv6:
- 2607:F8B0::/32
- 64:ff9b::/96
group_id:
- sg-edcd9784
- name: "Delete group by its id"
ec2_group:
group_id: sg-33b4ee5b
state: absent
'''
RETURN = '''
group_name:
description: Security group name
sample: My Security Group
type: string
returned: on create/update
group_id:
description: Security group id
sample: sg-abcd1234
type: string
returned: on create/update
description:
description: Description of security group
sample: My Security Group
type: string
returned: on create/update
tags:
description: Tags associated with the security group
sample:
Name: My Security Group
Purpose: protecting stuff
type: dict
returned: on create/update
vpc_id:
description: ID of VPC to which the security group belongs
sample: vpc-abcd1234
type: string
returned: on create/update
ip_permissions:
description: Inbound rules associated with the security group.
sample:
- from_port: 8182
ip_protocol: tcp
ip_ranges:
- cidr_ip: "1.1.1.1/32"
ipv6_ranges: []
prefix_list_ids: []
to_port: 8182
user_id_group_pairs: []
type: list
returned: on create/update
ip_permissions_egress:
description: Outbound rules associated with the security group.
sample:
- ip_protocol: -1
ip_ranges:
- cidr_ip: "0.0.0.0/0"
ipv6_ranges: []
prefix_list_ids: []
user_id_group_pairs: []
type: list
returned: on create/update
owner_id:
description: AWS Account ID of the security group
sample: 123456789012
type: int
returned: on create/update
'''
import json
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn
from ansible.module_utils.ec2 import get_aws_connection_info
from ansible.module_utils.ec2 import ec2_argument_spec
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.ec2 import HAS_BOTO3
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
from ansible.module_utils.ec2 import AWSRetry
import traceback
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_security_groups_with_backoff(connection, **kwargs):
return connection.describe_security_groups(**kwargs)
def deduplicate_rules_args(rules):
"""Returns unique rules"""
if rules is None:
return None
return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
def make_rule_key(prefix, rule, group_id, cidr_ip):
if 'proto' in rule:
proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
elif 'IpProtocol' in rule:
proto, from_port, to_port = [rule.get(x, None) for x in ('IpProtocol', 'FromPort', 'ToPort')]
if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
from_port = 'none'
to_port = 'none'
key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
return key.lower().replace('-none', '-None')
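# Illustrative example (not part of the original module): for
# rule = {'proto': 'tcp', 'from_port': 80, 'to_port': 80},
# make_rule_key('in', rule, 'sg-12345678', '10.0.0.0/8') yields
# 'in-tcp-80-80-sg-12345678-10.0.0.0/8'.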
def add_rules_to_lookup(ipPermissions, group_id, prefix, dict):
for rule in ipPermissions:
for groupGrant in rule.get('UserIdGroupPairs', []):
dict[make_rule_key(prefix, rule, group_id, groupGrant.get('GroupId'))] = (rule, groupGrant)
for ipv4Grants in rule.get('IpRanges', []):
dict[make_rule_key(prefix, rule, group_id, ipv4Grants.get('CidrIp'))] = (rule, ipv4Grants)
for ipv6Grants in rule.get('Ipv6Ranges', []):
dict[make_rule_key(prefix, rule, group_id, ipv6Grants.get('CidrIpv6'))] = (rule, ipv6Grants)
def validate_rule(module, rule):
VALID_PARAMS = ('cidr_ip', 'cidr_ipv6',
'group_id', 'group_name', 'group_desc',
'proto', 'from_port', 'to_port')
if not isinstance(rule, dict):
module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
for k in rule:
if k not in VALID_PARAMS:
module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_id OR cidr_ip, not both')
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_name OR cidr_ip, not both')
elif 'group_id' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
elif 'group_name' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg='Specify group_id OR group_name, not both')
def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
"""
    Returns tuple of (group_id, ip, ipv6, target_group_created) after validating rule params.
rule: Dict describing a rule.
name: Name of the security group being managed.
groups: Dict of all available security groups.
AWS accepts an ip range or a security group as target of a rule. This
    function validates the rule specification and returns either a non-None
group_id or a non-None ip range.
"""
    FOREIGN_SECURITY_GROUP_REGEX = r'^(\S+)/(sg-\S+)/(\S+)'
group_id = None
group_name = None
ip = None
ipv6 = None
target_group_created = False
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_id OR cidr_ip, not both")
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
elif 'group_id' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
elif 'group_name' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg="Specify group_id OR group_name, not both")
elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
elif rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
# this is a foreign Security Group. Since you can't fetch it you must create an instance of it
owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
group_instance = dict(GroupId=group_id, GroupName=group_name)
groups[group_id] = group_instance
groups[group_name] = group_instance
elif 'group_id' in rule:
group_id = rule['group_id']
elif 'group_name' in rule:
group_name = rule['group_name']
if group_name == name:
group_id = group['GroupId']
groups[group_id] = group
groups[group_name] = group
elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
# both are VPC groups, this is ok
group_id = groups[group_name]['GroupId']
elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
# both are EC2 classic, this is ok
group_id = groups[group_name]['GroupId']
else:
# if we got here, either the target group does not exist, or there
# is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
# is bad, so we have to create a new SG because no compatible group
# exists
if not rule.get('group_desc', '').strip():
module.fail_json(msg="group %s will be automatically created by rule %s and "
"no description was provided" % (group_name, rule))
if not module.check_mode:
params = dict(GroupName=group_name, Description=rule['group_desc'])
if vpc_id:
params['VpcId'] = vpc_id
auto_group = client.create_security_group(**params)
group_id = auto_group['GroupId']
groups[group_id] = auto_group
groups[group_name] = auto_group
target_group_created = True
elif 'cidr_ip' in rule:
ip = rule['cidr_ip']
elif 'cidr_ipv6' in rule:
ipv6 = rule['cidr_ipv6']
return group_id, ip, ipv6, target_group_created
def ports_expand(ports):
# takes a list of ports and returns a list of (port_from, port_to)
ports_expanded = []
for port in ports:
if not isinstance(port, str):
ports_expanded.append((port,) * 2)
elif '-' in port:
ports_expanded.append(tuple(p.strip() for p in port.split('-', 1)))
else:
ports_expanded.append((port.strip(),) * 2)
return ports_expanded
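# Illustrative example (not part of the original module):
# ports_expand([22, '80-90']) returns [(22, 22), ('80', '90')] -- integers are
# duplicated into a (from, to) pair and string ranges are split on '-'.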
def rule_expand_ports(rule):
# takes a rule dict and returns a list of expanded rule dicts
if 'ports' not in rule:
return [rule]
ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
rule_expanded = []
for from_to in ports_expand(ports):
temp_rule = rule.copy()
del temp_rule['ports']
temp_rule['from_port'], temp_rule['to_port'] = from_to
rule_expanded.append(temp_rule)
return rule_expanded
def rules_expand_ports(rules):
# takes a list of rules and expands it based on 'ports'
if not rules:
return rules
return [rule for rule_complex in rules
for rule in rule_expand_ports(rule_complex)]
def rule_expand_source(rule, source_type):
# takes a rule dict and returns a list of expanded rule dicts for specified source_type
sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name')
rule_expanded = []
for source in sources:
temp_rule = rule.copy()
for s in source_types_all:
temp_rule.pop(s, None)
temp_rule[source_type] = source
rule_expanded.append(temp_rule)
return rule_expanded
def rule_expand_sources(rule):
    # takes a rule dict and returns a list of expanded rule dicts
source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') if stype in rule)
return [r for stype in source_types
for r in rule_expand_source(rule, stype)]
def rules_expand_sources(rules):
# takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
if not rules:
return rules
return [rule for rule_complex in rules
for rule in rule_expand_sources(rule_complex)]
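# Illustrative example (not part of the original module): a single rule such as
# {'proto': 'tcp', 'ports': 22, 'cidr_ip': ['10.0.0.0/8', '172.16.0.0/12']} is first
# expanded by rules_expand_ports() to from_port/to_port 22 and then by
# rules_expand_sources() into two rules, one per CIDR block.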
def authorize_ip(type, changed, client, group, groupRules,
ip, ip_permission, module, rule, ethertype):
# If rule already exists, don't later delete it
for thisip in ip:
rule_id = make_rule_key(type, rule, group['GroupId'], thisip)
if rule_id in groupRules:
del groupRules[rule_id]
else:
if not module.check_mode:
ip_permission = serialize_ip_grant(rule, thisip, ethertype)
if ip_permission:
try:
if type == "in":
client.authorize_security_group_ingress(GroupId=group['GroupId'],
IpPermissions=[ip_permission])
elif type == "out":
client.authorize_security_group_egress(GroupId=group['GroupId'],
IpPermissions=[ip_permission])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to authorize %s for ip %s security group '%s' - %s" %
(type, thisip, group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
return changed, ip_permission
def serialize_group_grant(group_id, rule):
permission = {'IpProtocol': rule['proto'],
'FromPort': rule['from_port'],
'ToPort': rule['to_port'],
'UserIdGroupPairs': [{'GroupId': group_id}]}
return fix_port_and_protocol(permission)
def serialize_revoke(grant, rule):
permission = dict()
fromPort = rule['FromPort'] if 'FromPort' in rule else None
toPort = rule['ToPort'] if 'ToPort' in rule else None
if 'GroupId' in grant:
permission = {'IpProtocol': rule['IpProtocol'],
'FromPort': fromPort,
'ToPort': toPort,
'UserIdGroupPairs': [{'GroupId': grant['GroupId']}]
}
elif 'CidrIp' in grant:
permission = {'IpProtocol': rule['IpProtocol'],
'FromPort': fromPort,
'ToPort': toPort,
'IpRanges': [grant]
}
elif 'CidrIpv6' in grant:
permission = {'IpProtocol': rule['IpProtocol'],
'FromPort': fromPort,
'ToPort': toPort,
'Ipv6Ranges': [grant]
}
return fix_port_and_protocol(permission)
def serialize_ip_grant(rule, thisip, ethertype):
permission = {'IpProtocol': rule['proto'],
'FromPort': rule['from_port'],
'ToPort': rule['to_port']}
if ethertype == "ipv4":
permission['IpRanges'] = [{'CidrIp': thisip}]
elif ethertype == "ipv6":
permission['Ipv6Ranges'] = [{'CidrIpv6': thisip}]
return fix_port_and_protocol(permission)
def fix_port_and_protocol(permission):
for key in ['FromPort', 'ToPort']:
if key in permission:
if permission[key] is None:
del permission[key]
else:
permission[key] = int(permission[key])
permission['IpProtocol'] = str(permission['IpProtocol'])
return permission
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(),
group_id=dict(),
description=dict(),
vpc_id=dict(),
rules=dict(type='list'),
rules_egress=dict(type='list'),
state=dict(default='present', type='str', choices=['present', 'absent']),
purge_rules=dict(default=True, required=False, type='bool'),
purge_rules_egress=dict(default=True, required=False, type='bool'),
tags=dict(required=False, type='dict', aliases=['resource_tags']),
purge_tags=dict(default=True, required=False, type='bool')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['name', 'group_id']],
required_if=[['state', 'present', ['name']]],
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
name = module.params['name']
group_id = module.params['group_id']
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
state = module.params.get('state')
purge_rules = module.params['purge_rules']
purge_rules_egress = module.params['purge_rules_egress']
tags = module.params['tags']
purge_tags = module.params['purge_tags']
if state == 'present' and not description:
module.fail_json(msg='Must provide description when state is present.')
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="The AWS region must be specified as an "
"environment variable or in the AWS credentials "
"profile.")
client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params)
group = None
groups = dict()
security_groups = []
    # get all existing security groups
    # and determine whether the requested group is already present
try:
response = get_security_groups_with_backoff(client)
security_groups = response.get('SecurityGroups', [])
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials", exception=traceback.format_exc())
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Error in describe_security_groups: %s" % e, exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
for sg in security_groups:
groups[sg['GroupId']] = sg
groupName = sg['GroupName']
if groupName in groups:
# Prioritise groups from the current VPC
# even if current VPC is EC2-Classic
if groups[groupName].get('VpcId') == vpc_id:
# Group saved already matches current VPC, change nothing
pass
elif vpc_id is None and groups[groupName].get('VpcId') is None:
# We're in EC2 classic, and the group already saved is as well
# No VPC groups can be used alongside EC2 classic groups
pass
else:
# the current SG stored has no direct match, so we can replace it
groups[groupName] = sg
else:
groups[groupName] = sg
if group_id and sg['GroupId'] == group_id:
group = sg
elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id):
group = sg
# Ensure requested group is absent
if state == 'absent':
if group:
# found a match, delete it
try:
if not module.check_mode:
client.delete_security_group(GroupId=group['GroupId'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
else:
group = None
changed = True
else:
# no match found, no changes required
pass
# Ensure requested group is present
elif state == 'present':
if group:
# existing group
if group['Description'] != description:
module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
"and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
# if the group doesn't exist, create it now
else:
# no match found, create it
if not module.check_mode:
params = dict(GroupName=name, Description=description)
if vpc_id:
params['VpcId'] = vpc_id
group = client.create_security_group(**params)
# When a group is created, an egress_rule ALLOW ALL
# to 0.0.0.0/0 is added automatically but it's not
# reflected in the object returned by the AWS API
                # call. We re-read the group to get an updated object.
                # Amazon sometimes takes a couple of seconds to update the security group, so wait until it is available.
while True:
group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
if group.get('VpcId') and not group.get('IpPermissionsEgress'):
pass
else:
break
changed = True
if tags is not None:
current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
if tags_to_delete:
try:
client.delete_tags(Resources=[group['GroupId']], Tags=[{'Key': tag} for tag in tags_to_delete])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
# Add/update tags
if tags_need_modify:
try:
client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
except botocore.exceptions.ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
else:
module.fail_json(msg="Unsupported state requested: %s" % state)
# create a lookup for all existing rules on the group
ip_permission = []
if group:
# Manage ingress rules
groupRules = {}
add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules is not None:
for rule in rules:
validate_rule(module, rule)
group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
if group_id:
rule_id = make_rule_key('in', rule, group['GroupId'], group_id)
if rule_id in groupRules:
del groupRules[rule_id]
else:
if not module.check_mode:
ip_permission = serialize_group_grant(group_id, rule)
if ip_permission:
ips = ip_permission
if vpc_id:
[useridpair.update({'VpcId': vpc_id}) for useridpair in
ip_permission.get('UserIdGroupPairs', [])]
try:
client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips])
except botocore.exceptions.ClientError as e:
module.fail_json(
msg="Unable to authorize ingress for group %s security group '%s' - %s" %
(group_id, group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
elif ip:
# Convert ip to list we can iterate over
if ip and not isinstance(ip, list):
ip = [ip]
changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip, ip_permission,
module, rule, "ipv4")
elif ipv6:
# Convert ip to list we can iterate over
if not isinstance(ipv6, list):
ipv6 = [ipv6]
# If rule already exists, don't later delete it
changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6, ip_permission,
module, rule, "ipv6")
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules:
for (rule, grant) in groupRules.values():
ip_permission = serialize_revoke(grant, rule)
if not module.check_mode:
try:
client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
except botocore.exceptions.ClientError as e:
module.fail_json(
msg="Unable to revoke ingress for security group '%s' - %s" %
(group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
# Manage egress rules
groupRules = {}
add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules_egress is not None:
for rule in rules_egress:
validate_rule(module, rule)
group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name,
group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
if group_id:
rule_id = make_rule_key('out', rule, group['GroupId'], group_id)
if rule_id in groupRules:
del groupRules[rule_id]
else:
if not module.check_mode:
ip_permission = serialize_group_grant(group_id, rule)
if ip_permission:
ips = ip_permission
if vpc_id:
[useridpair.update({'VpcId': vpc_id}) for useridpair in
ip_permission.get('UserIdGroupPairs', [])]
try:
client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips])
except botocore.exceptions.ClientError as e:
module.fail_json(
msg="Unable to authorize egress for group %s security group '%s' - %s" %
(group_id, group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
elif ip:
# Convert ip to list we can iterate over
if not isinstance(ip, list):
ip = [ip]
changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip,
ip_permission, module, rule, "ipv4")
elif ipv6:
# Convert ip to list we can iterate over
if not isinstance(ipv6, list):
ipv6 = [ipv6]
# If rule already exists, don't later delete it
changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6,
ip_permission, module, rule, "ipv6")
elif vpc_id is not None:
# when no egress rules are specified and we're in a VPC,
# we add in a default allow all out rule, which was the
# default behavior before egress rules were added
default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0'
if default_egress_rule not in groupRules:
if not module.check_mode:
ip_permission = [{'IpProtocol': '-1',
'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
}
]
try:
client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to authorize egress for ip %s security group '%s' - %s" %
('0.0.0.0/0',
group['GroupName'],
e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
else:
# make sure the default egress rule is not removed
del groupRules[default_egress_rule]
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules_egress and vpc_id is not None:
for (rule, grant) in groupRules.values():
# we shouldn't be revoking 0.0.0.0 egress
if grant != '0.0.0.0/0':
ip_permission = serialize_revoke(grant, rule)
if not module.check_mode:
try:
client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to revoke egress for ip %s security group '%s' - %s" %
(grant, group['GroupName'], e),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
if group:
security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
security_group = camel_dict_to_snake_dict(security_group)
security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []),
tag_name_key_name='key', tag_value_key_name='value')
module.exit_json(changed=changed, **security_group)
else:
module.exit_json(changed=changed, group_id=None)
if __name__ == '__main__':
main()
| apache-2.0 |
gooofy/zamia-ai | tests/bench_aiprolog.py | 3 | 1592 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Guenter Bartsch, Heiko Schaefer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# benchmark the macro engine
#
import logging
import codecs
import cProfile, pstats
from nltools import misc
from sqlalchemy.orm import sessionmaker
import model
from zamiaprolog.logicdb import LogicDB
from aiprolog.runtime import AIPrologRuntime
from aiprolog.parser import AIPrologParser
from aiprolog.nlp_macro_engine import NLPMacroEngine
def _bench_fn():
Session = sessionmaker(bind=model.engine)
session = Session()
me = NLPMacroEngine(session)
discourses = me.macro_expand("de", u"(a|b|c|d|e|f) (a|b|c|d|e|f) (a|b|c|d|e|f) (a|b|c|d|e|f) (a|b|c|d|e|f) (a|b|c|d|e|f)", u"foo @MACRO_0:TSTART_W_0 @MACRO_1:TEND_W_0?", None)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
cProfile.run('_bench_fn()', 'mestats')
p = pstats.Stats('mestats')
# p.strip_dirs().sort_stats(-1).print_stats()
p.sort_stats('cumulative').print_stats(10)
| apache-2.0 |
mykytamorachov/outpost | flask/lib/python2.7/site-packages/sqlalchemy/sql/expression.py | 17 | 193682 | # sql/expression.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the base components of SQL expression trees.
All components are derived from a common base class
:class:`.ClauseElement`. Common behaviors are organized
based on class hierarchies, in some cases via mixins.
All object construction from this package occurs via functions which
in some cases will construct composite :class:`.ClauseElement` structures
together, and in other cases simply return a single :class:`.ClauseElement`
constructed directly. The function interface affords a more "DSL-ish"
feel to constructing SQL expressions and also allows future class
reorganizations.
Even though classes are not constructed directly from the outside,
most classes which have additional public methods are considered to be
public (i.e. have no leading underscore). Other classes which are
"semi-public" are marked with a single leading underscore; these
classes usually have few or no public methods and are less guaranteed
to stay the same in future releases.
"""
import itertools, re
from operator import attrgetter
from sqlalchemy import util, exc
from sqlalchemy.sql import operators
from sqlalchemy.sql.operators import Operators, ColumnOperators
from sqlalchemy.sql.visitors import Visitable, cloned_traverse
import operator
functions = util.importlater("sqlalchemy.sql", "functions")
sqlutil = util.importlater("sqlalchemy.sql", "util")
sqltypes = util.importlater("sqlalchemy", "types")
default = util.importlater("sqlalchemy.engine", "default")
__all__ = [
'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
'except_', 'except_all', 'exists', 'extract', 'func', 'modifier',
'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label',
'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast',
'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery', 'table', 'text',
'tuple_', 'type_coerce', 'union', 'union_all', 'update', ]
PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT')
def nullsfirst(column):
"""Return a NULLS FIRST ``ORDER BY`` clause element.
e.g.::
someselect.order_by(desc(table1.mycol).nullsfirst())
produces::
ORDER BY mycol DESC NULLS FIRST
"""
return _UnaryExpression(column, modifier=operators.nullsfirst_op)
def nullslast(column):
"""Return a NULLS LAST ``ORDER BY`` clause element.
e.g.::
someselect.order_by(desc(table1.mycol).nullslast())
produces::
ORDER BY mycol DESC NULLS LAST
"""
return _UnaryExpression(column, modifier=operators.nullslast_op)
def desc(column):
"""Return a descending ``ORDER BY`` clause element.
e.g.::
someselect.order_by(desc(table1.mycol))
produces::
ORDER BY mycol DESC
"""
return _UnaryExpression(column, modifier=operators.desc_op)
def asc(column):
"""Return an ascending ``ORDER BY`` clause element.
e.g.::
someselect.order_by(asc(table1.mycol))
produces::
ORDER BY mycol ASC
"""
return _UnaryExpression(column, modifier=operators.asc_op)
def outerjoin(left, right, onclause=None):
"""Return an ``OUTER JOIN`` clause element.
The returned object is an instance of :class:`.Join`.
Similar functionality is also available via the
:meth:`~.FromClause.outerjoin()` method on any
:class:`.FromClause`.
:param left: The left side of the join.
:param right: The right side of the join.
:param onclause: Optional criterion for the ``ON`` clause, is
derived from foreign key relationships established between
left and right otherwise.
To chain joins together, use the :meth:`.FromClause.join` or
:meth:`.FromClause.outerjoin` methods on the resulting
:class:`.Join` object.
"""
return Join(left, right, onclause, isouter=True)
def join(left, right, onclause=None, isouter=False):
"""Return a ``JOIN`` clause element (regular inner join).
The returned object is an instance of :class:`.Join`.
Similar functionality is also available via the
:meth:`~.FromClause.join()` method on any
:class:`.FromClause`.
:param left: The left side of the join.
:param right: The right side of the join.
:param onclause: Optional criterion for the ``ON`` clause, is
derived from foreign key relationships established between
left and right otherwise.
To chain joins together, use the :meth:`.FromClause.join` or
:meth:`.FromClause.outerjoin` methods on the resulting
:class:`.Join` object.
"""
return Join(left, right, onclause, isouter)
def select(columns=None, whereclause=None, from_obj=[], **kwargs):
"""Returns a ``SELECT`` clause element.
Similar functionality is also available via the :func:`select()`
method on any :class:`.FromClause`.
The returned object is an instance of :class:`.Select`.
All arguments which accept :class:`.ClauseElement` arguments also accept
string arguments, which will be converted as appropriate into
either :func:`text()` or :func:`literal_column()` constructs.
See also:
:ref:`coretutorial_selecting` - Core Tutorial description of :func:`.select`.
:param columns:
A list of :class:`.ClauseElement` objects, typically
:class:`.ColumnElement` objects or subclasses, which will form the
columns clause of the resulting statement. For all members which are
instances of :class:`.Selectable`, the individual :class:`.ColumnElement`
members of the :class:`.Selectable` will be added individually to the
columns clause. For example, specifying a
:class:`~sqlalchemy.schema.Table` instance will result in all the
contained :class:`~sqlalchemy.schema.Column` objects within to be added
to the columns clause.
This argument is not present on the form of :func:`select()`
available on :class:`~sqlalchemy.schema.Table`.
:param whereclause:
A :class:`.ClauseElement` expression which will be used to form the
``WHERE`` clause.
:param from_obj:
A list of :class:`.ClauseElement` objects which will be added to the
``FROM`` clause of the resulting statement. Note that "from" objects are
automatically located within the columns and whereclause ClauseElements.
Use this parameter to explicitly specify "from" objects which are not
automatically locatable. This could include
:class:`~sqlalchemy.schema.Table` objects that aren't otherwise present,
or :class:`.Join` objects whose presence will supercede that of the
:class:`~sqlalchemy.schema.Table` objects already located in the other
clauses.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param bind=None:
an :class:`~.base.Engine` or :class:`~.base.Connection` instance
to which the
resulting :class:`.Select` object will be bound. The :class:`.Select`
object will otherwise automatically bind to whatever
:class:`~.base.Connectable` instances can be located within its contained
:class:`.ClauseElement` members.
:param correlate=True:
indicates that this :class:`.Select` object should have its
contained :class:`.FromClause` elements "correlated" to an enclosing
:class:`.Select` object. This means that any :class:`.ClauseElement`
instance within the "froms" collection of this :class:`.Select`
which is also present in the "froms" collection of an
enclosing select will not be rendered in the ``FROM`` clause
of this select statement.
:param distinct=False:
when ``True``, applies a ``DISTINCT`` qualifier to the columns
clause of the resulting statement.
The boolean argument may also be a column expression or list
of column expressions - this is a special calling form which
is understood by the Postgresql dialect to render the
``DISTINCT ON (<columns>)`` syntax.
``distinct`` is also available via the :meth:`~.Select.distinct`
generative method.
.. note::
The ``distinct`` keyword's acceptance of a string
argument for usage with MySQL is deprecated. Use
the ``prefixes`` argument or :meth:`~.Select.prefix_with`.
:param for_update=False:
when ``True``, applies ``FOR UPDATE`` to the end of the
resulting statement.
Certain database dialects also support
alternate values for this parameter:
* With the MySQL dialect, the value ``"read"`` translates to
``LOCK IN SHARE MODE``.
* With the Oracle and Postgresql dialects, the value ``"nowait"``
translates to ``FOR UPDATE NOWAIT``.
* With the Postgresql dialect, the values "read" and ``"read_nowait"``
translate to ``FOR SHARE`` and ``FOR SHARE NOWAIT``, respectively.
.. versionadded:: 0.7.7
:param group_by:
a list of :class:`.ClauseElement` objects which will comprise the
``GROUP BY`` clause of the resulting select.
:param having:
a :class:`.ClauseElement` that will comprise the ``HAVING`` clause
of the resulting select when ``GROUP BY`` is used.
:param limit=None:
a numerical value which usually compiles to a ``LIMIT``
expression in the resulting select. Databases that don't
support ``LIMIT`` will attempt to provide similar
functionality.
:param offset=None:
a numeric value which usually compiles to an ``OFFSET``
expression in the resulting select. Databases that don't
support ``OFFSET`` will attempt to provide similar
functionality.
:param order_by:
a scalar or list of :class:`.ClauseElement` objects which will
comprise the ``ORDER BY`` clause of the resulting select.
:param prefixes:
a list of strings or :class:`.ClauseElement` objects to include
directly after the SELECT keyword in the generated statement,
for dialect-specific query features. ``prefixes`` is
also available via the :meth:`~.Select.prefix_with`
generative method.
:param use_labels=False:
when ``True``, the statement will be generated using labels
for each column in the columns clause, which qualify each
column with its parent table's (or aliases) name so that name
conflicts between columns in different tables don't occur.
The format of the label is <tablename>_<column>. The "c"
collection of the resulting :class:`.Select` object will use these
names as well for targeting column members.
use_labels is also available via the :meth:`~._SelectBase.apply_labels`
generative method.
"""
return Select(columns, whereclause=whereclause, from_obj=from_obj,
**kwargs)
def subquery(alias, *args, **kwargs):
"""Return an :class:`.Alias` object derived
from a :class:`.Select`.
name
alias name
\*args, \**kwargs
all other arguments are delivered to the
:func:`select` function.
"""
return Select(*args, **kwargs).alias(alias)
def insert(table, values=None, inline=False, **kwargs):
"""Represent an ``INSERT`` statement via the :class:`.Insert` SQL
construct.
Similar functionality is available via the :meth:`~.TableClause.insert` method on
:class:`~.schema.Table`.
:param table: The table to be inserted into.
:param values: A dictionary which specifies the column specifications of
the ``INSERT``, and is optional. If left as None, the column
specifications are determined from the bind parameters used during the
compile phase of the ``INSERT`` statement. If the bind parameters also
are None during the compile phase, then the column specifications will be
generated from the full list of table columns. Note that the
:meth:`~Insert.values()` generative method may also be used for this.
:param prefixes: A list of modifier keywords to be inserted between INSERT
and INTO. Alternatively, the :meth:`~Insert.prefix_with` generative
method may be used.
:param inline: if True, SQL defaults will be compiled 'inline' into the
statement and not pre-executed.
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within `values` on a per-key basis.
The keys within `values` can be either :class:`~sqlalchemy.schema.Column`
objects or their string identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``INSERT`` statement's table, the statement will be correlated
against the ``INSERT`` statement.
See also:
:ref:`coretutorial_insert_expressions` - SQL Expression Tutorial
:ref:`inserts_and_updates` - SQL Expression Tutorial
"""
return Insert(table, values, inline=inline, **kwargs)
def update(table, whereclause=None, values=None, inline=False, **kwargs):
"""Represent an ``UPDATE`` statement via the :class:`.Update` SQL
construct.
E.g.::
from sqlalchemy import update
stmt = update(users).where(users.c.id==5).\\
values(name='user #5')
Similar functionality is available via the :meth:`~.TableClause.update` method on
:class:`.Table`::
stmt = users.update().\\
where(users.c.id==5).\\
values(name='user #5')
:param table: A :class:`.Table` object representing the database
table to be updated.
:param whereclause: Optional SQL expression describing the ``WHERE``
condition of the ``UPDATE`` statement. Modern applications
may prefer to use the generative :meth:`~Update.where()`
method to specify the ``WHERE`` clause.
The WHERE clause can refer to multiple tables.
For databases which support this, an ``UPDATE FROM`` clause will
be generated, or on MySQL, a multi-table update. The statement
will fail on databases that don't have support for multi-table
update statements. A SQL-standard method of referring to
additional tables in the WHERE clause is to use a correlated
subquery::
users.update().values(name='ed').where(
users.c.name==select([addresses.c.email_address]).\\
where(addresses.c.user_id==users.c.id).\\
as_scalar()
)
.. versionchanged:: 0.7.4
The WHERE clause can refer to multiple tables.
:param values:
Optional dictionary which specifies the ``SET`` conditions of the
``UPDATE``. If left as ``None``, the ``SET``
conditions are determined from those parameters passed to the
statement during the execution and/or compilation of the
statement. When compiled standalone without any parameters,
the ``SET`` clause generates for all columns.
Modern applications may prefer to use the generative
:meth:`.Update.values` method to set the values of the
UPDATE statement.
:param inline:
if True, SQL defaults present on :class:`.Column` objects via
the ``default`` keyword will be compiled 'inline' into the statement
and not pre-executed. This means that their values will not
be available in the dictionary returned from
:meth:`.ResultProxy.last_updated_params`.
If both ``values`` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within ``values`` on a per-key basis.
The keys within ``values`` can be either :class:`.Column`
objects or their string identifiers (specifically the "key" of the
:class:`.Column`, normally but not necessarily equivalent to
its "name"). Normally, the
:class:`.Column` objects used here are expected to be
part of the target :class:`.Table` that is the table
to be updated. However when using MySQL, a multiple-table
UPDATE statement can refer to columns from any of
the tables referred to in the WHERE clause.
The values referred to in ``values`` are typically:
* a literal data value (i.e. string, number, etc.)
* a SQL expression, such as a related :class:`.Column`,
a scalar-returning :func:`.select` construct,
etc.
When combining :func:`.select` constructs within the values
clause of an :func:`.update` construct,
the subquery represented by the :func:`.select` should be
*correlated* to the parent table, that is, providing criterion
which links the table inside the subquery to the outer table
being updated::
users.update().values(
name=select([addresses.c.email_address]).\\
where(addresses.c.user_id==users.c.id).\\
as_scalar()
)
See also:
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
"""
return Update(
table,
whereclause=whereclause,
values=values,
inline=inline,
**kwargs)
def delete(table, whereclause = None, **kwargs):
"""Represent a ``DELETE`` statement via the :class:`.Delete` SQL
construct.
Similar functionality is available via the :meth:`~.TableClause.delete` method on
:class:`~.schema.Table`.
:param table: The table to be updated.
:param whereclause: A :class:`.ClauseElement` describing the ``WHERE``
condition of the ``UPDATE`` statement. Note that the
:meth:`~Delete.where()` generative method may be used instead.
See also:
:ref:`deletes` - SQL Expression Tutorial
"""
return Delete(table, whereclause, **kwargs)
def and_(*clauses):
"""Join a list of clauses together using the ``AND`` operator.
The ``&`` operator is also overloaded on all
:class:`_CompareMixin` subclasses to produce the
same result.
"""
if len(clauses) == 1:
return clauses[0]
return BooleanClauseList(operator=operators.and_, *clauses)
def or_(*clauses):
"""Join a list of clauses together using the ``OR`` operator.
The ``|`` operator is also overloaded on all
:class:`_CompareMixin` subclasses to produce the
same result.
"""
if len(clauses) == 1:
return clauses[0]
return BooleanClauseList(operator=operators.or_, *clauses)
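# Short usage sketch (illustrative only; the column names are made up): combining
# criteria with and_() / or_(), equivalent to the overloaded & and | operators on
# column expressions.
def _example_and_or():
    criterion = or_(
        and_(column('x') == 5, column('y') == 7),
        column('z') == 9,
    )
    # renders roughly as: (x = :x_1 AND y = :y_1) OR z = :z_1
    return criterion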
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`_CompareMixin` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
def distinct(expr):
"""Return a ``DISTINCT`` clause.
e.g.::
distinct(a)
renders::
DISTINCT a
"""
expr = _literal_as_binds(expr)
return _UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type)
def between(ctest, cleft, cright):
"""Return a ``BETWEEN`` predicate clause.
Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``.
The :func:`between()` method on all
:class:`_CompareMixin` subclasses provides
similar functionality.
"""
ctest = _literal_as_binds(ctest)
return ctest.between(cleft, cright)
def case(whens, value=None, else_=None):
"""Produce a ``CASE`` statement.
whens
A sequence of pairs, or alternatively a dict,
to be translated into "WHEN / THEN" clauses.
value
Optional for simple case statements, produces
a column expression as in "CASE <expr> WHEN ..."
else\_
Optional as well, for case defaults produces
the "ELSE" portion of the "CASE" statement.
The expressions used for THEN and ELSE,
when specified as strings, will be interpreted
as bound values. To specify textual SQL expressions
for these, use the :func:`literal_column`
construct.
The expressions used for the WHEN criterion
may only be literal strings when "value" is
present, i.e. CASE table.somecol WHEN "x" THEN "y".
Otherwise, literal strings are not accepted
in this position, and either the text(<string>)
or literal(<string>) constructs must be used to
interpret raw string values.
Usage examples::
case([(orderline.c.qty > 100, item.c.specialprice),
(orderline.c.qty > 10, item.c.bulkprice)
], else_=item.c.regularprice)
case(value=emp.c.type, whens={
'engineer': emp.c.salary * 1.1,
'manager': emp.c.salary * 3,
})
Using :func:`literal_column()`, to allow for databases that
do not support bind parameters in the ``then`` clause. The type
can be specified which determines the type of the :func:`case()` construct
overall::
case([(orderline.c.qty > 100,
literal_column("'greaterthan100'", String)),
(orderline.c.qty > 10, literal_column("'greaterthan10'",
String))
            ], else_=literal_column("'lessthan10'", String))
"""
return _Case(whens, value=value, else_=else_)
def cast(clause, totype, **kwargs):
"""Return a ``CAST`` function.
Equivalent of SQL ``CAST(clause AS totype)``.
Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e::
cast(table.c.unit_price * table.c.qty, Numeric(10,4))
or::
cast(table.c.timestamp, DATE)
"""
return _Cast(clause, totype, **kwargs)
def extract(field, expr):
"""Return the clause ``extract(field FROM expr)``."""
return _Extract(field, expr)
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``.
e.g.::
collate(mycolumn, 'utf8_bin')
produces::
mycolumn COLLATE utf8_bin
"""
expr = _literal_as_binds(expression)
return _BinaryExpression(
expr,
_literal_as_text(collation),
operators.collate, type_=expr.type)
def exists(*args, **kwargs):
"""Return an ``EXISTS`` clause as applied to a :class:`.Select` object.
Calling styles are of the following forms::
# use on an existing select()
s = select([table.c.col1]).where(table.c.col2==5)
s = exists(s)
# construct a select() at once
exists(['*'], **select_arguments).where(criterion)
# columns argument is optional, generates "EXISTS (SELECT *)"
# by default.
exists().where(table.c.col2==5)
"""
return _Exists(*args, **kwargs)
def union(*selects, **kwargs):
"""Return a ``UNION`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
A similar :func:`union()` method is available on all
:class:`.FromClause` subclasses.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs)
def union_all(*selects, **kwargs):
"""Return a ``UNION ALL`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
A similar :func:`union_all()` method is available on all
:class:`.FromClause` subclasses.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs)
def except_(*selects, **kwargs):
"""Return an ``EXCEPT`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs)
def except_all(*selects, **kwargs):
"""Return an ``EXCEPT ALL`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs)
def intersect(*selects, **kwargs):
"""Return an ``INTERSECT`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs)
def intersect_all(*selects, **kwargs):
"""Return an ``INTERSECT ALL`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.INTERSECT_ALL, *selects, **kwargs)
def alias(selectable, name=None):
"""Return an :class:`.Alias` object.
An :class:`.Alias` represents any :class:`.FromClause`
with an alternate name assigned within SQL, typically using the ``AS``
clause when generated, e.g. ``SELECT * FROM table AS aliasname``.
Similar functionality is available via the
:meth:`~.FromClause.alias` method
available on all :class:`.FromClause` subclasses.
When an :class:`.Alias` is created from a :class:`.Table` object,
this has the effect of the table being rendered
as ``tablename AS aliasname`` in a SELECT statement.
For :func:`.select` objects, the effect is that of creating a named
subquery, i.e. ``(select ...) AS aliasname``.
The ``name`` parameter is optional, and provides the name
to use in the rendered SQL. If blank, an "anonymous" name
will be deterministically generated at compile time.
Deterministic means the name is guaranteed to be unique against
other constructs used in the same statement, and will also be the
same name for each successive compilation of the same statement
object.
:param selectable: any :class:`.FromClause` subclass,
such as a table, select statement, etc.
:param name: string name to be assigned as the alias.
If ``None``, a name will be deterministically generated
at compile time.
"""
return Alias(selectable, name=name)
def literal(value, type_=None):
"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non- :class:`.ClauseElement`
objects (such as strings, ints, dates, etc.) are used in a comparison
operation with a :class:`_CompareMixin`
subclass, such as a :class:`~sqlalchemy.schema.Column` object. Use this function to force the
generation of a literal clause, which will be created as a
:class:`_BindParamClause` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return _BindParamClause(None, value, type_=type_, unique=True)
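# Minimal sketch (illustrative only; the column name is made up): literal() forces
# a bound value where a plain Python object would otherwise be coerced implicitly.
def _example_literal():
    expr = literal("prefix: ") + column('description')
    # compiles to something like: :param_1 || description
    return expr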
def tuple_(*expr):
"""Return a SQL tuple.
Main usage is to produce a composite IN construct::
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
.. warning::
The composite IN construct is not supported by all backends,
and is currently known to work on Postgresql and MySQL,
but not SQLite. Unsupported backends will raise
a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
an expression is invoked.
"""
return _Tuple(*expr)
def type_coerce(expr, type_):
"""Coerce the given expression into the given type, on the Python side only.
:func:`.type_coerce` is roughly similar to :func:`.cast`, except no
"CAST" expression is rendered - the given type is only applied towards
expression typing and against received result values.
e.g.::
from sqlalchemy.types import TypeDecorator
import uuid
class AsGuid(TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
if value is not None:
return str(value)
else:
return None
def process_result_value(self, value, dialect):
if value is not None:
return uuid.UUID(value)
else:
return None
conn.execute(
select([type_coerce(mytable.c.ident, AsGuid)]).\\
where(
type_coerce(mytable.c.ident, AsGuid) ==
uuid.uuid3(uuid.NAMESPACE_URL, 'bar')
)
)
"""
if hasattr(expr, '__clause_expr__'):
return type_coerce(expr.__clause_expr__())
elif not isinstance(expr, Visitable):
if expr is None:
return null()
else:
return literal(expr, type_=type_)
else:
return _Label(None, expr, type_=type_)
def label(name, obj):
"""Return a :class:`_Label` object for the
given :class:`.ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:func:`label()` method on :class:`.ColumnElement`.
name
label name
obj
a :class:`.ColumnElement`.
"""
return _Label(name, obj)
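# Tiny usage sketch (illustrative only; names are made up): label() wraps a column
# expression so it renders with an AS clause in the columns list.
def _example_label():
    stmt = select([label('user_count', func.count(column('id')))])
    # renders roughly as: SELECT count(id) AS user_count
    return stmt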
def column(text, type_=None):
"""Return a textual column clause, as would be in the columns clause of a
``SELECT`` statement.
The object returned is an instance of :class:`.ColumnClause`, which
represents the "syntactical" portion of the schema-level
:class:`~sqlalchemy.schema.Column` object. It is often used directly
within :func:`~.expression.select` constructs or with lightweight :func:`~.expression.table`
constructs.
Note that the :func:`~.expression.column` function is not part of
the ``sqlalchemy`` namespace. It must be imported from the ``sql`` package::
from sqlalchemy.sql import table, column
:param text: the name of the column. Quoting rules will be applied
to the clause like any other column name. For textual column constructs
that are not to be quoted, use the :func:`literal_column` function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` object
which will provide result-set translation for this column.
See :class:`.ColumnClause` for further examples.
"""
return ColumnClause(text, type_=type_)
def literal_column(text, type_=None):
"""Return a textual column expression, as would be in the columns
clause of a ``SELECT`` statement.
The object returned supports further expressions in the same way as any
other column object, including comparison, math and string operations.
The type\_ parameter is important to determine proper expression behavior
(such as, '+' means string concatenation or numerical addition based on
the type).
:param text: the text of the expression; can be any SQL expression.
Quoting rules will not be applied. To specify a column-name expression
which should be subject to quoting rules, use the :func:`column`
function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
"""
return ColumnClause(text, type_=type_, is_literal=True)
def table(name, *columns):
"""Represent a textual table clause.
The object returned is an instance of :class:`.TableClause`, which represents the
"syntactical" portion of the schema-level :class:`~.schema.Table` object.
It may be used to construct lightweight table constructs.
Note that the :func:`~.expression.table` function is not part of
the ``sqlalchemy`` namespace. It must be imported from the ``sql`` package::
from sqlalchemy.sql import table, column
:param name: Name of the table.
:param columns: A collection of :func:`~.expression.column` constructs.
See :class:`.TableClause` for further examples.
"""
return TableClause(name, *columns)
def bindparam(key, value=None, type_=None, unique=False, required=False, callable_=None):
"""Create a bind parameter clause with the given key.
:param key:
the key for this bind param. Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`_BindParamClause` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. This value may be
overridden by the dictionary of parameters sent to statement
compilation/execution.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A ``TypeEngine`` object that will be used to pre-process the
value corresponding to this :class:`_BindParamClause` at
execution time.
:param unique:
if True, the key name of this BindParamClause will be
modified if another :class:`_BindParamClause` of the same name
already has been located within the containing
:class:`.ClauseElement`.
:param required:
a value is required at execution time.
"""
if isinstance(key, ColumnClause):
return _BindParamClause(key.name, value, type_=key.type,
callable_=callable_,
unique=unique, required=required)
else:
return _BindParamClause(key, value, type_=type_,
callable_=callable_,
unique=unique, required=required)
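# Usage sketch (illustrative only; table/column names are assumptions): a typed,
# named bind parameter whose value is supplied later via params().
def _example_bindparam():
    expr = column('user_id') == bindparam('uid', type_=sqltypes.Integer)
    stmt = select([column('user_name')]).where(expr)
    return stmt.params(uid=12)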
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return _BindParamClause(
key, None, type_=type_, unique=False, isoutparam=True)
def text(text, bind=None, *args, **kwargs):
"""Create a SQL construct that is represented by a literal string.
E.g.::
t = text("SELECT * FROM users")
result = connection.execute(t)
The advantages :func:`text` provides over a plain string are
backend-neutral support for bind parameters, per-statement
execution options, as well as
bind parameter and result-column typing behavior, allowing
SQLAlchemy type constructs to play a role when executing
a statement that is specified literally.
Bind parameters are specified by name, using the format ``:name``.
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
To invoke SQLAlchemy typing logic for bind parameters, the
``bindparams`` list allows specification of :func:`bindparam`
constructs which specify the type for a given name::
t = text("SELECT id FROM users WHERE updated_at>:updated",
bindparams=[bindparam('updated', DateTime())]
)
Typing during result row processing is also an important concern.
Result column types
are specified using the ``typemap`` dictionary, where the keys
match the names of columns. These names are taken from what
the DBAPI returns as ``cursor.description``::
t = text("SELECT id, name FROM users",
typemap={
'id':Integer,
'name':Unicode
}
)
The :func:`text` construct is used internally for most cases when
a literal string is specified for part of a larger query, such as
within :func:`select()`, :func:`update()`,
:func:`insert()` or :func:`delete()`. In those cases, the same
bind parameter syntax is applied::
s = select([users.c.id, users.c.name]).where("id=:user_id")
result = connection.execute(s, user_id=12)
Using :func:`text` explicitly usually implies the construction
of a full, standalone statement. As such, SQLAlchemy refers
to it as an :class:`.Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`text` construct that should be subject to "autocommit"
can be set explicitly so using the ``autocommit`` option::
t = text("EXEC my_procedural_thing()").\\
execution_options(autocommit=True)
Note that SQLAlchemy's usual "autocommit" behavior applies to
:func:`text` constructs - that is, statements which begin
with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
or a variety of other phrases specific to certain backends, will
be eligible for autocommit if no transaction is in progress.
:param text:
the text of the SQL statement to be created. use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param bind:
an optional connection or engine to be used for this text query.
:param bindparams:
a list of :func:`bindparam()` instances which can be used to define
the types and/or initial values for the bind parameters within
the textual statement; the keynames of the bindparams must match
those within the text of the statement. The types will be used
for pre-processing on bind values.
:param typemap:
a dictionary mapping the names of columns represented in the
columns clause of a ``SELECT`` statement to type objects,
which will be used to perform post-processing on columns within
the result set. This argument applies to any expression
that returns result sets.
"""
return _TextClause(text, bind=bind, *args, **kwargs)
def over(func, partition_by=None, order_by=None):
"""Produce an OVER clause against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
E.g.::
from sqlalchemy import over
over(func.row_number(), order_by='x')
Would produce "ROW_NUMBER() OVER(ORDER BY x)".
:param func: a :class:`.FunctionElement` construct, typically
generated by :attr:`~.expression.func`.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
:param order_by: a column element or string, or a list
of such, that will be used as the ORDER BY clause
of the OVER construct.
This function is also available from the :attr:`~.expression.func`
construct itself via the :meth:`.FunctionElement.over` method.
.. versionadded:: 0.7
"""
return _Over(func, partition_by=partition_by, order_by=order_by)
def null():
"""Return a :class:`_Null` object, which compiles to ``NULL``.
"""
return _Null()
def true():
"""Return a :class:`_True` object, which compiles to ``true``, or the
boolean equivalent for the target dialect.
"""
return _True()
def false():
"""Return a :class:`_False` object, which compiles to ``false``, or the
boolean equivalent for the target dialect.
"""
return _False()
class _FunctionGenerator(object):
"""Generate :class:`.Function` objects based on getattr calls."""
def __init__(self, **opts):
self.__names = []
self.opts = opts
def __getattr__(self, name):
# passthru __ attributes; fixes pydoc
if name.startswith('__'):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
elif name.endswith('_'):
name = name[0:-1]
f = _FunctionGenerator(**self.opts)
f.__names = list(self.__names) + [name]
return f
def __call__(self, *c, **kwargs):
o = self.opts.copy()
o.update(kwargs)
if len(self.__names) == 1:
func = getattr(functions, self.__names[-1].lower(), None)
if func is not None and \
isinstance(func, type) and \
issubclass(func, Function):
return func(*c, **o)
return Function(self.__names[-1],
packagenames=self.__names[0:-1], *c, **o)
# "func" global - i.e. func.count()
func = _FunctionGenerator()
"""Generate SQL function expressions.
``func`` is a special object instance which generates SQL functions based on name-based attributes, e.g.::
>>> print func.count(1)
count(:param_1)
The element is a column-oriented SQL element like any other, and is
used in that way::
>>> print select([func.count(table.c.id)])
SELECT count(sometable.id) FROM sometable
Any name can be given to ``func``. If the function name is unknown to
SQLAlchemy, it will be rendered exactly as is. For common SQL functions
which SQLAlchemy is aware of, the name may be interpreted as a *generic
function* which will be compiled appropriately to the target database::
>>> print func.current_timestamp()
CURRENT_TIMESTAMP
To call functions which are present in dot-separated packages, specify them in the same manner::
>>> print func.stats.yield_curve(5, 10)
stats.yield_curve(:yield_curve_1, :yield_curve_2)
SQLAlchemy can be made aware of the return type of functions to enable
type-specific lexical and result-based behavior. For example, to ensure
that a string-based function returns a Unicode value and is similarly
treated as a string in expressions, specify
:class:`~sqlalchemy.types.Unicode` as the type:
>>> print func.my_string(u'hi', type_=Unicode) + ' ' + \
... func.my_string(u'there', type_=Unicode)
my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3)
The object returned by a ``func`` call is an instance of :class:`.Function`.
This object meets the "column" interface, including comparison and labeling
functions. The object can also be passed the :meth:`~.Connectable.execute`
method of a :class:`.Connection` or :class:`.Engine`, where it will be
wrapped inside of a SELECT statement first::
print connection.execute(func.current_timestamp()).scalar()
A function can also be "bound" to a :class:`.Engine` or :class:`.Connection`
using the ``bind`` keyword argument, providing an execute() as well
as a scalar() method::
myfunc = func.current_timestamp(bind=some_engine)
print myfunc.scalar()
Functions which are interpreted as "generic" functions know how to
calculate their return type automatically. For a listing of known generic
functions, see :ref:`generic_functions`.
"""
# "modifier" global - i.e. modifier.distinct
# TODO: use UnaryExpression for this instead ?
modifier = _FunctionGenerator(group=False)
class _truncated_label(unicode):
"""A unicode subclass used to identify symbolic "
"names that may require truncation."""
def apply_map(self, map_):
return self
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
"""A unicode subclass used to identify anonymously
generated names."""
def __add__(self, other):
return _anonymous_label(
unicode(self) +
unicode(other))
def __radd__(self, other):
return _anonymous_label(
unicode(other) +
unicode(self))
def apply_map(self, map_):
return self % map_
def _as_truncated(value):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(value, _truncated_label):
return value
else:
return _truncated_label(value)
def _string_or_unprintable(element):
if isinstance(element, basestring):
return element
else:
try:
return str(element)
except:
return "unprintable element %r" % element
def _clone(element, **kw):
return element._clone()
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if all_overlap.intersection(elem._cloned_set))
def _is_literal(element):
return not isinstance(element, Visitable) and \
not hasattr(element, '__clause_element__')
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _column_as_key(element):
if isinstance(element, basestring):
return element
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
return element.key
def _literal_as_text(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif isinstance(element, basestring):
return _TextClause(unicode(element))
elif isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
else:
raise exc.ArgumentError(
"SQL expression object or string expected."
)
def _const_expr(element):
if element is None:
return null()
elif element is False:
return false()
elif element is True:
return true()
else:
raise exc.ArgumentError(
"Expected None, False, or True"
)
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_column(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return literal_column(str(element))
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return null()
else:
return _BindParamClause(name, element, type_=type_, unique=True)
else:
return element
def _type_from_args(args):
for a in args:
if not isinstance(a.type, sqltypes.NullType):
return a.type
else:
return sqltypes.NullType
def _no_literals(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % element)
else:
return element
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, ColumnElement):
raise exc.ArgumentError(
"Column-based expression object expected for argument "
"'%s'; got: '%s', type %s" % (name, element, type(element)))
return element
def _corresponding_column_or_error(fromclause, column,
require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),fromclause.description)
)
return c
@util.decorator
def _generative(fn, *args, **kw):
"""Mark a method as generative."""
self = args[0]._generate()
fn(self, *args[1:], **kw)
return self
def is_column(col):
"""True if ``col`` is an instance of :class:`.ColumnElement`."""
return isinstance(col, ColumnElement)
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
bind = None
_is_clone_of = None
def _clone(self):
"""Create a shallow copy of this ClauseElement.
        This method may be used by a generative API. It's also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
c.__dict__.pop('_cloned_set', None)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purposes for creating a new object of
        this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
if util.jython:
def __hash__(self):
"""Return a distinct hash code.
ClauseElements may have special equality comparisons which
makes us rely on them having unique hash codes for use in
hash-based collections. Stock __hash__ doesn't guarantee
unique values on platforms with moving GCs.
"""
return id(self)
def _annotate(self, values):
"""return a copy of this ClauseElement with the given annotations
dictionary.
"""
return sqlutil.Annotated(self, values)
def _deannotate(self):
"""return a copy of this ClauseElement with an empty annotations
dictionary.
"""
return self._clone()
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print clause.compile().params
{'foo':None}
>>> print clause.params({'foo':7}).compile().params
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam':visit_bindparam})
def compare(self, other, **kw):
"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`.ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone, **kw):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
pass
def get_children(self, **kwargs):
"""Return immediate child elements of this :class:`.ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`.select` constructs when placed into
the FROM clause of another :func:`.select`. (Note that
subqueries should be normally created using the
:func:`.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parenthesis might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of :class:`.ClauseElement`
just returns self.
"""
return self
@util.deprecated('0.7',
'Only SQL expressions which subclass '
':class:`.Executable` may provide the '
':func:`.execute` method.')
def execute(self, *multiparams, **params):
"""Compile and execute this :class:`.ClauseElement`.
"""
e = self.bind
if e is None:
label = getattr(self, 'description', self.__class__.__name__)
msg = ('This %s does not support direct execution.' % label)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
@util.deprecated('0.7',
'Only SQL expressions which subclass '
':class:`.Executable` may provide the '
':func:`.scalar` method.')
def scalar(self, *multiparams, **params):
"""Compile and execute this :class:`.ClauseElement`, returning
the result's scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
def compile(self, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~sqlalchemy.engine.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~sqlalchemy.engine.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`.ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`.ClauseElement`'s bound engine, if
any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.DefaultDialect()
return self._compiler(dialect, bind=bind, **kw)
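    # Illustrative sketch (not part of the original source): compiling an
    # expression against an explicit dialect.  The table and column names
    # below are hypothetical.
    #
    #     from sqlalchemy.sql import table, column, select
    #     from sqlalchemy.dialects import postgresql
    #     users = table('users', column('id'), column('name'))
    #     stmt = select([users]).where(users.c.name == 'ed')
    #     compiled = stmt.compile(dialect=postgresql.dialect())
    #     print(compiled)          # the rendered SQL string
    #     print(compiled.params)   # {'name_1': 'ed'}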
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
# Py3K
#return unicode(self.compile())
# Py2K
return unicode(self.compile()).encode('ascii', 'backslashreplace')
# end Py2K
def __and__(self, other):
return and_(self, other)
def __or__(self, other):
return or_(self, other)
def __invert__(self):
return self._negate()
def __nonzero__(self):
raise TypeError("Boolean value of this clause is not defined")
def _negate(self):
if hasattr(self, 'negation_clause'):
return self.negation_clause
else:
return _UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None)
def __repr__(self):
friendly = getattr(self, 'description', None)
if friendly is None:
return object.__repr__(self)
else:
return '<%s.%s at 0x%x; %s>' % (
self.__module__, self.__class__.__name__, id(self), friendly)
class _Immutable(object):
"""mark a ClauseElement as 'immutable' when expressions are cloned."""
def unique_params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def _clone(self):
return self
class _CompareMixin(ColumnOperators):
"""Defines comparison and math operations for :class:`.ClauseElement`
instances.
See :class:`.ColumnOperators` and :class:`.Operators` for descriptions
of all operations.
"""
def __compare(self, op, obj, negate=None, reverse=False,
**kwargs
):
if obj is None or isinstance(obj, _Null):
if op in (operators.eq, operators.is_):
return _BinaryExpression(self, null(), operators.is_,
negate=operators.isnot)
elif op in (operators.ne, operators.isnot):
return _BinaryExpression(self, null(), operators.isnot,
negate=operators.is_)
else:
raise exc.ArgumentError("Only '='/'!=' operators can "
"be used with NULL")
else:
obj = self._check_literal(op, obj)
if reverse:
return _BinaryExpression(obj,
self,
op,
type_=sqltypes.BOOLEANTYPE,
negate=negate, modifiers=kwargs)
else:
return _BinaryExpression(self,
obj,
op,
type_=sqltypes.BOOLEANTYPE,
negate=negate, modifiers=kwargs)
def __operate(self, op, obj, reverse=False):
obj = self._check_literal(op, obj)
if reverse:
left, right = obj, self
else:
left, right = self, obj
if left.type is None:
op, result_type = sqltypes.NULLTYPE._adapt_expression(op,
right.type)
elif right.type is None:
op, result_type = left.type._adapt_expression(op,
sqltypes.NULLTYPE)
else:
op, result_type = left.type._adapt_expression(op,
right.type)
return _BinaryExpression(left, right, op, type_=result_type)
# a mapping of operators with the method they use, along with their negated
# operator for comparison operators
operators = {
operators.add : (__operate,),
operators.mul : (__operate,),
operators.sub : (__operate,),
# Py2K
operators.div : (__operate,),
# end Py2K
operators.mod : (__operate,),
operators.truediv : (__operate,),
operators.lt : (__compare, operators.ge),
operators.le : (__compare, operators.gt),
operators.ne : (__compare, operators.eq),
operators.gt : (__compare, operators.le),
operators.ge : (__compare, operators.lt),
operators.eq : (__compare, operators.ne),
operators.like_op : (__compare, operators.notlike_op),
operators.ilike_op : (__compare, operators.notilike_op),
operators.is_ : (__compare, operators.is_),
operators.isnot : (__compare, operators.isnot),
}
def operate(self, op, *other, **kwargs):
o = _CompareMixin.operators[op]
return o[0](self, op, other[0], *o[1:], **kwargs)
def reverse_operate(self, op, other, **kwargs):
o = _CompareMixin.operators[op]
return o[0](self, op, other, reverse=True, *o[1:], **kwargs)
def in_(self, other):
"""See :meth:`.ColumnOperators.in_`."""
return self._in_impl(operators.in_op, operators.notin_op, other)
def _in_impl(self, op, negate_op, seq_or_selectable):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, _ScalarSelect):
return self.__compare(op, seq_or_selectable,
negate=negate_op)
elif isinstance(seq_or_selectable, _SelectBase):
# TODO: if we ever want to support (x, y, z) IN (select x,
# y, z from table), we would need a multi-column version of
# as_scalar() to produce a multi- column selectable that
# does not export itself as a FROM clause
return self.__compare(op, seq_or_selectable.as_scalar(),
negate=negate_op)
elif isinstance(seq_or_selectable, (Selectable, _TextClause)):
return self.__compare(op, seq_or_selectable,
negate=negate_op)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance(o, _CompareMixin):
raise exc.InvalidRequestError('in() function accept'
's either a list of non-selectable values, '
'or a selectable: %r' % o)
else:
o = self._bind_param(op, o)
args.append(o)
if len(args) == 0:
# Special case handling for empty IN's, behave like
# comparison against zero row selectable. We use != to
# build the contradiction as it handles NULL values
# appropriately, i.e. "not (x IN ())" should not return NULL
# values for x.
util.warn('The IN-predicate on "%s" was invoked with an '
'empty sequence. This results in a '
'contradiction, which nonetheless can be '
'expensive to evaluate. Consider alternative '
'strategies for improved performance.' % self)
return self != self
return self.__compare(op,
ClauseList(*args).self_group(against=op),
negate=negate_op)
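    # Illustrative sketch (not part of the original source; hypothetical
    # column) of the IN handling above, including the empty-sequence case:
    #
    #     from sqlalchemy.sql import column
    #     print(column('x').in_([1, 2, 3]))   # x IN (:x_1, :x_2, :x_3)
    #     column('x').in_([])                 # warns, renders as "x != x"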
def __neg__(self):
"""See :meth:`.ColumnOperators.__neg__`."""
return _UnaryExpression(self, operator=operators.neg)
def startswith(self, other, escape=None):
"""See :meth:`.ColumnOperators.startswith`."""
# use __radd__ to force string concat behavior
return self.__compare(
operators.like_op,
literal_column("'%'", type_=sqltypes.String).__radd__(
self._check_literal(operators.like_op, other)
),
escape=escape)
def endswith(self, other, escape=None):
"""See :meth:`.ColumnOperators.endswith`."""
return self.__compare(
operators.like_op,
literal_column("'%'", type_=sqltypes.String) +
self._check_literal(operators.like_op, other),
escape=escape)
def contains(self, other, escape=None):
"""See :meth:`.ColumnOperators.contains`."""
return self.__compare(
operators.like_op,
literal_column("'%'", type_=sqltypes.String) +
self._check_literal(operators.like_op, other) +
literal_column("'%'", type_=sqltypes.String),
escape=escape)
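    # Illustrative sketch of the three LIKE helpers above, as rendered by the
    # default dialect (hypothetical column; exact concatenation syntax is
    # dialect-dependent):
    #
    #     from sqlalchemy.sql import column
    #     print(column('name').startswith('ed'))  # name LIKE :name_1 || '%'
    #     print(column('name').endswith('ed'))    # name LIKE '%' || :name_1
    #     print(column('name').contains('ed'))    # name LIKE '%' || :name_1 || '%'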
def match(self, other):
"""See :meth:`.ColumnOperators.match`."""
return self.__compare(operators.match_op,
self._check_literal(operators.match_op,
other))
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`~.expression.label` function.
        If 'name' is None, an anonymous label name will be generated.
"""
return _Label(name, self, self.type)
def desc(self):
"""See :meth:`.ColumnOperators.desc`."""
return desc(self)
def asc(self):
"""See :meth:`.ColumnOperators.asc`."""
return asc(self)
def nullsfirst(self):
"""See :meth:`.ColumnOperators.nullsfirst`."""
return nullsfirst(self)
def nullslast(self):
"""See :meth:`.ColumnOperators.nullslast`."""
return nullslast(self)
def distinct(self):
"""See :meth:`.ColumnOperators.distinct`."""
return _UnaryExpression(self, operator=operators.distinct_op,
type_=self.type)
def between(self, cleft, cright):
"""See :meth:`.ColumnOperators.between`."""
return _BinaryExpression(
self,
ClauseList(
self._check_literal(operators.and_, cleft),
self._check_literal(operators.and_, cright),
operator=operators.and_,
group=False),
operators.between_op)
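    # Illustrative sketch (hypothetical column):
    #
    #     from sqlalchemy.sql import column
    #     print(column('x').between(5, 10))   # x BETWEEN :x_1 AND :x_2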
def collate(self, collation):
"""See :meth:`.ColumnOperators.collate`."""
return collate(self, collation)
def op(self, operator):
"""See :meth:`.ColumnOperators.op`."""
return lambda other: self.__operate(operator, other)
def _bind_param(self, operator, obj):
return _BindParamClause(None, obj,
_compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
def _check_literal(self, operator, other):
if isinstance(other, _BindParamClause) and \
isinstance(other.type, sqltypes.NullType):
# TODO: perhaps we should not mutate the incoming bindparam()
# here and instead make a copy of it. this might
# be the only place that we're mutating an incoming construct.
other.type = self.type
return other
elif hasattr(other, '__clause_element__'):
other = other.__clause_element__()
if isinstance(other, (_SelectBase, Alias)):
other = other.as_scalar()
return other
elif not isinstance(other, ClauseElement):
return self._bind_param(operator, other)
elif isinstance(other, (_SelectBase, Alias)):
return other.as_scalar()
else:
return other
class ColumnElement(ClauseElement, _CompareMixin):
"""Represent an element that is usable within the "column clause" portion
of a ``SELECT`` statement.
This includes columns associated with tables, aliases, and
subqueries, expressions, function calls, SQL keywords such as
``NULL``, literals, etc. :class:`.ColumnElement` is the ultimate base
class for all such elements.
:class:`.ColumnElement` supports the ability to be a *proxy* element,
which indicates that the :class:`.ColumnElement` may be associated with
a :class:`.Selectable` which was derived from another :class:`.Selectable`.
An example of a "derived" :class:`.Selectable` is an :class:`.Alias` of a
:class:`~sqlalchemy.schema.Table`.
A :class:`.ColumnElement`, by subclassing the :class:`_CompareMixin` mixin
class, provides the ability to generate new :class:`.ClauseElement`
objects using Python expressions. See the :class:`_CompareMixin`
docstring for more details.
"""
__visit_name__ = 'column'
primary_key = False
foreign_keys = []
quote = None
_label = None
_key_label = None
_alt_names = ()
@property
def _select_iterable(self):
return (self, )
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set
if not hasattr(c, 'proxies'))
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
if hasattr(self, 'proxies'):
for c in self.proxies:
s.update(c.proxy_set)
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`.ColumnElement`
has a common ancestor to this :class:`.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return hasattr(other, 'name') and hasattr(self, 'name') and \
other.name == self.name
def _make_proxy(self, selectable, name=None):
"""Create a new :class:`.ColumnElement` representing this
:class:`.ColumnElement` as it appears in the select list of a
descending selectable.
"""
if name is None:
name = self.anon_label
# TODO: may want to change this to anon_label,
# or some value that is more useful than the
# compiled form of the expression
key = str(self)
else:
key = name
co = ColumnClause(_as_truncated(name),
selectable,
type_=getattr(self,
'type', None))
co.proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = \
selectable._is_clone_of.columns.get(key)
selectable._columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
        of columns. If the given "other" column is present in this
        dictionary, and any of the columns in the corresponding set() pass the
        comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other, )
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif oth is self:
return True
else:
return False
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
        The compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
"""
return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self,
'name', 'anon')))
class ColumnCollection(util.OrderedProperties):
"""An ordered dictionary that stores a list of ColumnElement
instances.
Overrides the ``__eq__()`` method to produce SQL clauses between
sets of correlated columns.
"""
def __init__(self, *cols):
super(ColumnCollection, self).__init__()
self._data.update((c.key, c) for c in cols)
self.__dict__['_all_cols'] = util.column_set(self)
def __str__(self):
return repr([str(c) for c in self])
def replace(self, column):
"""add the given column to this collection, removing unaliased
versions of this column as well as existing columns with the
same key.
e.g.::
t = Table('sometable', metadata, Column('col1', Integer))
t.columns.replace(Column('col1', Integer, key='columnone'))
will remove the original 'col1' from the collection, and add
        the new column under the key 'columnone'.
Used by schema.Column to override columns during table reflection.
"""
if column.name in self and column.key != column.name:
other = self[column.name]
if other.name == other.key:
del self._data[other.name]
self._all_cols.remove(other)
if column.key in self._data:
self._all_cols.remove(self._data[column.key])
self._all_cols.add(column)
self._data[column.key] = column
def add(self, column):
"""Add a column to this collection.
The key attribute of the column will be used as the hash key
for this dictionary.
"""
self[column.key] = column
def __delitem__(self, key):
raise NotImplementedError()
def __setattr__(self, key, object):
raise NotImplementedError()
def __setitem__(self, key, value):
if key in self:
# this warning is primarily to catch select() statements
# which have conflicting column names in their exported
# columns collection
existing = self[key]
if not existing.shares_lineage(value):
util.warn('Column %r on table %r being replaced by '
'another column with the same key. Consider '
'use_labels for select() statements.' % (key,
getattr(existing, 'table', None)))
self._all_cols.remove(existing)
# pop out memoized proxy_set as this
# operation may very well be occurring
# in a _make_proxy operation
value.__dict__.pop('proxy_set', None)
self._all_cols.add(value)
self._data[key] = value
def clear(self):
self._data.clear()
self._all_cols.clear()
def remove(self, column):
del self._data[column.key]
self._all_cols.remove(column)
def update(self, value):
self._data.update(value)
self._all_cols.clear()
self._all_cols.update(self._data.values())
def extend(self, iter):
self.update((c.key, c) for c in iter)
__hash__ = None
def __eq__(self, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c==local)
return and_(*l)
def __contains__(self, other):
if not isinstance(other, basestring):
raise exc.ArgumentError("__contains__ requires a string argument")
return util.OrderedProperties.__contains__(self, other)
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
self.__dict__['_all_cols'] = util.column_set(self._data.values())
def contains_column(self, col):
# this has to be done via set() membership
return col in self._all_cols
def as_immutable(self):
return ImmutableColumnCollection(self._data, self._all_cols)
class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
def __init__(self, data, colset):
util.ImmutableProperties.__init__(self, data)
self.__dict__['_all_cols'] = colset
extend = remove = util.ImmutableProperties._immutable
class ColumnSet(util.ordered_column_set):
def contains_column(self, col):
return col in self
def extend(self, cols):
for col in cols:
self.add(col)
def __add__(self, other):
return list(self) + list(other)
def __eq__(self, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c==local)
return and_(*l)
def __hash__(self):
return hash(tuple(x for x in self))
class Selectable(ClauseElement):
"""mark a class as being selectable"""
__visit_name__ = 'selectable'
class FromClause(Selectable):
"""Represent an element that can be used within the ``FROM``
clause of a ``SELECT`` statement.
"""
__visit_name__ = 'fromclause'
named_with_column = False
_hide_froms = []
quote = None
schema = None
_memoized_property = util.group_expirable_memoized_property(["_columns"])
def count(self, whereclause=None, **params):
"""return a SELECT COUNT generated against this
:class:`.FromClause`."""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return select(
[func.count(col).label('tbl_row_count')],
whereclause,
from_obj=[self],
**params)
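    # Illustrative sketch (hypothetical table, not part of the original
    # source):
    #
    #     from sqlalchemy import Table, Column, Integer, MetaData
    #     users = Table('users', MetaData(),
    #                   Column('id', Integer, primary_key=True))
    #     print(users.count(users.c.id > 5))
    #     # SELECT count(users.id) AS tbl_row_count
    #     # FROM users
    #     # WHERE users.id > :id_1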
def select(self, whereclause=None, **params):
"""return a SELECT of this :class:`.FromClause`."""
return select([self], whereclause, **params)
def join(self, right, onclause=None, isouter=False):
"""return a join of this :class:`.FromClause` against another
:class:`.FromClause`."""
return Join(self, right, onclause, isouter)
def outerjoin(self, right, onclause=None):
"""return an outer join of this :class:`.FromClause` against another
:class:`.FromClause`."""
return Join(self, right, onclause, True)
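    # Illustrative sketch (hypothetical 'users' and 'addresses' tables):
    #
    #     j = users.join(addresses, users.c.id == addresses.c.user_id)
    #     print(select([users, addresses]).select_from(j))
    #     # SELECT ... FROM users JOIN addresses ON users.id = addresses.user_id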
def alias(self, name=None):
"""return an alias of this :class:`.FromClause`.
This is shorthand for calling::
from sqlalchemy import alias
a = alias(self, name=name)
See :func:`~.expression.alias` for details.
"""
return Alias(self, name)
def is_derived_from(self, fromclause):
"""Return True if this FromClause is 'derived' from the given
FromClause.
An example would be an Alias of a Table is derived from that Table.
"""
# this is essentially an "identity" check in the base class.
# Other constructs override this to traverse through
# contained elements.
return fromclause in self._cloned_set
def _is_lexical_equivalent(self, other):
"""Return True if this FromClause and the other represent
the same lexical identity.
This tests if either one is a copy of the other, or
if they are the same via annotation identity.
"""
return self._cloned_set.intersection(other._cloned_set)
def replace_selectable(self, old, alias):
"""replace all occurrences of FromClause 'old' with the given Alias
object, returning a copy of this :class:`.FromClause`.
"""
return sqlutil.ClauseAdapter(alias).traverse(self)
def correspond_on_equivalents(self, column, equivalents):
"""Return corresponding_column for the given column, or if None
search for a match in the given dictionary.
"""
col = self.corresponding_column(column, require_embedded=True)
        if col is None and column in equivalents:
            for equiv in equivalents[column]:
nc = self.corresponding_column(equiv, require_embedded=True)
if nc:
return nc
return col
def corresponding_column(self, column, require_embedded=False):
"""Given a :class:`.ColumnElement`, return the exported
:class:`.ColumnElement` object from this :class:`.Selectable`
which corresponds to that original
:class:`~sqlalchemy.schema.Column` via a common ancestor
column.
:param column: the target :class:`.ColumnElement` to be matched
:param require_embedded: only return corresponding columns for
the given :class:`.ColumnElement`, if the given
:class:`.ColumnElement` is actually present within a sub-element
of this :class:`.FromClause`. Normally the column will match if
it merely shares a common ancestor with one of the exported
columns of this :class:`.FromClause`.
"""
def embedded(expanded_proxy_set, target_set):
for t in target_set.difference(expanded_proxy_set):
if not set(_expand_cloned([t])
).intersection(expanded_proxy_set):
return False
return True
# don't dig around if the column is locally present
if self.c.contains_column(column):
return column
col, intersect = None, None
target_set = column.proxy_set
cols = self.c
for c in cols:
expanded_proxy_set = set(_expand_cloned(c.proxy_set))
i = target_set.intersection(expanded_proxy_set)
if i and (not require_embedded
or embedded(expanded_proxy_set, target_set)):
if col is None:
# no corresponding column yet, pick this one.
col, intersect = c, i
elif len(i) > len(intersect):
# 'c' has a larger field of correspondence than
# 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x
# matches a1.c.x->table.c.x better than
# selectable.c.x->table.c.x does.
col, intersect = c, i
elif i == intersect:
# they have the same field of correspondence. see
# which proxy_set has fewer columns in it, which
# indicates a closer relationship with the root
# column. Also take into account the "weight"
# attribute which CompoundSelect() uses to give
# higher precedence to columns based on vertical
# position in the compound statement, and discard
# columns that have no reference to the target
# column (also occurs with CompoundSelect)
col_distance = util.reduce(operator.add,
[sc._annotations.get('weight', 1) for sc in
col.proxy_set if sc.shares_lineage(column)])
c_distance = util.reduce(operator.add,
[sc._annotations.get('weight', 1) for sc in
c.proxy_set if sc.shares_lineage(column)])
if c_distance < col_distance:
col, intersect = c, i
return col
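    # Illustrative sketch (hypothetical 'users' table): an Alias exports
    # columns which "correspond" to those of the aliased table via the
    # proxy-set intersection logic above.
    #
    #     u = users.alias('u')
    #     u.corresponding_column(users.c.id) is u.c.id   # True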
@property
def description(self):
"""a brief description of this FromClause.
Used primarily for error message formatting.
"""
return getattr(self, 'name', self.__class__.__name__ + " object")
def _reset_exported(self):
"""delete memoized collections when a FromClause is cloned."""
self._memoized_property.expire_instance(self)
@_memoized_property
def columns(self):
"""Return the collection of Column objects contained by this
FromClause."""
if '_columns' not in self.__dict__:
self._init_collections()
self._populate_column_collection()
return self._columns.as_immutable()
@_memoized_property
def primary_key(self):
"""Return the collection of Column objects which comprise the
primary key of this FromClause."""
self._init_collections()
self._populate_column_collection()
return self.primary_key
@_memoized_property
def foreign_keys(self):
"""Return the collection of ForeignKey objects which this
FromClause references."""
self._init_collections()
self._populate_column_collection()
return self.foreign_keys
c = property(attrgetter('columns'))
_select_iterable = property(attrgetter('columns'))
def _init_collections(self):
assert '_columns' not in self.__dict__
assert 'primary_key' not in self.__dict__
assert 'foreign_keys' not in self.__dict__
self._columns = ColumnCollection()
self.primary_key = ColumnSet()
self.foreign_keys = set()
def _populate_column_collection(self):
pass
class _BindParamClause(ColumnElement):
"""Represent a bind parameter.
Public constructor is the :func:`bindparam()` function.
"""
__visit_name__ = 'bindparam'
quote = None
def __init__(self, key, value, type_=None, unique=False,
callable_=None,
isoutparam=False, required=False,
_compared_to_operator=None,
_compared_to_type=None):
"""Construct a _BindParamClause.
:param key:
the key for this bind param. Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`_BindParamClause` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. This value may be
overridden by the dictionary of parameters sent to statement
compilation/execution.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A ``TypeEngine`` object that will be used to pre-process the
value corresponding to this :class:`_BindParamClause` at
execution time.
:param unique:
if True, the key name of this BindParamClause will be
modified if another :class:`_BindParamClause` of the same name
already has been located within the containing
:class:`.ClauseElement`.
:param required:
a value is required at execution time.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter.
"""
if unique:
self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
or 'param'))
else:
self.key = key or _anonymous_label('%%(%d param)s'
% id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or 'param'
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
if type_ is None:
if _compared_to_type is not None:
self.type = \
_compared_to_type._coerce_compared_value(
_compared_to_operator, value)
else:
self.type = sqltypes._type_map.get(type(value),
sqltypes.NULLTYPE)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
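    # Illustrative sketch (hypothetical column); bindparam() is the public
    # constructor for this class:
    #
    #     from sqlalchemy.sql import bindparam, column
    #     expr = column('x') == bindparam('value', value=5)
    #     print(expr.compile().params)   # {'value': 5}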
@property
def effective_value(self):
"""Return the value of this bound parameter,
taking into account if the ``callable`` parameter
was set.
The ``callable`` value will be evaluated
and returned if present, else ``value``.
"""
if self.callable:
return self.callable()
else:
return self.value
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
or 'param'))
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _anonymous_label('%%(%d %s)s' % (id(self),
self._orig_key or 'param'))
def compare(self, other, **kw):
"""Compare this :class:`_BindParamClause` to the given
clause."""
return isinstance(other, _BindParamClause) \
and self.type._compare_type_affinity(other.type) \
and self.value == other.value
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if self.callable:
v = self.callable()
d['callable'] = None
d['value'] = v
return d
def __repr__(self):
return '_BindParamClause(%r, %r, type_=%r)' % (self.key,
self.value, self.type)
class _TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = 'typeclause'
def __init__(self, type):
self.type = type
class _Generative(object):
"""Allow a ClauseElement to generate itself via the
@_generative decorator.
"""
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class Executable(_Generative):
"""Mark a ClauseElement as supporting execution.
:class:`.Executable` is a superclass for all "statement" types
of objects, including :func:`select`, :func:`delete`, :func:`update`,
:func:`insert`, :func:`text`.
"""
supports_execution = True
_execution_options = util.immutabledict()
_bind = None
@_generative
def execution_options(self, **kw):
""" Set non-SQL options for the statement which take effect during
execution.
Execution options can be set on a per-statement or
per :class:`.Connection` basis. Additionally, the
:class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide access
to execution options which they in turn configure upon connections.
The :meth:`execution_options` method is generative. A new
instance of this statement is returned that contains the options::
statement = select([table.c.x, table.c.y])
statement = statement.execution_options(autocommit=True)
Note that only a subset of possible execution options can be applied
to a statement - these include "autocommit" and "stream_results",
but not "isolation_level" or "compiled_cache".
See :meth:`.Connection.execution_options` for a full list of
possible options.
See also:
:meth:`.Connection.execution_options()`
:meth:`.Query.execution_options()`
"""
if 'isolation_level' in kw:
raise exc.ArgumentError(
"'isolation_level' execution option may only be specified "
"on Connection.execution_options(), or "
"per-engine using the isolation_level "
"argument to create_engine()."
)
if 'compiled_cache' in kw:
raise exc.ArgumentError(
"'compiled_cache' execution option may only be specified "
"on Connection.execution_options(), not per statement."
)
self._execution_options = self._execution_options.union(kw)
def execute(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`."""
e = self.bind
if e is None:
label = getattr(self, 'description', self.__class__.__name__)
            msg = ('This %s is not directly bound to a Connection or Engine. '
'Use the .execute() method of a Connection or Engine '
'to execute this construct.' % label)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`, returning the
result's scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
@property
def bind(self):
"""Returns the :class:`.Engine` or :class:`.Connection` to
which this :class:`.Executable` is bound, or None if none found.
This is a traversal which checks locally, then
checks among the "from" clauses of associated objects
until a bound engine or connection is found.
"""
if self._bind is not None:
return self._bind
for f in _from_objects(self):
if f is self:
continue
engine = f.bind
if engine is not None:
return engine
else:
return None
# legacy, some outside users may be calling this
_Executable = Executable
class _TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
Public constructor is the :func:`text()` function.
"""
__visit_name__ = 'textclause'
_bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
_execution_options = \
Executable._execution_options.union({'autocommit'
: PARSE_AUTOCOMMIT})
@property
def _select_iterable(self):
return (self,)
_hide_froms = []
def __init__(
self,
text='',
bind=None,
bindparams=None,
typemap=None,
autocommit=None,
):
self._bind = bind
self.bindparams = {}
self.typemap = typemap
if autocommit is not None:
util.warn_deprecated('autocommit on text() is deprecated. '
'Use .execution_options(autocommit=Tru'
'e)')
self._execution_options = \
self._execution_options.union({'autocommit'
: autocommit})
if typemap is not None:
for key in typemap.keys():
typemap[key] = sqltypes.to_instance(typemap[key])
def repl(m):
self.bindparams[m.group(1)] = bindparam(m.group(1))
return ':%s' % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
if bindparams is not None:
for b in bindparams:
self.bindparams[b.key] = b
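    # Illustrative sketch; text() is the public constructor for this class.
    # The ':user_id' token below is detected by the regex above and becomes a
    # bindparam() automatically (table name hypothetical):
    #
    #     from sqlalchemy.sql import text
    #     t = text("SELECT * FROM users WHERE users.id = :user_id")
    #     t.bindparams.keys()   # ['user_id']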
@property
def type(self):
if self.typemap is not None and len(self.typemap) == 1:
return list(self.typemap)[0]
else:
return sqltypes.NULLTYPE
def self_group(self, against=None):
if against is operators.in_op:
return _Grouping(self)
else:
return self
def _copy_internals(self, clone=_clone, **kw):
self.bindparams = dict((b.key, clone(b, **kw))
for b in self.bindparams.values())
def get_children(self, **kwargs):
return self.bindparams.values()
class _Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
Public constructor is the :func:`null()` function.
"""
__visit_name__ = 'null'
def __init__(self):
self.type = sqltypes.NULLTYPE
def compare(self, other):
return isinstance(other, _Null)
class _False(ColumnElement):
"""Represent the ``false`` keyword in a SQL statement.
Public constructor is the :func:`false()` function.
"""
__visit_name__ = 'false'
def __init__(self):
self.type = sqltypes.BOOLEANTYPE
class _True(ColumnElement):
"""Represent the ``true`` keyword in a SQL statement.
Public constructor is the :func:`true()` function.
"""
__visit_name__ = 'true'
def __init__(self):
self.type = sqltypes.BOOLEANTYPE
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
    By default, the list is comma-separated, as in a column listing.
"""
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop('operator', operators.comma_op)
self.group = kwargs.pop('group', True)
self.group_contents = kwargs.pop('group_contents', True)
if self.group_contents:
self.clauses = [
_literal_as_text(clause).self_group(against=self.operator)
for clause in clauses if clause is not None]
else:
self.clauses = [
_literal_as_text(clause)
for clause in clauses if clause is not None]
@util.memoized_property
def type(self):
if self.clauses:
return self.clauses[0].type
else:
return sqltypes.NULLTYPE
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
# TODO: not sure if i like the 'group_contents' flag. need to
# define the difference between a ClauseList of ClauseLists,
# and a "flattened" ClauseList of ClauseLists. flatten()
# method ?
if self.group_contents:
self.clauses.append(_literal_as_text(clause).\
self_group(against=self.operator))
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone, **kw):
self.clauses = [clone(clause, **kw) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return _Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif isinstance(other, ClauseList) and \
len(self.clauses) == len(other.clauses):
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return self.operator == other.operator
else:
return False
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
super(BooleanClauseList, self).__init__(*clauses, **kwargs)
self.type = sqltypes.to_instance(kwargs.get('type_',
sqltypes.Boolean))
@property
def _select_iterable(self):
return (self, )
def self_group(self, against=None):
if not self.clauses:
return self
else:
return super(BooleanClauseList, self).self_group(against=against)
class _Tuple(ClauseList, ColumnElement):
def __init__(self, *clauses, **kw):
clauses = [_literal_as_binds(c) for c in clauses]
super(_Tuple, self).__init__(*clauses, **kw)
self.type = _type_from_args(clauses)
@property
def _select_iterable(self):
return (self, )
def _bind_param(self, operator, obj):
return _Tuple(*[
_BindParamClause(None, o, _compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
for o in obj
]).self_group()
class _Case(ColumnElement):
__visit_name__ = 'case'
def __init__(self, whens, value=None, else_=None):
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
def _copy_internals(self, clone=_clone, **kw):
if self.value is not None:
self.value = clone(self.value, **kw)
self.whens = [(clone(x, **kw), clone(y, **kw))
for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_, **kw)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(itertools.chain(*[x._from_objects for x in
self.get_children()]))
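    # Illustrative sketch (hypothetical column); case() is the public
    # constructor for this class:
    #
    #     from sqlalchemy.sql import case, column
    #     expr = case([(column('x') == 1, 'one'),
    #                  (column('x') == 2, 'two')],
    #                 else_='other')
    #     # CASE WHEN (x = :x_1) THEN :param_1 WHEN (x = :x_2) THEN :param_2
    #     # ELSE :param_3 END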
class FunctionElement(Executable, ColumnElement, FromClause):
"""Base for SQL function-oriented constructs."""
packagenames = ()
def __init__(self, *clauses, **kwargs):
"""Construct a :class:`.FunctionElement`.
"""
args = [_literal_as_binds(c, self.name) for c in clauses]
self.clause_expr = ClauseList(
operator=operators.comma_op,
group_contents=True, *args).\
self_group()
@property
def columns(self):
"""Fulfill the 'columns' contract of :class:`.ColumnElement`.
Returns a single-element list consisting of this object.
"""
return [self]
@util.memoized_property
def clauses(self):
"""Return the underlying :class:`.ClauseList` which contains
the arguments for this :class:`.FunctionElement`.
"""
return self.clause_expr.element
def over(self, partition_by=None, order_by=None):
"""Produce an OVER clause against this function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
The expression::
func.row_number().over(order_by='x')
is shorthand for::
from sqlalchemy import over
over(func.row_number(), order_by='x')
See :func:`~.expression.over` for a full description.
.. versionadded:: 0.7
"""
return over(self, partition_by=partition_by, order_by=order_by)
@property
def _from_objects(self):
return self.clauses._from_objects
def get_children(self, **kwargs):
return self.clause_expr,
def _copy_internals(self, clone=_clone, **kw):
self.clause_expr = clone(self.clause_expr, **kw)
self._reset_exported()
util.reset_memoized(self, 'clauses')
def select(self):
"""Produce a :func:`~.expression.select` construct
against this :class:`.FunctionElement`.
This is shorthand for::
s = select([function_element])
"""
s = select([self])
if self._execution_options:
s = s.execution_options(**self._execution_options)
return s
def scalar(self):
"""Execute this :class:`.FunctionElement` against an embedded
'bind' and return a scalar value.
This first calls :meth:`~.FunctionElement.select` to
produce a SELECT construct.
Note that :class:`.FunctionElement` can be passed to
the :meth:`.Connectable.scalar` method of :class:`.Connection`
or :class:`.Engine`.
"""
return self.select().execute().scalar()
def execute(self):
"""Execute this :class:`.FunctionElement` against an embedded
'bind'.
This first calls :meth:`~.FunctionElement.select` to
produce a SELECT construct.
Note that :class:`.FunctionElement` can be passed to
the :meth:`.Connectable.execute` method of :class:`.Connection`
or :class:`.Engine`.
"""
return self.select().execute()
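    # Illustrative sketch: .select() wraps the function in a full SELECT;
    # .scalar() and .execute() additionally run it against this element's
    # bind.  The 'users' table here is hypothetical and an engine binding is
    # assumed for execution:
    #
    #     from sqlalchemy import func
    #     print(func.max(users.c.id).select())
    #     # SELECT max(users.id) AS max_1 FROM users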
def _bind_param(self, operator, obj):
return _BindParamClause(None, obj, _compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
class Function(FunctionElement):
"""Describe a named SQL function.
See the superclass :class:`.FunctionElement` for a description
of public methods.
"""
__visit_name__ = 'function'
def __init__(self, name, *clauses, **kw):
"""Construct a :class:`.Function`.
The :attr:`.func` construct is normally used to construct
new :class:`.Function` instances.
"""
self.packagenames = kw.pop('packagenames', None) or []
self.name = name
self._bind = kw.get('bind', None)
self.type = sqltypes.to_instance(kw.get('type_', None))
FunctionElement.__init__(self, *clauses, **kw)
def _bind_param(self, operator, obj):
return _BindParamClause(self.name, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True)
class _Cast(ColumnElement):
__visit_name__ = 'cast'
def __init__(self, clause, totype, **kwargs):
self.type = sqltypes.to_instance(totype)
self.clause = _literal_as_binds(clause, None)
self.typeclause = _TypeClause(self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.typeclause = clone(self.typeclause, **kw)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
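    # Illustrative sketch (hypothetical column); cast() is the public
    # constructor for this class:
    #
    #     from sqlalchemy import cast, Numeric
    #     from sqlalchemy.sql import column
    #     print(cast(column('x'), Numeric(10, 4)))   # CAST(x AS NUMERIC(10, 4))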
class _Extract(ColumnElement):
__visit_name__ = 'extract'
def __init__(self, field, expr, **kwargs):
self.type = sqltypes.Integer()
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone, **kw):
self.expr = clone(self.expr, **kw)
def get_children(self, **kwargs):
return self.expr,
@property
def _from_objects(self):
return self.expr._from_objects
class _UnaryExpression(ColumnElement):
__visit_name__ = 'unary'
def __init__(self, element, operator=None, modifier=None,
type_=None, negate=None):
self.operator = operator
self.modifier = modifier
self.element = _literal_as_text(element).\
self_group(against=self.operator or self.modifier)
self.type = sqltypes.to_instance(type_)
self.negate = negate
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
def compare(self, other, **kw):
"""Compare this :class:`_UnaryExpression` against the given
:class:`.ClauseElement`."""
return (
isinstance(other, _UnaryExpression) and
self.operator == other.operator and
self.modifier == other.modifier and
self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return _UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type)
else:
return super(_UnaryExpression, self)._negate()
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator,
against):
return _Grouping(self)
else:
return self
class _BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``."""
__visit_name__ = 'binary'
def __init__(self, left, right, operator, type_=None,
negate=None, modifiers=None):
self.left = _literal_as_text(left).self_group(against=operator)
self.right = _literal_as_text(right).self_group(against=operator)
self.operator = operator
self.type = sqltypes.to_instance(type_)
self.negate = negate
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __nonzero__(self):
try:
return self.operator(hash(self.left), hash(self.right))
except:
raise TypeError("Boolean value of this clause is not defined")
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`_BinaryExpression` against the
given :class:`_BinaryExpression`."""
return (
isinstance(other, _BinaryExpression) and
self.operator == other.operator and
(
self.left.compare(other.left, **kw) and
self.right.compare(other.right, **kw) or
(
operators.is_commutative(self.operator) and
self.left.compare(other.right, **kw) and
self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return _Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return _BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=sqltypes.BOOLEANTYPE,
modifiers=self.modifiers)
else:
return super(_BinaryExpression, self)._negate()
class _Exists(_UnaryExpression):
__visit_name__ = _UnaryExpression.__visit_name__
_from_objects = []
def __init__(self, *args, **kwargs):
if args and isinstance(args[0], (_SelectBase, _ScalarSelect)):
s = args[0]
else:
if not args:
args = ([literal_column('*')],)
s = select(*args, **kwargs).as_scalar().self_group()
_UnaryExpression.__init__(self, s, operator=operators.exists,
type_=sqltypes.Boolean)
def select(self, whereclause=None, **params):
return select([self], whereclause, **params)
def correlate(self, fromclause):
e = self._clone()
e.element = self.element.correlate(fromclause).self_group()
return e
def select_from(self, clause):
"""return a new :class:`._Exists` construct, applying the given expression
to the :meth:`.Select.select_from` method of the select statement
contained.
"""
e = self._clone()
e.element = self.element.select_from(clause).self_group()
return e
def where(self, clause):
"""return a new exists() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
e = self._clone()
e.element = self.element.where(clause).self_group()
return e
class Join(FromClause):
"""represent a ``JOIN`` construct between two :class:`.FromClause`
elements.
The public constructor function for :class:`.Join` is the module-level
:func:`join()` function, as well as the :func:`join()` method available
off all :class:`.FromClause` subclasses.
"""
__visit_name__ = 'join'
def __init__(self, left, right, onclause=None, isouter=False):
"""Construct a new :class:`.Join`.
The usual entrypoint here is the :func:`~.expression.join`
function or the :meth:`.FromClause.join` method of any
:class:`.FromClause` object.
"""
self.left = _literal_as_text(left)
self.right = _literal_as_text(right).self_group()
if onclause is None:
self.onclause = self._match_primaries(self.left, self.right)
else:
self.onclause = onclause
self.isouter = isouter
self.__folded_equivalents = None
@property
def description(self):
return "Join object on %s(%d) and %s(%d)" % (
self.left.description,
id(self.left),
self.right.description,
id(self.right))
def is_derived_from(self, fromclause):
return fromclause is self or \
self.left.is_derived_from(fromclause) or\
self.right.is_derived_from(fromclause)
def self_group(self, against=None):
return _FromGrouping(self)
def _populate_column_collection(self):
columns = [c for c in self.left.columns] + \
[c for c in self.right.columns]
self.primary_key.extend(sqlutil.reduce_columns(
(c for c in columns if c.primary_key), self.onclause))
self._columns.update((col._label, col) for col in columns)
self.foreign_keys.update(itertools.chain(
*[col.foreign_keys for col in columns]))
def _copy_internals(self, clone=_clone, **kw):
self._reset_exported()
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
self.onclause = clone(self.onclause, **kw)
self.__folded_equivalents = None
def get_children(self, **kwargs):
return self.left, self.right, self.onclause
def _match_primaries(self, left, right):
if isinstance(left, Join):
left_right = left.right
else:
left_right = None
return sqlutil.join_condition(left, right, a_subset=left_right)
def select(self, whereclause=None, fold_equivalents=False, **kwargs):
"""Create a :class:`.Select` from this :class:`.Join`.
The equivalent long-hand form, given a :class:`.Join` object
``j``, is::
from sqlalchemy import select
j = select([j.left, j.right], **kw).\\
where(whereclause).\\
select_from(j)
:param whereclause: the WHERE criterion that will be sent to
the :func:`select()` function
:param fold_equivalents: based on the join criterion of this
:class:`.Join`, do not include
repeat column names in the column list of the resulting
select, for columns that are calculated to be "equivalent"
based on the join criterion of this :class:`.Join`. This will
recursively apply to any joins directly nested by this one
as well.
:param \**kwargs: all other kwargs are sent to the
underlying :func:`select()` function.
"""
if fold_equivalents:
collist = sqlutil.folded_equivalents(self)
else:
collist = [self.left, self.right]
return select(collist, whereclause, from_obj=[self], **kwargs)
@property
def bind(self):
return self.left.bind or self.right.bind
def alias(self, name=None):
"""return an alias of this :class:`.Join`.
Used against a :class:`.Join` object,
:meth:`~.Join.alias` calls the :meth:`~.Join.select`
method first so that a subquery against a
:func:`.select` construct is generated.
        The :func:`~expression.select` construct also has the
``correlate`` flag set to ``False`` and will not
auto-correlate inside an enclosing :func:`~expression.select`
construct.
The equivalent long-hand form, given a :class:`.Join` object
``j``, is::
from sqlalchemy import select, alias
j = alias(
select([j.left, j.right]).\\
select_from(j).\\
with_labels(True).\\
correlate(False),
name=name
)
See :func:`~.expression.alias` for further details on
aliases.
"""
return self.select(use_labels=True, correlate=False).alias(name)
@property
def _hide_froms(self):
return itertools.chain(*[_from_objects(x.left, x.right)
for x in self._cloned_set])
@property
def _from_objects(self):
return [self] + \
self.onclause._from_objects + \
self.left._from_objects + \
self.right._from_objects
class Alias(FromClause):
"""Represents an table or selectable alias (AS).
Represents an alias, as typically applied to any table or
sub-select within a SQL statement using the ``AS`` keyword (or
without the keyword on certain databases such as Oracle).
This object is constructed from the :func:`~.expression.alias` module level
function as well as the :meth:`.FromClause.alias` method available on all
:class:`.FromClause` subclasses.
"""
__visit_name__ = 'alias'
named_with_column = True
def __init__(self, selectable, name=None):
baseselectable = selectable
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
self.supports_execution = baseselectable.supports_execution
if self.supports_execution:
self._execution_options = baseselectable._execution_options
self.element = selectable
if name is None:
if self.original.named_with_column:
name = getattr(self.original, 'name', None)
name = _anonymous_label('%%(%d %s)s' % (id(self), name
or 'anon'))
self.name = name
@property
def description(self):
# Py3K
#return self.name
# Py2K
return self.name.encode('ascii', 'backslashreplace')
# end Py2K
def as_scalar(self):
try:
return self.element.as_scalar()
except AttributeError:
raise AttributeError("Element %s does not support "
"'as_scalar()'" % self.element)
def is_derived_from(self, fromclause):
if fromclause in self._cloned_set:
return True
return self.element.is_derived_from(fromclause)
def _populate_column_collection(self):
for col in self.element.columns:
col._make_proxy(self)
def _copy_internals(self, clone=_clone, **kw):
# don't apply anything to an aliased Table
# for now. May want to drive this from
# the given **kw.
if isinstance(self.element, TableClause):
return
self._reset_exported()
self.element = clone(self.element, **kw)
baseselectable = self.element
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
def get_children(self, column_collections=True, **kw):
if column_collections:
for c in self.c:
yield c
yield self.element
@property
def _from_objects(self):
return [self]
@property
def bind(self):
return self.element.bind
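# Usage sketch for Alias (hedged; assumes a ``users`` Table defined elsewhere,
# not part of this module). Aliasing gives the selectable a distinct name in
# the rendered FROM list, e.g. "users AS u1":
#
#     users_alias = users.alias("u1")
#     stmt = select([users_alias.c.id]).where(users_alias.c.id > 5)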
class CTE(Alias):
"""Represent a Common Table Expression.
The :class:`.CTE` object is obtained using the
:meth:`._SelectBase.cte` method from any selectable.
See that method for complete examples.
.. versionadded:: 0.7.6
"""
__visit_name__ = 'cte'
def __init__(self, selectable,
name=None,
recursive=False,
cte_alias=False,
_restates=frozenset()):
self.recursive = recursive
self.cte_alias = cte_alias
self._restates = _restates
super(CTE, self).__init__(selectable, name=name)
def alias(self, name=None):
return CTE(
self.original,
name=name,
recursive=self.recursive,
cte_alias = self.name
)
def union(self, other):
return CTE(
self.original.union(other),
name=self.name,
recursive=self.recursive,
_restates=self._restates.union([self])
)
def union_all(self, other):
return CTE(
self.original.union_all(other),
name=self.name,
recursive=self.recursive,
_restates=self._restates.union([self])
)
class _Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
self.type = getattr(element, 'type', None)
@property
def _label(self):
return getattr(self.element, '_label', None) or self.anon_label
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element':self.element, 'type':self.type}
def __setstate__(self, state):
self.element = state['element']
self.type = state['type']
class _FromGrouping(FromClause):
"""Represent a grouping of a FROM clause"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
def _init_collections(self):
pass
@property
def columns(self):
return self.element.columns
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
# this could be
# self.element.foreign_keys
# see SelectableTest.test_join_condition
return set()
@property
def _hide_froms(self):
return self.element._hide_froms
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element':self.element}
def __setstate__(self, state):
self.element = state['element']
class _Over(ColumnElement):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. It's supported only by certain database
backends.
"""
__visit_name__ = 'over'
order_by = None
partition_by = None
def __init__(self, func, partition_by=None, order_by=None):
self.func = func
if order_by is not None:
self.order_by = ClauseList(*util.to_list(order_by))
if partition_by is not None:
self.partition_by = ClauseList(*util.to_list(partition_by))
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
))
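# Usage sketch for the OVER clause (hedged; window-function support varies by
# backend, and the ``employees`` table used here is an assumption):
#
#     from sqlalchemy import func, select
#     stmt = select([
#         employees.c.name,
#         func.row_number().over(
#             partition_by=employees.c.dept,
#             order_by=employees.c.salary).label('rank')
#     ])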
class _Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
This object is constructed from the :func:`label()` module level
function as well as the :func:`label()` method available on all
:class:`.ColumnElement` subclasses.
"""
__visit_name__ = 'label'
def __init__(self, name, element, type_=None):
while isinstance(element, _Label):
element = element.element
if name:
self.name = name
else:
self.name = _anonymous_label('%%(%d %s)s' % (id(self),
getattr(element, 'name', 'anon')))
self.key = self._label = self._key_label = self.name
self._element = element
self._type = type_
self.quote = element.quote
self.proxies = [element]
@util.memoized_property
def type(self):
return sqltypes.to_instance(
self._type or getattr(self._element, 'type', None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
sub_element = self._element.self_group(against=against)
if sub_element is not self._element:
return _Label(self.name,
sub_element,
type_=self._type)
else:
return self
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name = None):
e = self.element._make_proxy(selectable, name=name or self.name)
e.proxies.append(self)
return e
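# Usage sketch for labels (hedged; ``users`` is an assumed Table). label()
# renders the expression with an AS clause and exposes it under that key in
# result rows:
#
#     stmt = select([(users.c.first + ' ' + users.c.last).label('full_name')])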
class ColumnClause(_Immutable, ColumnElement):
"""Represents a generic column expression from any textual string.
This includes columns associated with tables, aliases and select
statements, but also any arbitrary text. May or may not be bound
to an underlying :class:`.Selectable`.
:class:`.ColumnClause` is constructed by itself typically via
the :func:`~.expression.column` function. It may be placed directly
into constructs such as :func:`.select` constructs::
from sqlalchemy.sql import column, select
c1, c2 = column("c1"), column("c2")
s = select([c1, c2]).where(c1==5)
There is also a variant on :func:`~.expression.column` known
as :func:`~.expression.literal_column` - the difference is that
in the latter case, the string value is assumed to be an exact
expression, rather than a column name, so that no quoting rules
or similar are applied::
from sqlalchemy.sql import literal_column, select
s = select([literal_column("5 + 7")])
:class:`.ColumnClause` can also be used in a table-like
fashion by combining the :func:`~.expression.column` function
with the :func:`~.expression.table` function, to produce
a "lightweight" form of table metadata::
from sqlalchemy.sql import table, column
user = table("user",
column("id"),
column("name"),
column("description"),
)
The above construct can be created in an ad-hoc fashion and is
not associated with any :class:`.schema.MetaData`, unlike its
more fully fledged :class:`.schema.Table` counterpart.
:param text: the text of the element.
:param selectable: parent selectable.
:param type: :class:`.types.TypeEngine` object which can associate
this :class:`.ColumnClause` with a type.
:param is_literal: if True, the :class:`.ColumnClause` is assumed to
be an exact expression that will be delivered to the output with no
quoting rules applied regardless of case sensitive settings. the
:func:`literal_column()` function is usually used to create such a
:class:`.ColumnClause`.
"""
__visit_name__ = 'column'
onupdate = default = server_default = server_onupdate = None
_memoized_property = util.group_expirable_memoized_property()
def __init__(self, text, selectable=None, type_=None, is_literal=False):
self.key = self.name = text
self.table = selectable
self.type = sqltypes.to_instance(type_)
self.is_literal = is_literal
def _compare_name_for_result(self, other):
if self.is_literal or \
self.table is None or \
not hasattr(other, 'proxy_set') or (
isinstance(other, ColumnClause) and other.is_literal
):
return super(ColumnClause, self).\
_compare_name_for_result(other)
else:
return other.proxy_set.intersection(self.proxy_set)
def _get_table(self):
return self.__dict__['table']
def _set_table(self, table):
self._memoized_property.expire_instance(self)
self.__dict__['table'] = table
table = property(_get_table, _set_table)
@_memoized_property
def _from_objects(self):
t = self.table
if t is not None:
return [t]
else:
return []
@util.memoized_property
def description(self):
# Py3K
#return self.name
# Py2K
return self.name.encode('ascii', 'backslashreplace')
# end Py2K
@_memoized_property
def _key_label(self):
if self.key != self.name:
return self._gen_label(self.key)
else:
return self._label
@_memoized_property
def _label(self):
return self._gen_label(self.name)
def _gen_label(self, name):
t = self.table
if self.is_literal:
return None
elif t is not None and t.named_with_column:
if getattr(t, 'schema', None):
label = t.schema.replace('.', '_') + "_" + \
t.name + "_" + name
else:
label = t.name + "_" + name
# ensure the label name doesn't conflict with that
# of an existing column
if label in t.c:
_label = label
counter = 1
while _label in t.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return _as_truncated(label)
else:
return name
def label(self, name):
# currently, anonymous labels don't occur for
# ColumnClause. The use at the moment
# is that they do not generate nicely for
# is_literal clauses. We would like to change
# this so that label(None) acts as would be expected.
# See [ticket:2168].
if name is None:
return self
else:
return super(ColumnClause, self).label(name)
def _bind_param(self, operator, obj):
return _BindParamClause(self.name, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True)
def _make_proxy(self, selectable, name=None, attach=True):
# propagate the "is_literal" flag only if we are keeping our name,
# otherwise it's considered to be a label
is_literal = self.is_literal and (name is None or name == self.name)
c = self._constructor(
_as_truncated(name or self.name),
selectable=selectable,
type_=self.type,
is_literal=is_literal
)
c.proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = \
selectable._is_clone_of.columns.get(c.name)
if attach:
selectable._columns[c.name] = c
return c
class TableClause(_Immutable, FromClause):
"""Represents a minimal "table" construct.
The constructor for :class:`.TableClause` is the
:func:`~.expression.table` function. This produces
a lightweight table object that has only a name and a
collection of columns, which are typically produced
by the :func:`~.expression.column` function::
from sqlalchemy.sql import table, column
user = table("user",
column("id"),
column("name"),
column("description"),
)
The :class:`.TableClause` construct serves as the base for
the more commonly used :class:`~.schema.Table` object, providing
the usual set of :class:`~.expression.FromClause` services including
the ``.c.`` collection and statement generation methods.
It does **not** provide all the additional schema-level services
of :class:`~.schema.Table`, including constraints, references to other
tables, or support for :class:`.MetaData`-level services. It's useful
on its own as an ad-hoc construct used to generate quick SQL
statements when a more fully fledged :class:`~.schema.Table` is not on hand.
"""
__visit_name__ = 'table'
named_with_column = True
def __init__(self, name, *columns):
super(TableClause, self).__init__()
self.name = self.fullname = name
self._columns = ColumnCollection()
self.primary_key = ColumnSet()
self.foreign_keys = set()
for c in columns:
self.append_column(c)
def _init_collections(self):
pass
@util.memoized_property
def description(self):
# Py3K
#return self.name
# Py2K
return self.name.encode('ascii', 'backslashreplace')
# end Py2K
def append_column(self, c):
self._columns[c.name] = c
c.table = self
def get_children(self, column_collections=True, **kwargs):
if column_collections:
return [c for c in self.c]
else:
return []
def count(self, whereclause=None, **params):
"""return a SELECT COUNT generated against this
:class:`.TableClause`."""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return select(
[func.count(col).label('tbl_row_count')],
whereclause,
from_obj=[self],
**params)
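# Usage sketch (hedged; assumes a bound engine/connection ``conn`` and a
# ``user`` table built with table()/column() as in the class docstring):
#
#     row_count = conn.execute(user.count()).scalar()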
def insert(self, values=None, inline=False, **kwargs):
"""Generate an :func:`.insert` construct against this
:class:`.TableClause`.
E.g.::
table.insert().values(name='foo')
See :func:`.insert` for argument and usage information.
"""
return insert(self, values=values, inline=inline, **kwargs)
def update(self, whereclause=None, values=None, inline=False, **kwargs):
"""Generate an :func:`.update` construct against this
:class:`.TableClause`.
E.g.::
table.update().where(table.c.id==7).values(name='foo')
See :func:`.update` for argument and usage information.
"""
return update(self, whereclause=whereclause,
values=values, inline=inline, **kwargs)
def delete(self, whereclause=None, **kwargs):
"""Generate a :func:`.delete` construct against this
:class:`.TableClause`.
E.g.::
table.delete().where(table.c.id==7)
See :func:`.delete` for argument and usage information.
"""
return delete(self, whereclause, **kwargs)
@property
def _from_objects(self):
return [self]
class _SelectBase(Executable, FromClause):
"""Base class for :class:`.Select` and ``CompoundSelects``."""
_order_by_clause = ClauseList()
_group_by_clause = ClauseList()
_limit = None
_offset = None
def __init__(self,
use_labels=False,
for_update=False,
limit=None,
offset=None,
order_by=None,
group_by=None,
bind=None,
autocommit=None):
self.use_labels = use_labels
self.for_update = for_update
if autocommit is not None:
util.warn_deprecated('autocommit on select() is '
'deprecated. Use .execution_options(a'
'utocommit=True)')
self._execution_options = \
self._execution_options.union({'autocommit'
: autocommit})
if limit is not None:
self._limit = util.asint(limit)
if offset is not None:
self._offset = util.asint(offset)
self._bind = bind
if order_by is not None:
self._order_by_clause = ClauseList(*util.to_list(order_by))
if group_by is not None:
self._group_by_clause = ClauseList(*util.to_list(group_by))
def as_scalar(self):
"""return a 'scalar' representation of this selectable, which can be
used as a column expression.
Typically, a select statement which has only one column in its columns
clause is eligible to be used as a scalar expression.
The returned object is an instance of
:class:`_ScalarSelect`.
"""
return _ScalarSelect(self)
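# Usage sketch for as_scalar() (hedged; ``users`` and ``addresses`` are
# assumed tables). The scalar select can then be used as a column expression
# inside an enclosing statement:
#
#     subq = select([func.count(addresses.c.id)]).\
#         where(addresses.c.user_id == users.c.id).\
#         as_scalar()
#     stmt = select([users.c.name, subq.label('address_count')])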
@_generative
def apply_labels(self):
"""return a new selectable with the 'use_labels' flag set to True.
This will result in column expressions being generated using labels
against their table name, such as "SELECT somecolumn AS
tablename_somecolumn". This allows selectables which contain multiple
FROM clauses to produce a unique set of column names regardless of
name conflicts among the individual FROM clauses.
"""
self.use_labels = True
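# Usage sketch for apply_labels() (hedged; ``users``/``addresses`` assumed).
# Columns render as "SELECT users.id AS users_id, ..." so same-named columns
# from different tables no longer collide:
#
#     stmt = select([users, addresses]).\
#         where(users.c.id == addresses.c.user_id).apply_labels()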
def label(self, name):
"""return a 'scalar' representation of this selectable, embedded as a
subquery with a label.
See also :meth:`~._SelectBase.as_scalar`.
"""
return self.as_scalar().label(name)
def cte(self, name=None, recursive=False):
"""Return a new :class:`.CTE`, or Common Table Expression instance.
Common table expressions are a SQL standard whereby SELECT
statements can draw upon secondary statements specified along
with the primary statement, using a clause called "WITH".
Special semantics regarding UNION can also be employed to
allow "recursive" queries, where a SELECT statement can draw
upon the set of rows that have previously been selected.
SQLAlchemy detects :class:`.CTE` objects, which are treated
similarly to :class:`.Alias` objects, as special elements
to be delivered to the FROM clause of the statement as well
as to a WITH clause at the top of the statement.
.. versionadded:: 0.7.6
:param name: name given to the common table expression. Like
:meth:`.FromClause.alias`, the name can be left as ``None``
in which case an anonymous symbol will be used at query
compile time.
:param recursive: if ``True``, will render ``WITH RECURSIVE``.
A recursive common table expression is intended to be used in
conjunction with UNION ALL in order to derive rows
from those already selected.
The following two examples are taken from
PostgreSQL's documentation at
http://www.postgresql.org/docs/8.4/static/queries-with.html.
Example 1, non recursive::
from sqlalchemy import Table, Column, String, Integer, MetaData, \\
select, func
metadata = MetaData()
orders = Table('orders', metadata,
Column('region', String),
Column('amount', Integer),
Column('product', String),
Column('quantity', Integer)
)
regional_sales = select([
orders.c.region,
func.sum(orders.c.amount).label('total_sales')
]).group_by(orders.c.region).cte("regional_sales")
top_regions = select([regional_sales.c.region]).\\
where(
regional_sales.c.total_sales >
select([
func.sum(regional_sales.c.total_sales)/10
])
).cte("top_regions")
statement = select([
orders.c.region,
orders.c.product,
func.sum(orders.c.quantity).label("product_units"),
func.sum(orders.c.amount).label("product_sales")
]).where(orders.c.region.in_(
select([top_regions.c.region])
)).group_by(orders.c.region, orders.c.product)
result = conn.execute(statement).fetchall()
Example 2, WITH RECURSIVE::
from sqlalchemy import Table, Column, String, Integer, MetaData, \\
select, func
metadata = MetaData()
parts = Table('parts', metadata,
Column('part', String),
Column('sub_part', String),
Column('quantity', Integer),
)
included_parts = select([
parts.c.sub_part,
parts.c.part,
parts.c.quantity]).\\
where(parts.c.part=='our part').\\
cte(recursive=True)
incl_alias = included_parts.alias()
parts_alias = parts.alias()
included_parts = included_parts.union_all(
select([
parts_alias.c.part,
parts_alias.c.sub_part,
parts_alias.c.quantity
]).
where(parts_alias.c.part==incl_alias.c.sub_part)
)
statement = select([
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).label('total_quantity')
]).\\
select_from(included_parts.join(parts,
included_parts.c.part==parts.c.part)).\\
group_by(included_parts.c.sub_part)
result = conn.execute(statement).fetchall()
See also:
:meth:`.orm.query.Query.cte` - ORM version of :meth:`._SelectBase.cte`.
"""
return CTE(self, name=name, recursive=recursive)
@_generative
@util.deprecated('0.6',
message=":func:`.autocommit` is deprecated. Use "
":func:`.Executable.execution_options` with the "
"'autocommit' flag.")
def autocommit(self):
"""return a new selectable with the 'autocommit' flag set to
True."""
self._execution_options = \
self._execution_options.union({'autocommit': True})
def _generate(self):
"""Override the default _generate() method to also clear out
exported collections."""
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
s._reset_exported()
return s
@_generative
def limit(self, limit):
"""return a new selectable with the given LIMIT criterion
applied."""
self._limit = util.asint(limit)
@_generative
def offset(self, offset):
"""return a new selectable with the given OFFSET criterion
applied."""
self._offset = util.asint(offset)
@_generative
def order_by(self, *clauses):
"""return a new selectable with the given list of ORDER BY
criterion applied.
The criterion will be appended to any pre-existing ORDER BY
criterion.
"""
self.append_order_by(*clauses)
@_generative
def group_by(self, *clauses):
"""return a new selectable with the given list of GROUP BY
criterion applied.
The criterion will be appended to any pre-existing GROUP BY
criterion.
"""
self.append_group_by(*clauses)
def append_order_by(self, *clauses):
"""Append the given ORDER BY criterion applied to this selectable.
The criterion will be appended to any pre-existing ORDER BY criterion.
"""
if len(clauses) == 1 and clauses[0] is None:
self._order_by_clause = ClauseList()
else:
if getattr(self, '_order_by_clause', None) is not None:
clauses = list(self._order_by_clause) + list(clauses)
self._order_by_clause = ClauseList(*clauses)
def append_group_by(self, *clauses):
"""Append the given GROUP BY criterion applied to this selectable.
The criterion will be appended to any pre-existing GROUP BY criterion.
"""
if len(clauses) == 1 and clauses[0] is None:
self._group_by_clause = ClauseList()
else:
if getattr(self, '_group_by_clause', None) is not None:
clauses = list(self._group_by_clause) + list(clauses)
self._group_by_clause = ClauseList(*clauses)
@property
def _from_objects(self):
return [self]
class _ScalarSelect(_Grouping):
_from_objects = []
def __init__(self, element):
self.element = element
self.type = element._scalar_type()
@property
def columns(self):
raise exc.InvalidRequestError('Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.')
c = columns
def self_group(self, **kwargs):
return self
def _make_proxy(self, selectable, name):
return list(self.inner_columns)[0]._make_proxy(selectable, name)
class CompoundSelect(_SelectBase):
"""Forms the basis of ``UNION``, ``UNION ALL``, and other
SELECT-based set operations."""
__visit_name__ = 'compound_select'
UNION = util.symbol('UNION')
UNION_ALL = util.symbol('UNION ALL')
EXCEPT = util.symbol('EXCEPT')
EXCEPT_ALL = util.symbol('EXCEPT ALL')
INTERSECT = util.symbol('INTERSECT')
INTERSECT_ALL = util.symbol('INTERSECT ALL')
def __init__(self, keyword, *selects, **kwargs):
self._should_correlate = kwargs.pop('correlate', False)
self.keyword = keyword
self.selects = []
numcols = None
# some DBs do not like ORDER BY in the inner queries of a UNION, etc.
for n, s in enumerate(selects):
s = _clause_element_as_expr(s)
if not numcols:
numcols = len(s.c)
elif len(s.c) != numcols:
raise exc.ArgumentError('All selectables passed to '
'CompoundSelect must have identical numbers of '
'columns; select #%d has %d columns, select '
'#%d has %d' % (1, len(self.selects[0].c), n
+ 1, len(s.c)))
self.selects.append(s.self_group(self))
_SelectBase.__init__(self, **kwargs)
def _scalar_type(self):
return self.selects[0]._scalar_type()
def self_group(self, against=None):
return _FromGrouping(self)
def is_derived_from(self, fromclause):
for s in self.selects:
if s.is_derived_from(fromclause):
return True
return False
def _populate_column_collection(self):
for cols in zip(*[s.c for s in self.selects]):
# this is a slightly hacky thing - the union exports a
# column that resembles just that of the *first* selectable.
# to get at a "composite" column, particularly foreign keys,
# you have to dig through the proxies collection which we
# generate below. We may want to improve upon this, such as
# perhaps _make_proxy can accept a list of other columns
# that are "shared" - schema.column can then copy all the
# ForeignKeys in. this would allow the union() to have all
# those fks too.
proxy = cols[0]._make_proxy(self, name=self.use_labels
and cols[0]._label or None)
# hand-construct the "proxies" collection to include all
# derived columns place a 'weight' annotation corresponding
# to how low in the list of select()s the column occurs, so
# that the corresponding_column() operation can resolve
# conflicts
proxy.proxies = [c._annotate({'weight': i + 1}) for (i,
c) in enumerate(cols)]
def _copy_internals(self, clone=_clone, **kw):
self._reset_exported()
self.selects = [clone(s, **kw) for s in self.selects]
if hasattr(self, '_col_map'):
del self._col_map
for attr in ('_order_by_clause', '_group_by_clause'):
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr), **kw))
def get_children(self, column_collections=True, **kwargs):
return (column_collections and list(self.c) or []) \
+ [self._order_by_clause, self._group_by_clause] \
+ list(self.selects)
def bind(self):
if self._bind:
return self._bind
for s in self.selects:
e = s.bind
if e:
return e
else:
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class Select(_SelectBase):
"""Represents a ``SELECT`` statement.
See also:
:func:`~.expression.select` - the function which creates a :class:`.Select` object.
:ref:`coretutorial_selecting` - Core Tutorial description of :func:`.select`.
"""
__visit_name__ = 'select'
_prefixes = ()
_hints = util.immutabledict()
_distinct = False
_from_cloned = None
_memoized_property = _SelectBase._memoized_property
def __init__(self,
columns,
whereclause=None,
from_obj=None,
distinct=False,
having=None,
correlate=True,
prefixes=None,
**kwargs):
"""Construct a Select object.
The public constructor for Select is the
:func:`select` function; see that function for
argument descriptions.
Additional generative and mutator methods are available on the
:class:`_SelectBase` superclass.
"""
self._should_correlate = correlate
if distinct is not False:
if isinstance(distinct, basestring):
util.warn_deprecated(
"A string argument passed to the 'distinct' "
"keyword argument of 'select()' is deprecated "
"- please use 'prefixes' or 'prefix_with()' "
"to specify additional prefixes")
if prefixes:
prefixes = util.to_list(prefixes) + [distinct]
else:
prefixes = [distinct]
elif distinct is True:
self._distinct = True
else:
self._distinct = [
_literal_as_text(e)
for e in util.to_list(distinct)
]
self._correlate = set()
if from_obj is not None:
self._from_obj = util.OrderedSet(
_literal_as_text(f)
for f in util.to_list(from_obj))
else:
self._from_obj = util.OrderedSet()
try:
cols_present = bool(columns)
except TypeError:
raise exc.ArgumentError("columns argument to select() must "
"be a Python list or other iterable")
if cols_present:
self._raw_columns = []
for c in columns:
c = _literal_as_column(c)
if isinstance(c, _ScalarSelect):
c = c.self_group(against=operators.comma_op)
self._raw_columns.append(c)
else:
self._raw_columns = []
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
if having is not None:
self._having = _literal_as_text(having)
else:
self._having = None
if prefixes:
self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
_SelectBase.__init__(self, **kwargs)
@property
def _froms(self):
# would love to cache this,
# but there's just enough edge cases, particularly now that
# declarative encourages construction of SQL expressions
# without tables present, to just regen this each time.
froms = []
seen = set()
translate = self._from_cloned
def add(items):
for item in items:
if translate and item in translate:
item = translate[item]
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
add(_from_objects(*self._raw_columns))
if self._whereclause is not None:
add(_from_objects(self._whereclause))
add(self._from_obj)
return froms
def _get_display_froms(self, existing_froms=None):
"""Return the full list of 'from' clauses to be displayed.
Takes into account a set of existing froms which may be
rendered in the FROM clause of enclosing selects; this Select
may want to leave those absent if it is automatically
correlating.
"""
froms = self._froms
toremove = set(itertools.chain(*[f._hide_froms for f in froms]))
if toremove:
# if we're maintaining clones of froms,
# add the copies out to the toremove list. only include
# clones that are lexical equivalents.
if self._from_cloned:
toremove.update(
self._from_cloned[f] for f in
toremove.intersection(self._from_cloned)
if self._from_cloned[f]._is_lexical_equivalent(f)
)
# filter out to FROM clauses not in the list,
# using a list to maintain ordering
froms = [f for f in froms if f not in toremove]
if len(froms) > 1 or self._correlate:
if self._correlate:
froms = [f for f in froms if f not in _cloned_intersection(froms,
self._correlate)]
if self._should_correlate and existing_froms:
froms = [f for f in froms if f not in _cloned_intersection(froms,
existing_froms)]
if not len(froms):
raise exc.InvalidRequestError("Select statement '%s"
"' returned no FROM clauses due to "
"auto-correlation; specify "
"correlate(<tables>) to control "
"correlation manually." % self)
return froms
def _scalar_type(self):
elem = self._raw_columns[0]
cols = list(elem._select_iterable)
return cols[0].type
@property
def froms(self):
"""Return the displayed list of FromClause elements."""
return self._get_display_froms()
@_generative
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing hint for the given selectable to this
:class:`.Select`.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the given :class:`.Table` or :class:`.Alias` passed as the
``selectable`` argument. The dialect implementation
typically uses Python string substitution syntax
with the token ``%(name)s`` to render the name of
the table or alias. E.g. when using Oracle, the
following::
select([mytable]).\\
with_hint(mytable, "+ index(%(name)s ix_mytable)")
Would render SQL as::
select /*+ index(mytable ix_mytable) */ ... from mytable
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add hints for both Oracle
and Sybase simultaneously::
select([mytable]).\\
with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\\
with_hint(mytable, "WITH INDEX ix_mytable", 'sybase')
"""
self._hints = self._hints.union({(selectable, dialect_name):text})
@property
def type(self):
raise exc.InvalidRequestError("Select objects don't have a type. "
"Call as_scalar() on this Select object "
"to return a 'scalar' version of this Select.")
@_memoized_property.method
def locate_all_froms(self):
"""return a Set of all FromClause elements referenced by this Select.
This set is a superset of that returned by the ``froms`` property,
which is specifically for those FromClause elements that would
actually be rendered.
"""
froms = self._froms
return froms + list(_from_objects(*froms))
@property
def inner_columns(self):
"""an iterator of all ColumnElement expressions which would
be rendered into the columns clause of the resulting SELECT statement.
"""
return _select_iterables(self._raw_columns)
def is_derived_from(self, fromclause):
if self in fromclause._cloned_set:
return True
for f in self.locate_all_froms():
if f.is_derived_from(fromclause):
return True
return False
def _copy_internals(self, clone=_clone, **kw):
# Select() object has been cloned and probably adapted by the
# given clone function. Apply the cloning function to internal
# objects
# 1. keep a dictionary of the froms we've cloned, and what
# they've become. This is consulted later when we derive
# additional froms from "whereclause" and the columns clause,
# which may still reference the uncloned parent table.
# as of 0.7.4 we also put the current version of _froms, which
# gets cleared on each generation. previously we were "baking"
# _froms into self._from_obj.
self._from_cloned = from_cloned = dict((f, clone(f, **kw))
for f in self._from_obj.union(self._froms))
# 3. update persistent _from_obj with the cloned versions.
self._from_obj = util.OrderedSet(from_cloned[f] for f in
self._from_obj)
# the _correlate collection is done separately, what can happen
# here is the same item is _correlate as in _from_obj but the
# _correlate version has an annotation on it - (specifically
# RelationshipProperty.Comparator._criterion_exists() does
# this). Also keep _correlate liberally open with its previous
# contents, as this set is used for matching, not rendering.
self._correlate = set(clone(f) for f in
self._correlate).union(self._correlate)
# 4. clone other things. The difficulty here is that Column
# objects are not actually cloned, and refer to their original
# .table, resulting in the wrong "from" parent after a clone
# operation. Hence _from_cloned and _from_obj supersede what is
# present here.
self._raw_columns = [clone(c, **kw) for c in self._raw_columns]
for attr in '_whereclause', '_having', '_order_by_clause', \
'_group_by_clause':
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr), **kw))
# erase exported column list, _froms collection,
# etc.
self._reset_exported()
def get_children(self, column_collections=True, **kwargs):
"""return child elements as per the ClauseElement specification."""
return (column_collections and list(self.columns) or []) + \
self._raw_columns + list(self._froms) + \
[x for x in
(self._whereclause, self._having,
self._order_by_clause, self._group_by_clause)
if x is not None]
@_generative
def column(self, column):
"""return a new select() construct with the given column expression
added to its columns clause.
"""
self.append_column(column)
@_generative
def with_only_columns(self, columns):
"""Return a new :func:`.select` construct with its columns
clause replaced with the given columns.
.. versionchanged:: 0.7.3
Due to a bug fix, this method has a slight
behavioral change as of version 0.7.3.
Prior to version 0.7.3, the FROM clause of
a :func:`.select` was calculated upfront and as new columns
were added; in 0.7.3 and later it's calculated
at compile time, fixing an issue regarding late binding
of columns to parent tables. This changes the behavior of
:meth:`.Select.with_only_columns` in that FROM clauses no
longer represented in the new list are dropped,
but this behavior is more consistent in
that the FROM clauses are consistently derived from the
current columns clause. The original intent of this method
is to allow trimming of the existing columns list to be fewer
columns than originally present; the use case of replacing
the columns list with an entirely different one hadn't
been anticipated until 0.7.3 was released; the usage
guidelines below illustrate how this should be done.
This method is exactly equivalent to as if the original
:func:`.select` had been called with the given columns
clause. I.e. a statement::
s = select([table1.c.a, table1.c.b])
s = s.with_only_columns([table1.c.b])
should be exactly equivalent to::
s = select([table1.c.b])
This means that FROM clauses which are only derived
from the column list will be discarded if the new column
list no longer contains that FROM::
>>> table1 = table('t1', column('a'), column('b'))
>>> table2 = table('t2', column('a'), column('b'))
>>> s1 = select([table1.c.a, table2.c.b])
>>> print s1
SELECT t1.a, t2.b FROM t1, t2
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print s2
SELECT t2.b FROM t1
The preferred way to maintain a specific FROM clause
in the construct, assuming it won't be represented anywhere
else (i.e. not in the WHERE clause, etc.) is to set it using
:meth:`.Select.select_from`::
>>> s1 = select([table1.c.a, table2.c.b]).\\
... select_from(table1.join(table2, table1.c.a==table2.c.a))
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print s2
SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a
Care should also be taken to use the correct
set of column objects passed to :meth:`.Select.with_only_columns`.
Since the method is essentially equivalent to calling the
:func:`.select` construct in the first place with the given
columns, the columns passed to :meth:`.Select.with_only_columns`
should usually be a subset of those which were passed
to the :func:`.select` construct, not those which are available
from the ``.c`` collection of that :func:`.select`. That
is::
s = select([table1.c.a, table1.c.b]).select_from(table1)
s = s.with_only_columns([table1.c.b])
and **not**::
# usually incorrect
s = s.with_only_columns([s.c.b])
The latter would produce the SQL::
SELECT b
FROM (SELECT t1.a AS a, t1.b AS b
FROM t1), t1
Since the :func:`.select` construct is essentially being
asked to select both from ``table1`` as well as itself.
"""
self._reset_exported()
rc = []
for c in columns:
c = _literal_as_column(c)
if isinstance(c, _ScalarSelect):
c = c.self_group(against=operators.comma_op)
rc.append(c)
self._raw_columns = rc
@_generative
def where(self, whereclause):
"""return a new select() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
self.append_whereclause(whereclause)
@_generative
def having(self, having):
"""return a new select() construct with the given expression added to
its HAVING clause, joined to the existing clause via AND, if any.
"""
self.append_having(having)
@_generative
def distinct(self, *expr):
"""Return a new select() construct which will apply DISTINCT to its
columns clause.
:param \*expr: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
"""
if expr:
expr = [_literal_as_text(e) for e in expr]
if isinstance(self._distinct, list):
self._distinct = self._distinct + expr
else:
self._distinct = expr
else:
self._distinct = True
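# Usage sketch for distinct() (hedged; ``users`` assumed). Plain DISTINCT is
# portable; the column-expression form renders DISTINCT ON (...) and is
# specific to the PostgreSQL dialect:
#
#     stmt = select([users.c.name]).distinct()
#     pg_stmt = select([users]).distinct(users.c.name)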
@_generative
def prefix_with(self, *expr):
"""return a new select() construct which will apply the given
expressions, typically strings, to the start of its columns clause,
without any separating commas. This is particularly useful for MySQL
keywords.
e.g.::
select(['a', 'b']).prefix_with('HIGH_PRIORITY',
'SQL_SMALL_RESULT',
'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL a, b
"""
expr = tuple(_literal_as_text(e) for e in expr)
self._prefixes = self._prefixes + expr
@_generative
def select_from(self, fromclause):
"""return a new :func:`.select` construct with the given FROM expression
merged into its list of FROM objects.
E.g.::
table1 = table('t1', column('a'))
table2 = table('t2', column('b'))
s = select([table1.c.a]).\\
select_from(
table1.join(table2, table1.c.a==table2.c.b)
)
The "from" list is a unique set on the identity of each element,
so adding an already present :class:`.Table` or other selectable
will have no effect. Passing a :class:`.Join` that refers
to an already present :class:`.Table` or other selectable will have
the effect of concealing the presence of that selectable as
an individual element in the rendered FROM list, instead rendering it into a
JOIN clause.
While the typical purpose of :meth:`.Select.select_from` is to replace
the default, derived FROM clause with a join, it can also be called with
individual table elements, multiple times if desired, in the case that the
FROM clause cannot be fully derived from the columns clause::
select([func.count('*')]).select_from(table1)
"""
self.append_from(fromclause)
@_generative
def correlate(self, *fromclauses):
"""return a new select() construct which will correlate the given FROM
clauses to that of an enclosing select(), if a match is found.
By "match", the given fromclause must be present in this select's
list of FROM objects and also present in an enclosing select's list of
FROM objects.
Calling this method turns off the select's default behavior of
"auto-correlation". Normally, select() auto-correlates all of its FROM
clauses to those of an embedded select when compiled.
If the fromclause is None, correlation is disabled for the returned
select().
"""
self._should_correlate = False
if fromclauses and fromclauses[0] is None:
self._correlate = set()
else:
self._correlate = self._correlate.union(fromclauses)
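# Usage sketch for correlate() (hedged; ``users``/``addresses`` assumed).
# Explicit correlation keeps ``users`` out of the subquery's FROM list so
# that it refers to the ``users`` of the enclosing statement:
#
#     subq = select([func.count(addresses.c.id)]).\
#         where(addresses.c.user_id == users.c.id).\
#         correlate(users)
#     stmt = select([users.c.name, subq.as_scalar()])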
def append_correlation(self, fromclause):
"""append the given correlation expression to this select()
construct."""
self._should_correlate = False
self._correlate = self._correlate.union([fromclause])
def append_column(self, column):
"""append the given column expression to the columns clause of this
select() construct.
"""
self._reset_exported()
column = _literal_as_column(column)
if isinstance(column, _ScalarSelect):
column = column.self_group(against=operators.comma_op)
self._raw_columns = self._raw_columns + [column]
def append_prefix(self, clause):
"""append the given columns clause prefix expression to this select()
construct.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
def append_whereclause(self, whereclause):
"""append the given expression to this select() construct's WHERE
criterion.
The expression will be joined to existing WHERE criterion via AND.
"""
self._reset_exported()
whereclause = _literal_as_text(whereclause)
if self._whereclause is not None:
self._whereclause = and_(self._whereclause, whereclause)
else:
self._whereclause = whereclause
def append_having(self, having):
"""append the given expression to this select() construct's HAVING
criterion.
The expression will be joined to existing HAVING criterion via AND.
"""
if self._having is not None:
self._having = and_(self._having, _literal_as_text(having))
else:
self._having = _literal_as_text(having)
def append_from(self, fromclause):
"""append the given FromClause expression to this select() construct's
FROM clause.
"""
self._reset_exported()
fromclause = _literal_as_text(fromclause)
self._from_obj = self._from_obj.union([fromclause])
def _populate_column_collection(self):
for c in self.inner_columns:
if hasattr(c, '_make_proxy'):
c._make_proxy(self,
name=self.use_labels
and c._label or None)
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement
specification.
This produces an element that can be embedded in an expression. Note
that this method is called automatically as needed when constructing
expressions and should not require explicit use.
"""
if isinstance(against, CompoundSelect):
return self
return _FromGrouping(self)
def union(self, other, **kwargs):
"""return a SQL UNION of this select() construct against the given
selectable."""
return union(self, other, **kwargs)
def union_all(self, other, **kwargs):
"""return a SQL UNION ALL of this select() construct against the given
selectable.
"""
return union_all(self, other, **kwargs)
def except_(self, other, **kwargs):
"""return a SQL EXCEPT of this select() construct against the given
selectable."""
return except_(self, other, **kwargs)
def except_all(self, other, **kwargs):
"""return a SQL EXCEPT ALL of this select() construct against the
given selectable.
"""
return except_all(self, other, **kwargs)
def intersect(self, other, **kwargs):
"""return a SQL INTERSECT of this select() construct against the given
selectable.
"""
return intersect(self, other, **kwargs)
def intersect_all(self, other, **kwargs):
"""return a SQL INTERSECT ALL of this select() construct against the
given selectable.
"""
return intersect_all(self, other, **kwargs)
def bind(self):
if self._bind:
return self._bind
froms = self._froms
if not froms:
for c in self._raw_columns:
e = c.bind
if e:
self._bind = e
return e
else:
e = list(froms)[0].bind
if e:
self._bind = e
return e
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class UpdateBase(Executable, ClauseElement):
"""Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.
"""
__visit_name__ = 'update_base'
_execution_options = \
Executable._execution_options.union({'autocommit': True})
kwargs = util.immutabledict()
_hints = util.immutabledict()
def _process_colparams(self, parameters):
if isinstance(parameters, (list, tuple)):
pp = {}
for i, c in enumerate(self.table.c):
pp[c.key] = parameters[i]
return pp
else:
return parameters
def params(self, *arg, **kw):
"""Set the parameters for the statement.
This method raises ``NotImplementedError`` on the base class,
and is overridden by :class:`.ValuesBase` to provide the
SET/VALUES clause of UPDATE and INSERT.
"""
raise NotImplementedError(
"params() is not supported for INSERT/UPDATE/DELETE statements."
" To set the values for an INSERT or UPDATE statement, use"
" stmt.values(**parameters).")
def bind(self):
"""Return a 'bind' linked to this :class:`.UpdateBase`
or a :class:`.Table` associated with it.
"""
return self._bind or self.table.bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
_returning_re = re.compile(r'(?:firebird|postgres(?:ql)?)_returning')
def _process_deprecated_kw(self, kwargs):
for k in list(kwargs):
m = self._returning_re.match(k)
if m:
self._returning = kwargs.pop(k)
util.warn_deprecated(
"The %r argument is deprecated. Please "
"use statement.returning(col1, col2, ...)" % k
)
return kwargs
@_generative
def returning(self, *cols):
"""Add a RETURNING or equivalent clause to this statement.
The given list of columns represent columns within the table that is
the target of the INSERT, UPDATE, or DELETE. Each element can be any
column expression. :class:`~sqlalchemy.schema.Table` objects will be
expanded into their individual columns.
Upon compilation, a RETURNING clause, or database equivalent,
will be rendered within the statement. For INSERT and UPDATE,
the values are the newly inserted/updated values. For DELETE,
the values are those of the rows which were deleted.
Upon execution, the values of the columns to be returned
are made available via the result set and can be iterated
using ``fetchone()`` and similar. For DBAPIs which do not
natively support returning values (e.g. cx_oracle),
SQLAlchemy will approximate this behavior at the result level
so that a reasonable amount of behavioral neutrality is
provided.
Note that not all databases/DBAPIs
support RETURNING. For those backends with no support,
an exception is raised upon compilation and/or execution.
For those who do support it, the functionality across backends
varies greatly, including restrictions on executemany()
and other statements which return multiple rows. Please
read the documentation notes for the database in use in
order to determine the availability of RETURNING.
"""
self._returning = cols
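# Usage sketch for returning() (hedged; requires a backend with RETURNING
# support and an assumed ``users`` table with an ``id`` column):
#
#     stmt = users.insert().values(name='some name').returning(users.c.id)
#     new_id = conn.execute(stmt).fetchone()[0]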
@_generative
def with_hint(self, text, selectable=None, dialect_name="*"):
"""Add a table hint for a single table to this
INSERT/UPDATE/DELETE statement.
.. note::
:meth:`.UpdateBase.with_hint` currently applies only to
Microsoft SQL Server. For MySQL INSERT hints, use
:meth:`.Insert.prefix_with`. UPDATE/DELETE hints for
MySQL will be added in a future release.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the :class:`.Table` that is the subject of this
statement, or optionally to that of the given
:class:`.Table` passed as the ``selectable`` argument.
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add a hint
that only takes effect for SQL Server::
mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")
.. versionadded:: 0.7.6
:param text: Text of the hint.
:param selectable: optional :class:`.Table` that specifies
an element of the FROM clause within an UPDATE or DELETE
to be the subject of the hint - applies only to certain backends.
:param dialect_name: defaults to ``*``, if specified as the name
of a particular dialect, will apply these hints only when
that dialect is in use.
"""
if selectable is None:
selectable = self.table
self._hints = self._hints.union({(selectable, dialect_name):text})
class ValuesBase(UpdateBase):
"""Supplies support for :meth:`.ValuesBase.values` to INSERT and UPDATE constructs."""
__visit_name__ = 'values_base'
def __init__(self, table, values):
self.table = table
self.parameters = self._process_colparams(values)
@_generative
def values(self, *args, **kwargs):
"""specify the VALUES clause for an INSERT statement, or the SET
clause for an UPDATE.
:param \**kwargs: key value pairs representing the string key
of a :class:`.Column` mapped to the value to be rendered into the
VALUES or SET clause::
users.insert().values(name="some name")
users.update().where(users.c.id==5).values(name="some name")
:param \*args: A single dictionary can be sent as the first positional
argument. This allows non-string based keys, such as Column
objects, to be used::
users.insert().values({users.c.name : "some name"})
users.update().where(users.c.id==5).values({users.c.name : "some name"})
See also:
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
:func:`~.expression.insert` - produce an ``INSERT`` statement
:func:`~.expression.update` - produce an ``UPDATE`` statement
"""
if args:
v = args[0]
else:
v = {}
if self.parameters is None:
self.parameters = self._process_colparams(v)
self.parameters.update(kwargs)
else:
self.parameters = self.parameters.copy()
self.parameters.update(self._process_colparams(v))
self.parameters.update(kwargs)
class Insert(ValuesBase):
"""Represent an INSERT construct.
The :class:`.Insert` object is created using the :func:`~.expression.insert()` function.
See also:
:ref:`coretutorial_insert_expressions`
"""
__visit_name__ = 'insert'
_prefixes = ()
def __init__(self,
table,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
**kwargs):
ValuesBase.__init__(self, table, values)
self._bind = bind
self.select = None
self.inline = inline
self._returning = returning
if prefixes:
self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
if kwargs:
self.kwargs = self._process_deprecated_kw(kwargs)
def get_children(self, **kwargs):
if self.select is not None:
return self.select,
else:
return ()
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self.parameters = self.parameters.copy()
@_generative
def prefix_with(self, clause):
"""Add a word or expression between INSERT and INTO. Generative.
If multiple prefixes are supplied, they will be separated with
spaces.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
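# Usage sketch for Insert.prefix_with() (hedged; ``users`` assumed; the
# IGNORE keyword shown is MySQL-specific):
#
#     stmt = users.insert().prefix_with("IGNORE").values(name='some name')
#     # renders roughly: INSERT IGNORE INTO users (name) VALUES (:name)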
class Update(ValuesBase):
"""Represent an Update construct.
The :class:`.Update` object is created using the :func:`update()` function.
"""
__visit_name__ = 'update'
def __init__(self,
table,
whereclause,
values=None,
inline=False,
bind=None,
returning=None,
**kwargs):
ValuesBase.__init__(self, table, values)
self._bind = bind
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self.inline = inline
if kwargs:
self.kwargs = self._process_deprecated_kw(kwargs)
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
self.parameters = self.parameters.copy()
@_generative
def where(self, whereclause):
"""return a new update() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
@property
def _extra_froms(self):
# TODO: this could be made memoized
# if the memoization is reset on each generative call.
froms = []
seen = set([self.table])
if self._whereclause is not None:
for item in _from_objects(self._whereclause):
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
return froms
class Delete(UpdateBase):
"""Represent a DELETE construct.
The :class:`.Delete` object is created using the :func:`delete()` function.
"""
__visit_name__ = 'delete'
def __init__(self,
table,
whereclause,
bind=None,
returning =None,
**kwargs):
self._bind = bind
self.table = table
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
if kwargs:
self.kwargs = self._process_deprecated_kw(kwargs)
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
@_generative
def where(self, whereclause):
"""Add the given WHERE clause to a newly returned delete construct."""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = 'identified'
_execution_options = \
Executable._execution_options.union({'autocommit': False})
quote = None
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = 'release_savepoint'
| gpl-2.0 |
cloudera/ibis | ibis/tests/expr/test_literal.py | 2 | 1064 | import ibis
from ibis.expr import datatypes
from ibis.expr.operations import Literal
def test_literal_equality_basic():
a = ibis.literal(1).op()
b = ibis.literal(1).op()
assert a == b
assert hash(a) == hash(b)
def test_literal_equality_int_float():
# Note: This is different from the Python behavior for int/float comparison
a = ibis.literal(1).op()
b = ibis.literal(1.0).op()
assert a != b
def test_literal_equality_int16_int32():
# Note: literals with different integer dtypes (int16 vs int32) do not compare equal
a = Literal(1, datatypes.int16)
b = Literal(1, datatypes.int32)
assert a != b
def test_literal_equality_int_interval():
a = ibis.literal(1).op()
b = ibis.interval(seconds=1).op()
assert a != b
def test_literal_equality_interval():
a = ibis.interval(seconds=1).op()
b = ibis.interval(minutes=1).op()
assert a != b
# Currently these don't compare equal, but perhaps they should?
c = ibis.interval(seconds=60).op()
d = ibis.interval(minutes=1).op()
assert c != d
| apache-2.0 |
pniedzielski/fb-hackathon-2013-11-21 | src/repl.it/jsrepl/extern/python/reloop-closured/lib/python2.7/encodings/iso8859_2.py | 593 | 13660 | """ Python Character Mapping Codec iso8859_2 generated from 'MAPPINGS/ISO8859/8859-2.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-2',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u02d8' # 0xA2 -> BREVE
u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u013d' # 0xA5 -> LATIN CAPITAL LETTER L WITH CARON
u'\u015a' # 0xA6 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u0164' # 0xAB -> LATIN CAPITAL LETTER T WITH CARON
u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u02db' # 0xB2 -> OGONEK
u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\u013e' # 0xB5 -> LATIN SMALL LETTER L WITH CARON
u'\u015b' # 0xB6 -> LATIN SMALL LETTER S WITH ACUTE
u'\u02c7' # 0xB7 -> CARON
u'\xb8' # 0xB8 -> CEDILLA
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\u0165' # 0xBB -> LATIN SMALL LETTER T WITH CARON
u'\u017a' # 0xBC -> LATIN SMALL LETTER Z WITH ACUTE
u'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
u'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
u'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
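### Hedged usage sketch (not part of the generated codec): round-trip a short
### sample string through the table-driven codec defined above.  The sample
### text is an arbitrary assumption chosen for illustration only.
if __name__ == '__main__':
    _sample = u'\u0104\u017c\xf3\u0142w'      # a few Polish letters covered by the table
    _encoded, _ = Codec().encode(_sample)     # bytes produced via encoding_table
    _decoded, _ = Codec().decode(_encoded)    # characters recovered via decoding_table
    assert _decoded == _sample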
| agpl-3.0 |
tmerrick1/spack | var/spack/repos/builtin/packages/xlsclients/package.py | 5 | 1787 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xlsclients(AutotoolsPackage):
"""xlsclients is a utility for listing information about the client
applications running on a X11 server."""
homepage = "http://cgit.freedesktop.org/xorg/app/xlsclients"
url = "https://www.x.org/archive/individual/app/xlsclients-1.1.3.tar.gz"
version('1.1.3', '093c748d98b61dbddcaf3de1740fbd26')
depends_on('[email protected]:', when='@1.1:')
depends_on('libx11', when='@:1.0')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
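    # Illustrative note (the spec string is an assumption, not part of the
    # original recipe): with this file on a Spack repo path, the tool could
    # typically be built with a command along the lines of
    #   spack install [email protected]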
| lgpl-2.1 |
atosorigin/ansible | test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py | 44 | 2221 | # Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: statichost
short_description: Add a single host
description: Add a single host
extends_documentation_fragment:
- inventory_cache
options:
plugin:
description: plugin name (must be statichost)
required: true
hostname:
        description: Hostname of the single host to add to the inventory
required: True
'''
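# Illustrative inventory source this plugin could parse (the file name, host
# name and cache flag below are assumptions for demonstration only):
#
#   # demo.statichost.yml
#   plugin: testns.content_adj.statichost
#   hostname: web01.example.com
#   cache: yes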
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
class InventoryModule(BaseInventoryPlugin, Cacheable):
NAME = 'testns.content_adj.statichost'
def __init__(self):
super(InventoryModule, self).__init__()
self._hosts = set()
def verify_file(self, path):
''' Verify if file is usable by this plugin, base does minimal accessibility check '''
if not path.endswith('.statichost.yml') and not path.endswith('.statichost.yaml'):
return False
return super(InventoryModule, self).verify_file(path)
def parse(self, inventory, loader, path, cache=None):
super(InventoryModule, self).parse(inventory, loader, path)
# Initialize and validate options
self._read_config_data(path)
# Exercise cache
cache_key = self.get_cache_key(path)
attempt_to_read_cache = self.get_option('cache') and cache
cache_needs_update = self.get_option('cache') and not cache
if attempt_to_read_cache:
try:
host_to_add = self._cache[cache_key]
except KeyError:
cache_needs_update = True
if not attempt_to_read_cache or cache_needs_update:
host_to_add = self.get_option('hostname')
# this is where the magic happens
self.inventory.add_host(host_to_add, 'all')
self._cache[cache_key] = host_to_add
# self.inventory.add_group()...
# self.inventory.add_child()...
# self.inventory.set_variable()..
| gpl-3.0 |
gaganmac/learnfast | lib/werkzeug/debug/__init__.py | 310 | 7800 | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import json
import mimetypes
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in this
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
"""
# this class is public
__module__ = 'werkzeug'
def __init__(self, app, evalex=False, request_key='werkzeug.request',
console_path='/console', console_init_func=None,
show_hidden_frames=False, lodgeit_url=None):
if lodgeit_url is not None:
from warnings import warn
warn(DeprecationWarning('Werkzeug now pastes into gists.'))
if not console_init_func:
console_init_func = dict
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, 'close'):
app_iter.close()
except Exception:
if hasattr(app_iter, 'close'):
app_iter.close()
traceback = get_current_traceback(skip=1, show_hidden_frames=
self.show_hidden_frames,
ignore_system_exceptions=True)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response('500 INTERNAL SERVER ERROR', [
('Content-Type', 'text/html; charset=utf-8'),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
('X-XSS-Protection', '0'),
])
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ['wsgi.errors'].write(
'Debugging middleware caught exception in streamed '
'response at a point where response headers were already '
'sent.\n')
else:
yield traceback.render_full(evalex=self.evalex,
secret=self.secret) \
.encode('utf-8', 'replace')
traceback.log(environ['wsgi.errors'])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype='text/html')
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
self.frames[0] = _ConsoleFrame(self.console_init_func())
return Response(render_console_html(secret=self.secret),
mimetype='text/html')
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype='application/json')
def get_source(self, request, frame):
"""Render the source viewer."""
return Response(frame.render_source(), mimetype='text/html')
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join(dirname(__file__), 'shared', basename(filename))
if isfile(filename):
mimetype = mimetypes.guess_type(filename)[0] \
or 'application/octet-stream'
f = open(filename, 'rb')
try:
return Response(f.read(), mimetype=mimetype)
finally:
f.close()
return Response('Not Found', status=404)
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get('__debugger__') == 'yes':
cmd = request.args.get('cmd')
arg = request.args.get('f')
secret = request.args.get('s')
traceback = self.tracebacks.get(request.args.get('tb', type=int))
frame = self.frames.get(request.args.get('frm', type=int))
if cmd == 'resource' and arg:
response = self.get_resource(request, arg)
elif cmd == 'paste' and traceback is not None and \
secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == 'source' and frame and self.secret == secret:
response = self.get_source(request, frame)
elif self.evalex and cmd is not None and frame is not None and \
self.secret == secret:
response = self.execute_command(request, cmd, frame)
elif self.evalex and self.console_path is not None and \
request.path == self.console_path:
response = self.display_console(request)
return response(environ, start_response)
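if __name__ == '__main__':
    # Hedged smoke-test sketch (not part of the original module): the demo app,
    # host and port below are assumptions chosen purely for illustration.
    from werkzeug.serving import run_simple
    def _demo_app(environ, start_response):
        raise RuntimeError('intentional error to exercise the debugger')
    run_simple('localhost', 5000, DebuggedApplication(_demo_app, evalex=True))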
| apache-2.0 |
york-2015/yowsup | yowsup/layers/protocol_groups/protocolentities/notification_groups_add.py | 61 | 1946 | from .notification_groups import GroupsNotificationProtocolEntity
from yowsup.structs import ProtocolTreeNode
class AddGroupsNotificationProtocolEntity(GroupsNotificationProtocolEntity):
'''
    <notification participant="{{participant_jid}}" t="{{TIMESTAMP}}" from="{{group_jid}}" type="w:gp2" id="{{id}}" notify="WhatsApp">
<add>
<participant jid="{{JID_1}}">
</participant>
</add>
</notification>
'''
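    # Hedged construction sketch (all jids, ids and timestamps below are
    # made-up values): an entity matching the XML above could be built and
    # serialized roughly like
    #
    #   entity = AddGroupsNotificationProtocolEntity(
    #       "id_123", "[email protected]", "1419000000", "WhatsApp",
    #       "[email protected]", False,
    #       ["[email protected]"])
    #   node = entity.toProtocolTreeNode()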
def __init__(self, _id, _from, timestamp, notify, participant, offline, participants):
super(AddGroupsNotificationProtocolEntity, self).__init__(_id, _from, timestamp, notify, participant, offline)
self.setParticipants(participants)
def setParticipants(self, participants):
assert type(participants) is list, "Must be a list of jids, got %s instead." % type(participants)
self.participants = participants
def getParticipants(self):
return self.participants
def __str__(self):
out = super(AddGroupsNotificationProtocolEntity, self).__str__()
out += "Participants: %s\n" % " ".join(self.getParticipants())
return out
def toProtocolTreeNode(self):
node = super(AddGroupsNotificationProtocolEntity, self).toProtocolTreeNode()
addNode = ProtocolTreeNode("add")
participants = []
for jid in self.getParticipants():
pnode = ProtocolTreeNode("participant", {"jid": jid})
participants.append(pnode)
addNode.addChildren(participants)
node.addChild(addNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
addNode = node.getChild("add")
participants = []
for p in addNode.getAllChildren("participant"):
participants.append(p["jid"])
return AddGroupsNotificationProtocolEntity(
node["id"], node["from"], node["t"], node["notify"], node["participant"], node["offline"],
participants
) | gpl-3.0 |
Crespo911/pyspace | pySPACE/tests/unittests/data_types/test_base.py | 4 | 6699 | """Unit test for BaseData type
This unit test creates TimeSeries and FeatureVector objects,
tries to change and inherit meta information and runs separate
tests for key, tag, specs and inheritance.
.. todo:: test pickling?
:Author: Sirko Straube ([email protected]), David Feess
:Last Revision: 2012/04/02
"""
if __name__ == '__main__':
import sys
import os
# The root of the code
file_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(file_path[:file_path.rfind('pySPACE')-1])
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.feature_vector import FeatureVector
import unittest, numpy
class BaseDataTestCase(unittest.TestCase):
"""Test BaseData data type"""
def setUp(self):
"""Create some example data """
# Create some TimeSeries:
self.x1 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
marker_name='S4', name='Name_text ending with Standard',
start_time=1000.0, end_time=1004.0)
self.x1.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
self.x1.generate_meta() #automatically generate key and tag
self.x2 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
marker_name='S4', start_time=2000.0, end_time=2004.0,
name='Name_text ending with Standard')
#manually generate key and tag
import uuid
self.x2_key=uuid.uuid4()
self.x2.key=self.x2_key
self.x2.tag='Tag of x2'
self.x2.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
self.x3 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
marker_name='S4', start_time=3000.0, end_time=3004.0)
self.x3.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
self.x3.generate_meta()
self.x4 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,marker_name='S4')
self.x4.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
self.x5 = TimeSeries([1,2], ['a','b'], 12)
self.x5.inherit_meta_from(self.x2)
self.x6 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12)
self.x6.specs={'Nice_Parameter': 11, 'Less_Nice_Param': '21'}
self.x6.generate_meta()
        # save information for later comparison
self.x6_key=self.x6.key
self.x6.inherit_meta_from(self.x2)
self.some_nice_dict = {'guido': 4127, 'irv': 4127, 'jack': 4098}
self.x6.add_to_history(self.x5, self.some_nice_dict)
# Create some FeatureVectors:
self.f1 = FeatureVector([1,2,3,4,5,6],['a','b','c','d','e','f'])
self.f1.specs={'NiceParam':1,'LessNiceParam':2}
self.f2 = FeatureVector([1,2,3,4,5,6],['a','b','c','d','e','f'], tag = 'Tag of f2')
self.f2.specs={'NiceParam':1,'LessNiceParam':2}
self.f3 = FeatureVector([1,2], ['a','b'])
self.f3.inherit_meta_from(self.x2)
self.f3.add_to_history(self.x5)
def testTag(self):
"""Test tag behavior"""
# Generate from Meta Data
self.assertEqual(self.x1.tag,
'Epoch Start: 1000ms; End: 1004ms; Class: Standard')
# Tag passed, use that!
self.assertEqual(self.x2.tag, 'Tag of x2')
self.assertEqual(self.f2.tag, 'Tag of f2')
# No tag and only partial meta passed
self.assertEqual(self.x3.tag,
'Epoch Start: 3000ms; End: 3004ms; Class: na')
# No Tag and no meta passed, Tag remains None
self.assertEqual(self.x4.tag, None)
self.assertEqual(self.f1.tag, None)
def testKey(self):
"""Test key behavior"""
import uuid
self.assertEqual(type(self.x1.key),uuid.UUID)
# If Key passed, use that!
self.assertEqual(self.x2.key, self.x2_key)
def testInheritAndAddStuff(self):
"""test inheritance of meta data from other objects"""
# Inherit
self.assertEqual(self.x5.tag, self.x2.tag)
self.assertEqual(self.x5.key, self.x2.key)
self.assertEqual(self.f3.tag, self.x2.tag)
self.assertEqual(self.f3.key, self.x2.key)
#Inherit
#suppress warning of BaseData type and cast data back to numpy
hist_x6=self.x6.history[0].view(numpy.ndarray)
data_x5=self.x5.view(numpy.ndarray)
# history
self.assertEqual((hist_x6==data_x5).all(),True)
self.assertEqual(self.x6.history[0].key,self.x5.key)
self.assertEqual(self.x6.history[0].tag,self.x5.tag)
self.assertEqual(self.x6.history[0].specs['node_specs'],self.some_nice_dict)
hist_f3=self.f3.history[0].view(numpy.ndarray)
self.assertEqual((hist_f3==data_x5).all(),True)
self.assertEqual(self.f3.history[0].key,self.x5.key)
self.assertEqual(self.f3.history[0].tag,self.x5.tag)
#if key (and tag) were already set, these original values
#have to be kept
#
self.assertEqual(self.x6.key, self.x6_key)
self.assertEqual(self.x6.tag, self.x2.tag)
self.x6.inherit_meta_from(self.f3) #should not change tag and key
self.assertEqual(self.x6.key, self.x6_key)
self.assertEqual(self.x6.tag, self.x2.tag)
#testing multiple histories
x7 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,marker_name='S4')
x7.add_to_history(self.x1)
x7.add_to_history(self.x2)
x7.add_to_history(self.x3)
x7.add_to_history(self.x4)
x7.add_to_history(self.x5)
x7.add_to_history(self.x6)
x7.add_to_history(self.x1)
self.assertEqual(len(x7.history),7)
self.assertEqual(x7.history[0].key,x7.history[6].key)
self.assertEqual(x7.history[5].history,[])
def testSpecs(self):
"""Test specs behavior"""
# so far, there's not much going on with specs...
# same problem as in testkey
# timeseries doesn't set spec
self.assertEqual(self.x1.specs,
{'Nice_Parameter': 1, 'Less_Nice_Param': '2'})
# Inherit
self.assertEqual(self.x5.specs,self.x2.specs)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromName('test_base_data')
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-3.0 |
sargas/scipy | scipy/weave/tests/test_inline_tools.py | 6 | 1500 | from __future__ import absolute_import, print_function
from numpy.testing import TestCase, dec, assert_
from scipy.weave import inline_tools
class TestInline(TestCase):
""" These are long running tests...
I'd like to benchmark these things somehow.
"""
@dec.slow
def test_exceptions(self):
a = 3
code = """
if (a < 2)
throw_error(PyExc_ValueError,
"the variable 'a' should not be less than 2");
else
return_val = PyInt_FromLong(a+1);
"""
result = inline_tools.inline(code,['a'])
assert_(result == 4)
## Unfortunately, it is not always possible to catch distutils compiler
## errors, since SystemExit is used. Until that is fixed, these tests
## cannot be run in the same process as the test suite.
## try:
## a = 1
## result = inline_tools.inline(code,['a'])
## assert_(1) # should've thrown a ValueError
## except ValueError:
## pass
## from distutils.errors import DistutilsError, CompileError
## try:
## a = 'string'
## result = inline_tools.inline(code,['a'])
## assert_(1) # should've gotten an error
## except:
## # ?CompileError is the error reported, but catching it doesn't work
## pass
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
DataDog/moto | tests/test_emr/test_emr.py | 2 | 9462 | from __future__ import unicode_literals
import boto
from boto.emr.instance_group import InstanceGroup
from boto.emr.step import StreamingStep
import sure # noqa
from moto import mock_emr
from tests.helpers import requires_boto_gte
@mock_emr
def test_create_job_flow():
conn = boto.connect_emr()
step1 = StreamingStep(
name='My wordcount example',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input',
output='s3n://output_bucket/output/wordcount_output'
)
step2 = StreamingStep(
name='My wordcount example2',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input2',
output='s3n://output_bucket/output/wordcount_output2'
)
job_id = conn.run_jobflow(
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
master_instance_type='m1.medium',
slave_instance_type='m1.small',
steps=[step1, step2],
)
job_flow = conn.describe_jobflow(job_id)
job_flow.state.should.equal('STARTING')
job_flow.jobflowid.should.equal(job_id)
job_flow.name.should.equal('My jobflow')
job_flow.masterinstancetype.should.equal('m1.medium')
job_flow.slaveinstancetype.should.equal('m1.small')
job_flow.loguri.should.equal('s3://some_bucket/jobflow_logs')
job_flow.visibletoallusers.should.equal('False')
int(job_flow.normalizedinstancehours).should.equal(0)
job_step = job_flow.steps[0]
job_step.name.should.equal('My wordcount example')
job_step.state.should.equal('STARTING')
args = [arg.value for arg in job_step.args]
args.should.equal([
'-mapper',
's3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
'-reducer',
'aggregate',
'-input',
's3n://elasticmapreduce/samples/wordcount/input',
'-output',
's3n://output_bucket/output/wordcount_output',
])
job_step2 = job_flow.steps[1]
job_step2.name.should.equal('My wordcount example2')
job_step2.state.should.equal('PENDING')
args = [arg.value for arg in job_step2.args]
args.should.equal([
'-mapper',
's3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
'-reducer',
'aggregate',
'-input',
's3n://elasticmapreduce/samples/wordcount/input2',
'-output',
's3n://output_bucket/output/wordcount_output2',
])
@requires_boto_gte("2.8")
@mock_emr
def test_create_job_flow_with_new_params():
# Test that run_jobflow works with newer params
conn = boto.connect_emr()
conn.run_jobflow(
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
master_instance_type='m1.medium',
slave_instance_type='m1.small',
job_flow_role='some-role-arn',
steps=[],
)
@requires_boto_gte("2.8")
@mock_emr
def test_create_job_flow_visible_to_all_users():
conn = boto.connect_emr()
job_id = conn.run_jobflow(
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
steps=[],
visible_to_all_users=True,
)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('True')
@mock_emr
def test_terminate_job_flow():
conn = boto.connect_emr()
job_id = conn.run_jobflow(
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
steps=[]
)
flow = conn.describe_jobflows()[0]
flow.state.should.equal('STARTING')
conn.terminate_jobflow(job_id)
flow = conn.describe_jobflows()[0]
flow.state.should.equal('TERMINATED')
@mock_emr
def test_add_steps_to_flow():
conn = boto.connect_emr()
step1 = StreamingStep(
name='My wordcount example',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input',
output='s3n://output_bucket/output/wordcount_output'
)
job_id = conn.run_jobflow(
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
steps=[step1]
)
job_flow = conn.describe_jobflow(job_id)
job_flow.state.should.equal('STARTING')
job_flow.jobflowid.should.equal(job_id)
job_flow.name.should.equal('My jobflow')
job_flow.loguri.should.equal('s3://some_bucket/jobflow_logs')
step2 = StreamingStep(
name='My wordcount example2',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input2',
output='s3n://output_bucket/output/wordcount_output2'
)
conn.add_jobflow_steps(job_id, [step2])
job_flow = conn.describe_jobflow(job_id)
job_step = job_flow.steps[0]
job_step.name.should.equal('My wordcount example')
job_step.state.should.equal('STARTING')
args = [arg.value for arg in job_step.args]
args.should.equal([
'-mapper',
's3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
'-reducer',
'aggregate',
'-input',
's3n://elasticmapreduce/samples/wordcount/input',
'-output',
's3n://output_bucket/output/wordcount_output',
])
job_step2 = job_flow.steps[1]
job_step2.name.should.equal('My wordcount example2')
job_step2.state.should.equal('PENDING')
args = [arg.value for arg in job_step2.args]
args.should.equal([
'-mapper',
's3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
'-reducer',
'aggregate',
'-input',
's3n://elasticmapreduce/samples/wordcount/input2',
'-output',
's3n://output_bucket/output/wordcount_output2',
])
@mock_emr
def test_create_instance_groups():
conn = boto.connect_emr()
step1 = StreamingStep(
name='My wordcount example',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input',
output='s3n://output_bucket/output/wordcount_output'
)
job_id = conn.run_jobflow(
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
steps=[step1],
)
instance_group = InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07')
instance_group = conn.add_instance_groups(job_id, [instance_group])
instance_group_id = instance_group.instancegroupids
job_flow = conn.describe_jobflows()[0]
int(job_flow.instancecount).should.equal(6)
instance_group = job_flow.instancegroups[0]
instance_group.instancegroupid.should.equal(instance_group_id)
int(instance_group.instancerunningcount).should.equal(6)
instance_group.instancerole.should.equal('TASK')
instance_group.instancetype.should.equal('c1.medium')
instance_group.market.should.equal('SPOT')
instance_group.name.should.equal('spot-0.07')
instance_group.bidprice.should.equal('0.07')
@mock_emr
def test_modify_instance_groups():
conn = boto.connect_emr()
step1 = StreamingStep(
name='My wordcount example',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input',
output='s3n://output_bucket/output/wordcount_output'
)
job_id = conn.run_jobflow(
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
steps=[step1]
)
instance_group1 = InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07')
instance_group2 = InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07')
instance_group = conn.add_instance_groups(job_id, [instance_group1, instance_group2])
instance_group_ids = instance_group.instancegroupids.split(",")
job_flow = conn.describe_jobflows()[0]
int(job_flow.instancecount).should.equal(12)
instance_group = job_flow.instancegroups[0]
int(instance_group.instancerunningcount).should.equal(6)
conn.modify_instance_groups(instance_group_ids, [2, 3])
job_flow = conn.describe_jobflows()[0]
int(job_flow.instancecount).should.equal(5)
instance_group1 = [
group for group
in job_flow.instancegroups
if group.instancegroupid == instance_group_ids[0]
][0]
int(instance_group1.instancerunningcount).should.equal(2)
instance_group2 = [
group for group
in job_flow.instancegroups
if group.instancegroupid == instance_group_ids[1]
][0]
int(instance_group2.instancerunningcount).should.equal(3)
@requires_boto_gte("2.8")
@mock_emr
def test_set_visible_to_all_users():
conn = boto.connect_emr()
job_id = conn.run_jobflow(
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
steps=[],
visible_to_all_users=False,
)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('False')
conn.set_visible_to_all_users(job_id, True)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('True')
conn.set_visible_to_all_users(job_id, False)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('False')
| apache-2.0 |
yellowcap/academicspam | apps/spamparser/tests.py | 1 | 3028 | """Test suite for spam email parser"""
from django.test import TestCase
from django_dynamic_fixture import G
from django_mailbox.models import Message
from .models import ParseResult
###############################################################################
class ScenarioTests(TestCase):
def test_regular_header(self):
regular_header = """
> From: George Bluth <[email protected]>
> Subject: Fwd: There's money in the banana stand
> Date: May 20, 2014 at 4:50:55 AM PDT
> To: Michael Bluth <[email protected]>
"""
message = G(Message, body=regular_header)
parseresult = G(ParseResult, message=message)
parseresult.parse()
self.assertEqual(parseresult.from_address, '[email protected]')
self.assertEqual(parseresult.to_address, '[email protected]')
self.assertEqual(parseresult.subject, 'Fwd: There\'s money in the banana stand')
self.assertEqual(parseresult.confidence, 'ok')
def test_foreign_header(self):
foreign_header = """
> Remetente: George Bluth <[email protected]>
> Assunto: Fwd: There's money in the banana stand
> Data: May 20, 2014 at 4:50:55 AM PDT
> Para: Michael Bluth <[email protected]>
"""
message = G(Message, body=foreign_header)
parseresult = G(ParseResult, message=message)
parseresult.parse()
self.assertEqual(parseresult.from_address, '[email protected]')
self.assertEqual(parseresult.to_address, '[email protected]')
self.assertEqual(parseresult.subject, 'Fwd: There\'s money in the banana stand')
self.assertEqual(parseresult.confidence, 'ok')
def test_multiple_to_emails_header(self):
double_header = """
> From: George Bluth <[email protected]>
> Subject: Fwd: There's money in the banana stand
> Date: May 20, 2014 at 4:50:55 AM PDT
> To: Michael Bluth <[email protected]>, George Michael Bluth <[email protected]>
"""
message = G(Message, body=double_header)
parseresult = G(ParseResult, message=message)
parseresult.parse()
self.assertEqual(parseresult.from_address, '[email protected]')
self.assertEqual(parseresult.to_address, '[email protected]')
self.assertEqual(parseresult.subject, 'Fwd: There\'s money in the banana stand')
self.assertEqual(parseresult.confidence, 'ok')
def test_false_friends_header(self):
double_header = """
> From: George Bluth <[email protected]>
> Subject: Subject: Fwd: There's money in the banana stand
> Date: May 20, 2014 at 4:50:55 AM PDT
> To: Michael Bluth <[email protected]>
"""
message = G(Message, body=double_header)
parseresult = G(ParseResult, message=message)
parseresult.parse()
self.assertEqual(parseresult.subject, 'Subject: Fwd: There\'s money in the banana stand')
self.assertEqual(parseresult.confidence, 'fa')
| mit |
anand-c-goog/tensorflow | tensorflow/python/framework/registry.py | 18 | 2935 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry mechanism for "registering" classes/functions for general use.
This is typically used with a decorator that calls Register for adding
a class or function to a registry.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Registry mechanism below is based on mapreduce.python.mrpython.Register.
_LOCATION_TAG = "location"
_TYPE_TAG = "type"
class Registry(object):
"""Provides a registry for saving objects."""
def __init__(self, name):
"""Creates a new registry."""
self._name = name
self._registry = dict()
def register(self, candidate, name=None):
"""Registers a Python object "candidate" for the given "name".
Args:
candidate: The candidate object to add to the registry.
name: An optional string specifying the registry key for the candidate.
If None, candidate.__name__ will be used.
Raises:
KeyError: If same name is used twice.
"""
if not name:
name = candidate.__name__
if name in self._registry:
(filename, line_number, function_name, _) = (
self._registry[name][_LOCATION_TAG])
raise KeyError("Registering two %s with name '%s' !"
"(Previous registration was in %s %s:%d)" %
(self._name, name, function_name, filename, line_number))
logging.vlog(1, "Registering %s (%s) in %s.", name, candidate, self._name)
# stack trace is [this_function, Register(), user_function,...]
# so the user function is #2.
stack = traceback.extract_stack()
self._registry[name] = {_TYPE_TAG: candidate, _LOCATION_TAG: stack[2]}
def lookup(self, name):
"""Looks up "name".
Args:
name: a string specifying the registry key for the candidate.
Returns:
Registered object if found
Raises:
LookupError: if "name" has not been registered.
"""
name = compat.as_str(name)
if name in self._registry:
return self._registry[name][_TYPE_TAG]
else:
raise LookupError(
"%s registry has no entry for: %s" % (self._name, name))
| apache-2.0 |
mohamedhagag/community-addons | openeducat_erp/op_student/op_student.py | 1 | 7142 | # -*- coding: utf-8 -*-
###############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2009-TODAY Tech-Receptives(<http://www.techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import models, fields, api
class OpStudent(models.Model):
_name = 'op.student'
_inherits = {'res.partner': 'partner_id'}
@api.depends('roll_number_line', 'roll_number_line.roll_number',
'roll_number_line.student_id', 'roll_number_line.standard_id',
'roll_number_line.standard_id.sequence')
def _get_curr_roll_number(self):
# for student in self:
roll_no = 0
seq = 0
for roll_number in self.roll_number_line:
if roll_number.standard_id.sequence > seq:
roll_no = roll_number.roll_number
seq = roll_number.standard_id.sequence
self.roll_number = roll_no
middle_name = fields.Char('Middle Name', size=128, required=True)
    last_name = fields.Char('Last Name', size=128, required=True)
birth_date = fields.Date('Birth Date', required=True)
blood_group = fields.Selection(
[('A+', 'A+ve'), ('B+', 'B+ve'), ('O+', 'O+ve'), ('AB+', 'AB+ve'),
('A-', 'A-ve'), ('B-', 'B-ve'), ('O-', 'O-ve'), ('AB-', 'AB-ve')],
'Blood Group')
gender = fields.Selection(
[('m', 'Male'), ('f', 'Female'),
('o', 'Other')], 'Gender', required=True)
nationality = fields.Many2one('res.country', 'Nationality')
language = fields.Many2one('res.lang', 'Mother Tongue')
category = fields.Many2one(
'op.category', 'Category', required=True)
religion = fields.Many2one('op.religion', 'Religion')
library_card = fields.Char('Library Card', size=64)
emergency_contact = fields.Many2one(
'res.partner', 'Emergency Contact')
pan_card = fields.Char('PAN Card', size=64)
bank_acc_num = fields.Char('Bank Acc Number', size=64)
visa_info = fields.Char('Visa Info', size=64)
id_number = fields.Char('ID Card Number', size=64)
photo = fields.Binary('Photo')
course_id = fields.Many2one('op.course', 'Course', required=True)
division_id = fields.Many2one('op.division', 'Division')
batch_id = fields.Many2one('op.batch', 'Batch', required=True)
standard_id = fields.Many2one(
'op.standard', 'Standard', required=True)
roll_number_line = fields.One2many(
'op.roll.number', 'student_id', 'Roll Number')
partner_id = fields.Many2one(
'res.partner', 'Partner', required=True, ondelete="cascade")
health_lines = fields.One2many('op.health', 'student_id', 'Health Detail')
roll_number = fields.Char(
'Current Roll Number', compute='_get_curr_roll_number',
size=8, store=True)
allocation_ids = fields.Many2many('op.assignment', string='Assignment')
alumni_boolean = fields.Boolean('Alumni Student')
passing_year = fields.Many2one('op.batch', 'Passing Year')
current_position = fields.Char('Current Position', size=256)
current_job = fields.Char('Current Job', size=256)
email = fields.Char('Email', size=128)
phone = fields.Char('Phone Number', size=256)
user_id = fields.Many2one('res.users', 'User')
placement_line = fields.One2many(
'op.placement.offer', 'student_id', 'Placement Details')
activity_log = fields.One2many(
'op.activity', 'student_id', 'Activity Log')
parent_ids = fields.Many2many('op.parent', string='Parent')
gr_no = fields.Char("GR Number", size=20)
invoice_exists = fields.Boolean('Invoice')
@api.multi
def create_invoice(self):
""" Create invoice for fee payment process of student """
invoice_pool = self.env['account.invoice']
default_fields = invoice_pool.fields_get(self)
invoice_default = invoice_pool.default_get(default_fields)
for student in self:
type = 'out_invoice'
partner_id = student.partner_id.id
onchange_partner = invoice_pool.onchange_partner_id(
type, partner_id)
invoice_default.update(onchange_partner['value'])
invoice_data = {
'partner_id': student.partner_id.id,
'date_invoice': fields.Date.today(),
'payment_term': student.standard_id.payment_term and
student.standard_id.payment_term.id or
student.course_id.payment_term and
student.course_id.payment_term.id or False,
}
invoice_default.update(invoice_data)
invoice_id = invoice_pool.create(invoice_default).id
self.write({'invoice_ids': [(4, invoice_id)], 'invoice_exists': True})
form_view = self.env.ref('account.invoice_form')
tree_view = self.env.ref('account.invoice_tree')
value = {
'domain': str([('id', '=', invoice_id)]),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.invoice',
'view_id': False,
'views': [(form_view and form_view.id or False, 'form'),
(tree_view and tree_view.id or False, 'tree')],
'type': 'ir.actions.act_window',
'res_id': invoice_id,
'target': 'current',
'nodestroy': True
}
return value
@api.multi
def action_view_invoice(self):
'''
        This function returns an action that displays the existing invoices
        of the given student ids and opens the invoice form.
'''
result = self.env.ref('account.action_invoice_tree1')
id = result and result.id or False
result = self.env['ir.actions.act_window'].browse(id).read()[0]
# compute the number of invoices to display
inv_ids = []
for so in self:
inv_ids += [invoice.id for invoice in so.invoice_ids]
# choose the view_mode accordingly
if len(inv_ids) > 1:
result['domain'] = \
"[('id','in',[" + ','.join(map(str, inv_ids)) + "])]"
else:
res = self.env.ref('account.invoice_form')
result['views'] = [(res and res.id or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
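    # Illustrative note (domain and batch name are assumptions): fee invoices
    # could typically be generated in batch from a server action with
    #
    #   students = env['op.student'].search([('batch_id.name', '=', '2015')])
    #   students.create_invoice()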
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mith1979/ansible_automation | applied_python/applied_python/lib/python2.7/site-packages/dns/rdtypes/ANY/RP.py | 100 | 3274 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.name
class RP(dns.rdata.Rdata):
"""RP record
@ivar mbox: The responsible person's mailbox
@type mbox: dns.name.Name object
@ivar txt: The owner name of a node with TXT records, or the root name
if no TXT records are associated with this RP.
@type txt: dns.name.Name object
@see: RFC 1183"""
__slots__ = ['mbox', 'txt']
def __init__(self, rdclass, rdtype, mbox, txt):
super(RP, self).__init__(rdclass, rdtype)
self.mbox = mbox
self.txt = txt
def to_text(self, origin=None, relativize=True, **kw):
mbox = self.mbox.choose_relativity(origin, relativize)
txt = self.txt.choose_relativity(origin, relativize)
return "%s %s" % (str(mbox), str(txt))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
mbox = tok.get_name()
txt = tok.get_name()
mbox = mbox.choose_relativity(origin, relativize)
txt = txt.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, mbox, txt)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
self.mbox.to_wire(file, None, origin)
self.txt.to_wire(file, None, origin)
def to_digestable(self, origin = None):
return self.mbox.to_digestable(origin) + \
self.txt.to_digestable(origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(mbox, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
current += cused
rdlen -= cused
if rdlen <= 0:
raise dns.exception.FormError
(txt, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise dns.exception.FormError
if not origin is None:
mbox = mbox.relativize(origin)
txt = txt.relativize(origin)
return cls(rdclass, rdtype, mbox, txt)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.mbox = self.mbox.choose_relativity(origin, relativize)
self.txt = self.txt.choose_relativity(origin, relativize)
def _cmp(self, other):
v = cmp(self.mbox, other.mbox)
if v == 0:
v = cmp(self.txt, other.txt)
return v
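if __name__ == '__main__':
    # Hedged usage sketch (not part of the module): build an RP rdata from
    # zone-file text; the mailbox and TXT owner names are examples only.
    import dns.rdataclass
    import dns.rdatatype
    rp = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.RP,
                             'admin.example.com. txt.example.com.')
    print rp.mbox, rp.txt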
| apache-2.0 |
zsiciarz/django | tests/flatpages_tests/test_forms.py | 43 | 4461 | from django.conf import settings
from django.contrib.flatpages.forms import FlatpageForm
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from django.utils import translation
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.flatpages', ]})
@override_settings(SITE_ID=1)
class FlatpageAdminFormTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
def setUp(self):
# Site fields cache needs to be cleared after flatpages is added to
# INSTALLED_APPS
Site._meta._expire_cache()
self.form_data = {
'title': "A test page",
'content': "This is a test",
'sites': [settings.SITE_ID],
}
def test_flatpage_admin_form_url_validation(self):
"The flatpage admin form correctly validates urls"
self.assertTrue(FlatpageForm(data=dict(url='/new_flatpage/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.special~chars/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.very_special~chars-here/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a space/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a % char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ! char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a & char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ? char/', **self.form_data)).is_valid())
def test_flatpage_requires_leading_slash(self):
form = FlatpageForm(data=dict(url='no_leading_slash/', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a leading slash."])
@override_settings(APPEND_SLASH=True, MIDDLEWARE=['django.middleware.common.CommonMiddleware'])
def test_flatpage_requires_trailing_slash_with_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a trailing slash."])
@override_settings(APPEND_SLASH=False, MIDDLEWARE=['django.middleware.common.CommonMiddleware'])
def test_flatpage_doesnt_requires_trailing_slash_without_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
self.assertTrue(form.is_valid())
def test_flatpage_admin_form_url_uniqueness_validation(self):
"The flatpage admin form correctly enforces url uniqueness among flatpages of the same site"
data = dict(url='/myflatpage1/', **self.form_data)
FlatpageForm(data=data).save()
f = FlatpageForm(data=data)
with translation.override('en'):
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'__all__': ['Flatpage with url /myflatpage1/ already exists for site example.com']})
def test_flatpage_admin_form_edit(self):
"""
Existing flatpages can be edited in the admin form without triggering
the url-uniqueness validation.
"""
existing = FlatPage.objects.create(
url="/myflatpage1/", title="Some page", content="The content")
existing.sites.add(settings.SITE_ID)
data = dict(url='/myflatpage1/', **self.form_data)
f = FlatpageForm(data=data, instance=existing)
self.assertTrue(f.is_valid(), f.errors)
updated = f.save()
self.assertEqual(updated.title, "A test page")
def test_flatpage_nosites(self):
data = dict(url='/myflatpage1/', **self.form_data)
data.update({'sites': ''})
f = FlatpageForm(data=data)
self.assertFalse(f.is_valid())
self.assertEqual(f.errors, {'sites': [translation.gettext('This field is required.')]})
| bsd-3-clause |
jstammers/EDMSuite | NavPython/IronPython/Lib/dircache.py | 326 | 1126 | """Read and cache directory listings.
The listdir() routine returns a sorted list of the files in a directory,
using a cache to avoid reading the directory more often than necessary.
The annotate() routine appends slashes to directories."""
from warnings import warnpy3k
warnpy3k("the dircache module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import os
__all__ = ["listdir", "opendir", "annotate", "reset"]
cache = {}
def reset():
"""Reset the cache completely."""
global cache
cache = {}
def listdir(path):
"""List directory contents, using cache."""
try:
cached_mtime, list = cache[path]
del cache[path]
except KeyError:
cached_mtime, list = -1, []
mtime = os.stat(path).st_mtime
if mtime != cached_mtime:
list = os.listdir(path)
list.sort()
cache[path] = mtime, list
return list
opendir = listdir # XXX backward compatibility
def annotate(head, list):
"""Add '/' suffixes to directories."""
for i in range(len(list)):
if os.path.isdir(os.path.join(head, list[i])):
list[i] = list[i] + '/'
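if __name__ == '__main__':
    # Hedged usage sketch (the path is an assumption): repeated listdir() calls
    # re-read the directory only when its modification time changes.
    names = listdir(os.curdir)
    annotate(os.curdir, names)   # marks subdirectories with a trailing '/'
    print names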
| mit |
kula85/perf-sqlite3 | Documentation/target/tcm_mod_builder.py | 337 | 24391 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_proto.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
if proto_ident == "FC":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
elif proto_ident == "SAS":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
elif proto_ident == "iSCSI":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = \"" + fabric_mod_name + "\",\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += "\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
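# Collect the function-pointer declaration lines (anything containing "(*")
# from the target_core_fabric.h API header; tcm_mod_dump_fabric_ops() later
# matches them by name to emit stub implementations and prototypes.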
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
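# Emit <fabric_mod_name>_fabric.c with no-op stub implementations for the
# collected fabric ops, plus the matching prototypes in <fabric_mod_name>_fabric.h.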
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_common.h>\n"
buf += "#include <scsi/scsi_proto.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('aborted_task\)\(', fo):
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
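# Overall flow: create drivers/target/<modname>/, write the base header, scan
# the fabric ops API, generate the fabric/configfs sources and the Kbuild and
# Kconfig files, then optionally wire them into drivers/target/{Makefile,Kconfig}.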
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
mmargoliono/git-cola | cola/widgets/status.py | 1 | 33987 | from __future__ import division, absolute_import, unicode_literals
import itertools
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtCore import SIGNAL
from cola import cmds
from cola import core
from cola import qtutils
from cola import utils
from cola.i18n import N_
from cola.models import main
from cola.models import selection
from cola.widgets import completion
from cola.widgets import defs
class StatusWidget(QtGui.QWidget):
"""
Provides a git-status-like repository widget.
This widget observes the main model and broadcasts
Qt signals.
"""
def __init__(self, titlebar, parent=None):
QtGui.QWidget.__init__(self, parent)
tooltip = N_('Toggle the paths filter')
self.filter_button = qtutils.create_action_button(
tooltip=tooltip,
icon=qtutils.filter_icon())
self.filter_widget = StatusFilterWidget()
self.filter_widget.hide()
self.tree = StatusTreeWidget()
self.main_layout = qtutils.vbox(defs.no_margin, defs.no_spacing,
self.filter_widget, self.tree)
self.setLayout(self.main_layout)
self.toggle_action = qtutils.add_action(self, tooltip,
self.toggle_filter, 'Shift+Ctrl+F')
titlebar.add_corner_widget(self.filter_button)
qtutils.connect_button(self.filter_button, self.toggle_filter)
def toggle_filter(self):
shown = not self.filter_widget.isVisible()
self.filter_widget.setVisible(shown)
if shown:
self.filter_widget.setFocus(True)
else:
self.tree.setFocus(True)
def set_initial_size(self):
self.setMaximumWidth(222)
QtCore.QTimer.singleShot(1, self.restore_size)
def restore_size(self):
self.setMaximumWidth(2 ** 13)
def refresh(self):
self.tree.show_selection()
def set_filter(self, txt):
self.filter_widget.setVisible(True)
self.filter_widget.text.set_value(txt)
self.filter_widget.apply_filter()
class StatusTreeWidget(QtGui.QTreeWidget):
# Item categories
idx_header = -1
idx_staged = 0
idx_unmerged = 1
idx_modified = 2
idx_untracked = 3
idx_end = 4
# Read-only access to the mode state
mode = property(lambda self: self.m.mode)
def __init__(self, parent=None):
QtGui.QTreeWidget.__init__(self, parent)
self.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.headerItem().setHidden(True)
self.setAllColumnsShowFocus(True)
self.setSortingEnabled(False)
self.setUniformRowHeights(True)
self.setAnimated(True)
self.setRootIsDecorated(False)
self.setIndentation(0)
self.setDragEnabled(True)
self.add_item(N_('Staged'), hide=True)
self.add_item(N_('Unmerged'), hide=True)
self.add_item(N_('Modified'), hide=True)
self.add_item(N_('Untracked'), hide=True)
# Used to restore the selection
self.old_scroll = None
self.old_selection = None
self.old_contents = None
self.old_current_item = None
self.expanded_items = set()
self.process_selection_action = qtutils.add_action(self,
cmds.StageOrUnstage.name(),
cmds.run(cmds.StageOrUnstage),
cmds.StageOrUnstage.SHORTCUT)
self.revert_unstaged_edits_action = qtutils.add_action(self,
cmds.RevertUnstagedEdits.name(),
cmds.run(cmds.RevertUnstagedEdits),
cmds.RevertUnstagedEdits.SHORTCUT)
self.revert_unstaged_edits_action.setIcon(qtutils.icon('undo.svg'))
self.revert_uncommitted_edits_action = qtutils.add_action(self,
cmds.RevertUncommittedEdits.name(),
cmds.run(cmds.RevertUncommittedEdits),
cmds.RevertUncommittedEdits.SHORTCUT)
self.revert_uncommitted_edits_action.setIcon(qtutils.icon('undo.svg'))
self.launch_difftool_action = qtutils.add_action(self,
cmds.LaunchDifftool.name(),
cmds.run(cmds.LaunchDifftool),
cmds.LaunchDifftool.SHORTCUT)
self.launch_difftool_action.setIcon(qtutils.icon('git.svg'))
self.launch_editor_action = qtutils.add_action(self,
cmds.LaunchEditor.name(),
cmds.run(cmds.LaunchEditor),
cmds.LaunchEditor.SHORTCUT,
'Return', 'Enter')
self.launch_editor_action.setIcon(qtutils.options_icon())
if not utils.is_win32():
self.open_using_default_app = qtutils.add_action(self,
cmds.OpenDefaultApp.name(),
self._open_using_default_app,
cmds.OpenDefaultApp.SHORTCUT)
self.open_using_default_app.setIcon(qtutils.file_icon())
self.open_parent_dir_action = qtutils.add_action(self,
cmds.OpenParentDir.name(),
self._open_parent_dir,
cmds.OpenParentDir.SHORTCUT)
self.open_parent_dir_action.setIcon(qtutils.open_file_icon())
self.up_action = qtutils.add_action(self,
N_('Move Up'), self.move_up, Qt.Key_K)
self.down_action = qtutils.add_action(self,
N_('Move Down'), self.move_down, Qt.Key_J)
self.copy_path_action = qtutils.add_action(self,
N_('Copy Path to Clipboard'),
self.copy_path, QtGui.QKeySequence.Copy)
self.copy_path_action.setIcon(qtutils.theme_icon('edit-copy.svg'))
self.copy_relpath_action = qtutils.add_action(self,
N_('Copy Relative Path to Clipboard'),
self.copy_relpath, QtGui.QKeySequence.Cut)
self.copy_relpath_action.setIcon(qtutils.theme_icon('edit-copy.svg'))
if cmds.MoveToTrash.AVAILABLE:
self.move_to_trash_action = qtutils.add_action(self,
N_('Move file(s) to trash'),
self._trash_untracked_files, cmds.MoveToTrash.SHORTCUT)
self.move_to_trash_action.setIcon(qtutils.discard_icon())
delete_shortcut = cmds.Delete.SHORTCUT
else:
self.move_to_trash_action = None
delete_shortcut = cmds.Delete.ALT_SHORTCUT
self.delete_untracked_files_action = qtutils.add_action(self,
N_('Delete File(s)...'),
self._delete_untracked_files, delete_shortcut)
self.delete_untracked_files_action.setIcon(qtutils.discard_icon())
self.connect(self, SIGNAL('about_to_update'), self._about_to_update)
self.connect(self, SIGNAL('updated'), self._updated)
self.m = main.model()
self.m.add_observer(self.m.message_about_to_update,
self.about_to_update)
self.m.add_observer(self.m.message_updated, self.updated)
self.connect(self, SIGNAL('itemSelectionChanged()'),
self.show_selection)
self.connect(self, SIGNAL('itemDoubleClicked(QTreeWidgetItem*,int)'),
self.double_clicked)
self.connect(self, SIGNAL('itemCollapsed(QTreeWidgetItem*)'),
lambda x: self.update_column_widths())
self.connect(self, SIGNAL('itemExpanded(QTreeWidgetItem*)'),
lambda x: self.update_column_widths())
def add_item(self, txt, hide=False):
"""Create a new top-level item in the status tree."""
# TODO no icon
font = self.font()
font.setBold(True)
item = QtGui.QTreeWidgetItem(self)
item.setFont(0, font)
item.setText(0, txt)
if hide:
self.setItemHidden(item, True)
def restore_selection(self):
if not self.old_selection or not self.old_contents:
return
old_c = self.old_contents
old_s = self.old_selection
new_c = self.contents()
def mkselect(lst, widget_getter):
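            # Build a per-category selector: map a path from `lst` to its tree
            # widget via `widget_getter` and select it (optionally also making
            # it the current item).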
def select(item, current=False):
idx = lst.index(item)
widget = widget_getter(idx)
if current:
self.setCurrentItem(widget)
self.setItemSelected(widget, True)
return select
select_staged = mkselect(new_c.staged, self.staged_item)
select_unmerged = mkselect(new_c.unmerged, self.unmerged_item)
select_modified = mkselect(new_c.modified, self.modified_item)
select_untracked = mkselect(new_c.untracked, self.untracked_item)
saved_selection = [
(set(new_c.staged), old_c.staged, set(old_s.staged),
select_staged),
(set(new_c.unmerged), old_c.unmerged, set(old_s.unmerged),
select_unmerged),
(set(new_c.modified), old_c.modified, set(old_s.modified),
select_modified),
(set(new_c.untracked), old_c.untracked, set(old_s.untracked),
select_untracked),
]
# Restore the current item
if self.old_current_item:
category, idx = self.old_current_item
if category == self.idx_header:
item = self.invisibleRootItem().child(idx)
if item is not None:
self.setCurrentItem(item)
self.setItemSelected(item, True)
return
# Reselect the current item
selection_info = saved_selection[category]
new = selection_info[0]
old = selection_info[1]
reselect = selection_info[3]
try:
item = old[idx]
            except IndexError:
return
if item in new:
reselect(item, current=True)
# Restore selection
# When reselecting we only care that the items are selected;
# we do not need to rerun the callbacks which were triggered
# above. Block signals to skip the callbacks.
self.blockSignals(True)
for (new, old, sel, reselect) in saved_selection:
for item in sel:
if item in new:
reselect(item, current=False)
self.blockSignals(False)
for (new, old, sel, reselect) in saved_selection:
# When modified is staged, select the next modified item
# When unmerged is staged, select the next unmerged item
# When unstaging, select the next staged item
# When staging untracked files, select the next untracked item
if len(new) >= len(old):
# The list did not shrink so it is not one of these cases.
continue
for item in sel:
# The item still exists so ignore it
if item in new or item not in old:
continue
# The item no longer exists in this list so search for
# its nearest neighbors and select them instead.
idx = old.index(item)
for j in itertools.chain(old[idx+1:], reversed(old[:idx])):
if j in new:
reselect(j, current=True)
return
def restore_scrollbar(self):
vscroll = self.verticalScrollBar()
if vscroll and self.old_scroll is not None:
vscroll.setValue(self.old_scroll)
self.old_scroll = None
def staged_item(self, itemidx):
return self._subtree_item(self.idx_staged, itemidx)
def modified_item(self, itemidx):
return self._subtree_item(self.idx_modified, itemidx)
def unmerged_item(self, itemidx):
return self._subtree_item(self.idx_unmerged, itemidx)
def untracked_item(self, itemidx):
return self._subtree_item(self.idx_untracked, itemidx)
    def unstaged_item(self, itemidx):
        """Map a cumulative unstaged index onto the matching tree item."""
        # is it modified?
        item = self.topLevelItem(self.idx_modified)
        count = item.childCount()
        if itemidx < count:
            return item.child(itemidx)
        # is it unmerged?
        itemidx -= count
        item = self.topLevelItem(self.idx_unmerged)
        count = item.childCount()
        if itemidx < count:
            return item.child(itemidx)
        # is it untracked?
        itemidx -= count
        item = self.topLevelItem(self.idx_untracked)
        count = item.childCount()
        if itemidx < count:
            return item.child(itemidx)
        # Nope..
        return None
def _subtree_item(self, idx, itemidx):
parent = self.topLevelItem(idx)
return parent.child(itemidx)
def about_to_update(self):
self.emit(SIGNAL('about_to_update'))
def _about_to_update(self):
self.save_selection()
self.save_scrollbar()
def save_scrollbar(self):
vscroll = self.verticalScrollBar()
if vscroll:
self.old_scroll = vscroll.value()
else:
self.old_scroll = None
def current_item(self):
s = self.selected_indexes()
if not s:
return None
current = self.currentItem()
if not current:
return None
idx = self.indexFromItem(current, 0)
if idx.parent().isValid():
parent_idx = idx.parent()
entry = (parent_idx.row(), idx.row())
else:
entry = (self.idx_header, idx.row())
return entry
def save_selection(self):
self.old_contents = self.contents()
self.old_selection = self.selection()
self.old_current_item = self.current_item()
def updated(self):
"""Update display from model data."""
self.emit(SIGNAL('updated'))
def _updated(self):
self.set_staged(self.m.staged)
self.set_modified(self.m.modified)
self.set_unmerged(self.m.unmerged)
self.set_untracked(self.m.untracked)
self.restore_selection()
self.restore_scrollbar()
self.update_column_widths()
self.update_actions()
def update_actions(self, selected=None):
if selected is None:
selected = selection.selection()
can_revert_edits = bool(selected.staged or selected.modified)
self.revert_unstaged_edits_action.setEnabled(can_revert_edits)
self.revert_uncommitted_edits_action.setEnabled(can_revert_edits)
def set_staged(self, items):
"""Adds items to the 'Staged' subtree."""
self._set_subtree(items, self.idx_staged, staged=True,
check=not self.m.amending())
def set_modified(self, items):
"""Adds items to the 'Modified' subtree."""
self._set_subtree(items, self.idx_modified)
def set_unmerged(self, items):
"""Adds items to the 'Unmerged' subtree."""
self._set_subtree(items, self.idx_unmerged)
def set_untracked(self, items):
"""Adds items to the 'Untracked' subtree."""
self._set_subtree(items, self.idx_untracked)
def _set_subtree(self, items, idx,
staged=False,
untracked=False,
check=True):
"""Add a list of items to a treewidget item."""
self.blockSignals(True)
parent = self.topLevelItem(idx)
if items:
self.setItemHidden(parent, False)
else:
self.setItemHidden(parent, True)
# sip v4.14.7 and below leak memory in parent.takeChildren()
# so we use this backwards-compatible construct instead
while parent.takeChild(0) is not None:
pass
for item in items:
treeitem = qtutils.create_treeitem(item,
staged=staged,
check=check,
untracked=untracked)
parent.addChild(treeitem)
self.expand_items(idx, items)
self.blockSignals(False)
def update_column_widths(self):
self.resizeColumnToContents(0)
def expand_items(self, idx, items):
"""Expand the top-level category "folder" once and only once."""
# Don't do this if items is empty; this makes it so that we
# don't add the top-level index into the expanded_items set
# until an item appears in a particular category.
if not items:
return
# Only run this once; we don't want to re-expand items that
# we've clicked on to re-collapse on updated().
if idx in self.expanded_items:
return
self.expanded_items.add(idx)
item = self.topLevelItem(idx)
if item:
self.expandItem(item)
def contextMenuEvent(self, event):
"""Create context menus for the repo status tree."""
menu = self.create_context_menu()
menu.exec_(self.mapToGlobal(event.pos()))
def create_context_menu(self):
"""Set up the status menu for the repo status tree."""
s = self.selection()
menu = QtGui.QMenu(self)
selected_indexes = self.selected_indexes()
if selected_indexes:
category, idx = selected_indexes[0]
# A header item e.g. 'Staged', 'Modified', etc.
if category == self.idx_header:
return self._create_header_context_menu(menu, idx)
if s.staged:
return self._create_staged_context_menu(menu, s)
elif s.unmerged:
return self._create_unmerged_context_menu(menu, s)
else:
return self._create_unstaged_context_menu(menu, s)
def _create_header_context_menu(self, menu, idx):
if idx == self.idx_staged:
menu.addAction(qtutils.icon('remove.svg'),
N_('Unstage All'),
cmds.run(cmds.UnstageAll))
return menu
elif idx == self.idx_unmerged:
action = menu.addAction(qtutils.icon('add.svg'),
cmds.StageUnmerged.name(),
cmds.run(cmds.StageUnmerged))
action.setShortcut(cmds.StageUnmerged.SHORTCUT)
return menu
elif idx == self.idx_modified:
action = menu.addAction(qtutils.icon('add.svg'),
cmds.StageModified.name(),
cmds.run(cmds.StageModified))
action.setShortcut(cmds.StageModified.SHORTCUT)
return menu
elif idx == self.idx_untracked:
action = menu.addAction(qtutils.icon('add.svg'),
cmds.StageUntracked.name(),
cmds.run(cmds.StageUntracked))
action.setShortcut(cmds.StageUntracked.SHORTCUT)
return menu
def _create_staged_context_menu(self, menu, s):
if s.staged[0] in self.m.submodules:
return self._create_staged_submodule_context_menu(menu, s)
if self.m.unstageable():
action = menu.addAction(qtutils.icon('remove.svg'),
N_('Unstage Selected'),
cmds.run(cmds.Unstage, self.staged()))
action.setShortcut(cmds.Unstage.SHORTCUT)
# Do all of the selected items exist?
staged_items = self.staged_items()
all_exist = all([i.exists for i in staged_items])
if all_exist:
menu.addAction(self.launch_editor_action)
menu.addAction(self.launch_difftool_action)
if all_exist and not utils.is_win32():
menu.addSeparator()
action = menu.addAction(qtutils.file_icon(),
cmds.OpenDefaultApp.name(),
cmds.run(cmds.OpenDefaultApp, self.staged()))
action.setShortcut(cmds.OpenDefaultApp.SHORTCUT)
action = menu.addAction(qtutils.open_file_icon(),
cmds.OpenParentDir.name(),
self._open_parent_dir)
action.setShortcut(cmds.OpenParentDir.SHORTCUT)
if self.m.undoable():
menu.addSeparator()
menu.addAction(self.revert_unstaged_edits_action)
menu.addSeparator()
menu.addAction(self.copy_path_action)
menu.addAction(self.copy_relpath_action)
return menu
def _create_staged_submodule_context_menu(self, menu, s):
menu.addAction(qtutils.git_icon(),
N_('Launch git-cola'),
cmds.run(cmds.OpenRepo,
core.abspath(s.staged[0])))
menu.addAction(self.launch_editor_action)
menu.addSeparator()
action = menu.addAction(qtutils.icon('remove.svg'),
N_('Unstage Selected'),
cmds.run(cmds.Unstage, self.staged()))
action.setShortcut(cmds.Unstage.SHORTCUT)
menu.addSeparator()
menu.addAction(self.copy_path_action)
menu.addAction(self.copy_relpath_action)
return menu
def _create_unmerged_context_menu(self, menu, s):
menu.addAction(self.launch_difftool_action)
action = menu.addAction(qtutils.icon('add.svg'),
N_('Stage Selected'),
cmds.run(cmds.Stage, self.unstaged()))
action.setShortcut(cmds.Stage.SHORTCUT)
menu.addSeparator()
menu.addAction(self.launch_editor_action)
if not utils.is_win32():
menu.addSeparator()
action = menu.addAction(qtutils.file_icon(),
cmds.OpenDefaultApp.name(),
cmds.run(cmds.OpenDefaultApp, self.unmerged()))
action.setShortcut(cmds.OpenDefaultApp.SHORTCUT)
action = menu.addAction(qtutils.open_file_icon(),
cmds.OpenParentDir.name(),
self._open_parent_dir)
action.setShortcut(cmds.OpenParentDir.SHORTCUT)
menu.addSeparator()
menu.addAction(self.copy_path_action)
menu.addAction(self.copy_relpath_action)
return menu
def _create_unstaged_context_menu(self, menu, s):
modified_submodule = (s.modified and
s.modified[0] in self.m.submodules)
if modified_submodule:
return self._create_modified_submodule_context_menu(menu, s)
if self.m.stageable():
action = menu.addAction(qtutils.icon('add.svg'),
N_('Stage Selected'),
cmds.run(cmds.Stage, self.unstaged()))
action.setShortcut(cmds.Stage.SHORTCUT)
# Do all of the selected items exist?
unstaged_items = self.unstaged_items()
all_exist = all([i.exists for i in unstaged_items])
if all_exist and self.unstaged():
menu.addAction(self.launch_editor_action)
if all_exist and s.modified and self.m.stageable():
menu.addAction(self.launch_difftool_action)
if s.modified and self.m.stageable():
if self.m.undoable():
menu.addSeparator()
menu.addAction(self.revert_unstaged_edits_action)
menu.addAction(self.revert_uncommitted_edits_action)
if all_exist and self.unstaged() and not utils.is_win32():
menu.addSeparator()
action = menu.addAction(qtutils.file_icon(),
cmds.OpenDefaultApp.name(),
cmds.run(cmds.OpenDefaultApp, self.unstaged()))
action.setShortcut(cmds.OpenDefaultApp.SHORTCUT)
action = menu.addAction(qtutils.open_file_icon(),
cmds.OpenParentDir.name(),
self._open_parent_dir)
action.setShortcut(cmds.OpenParentDir.SHORTCUT)
if all_exist and s.untracked:
menu.addSeparator()
if self.move_to_trash_action is not None:
menu.addAction(self.move_to_trash_action)
menu.addAction(self.delete_untracked_files_action)
menu.addSeparator()
menu.addAction(qtutils.icon('edit-clear.svg'),
N_('Add to .gitignore'),
cmds.run(cmds.Ignore,
map(lambda x: '/' + x, self.untracked())))
menu.addSeparator()
menu.addAction(self.copy_path_action)
menu.addAction(self.copy_relpath_action)
return menu
def _create_modified_submodule_context_menu(self, menu, s):
menu.addAction(qtutils.git_icon(),
N_('Launch git-cola'),
cmds.run(cmds.OpenRepo, core.abspath(s.modified[0])))
menu.addAction(self.launch_editor_action)
if self.m.stageable():
menu.addSeparator()
action = menu.addAction(qtutils.icon('add.svg'),
N_('Stage Selected'),
cmds.run(cmds.Stage, self.unstaged()))
action.setShortcut(cmds.Stage.SHORTCUT)
menu.addSeparator()
menu.addAction(self.copy_path_action)
menu.addAction(self.copy_relpath_action)
return menu
def _delete_untracked_files(self):
cmds.do(cmds.Delete, self.untracked())
def _trash_untracked_files(self):
cmds.do(cmds.MoveToTrash, self.untracked())
def single_selection(self):
"""Scan across staged, modified, etc. and return a single item."""
st = None
um = None
m = None
ut = None
s = self.selection()
if s.staged:
st = s.staged[0]
elif s.modified:
m = s.modified[0]
elif s.unmerged:
um = s.unmerged[0]
elif s.untracked:
ut = s.untracked[0]
return selection.State(st, um, m, ut)
def selected_indexes(self):
"""Returns a list of (category, row) representing the tree selection."""
selected = self.selectedIndexes()
result = []
for idx in selected:
if idx.parent().isValid():
parent_idx = idx.parent()
entry = (parent_idx.row(), idx.row())
else:
entry = (self.idx_header, idx.row())
result.append(entry)
return result
def selection(self):
"""Return the current selection in the repo status tree."""
return selection.State(self.staged(), self.unmerged(),
self.modified(), self.untracked())
def contents(self):
return selection.State(self.m.staged, self.m.unmerged,
self.m.modified, self.m.untracked)
def all_files(self):
c = self.contents()
return c.staged + c.unmerged + c.modified + c.untracked
def selected_group(self):
"""A list of selected files in various states of being"""
return selection.pick(self.selection())
def selected_idx(self):
c = self.contents()
s = self.single_selection()
offset = 0
        for content, selected in zip(c, s):
            if len(content) == 0:
                continue
            if selected is not None:
                return offset + content.index(selected)
            offset += len(content)
return None
def select_by_index(self, idx):
c = self.contents()
to_try = [
(c.staged, self.idx_staged),
(c.unmerged, self.idx_unmerged),
(c.modified, self.idx_modified),
(c.untracked, self.idx_untracked),
]
for content, toplevel_idx in to_try:
if len(content) == 0:
continue
if idx < len(content):
parent = self.topLevelItem(toplevel_idx)
item = parent.child(idx)
self.select_item(item)
return
idx -= len(content)
def select_item(self, item):
self.scrollToItem(item)
self.setCurrentItem(item)
self.setItemSelected(item, True)
def staged(self):
return self._subtree_selection(self.idx_staged, self.m.staged)
def unstaged(self):
return self.unmerged() + self.modified() + self.untracked()
def modified(self):
return self._subtree_selection(self.idx_modified, self.m.modified)
def unmerged(self):
return self._subtree_selection(self.idx_unmerged, self.m.unmerged)
def untracked(self):
return self._subtree_selection(self.idx_untracked, self.m.untracked)
def staged_items(self):
return self._subtree_selection_items(self.idx_staged)
def unstaged_items(self):
return (self.unmerged_items() + self.modified_items() +
self.untracked_items())
def modified_items(self):
return self._subtree_selection_items(self.idx_modified)
def unmerged_items(self):
return self._subtree_selection_items(self.idx_unmerged)
def untracked_items(self):
return self._subtree_selection_items(self.idx_untracked)
def _subtree_selection(self, idx, items):
item = self.topLevelItem(idx)
return qtutils.tree_selection(item, items)
def _subtree_selection_items(self, idx):
item = self.topLevelItem(idx)
return qtutils.tree_selection_items(item)
def double_clicked(self, item, idx):
"""Called when an item is double-clicked in the repo status tree."""
cmds.do(cmds.StageOrUnstage)
def _open_using_default_app(self):
cmds.do(cmds.OpenDefaultApp, self.selected_group())
def _open_parent_dir(self):
cmds.do(cmds.OpenParentDir, self.selected_group())
def show_selection(self):
"""Show the selected item."""
# Sync the selection model
selected = self.selection()
selection.selection_model().set_selection(selected)
self.update_actions(selected=selected)
selected_indexes = self.selected_indexes()
if not selected_indexes:
if self.m.amending():
cmds.do(cmds.SetDiffText, '')
else:
cmds.do(cmds.ResetMode)
return
category, idx = selected_indexes[0]
# A header item e.g. 'Staged', 'Modified', etc.
if category == self.idx_header:
cls = {
self.idx_staged: cmds.DiffStagedSummary,
self.idx_modified: cmds.Diffstat,
# TODO implement UnmergedSummary
#self.idx_unmerged: cmds.UnmergedSummary,
self.idx_untracked: cmds.UntrackedSummary,
}.get(idx, cmds.Diffstat)
cmds.do(cls)
# A staged file
elif category == self.idx_staged:
cmds.do(cmds.DiffStaged, self.staged())
# A modified file
elif category == self.idx_modified:
cmds.do(cmds.Diff, self.modified())
elif category == self.idx_unmerged:
cmds.do(cmds.Diff, self.unmerged())
elif category == self.idx_untracked:
cmds.do(cmds.ShowUntracked, self.unstaged())
def move_up(self):
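        # Step the selection to the previous file, wrapping around to the last
        # one; when a category header is selected, move to the item above it.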
idx = self.selected_idx()
all_files = self.all_files()
if idx is None:
selected_indexes = self.selected_indexes()
if selected_indexes:
category, toplevel_idx = selected_indexes[0]
if category == self.idx_header:
item = self.itemAbove(self.topLevelItem(toplevel_idx))
if item is not None:
self.select_item(item)
return
if all_files:
self.select_by_index(len(all_files) - 1)
return
if idx - 1 >= 0:
self.select_by_index(idx - 1)
else:
self.select_by_index(len(all_files) - 1)
def move_down(self):
idx = self.selected_idx()
all_files = self.all_files()
if idx is None:
selected_indexes = self.selected_indexes()
if selected_indexes:
category, toplevel_idx = selected_indexes[0]
if category == self.idx_header:
item = self.itemBelow(self.topLevelItem(toplevel_idx))
if item is not None:
self.select_item(item)
return
if all_files:
self.select_by_index(0)
return
if idx + 1 < len(all_files):
self.select_by_index(idx + 1)
else:
self.select_by_index(0)
def copy_path(self, absolute=True):
"""Copy a selected path to the clipboard"""
filename = selection.selection_model().filename()
qtutils.copy_path(filename, absolute=absolute)
def copy_relpath(self):
"""Copy a selected relative path to the clipboard"""
self.copy_path(absolute=False)
def mimeData(self, items):
"""Return a list of absolute-path URLs"""
paths = qtutils.paths_from_items(items, item_filter=lambda x: x.exists)
return qtutils.mimedata_from_paths(paths)
class StatusFilterWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.main_model = main.model()
hint = N_('Filter paths...')
self.text = completion.GitStatusFilterLineEdit(hint=hint, parent=self)
self.text.setToolTip(hint)
self.text.enable_hint(True)
self.setFocusProxy(self.text)
self.main_layout = qtutils.hbox(defs.no_margin, defs.spacing, self.text)
self.setLayout(self.main_layout)
self.connect(self.text, SIGNAL('changed()'), self.apply_filter)
self.connect(self.text, SIGNAL('returnPressed()'), self.apply_filter)
def apply_filter(self):
text = self.text.value()
paths = utils.shell_split(text)
self.main_model.update_path_filter(paths)
| gpl-2.0 |
dnet/pyqso | setup.py | 1 | 1183 | #!/usr/bin/env python
# Copyright (C) 2013 Christian T. Jacobs.
# This file is part of PyQSO.
# PyQSO is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyQSO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyQSO. If not, see <http://www.gnu.org/licenses/>.
from distutils.core import setup
setup(name='PyQSO',
version='0.2',
description='A contact logging tool for amateur radio operators.',
author='Christian T. Jacobs',
author_email='[email protected]',
url='https://github.com/ctjacobs/pyqso',
packages=['pyqso'],
package_dir = {'pyqso': 'pyqso'},
scripts=["bin/pyqso"],
data_files=[("icons", ["icons/log_64x64.png"])]
)
| gpl-3.0 |
strint/tensorflow | tensorflow/python/kernel_tests/unstack_op_test.py | 99 | 5236 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Unpack Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
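  # Reference behavior for tf.unstack: split `array` along `axis` into
  # array.shape[axis] pieces and squeeze that axis away from each piece.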
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
class UnstackOpTest(test.TestCase):
def testSimple(self):
np.random.seed(7)
with self.test_session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
# Convert data to a single tensorflow tensor
x = constant_op.constant(data)
# Unpack into a list of tensors
cs = array_ops.unstack(x, num=shape[0])
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
cs = [c.eval() for c in cs]
self.assertAllEqual(cs, data)
def testGradientsAxis0(self):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
for i in xrange(shape[0]):
with self.test_session(use_gpu=True):
x = constant_op.constant(data)
cs = array_ops.unstack(x, num=shape[0])
err = gradient_checker.compute_gradient_error(x, shape, cs[i],
shapes[i])
self.assertLess(err, 1e-6)
def testGradientsAxis1(self):
for shape in (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
out_shape = list(shape)
del out_shape[1]
for i in xrange(shape[1]):
with self.test_session(use_gpu=True):
x = constant_op.constant(data)
cs = array_ops.unstack(x, num=shape[1], axis=1)
err = gradient_checker.compute_gradient_error(x, shape, cs[i],
out_shape)
self.assertLess(err, 1e-6)
def testInferNum(self):
with self.test_session():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
x = array_ops.placeholder(np.float32, shape=shape)
cs = array_ops.unstack(x)
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
def testCannotInferNumFromUnknownShape(self):
x = array_ops.placeholder(np.float32)
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape <unknown>'):
array_ops.unstack(x)
def testUnknownShapeOkWithNum(self):
x = array_ops.placeholder(np.float32)
array_ops.unstack(x, num=2)
def testCannotInferNumFromNoneShape(self):
x = array_ops.placeholder(np.float32, shape=(None,))
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape \(\?,\)'):
array_ops.unstack(x)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for i in range(1, 6):
a = np.random.random(np.random.permutation(i) + 1)
# For all the possible axis to split it, including negative indices.
for j in range(-i, i):
expected = np_split_squeeze(a, j)
with self.test_session() as sess:
actual_unstack = sess.run(array_ops.unstack(a, axis=j))
self.assertAllEqual(expected, actual_unstack)
def testAxis0Default(self):
with self.test_session() as sess:
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
unstacked = sess.run(array_ops.unstack(a))
self.assertEqual(len(unstacked), 2)
self.assertAllEqual(unstacked[0], [1, 2, 3])
self.assertAllEqual(unstacked[1], [4, 5, 6])
def testAxisOutOfRange(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = 2 not in \[-2, 2\)'):
array_ops.unstack(a, axis=2)
def testAxisOutOfNegativeRange(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = -3 not in \[-2, 2\)'):
array_ops.unstack(a, axis=-3)
def testZeroLengthDim(self):
with self.test_session():
x = array_ops.zeros(shape=(0, 1, 2))
y = array_ops.unstack(x, axis=1)[0].eval()
self.assertEqual(y.shape, (0, 2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
stackArmor/security_monkey | security_monkey/auditors/rds/rds_db_instance.py | 3 | 2486 | # Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditors.rds.rds_db_instance
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]>
"""
from security_monkey.auditor import Auditor
from security_monkey.watchers.rds.rds_db_instance import RDSDBInstance
from security_monkey.watchers.security_group import SecurityGroup
class RDSDBInstanceAuditor(Auditor):
index = RDSDBInstance.index
i_am_singular = RDSDBInstance.i_am_singular
i_am_plural = RDSDBInstance.i_am_plural
support_auditor_indexes = [SecurityGroup.index]
def __init__(self, accounts=None, debug=False):
super(RDSDBInstanceAuditor, self).__init__(accounts=accounts, debug=debug)
def _get_listener_ports_and_protocols(self, item):
"""
"endpoint": {
"HostedZoneId": "ZZZZZZZZZZZZZZ",
"Port": 3306,
"Address": "blah.region.rds.amazonaws.com"
},
"""
port = item.config.get('endpoint', {}).get('Port')
return dict(TCP=set([port]))
def check_internet_accessible(self, item):
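        # Only publicly accessible instances are considered; for those, link in
        # issues already raised on the attached VPC security groups whenever
        # _issue_matches_listeners() says they overlap the DB listener port.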
publicly_accessible = item.config.get('publicly_accessible')
if publicly_accessible:
security_groups = item.config.get('vpc_security_groups', [])
security_group_ids = {sg['VpcSecurityGroupId'] for sg in security_groups}
sg_auditor_items = self.get_auditor_support_items(SecurityGroup.index, item.account)
security_auditor_groups = [sg for sg in sg_auditor_items if sg.config.get('id') in security_group_ids]
for sg in security_auditor_groups:
for issue in sg.db_item.issues:
if self._issue_matches_listeners(item, issue):
self.link_to_support_item_issues(item, sg.db_item,
                            sub_issue_message=issue.issue, score=issue.score)
| apache-2.0 |
kid/troposphere | troposphere/cloudformation.py | 22 | 5724 | # Copyright (c) 2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSHelperFn, AWSObject, AWSProperty, Ref, BaseAWSObject
from .validators import integer, boolean, encoding
class Stack(AWSObject):
resource_type = "AWS::CloudFormation::Stack"
props = {
'NotificationARNs': ([basestring], False),
'Parameters': (dict, False),
'TemplateURL': (basestring, True),
'TimeoutInMinutes': (integer, False),
}
class AWSCustomObject(BaseAWSObject):
dictname = 'Properties'
class CustomResource(AWSCustomObject):
resource_type = "AWS::CloudFormation::CustomResource"
props = {
'ServiceToken': (basestring, True)
}
class WaitCondition(AWSObject):
resource_type = "AWS::CloudFormation::WaitCondition"
props = {
'Count': (integer, False),
'Handle': (Ref, True),
'Timeout': (integer, True),
}
class WaitConditionHandle(AWSObject):
resource_type = "AWS::CloudFormation::WaitConditionHandle"
props = {}
class Metadata(AWSHelperFn):
def __init__(self, *args):
self.data = args
def JSONrepr(self):
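        # Merge the key/value pairs of every wrapped helper (e.g. Init,
        # Authentication) into a single metadata dict; later entries win on
        # duplicate keys.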
t = []
for i in self.data:
t += i.JSONrepr().items()
return dict(t)
class InitFileContext(AWSHelperFn):
def __init__(self, data):
self.data = data
def JSONrepr(self):
return self.data
class InitFile(AWSProperty):
props = {
'content': (basestring, False),
'mode': (basestring, False),
'owner': (basestring, False),
'encoding': (encoding, False),
'group': (basestring, False),
'source': (basestring, False),
'authentication': (basestring, False),
'context': (InitFileContext, False)
}
class InitFiles(AWSHelperFn):
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
for k in data:
if not isinstance(data[k], InitFile):
raise ValueError("File '" + k + "' must be of type InitFile")
def JSONrepr(self):
return self.data
class InitService(AWSProperty):
props = {
'ensureRunning': (boolean, False),
'enabled': (boolean, False),
'files': (list, False),
'packages': (dict, False),
'sources': (list, False),
'commands': (list, False)
}
class InitServices(AWSHelperFn):
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
for k in data:
if not isinstance(data[k], InitService):
raise ValueError(
"Service '" + k + "' must be of type InitService"
)
def JSONrepr(self):
return self.data
class InitConfigSets(AWSHelperFn):
def __init__(self, **kwargs):
self.validate(dict(kwargs))
self.data = kwargs
def validate(self, config_sets):
for k, v in config_sets.iteritems():
if not isinstance(v, list):
raise ValueError('configSets values must be of type list')
def JSONrepr(self):
return self.data
class InitConfig(AWSProperty):
props = {
'groups': (dict, False),
'users': (dict, False),
'sources': (dict, False),
'packages': (dict, False),
'files': (dict, False),
'commands': (dict, False),
'services': (dict, False)
}
def validate_authentication_type(auth_type):
valid_types = ['S3', 'basic']
if auth_type not in valid_types:
raise ValueError('Type needs to be one of %r' % valid_types)
return auth_type
class AuthenticationBlock(AWSProperty):
props = {
"accessKeyId": (basestring, False),
"buckets": ([basestring], False),
"password": (basestring, False),
"secretKey": (basestring, False),
"type": (validate_authentication_type, False),
"uris": ([basestring], False),
"username": (basestring, False),
"roleName": (basestring, False)
}
class Authentication(AWSHelperFn):
def __init__(self, data):
self.validate(data)
self.data = {"AWS::CloudFormation::Authentication": data}
def validate(self, data):
for k, v in data.iteritems():
if not isinstance(v, AuthenticationBlock):
raise ValueError(
'authentication block must be of type'
' cloudformation.AuthenticationBlock'
)
def JSONrepr(self):
return self.data
class Init(AWSHelperFn):
def __init__(self, data, **kwargs):
self.validate(data, dict(kwargs))
if isinstance(data, InitConfigSets):
self.data = {
'AWS::CloudFormation::Init': dict({'configSets': data},
**kwargs)
}
else:
self.data = {'AWS::CloudFormation::Init': data}
def validate(self, data, config_sets):
if isinstance(data, InitConfigSets):
for k, v in sorted(config_sets.iteritems()):
if not isinstance(v, InitConfig):
raise ValueError(
                        'init configs must be of type '
                        'cloudformation.InitConfig'
)
else:
if 'config' not in data:
raise ValueError('config property is required')
if not isinstance(data['config'], InitConfig):
raise ValueError(
'config property must be of type cloudformation.InitConfig'
)
def JSONrepr(self):
return self.data
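# Hedged usage sketch (not part of the original library source; the resource
# keys and file path below are assumptions). It illustrates how Init,
# InitConfigSets, InitConfig, InitFiles and InitFile compose:
#
#   cfn_init = Init(
#       InitConfigSets(default=['config']),
#       config=InitConfig(
#           files=InitFiles({
#               '/etc/motd': InitFile(content='hello', mode='000644'),
#           }),
#       ),
#   )
#
# The resulting object serialises (via JSONrepr) under the
# "AWS::CloudFormation::Init" metadata key of a resource.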
| bsd-2-clause |
acsone/purchase-workflow | purchase_multi_picking/__openerp__.py | 13 | 1765 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012-2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Purchase multi picking",
'version': '0.2',
'category': 'Purchase Management',
'summary': "Multi Pickings from Purchase Orders",
'description': """
This module allows generating several pickings from the same purchase order.
You just have to indicate which order lines have to be grouped in the same
picking. When the order is confirmed, a picking is generated for each group.
""",
'author': "Agile Business Group,Odoo Community Association (OCA)",
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
"depends": ['purchase', 'stock'],
"data": [
'purchase_view.xml',
'security/ir.model.access.csv',
],
"demo": [],
"active": False,
"installable": False
}
| agpl-3.0 |
cpennington/edx-platform | common/djangoapps/student/tests/test_events.py | 4 | 6996 | # -*- coding: utf-8 -*-
"""
Test that various events are fired for models in the student app.
"""
import mock
from django.db.utils import IntegrityError
from django.test import TestCase
from django_countries.fields import Country
from student.models import CourseEnrollmentAllowed
from student.tests.factories import CourseEnrollmentAllowedFactory, UserFactory
from student.tests.tests import UserSettingsEventTestMixin
class TestUserProfileEvents(UserSettingsEventTestMixin, TestCase):
"""
Test that we emit field change events when UserProfile models are changed.
"""
def setUp(self):
super(TestUserProfileEvents, self).setUp()
self.table = 'auth_userprofile'
self.user = UserFactory.create()
self.profile = self.user.profile
self.reset_tracker()
def test_change_one_field(self):
"""
Verify that we emit an event when a single field changes on the user
profile.
"""
self.profile.year_of_birth = 1900
self.profile.save()
self.assert_user_setting_event_emitted(setting='year_of_birth', old=None, new=self.profile.year_of_birth)
# Verify that we remove the temporary `_changed_fields` property from
# the model after we're done emitting events.
with self.assertRaises(AttributeError):
self.profile._changed_fields # pylint: disable=pointless-statement, protected-access
def test_change_many_fields(self):
"""
Verify that we emit one event per field when many fields change on the
user profile in one transaction.
"""
self.profile.gender = u'o'
self.profile.bio = 'test bio'
self.profile.save()
self.assert_user_setting_event_emitted(setting='bio', old=None, new=self.profile.bio)
self.assert_user_setting_event_emitted(setting='gender', old=u'm', new=u'o')
def test_unicode(self):
"""
Verify that the events we emit can handle unicode characters.
"""
old_name = self.profile.name
self.profile.name = u'Dånîél'
self.profile.save()
self.assert_user_setting_event_emitted(setting='name', old=old_name, new=self.profile.name)
def test_country(self):
"""
Verify that we properly serialize the JSON-unfriendly Country field.
"""
self.profile.country = Country(u'AL', 'dummy_flag_url')
self.profile.save()
self.assert_user_setting_event_emitted(setting='country', old=None, new=self.profile.country)
def test_excluded_field(self):
"""
Verify that we don't emit events for ignored fields.
"""
self.profile.meta = {u'foo': u'bar'}
self.profile.save()
self.assert_no_events_were_emitted()
@mock.patch('student.models.UserProfile.save', side_effect=IntegrityError)
def test_no_event_if_save_failed(self, _save_mock):
"""
Verify no event is triggered if the save does not complete. Note that the pre_save
signal is not called in this case either, but the intent is to make it clear that this model
should never emit an event if save fails.
"""
self.profile.gender = "unknown"
with self.assertRaises(IntegrityError):
self.profile.save()
self.assert_no_events_were_emitted()
class TestUserEvents(UserSettingsEventTestMixin, TestCase):
"""
Test that we emit field change events when User models are changed.
"""
def setUp(self):
super(TestUserEvents, self).setUp()
self.user = UserFactory.create()
self.reset_tracker()
self.table = 'auth_user'
def test_change_one_field(self):
"""
Verify that we emit an event when a single field changes on the user.
"""
old_username = self.user.username
self.user.username = u'new username'
self.user.save()
self.assert_user_setting_event_emitted(setting='username', old=old_username, new=self.user.username)
def test_change_many_fields(self):
"""
Verify that we emit one event per field when many fields change on the
user in one transaction.
"""
old_email = self.user.email
old_is_staff = self.user.is_staff
self.user.email = u'[email protected]'
self.user.is_staff = True
self.user.save()
self.assert_user_setting_event_emitted(setting='email', old=old_email, new=self.user.email)
self.assert_user_setting_event_emitted(setting='is_staff', old=old_is_staff, new=self.user.is_staff)
def test_password(self):
"""
Verify that password values are not included in the event payload.
"""
self.user.password = u'new password'
self.user.save()
self.assert_user_setting_event_emitted(setting='password', old=None, new=None)
def test_related_fields_ignored(self):
"""
Verify that we don't emit events for related fields.
"""
self.user.loginfailures_set.create()
self.user.save()
self.assert_no_events_were_emitted()
@mock.patch('django.contrib.auth.models.User.save', side_effect=IntegrityError)
def test_no_event_if_save_failed(self, _save_mock):
"""
Verify no event is triggered if the save does not complete. Note that the pre_save
signal is not called in this case either, but the intent is to make it clear that this model
should never emit an event if save fails.
"""
self.user.password = u'new password'
with self.assertRaises(IntegrityError):
self.user.save()
self.assert_no_events_were_emitted()
def test_no_first_and_last_name_events(self):
"""
Verify that first_name and last_name events are not emitted.
"""
self.user.first_name = "Donald"
self.user.last_name = "Duck"
self.user.save()
self.assert_no_events_were_emitted()
def test_enrolled_after_email_change(self):
"""
Test that when a user's email changes, the user is enrolled in pending courses.
"""
pending_enrollment = CourseEnrollmentAllowedFactory(auto_enroll=True)
# the e-mail will change to [email protected] (from something else)
self.assertNotEqual(self.user.email, '[email protected]')
# there's a CEA for the new e-mail
self.assertEqual(CourseEnrollmentAllowed.objects.count(), 1)
self.assertEqual(CourseEnrollmentAllowed.objects.filter(email='[email protected]').count(), 1)
# Changing the e-mail to the enrollment-allowed e-mail should enroll
self.user.email = '[email protected]'
self.user.save()
self.assert_user_enrollment_occurred('edX/toy/2012_Fall')
# CEAs shouldn't have been affected
self.assertEqual(CourseEnrollmentAllowed.objects.count(), 1)
self.assertEqual(CourseEnrollmentAllowed.objects.filter(email='[email protected]').count(), 1)
| agpl-3.0 |
mvesper/invenio | modules/bibsched/lib/bibsched.py | 3 | 48933 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibSched - task management, scheduling and executing system for Invenio
"""
import os
import sys
import time
import re
import marshal
import getopt
from itertools import chain
from socket import gethostname
from subprocess import Popen
from datetime import datetime
import signal
from invenio.bibtask_config import \
CFG_BIBTASK_VALID_TASKS, \
CFG_BIBTASK_MONOTASKS, \
CFG_BIBTASK_FIXEDTIMETASKS
from invenio.config import \
CFG_PREFIX, \
CFG_TMPSHAREDDIR, \
CFG_BIBSCHED_REFRESHTIME, \
CFG_BINDIR, \
CFG_LOGDIR, \
CFG_BIBSCHED_GC_TASKS_OLDER_THAN, \
CFG_BIBSCHED_GC_TASKS_TO_REMOVE, \
CFG_BIBSCHED_GC_TASKS_TO_ARCHIVE, \
CFG_BIBSCHED_MAX_NUMBER_CONCURRENT_TASKS, \
CFG_SITE_URL, \
CFG_BIBSCHED_NODE_TASKS, \
CFG_INSPIRE_SITE, \
CFG_BIBSCHED_INCOMPATIBLE_TASKS, \
CFG_BIBSCHED_NON_CONCURRENT_TASKS, \
CFG_VERSION, \
CFG_BIBSCHED_NEVER_STOPS
from invenio.dbquery import run_sql, real_escape_string
from invenio.errorlib import register_exception, register_emergency
from invenio.shellutils import run_shell_command
CFG_VALID_STATUS = ('WAITING', 'SCHEDULED', 'RUNNING', 'CONTINUING',
'% DELETED', 'ABOUT TO STOP', 'ABOUT TO SLEEP', 'STOPPED',
'SLEEPING', 'KILLED', 'NOW STOP', 'ERRORS REPORTED')
CFG_MOTD_PATH = os.path.join(CFG_TMPSHAREDDIR, "bibsched.motd")
ACTIVE_STATUS = ('SCHEDULED', 'ABOUT TO SLEEP', 'ABOUT TO STOP',
'CONTINUING', 'RUNNING')
SHIFT_RE = re.compile(r"([-\+]{0,1})([\d]+)([dhms])")
class RecoverableError(StandardError):
pass
def get_datetime(var, format_string="%Y-%m-%d %H:%M:%S"):
"""Returns a date string according to the format string.
It can handle normal date strings and shifts with respect
to now."""
try:
date = time.time()
factors = {"d": 24*3600, "h": 3600, "m": 60, "s": 1}
m = SHIFT_RE.match(var)
if m:
sign = m.groups()[0] == "-" and -1 or 1
factor = factors[m.groups()[2]]
value = float(m.groups()[1])
date = time.localtime(date + sign * factor * value)
date = time.strftime(format_string, date)
else:
date = time.strptime(var, format_string)
date = time.strftime(format_string, date)
return date
except ValueError:
return None
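# Illustrative usage sketch (not part of the original module); the concrete
# values are hypothetical, since the shifted forms depend on the current time:
#
#   get_datetime("2024-01-01 00:00:00")  # -> "2024-01-01 00:00:00"
#   get_datetime("+2h")                  # -> a timestamp two hours from now
#   get_datetime("-1d")                  # -> a timestamp one day in the past
#   get_datetime("not a date")           # -> None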
def get_my_pid(process, args=''):
if sys.platform.startswith('freebsd'):
command = "ps -o pid,args | grep '%s %s' | grep -v 'grep' | sed -n 1p" % (process, args)
else:
command = "ps -C %s o '%%p%%a' | grep '%s %s' | grep -v 'grep' | sed -n 1p" % (process, process, args)
answer = run_shell_command(command)[1].strip()
if answer == '':
answer = 0
else:
answer = answer[:answer.find(' ')]
return int(answer)
def get_task_pid(task_id):
"""Return the pid of task_name/task_id"""
try:
path = os.path.join(CFG_PREFIX, 'var', 'run', 'bibsched_task_%d.pid' % task_id)
pid = int(open(path).read())
os.kill(pid, 0)
return pid
except (OSError, IOError):
return None
def get_last_taskid():
"""Return the last taskid used."""
return run_sql("SELECT MAX(id) FROM schTASK")[0][0]
def delete_task(task_id):
"""Delete the corresponding task."""
run_sql("DELETE FROM schTASK WHERE id=%s", (task_id, ))
def is_task_scheduled(task_name):
"""Check if a certain task_name is due for execution (WAITING or RUNNING)"""
sql = """SELECT COUNT(proc) FROM schTASK
WHERE proc = %s AND (status='WAITING' OR status='RUNNING')"""
return run_sql(sql, (task_name,))[0][0] > 0
def get_task_ids_by_descending_date(task_name, statuses=['SCHEDULED']):
"""Returns list of task ids, ordered by descending runtime."""
sql = """SELECT id FROM schTASK
WHERE proc=%s AND (%s)
ORDER BY runtime DESC""" \
% " OR ".join(["status = '%s'" % x for x in statuses])
return [x[0] for x in run_sql(sql, (task_name,))]
def get_task_options(task_id):
"""Returns options for task_id read from the BibSched task queue table."""
res = run_sql("SELECT arguments FROM schTASK WHERE id=%s", (task_id,))
try:
return marshal.loads(res[0][0])
except IndexError:
return list()
def gc_tasks(verbose=False, statuses=None, since=None, tasks=None): # pylint: disable=W0613
"""Garbage collect the task queue."""
if tasks is None:
tasks = CFG_BIBSCHED_GC_TASKS_TO_REMOVE + CFG_BIBSCHED_GC_TASKS_TO_ARCHIVE
if since is None:
since = '-%id' % CFG_BIBSCHED_GC_TASKS_OLDER_THAN
if statuses is None:
statuses = ['DONE']
statuses = [status.upper() for status in statuses if status.upper() != 'RUNNING']
date = get_datetime(since)
status_query = 'status in (%s)' % ','.join([repr(real_escape_string(status)) for status in statuses])
for task in tasks:
if task in CFG_BIBSCHED_GC_TASKS_TO_REMOVE:
res = run_sql("""DELETE FROM schTASK WHERE proc=%%s AND %s AND
runtime<%%s""" % status_query, (task, date))
write_message('Deleted %s %s tasks (created before %s) with %s'
% (res, task, date, status_query))
elif task in CFG_BIBSCHED_GC_TASKS_TO_ARCHIVE:
run_sql("""INSERT INTO hstTASK(id,proc,host,user,
runtime,sleeptime,arguments,status,progress)
SELECT id,proc,host,user,
runtime,sleeptime,arguments,status,progress
FROM schTASK WHERE proc=%%s AND %s AND
runtime<%%s""" % status_query, (task, date))
res = run_sql("""DELETE FROM schTASK WHERE proc=%%s AND %s AND
runtime<%%s""" % status_query, (task, date))
write_message('Archived %s %s tasks (created before %s) with %s'
% (res, task, date, status_query))
def spawn_task(command, wait=False):
"""
Spawn the provided command in a way that is detached from the current
group. In this way a signal received by bibsched is not going to be
automatically propagated to the spawned process.
"""
def preexec(): # Don't forward signals.
os.setsid()
devnull = open(os.devnull, "w")
process = Popen(command, preexec_fn=preexec, shell=True,
stderr=devnull, stdout=devnull)
if wait:
process.wait()
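# Hedged usage sketch (not in the original source; the task binary and its
# arguments are assumptions): launch a task in its own session so signals
# sent to bibsched are not propagated to it, optionally waiting for it:
#
#   spawn_task("%s/bibindex 42" % CFG_BINDIR, wait=False)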
def bibsched_get_host(task_id):
"""Retrieve the hostname of the task"""
res = run_sql("SELECT host FROM schTASK WHERE id=%s LIMIT 1", (task_id, ), 1)
if res:
return res[0][0]
def bibsched_set_host(task_id, host=""):
"""Update the progress of task_id."""
return run_sql("UPDATE schTASK SET host=%s WHERE id=%s", (host, task_id))
def bibsched_get_status(task_id):
"""Retrieve the task status."""
res = run_sql("SELECT status FROM schTASK WHERE id=%s LIMIT 1", (task_id, ), 1)
if res:
return res[0][0]
def bibsched_set_status(task_id, status, when_status_is=None):
"""Update the status of task_id."""
if when_status_is is None:
return run_sql("UPDATE schTASK SET status=%s WHERE id=%s",
(status, task_id))
else:
return run_sql("UPDATE schTASK SET status=%s WHERE id=%s AND status=%s",
(status, task_id, when_status_is))
def bibsched_set_progress(task_id, progress):
"""Update the progress of task_id."""
return run_sql("UPDATE schTASK SET progress=%s WHERE id=%s", (progress, task_id))
def bibsched_set_priority(task_id, priority):
"""Update the priority of task_id."""
return run_sql("UPDATE schTASK SET priority=%s WHERE id=%s", (priority, task_id))
def bibsched_set_name(task_id, name):
"""Update the name of task_id."""
return run_sql("UPDATE schTASK SET proc=%s WHERE id=%s", (name, task_id))
def bibsched_set_sleeptime(task_id, sleeptime):
"""Update the sleeptime of task_id."""
return run_sql("UPDATE schTASK SET sleeptime=%s WHERE id=%s", (sleeptime, task_id))
def bibsched_set_runtime(task_id, runtime):
"""Update the sleeptime of task_id."""
return run_sql("UPDATE schTASK SET runtime=%s WHERE id=%s", (runtime, task_id))
def bibsched_send_signal(task_id, sig):
"""Send a signal to a given task."""
if bibsched_get_host(task_id) != gethostname():
return False
pid = get_task_pid(task_id)
if pid:
try:
os.kill(pid, sig)
return True
except OSError:
return False
return False
def is_monotask(proc):
#procname = proc.split(':')[0]
return proc in CFG_BIBTASK_MONOTASKS
def stop_task(task):
Log("Sending STOP signal to #%d (%s) which was in status %s" % (task.id, task.proc, task.status))
bibsched_set_status(task.id, 'ABOUT TO STOP', task.status)
def sleep_task(task):
Log("Sending SLEEP signal to #%d (%s) which was in status %s" % (task.id, task.proc, task.status))
bibsched_set_status(task.id, 'ABOUT TO SLEEP', task.status)
def fetch_debug_mode():
r = run_sql('SELECT value FROM schSTATUS WHERE name = "debug_mode"')
try:
debug_mode = bool(int(r[0][0]))
except (ValueError, IndexError):
# We insert the missing configuration variable in the DB
run_sql('INSERT INTO schSTATUS (name, value) VALUES ("debug_mode", "0")')
debug_mode = False
return debug_mode
class Task(object):
def __init__(self, task_id, proc, runtime, status, priority, host, sequenceid):
self.id = task_id
self.proc = proc
self.runtime = runtime
self.status = status
self.priority = priority
self.host = host
self.sequenceid = sequenceid
@staticmethod
def from_resultset(resultset):
return [Task(*row) for row in resultset]
def __eq__(self, other):
return self.id == other.id
def __lt__(self, other):
return self.id < other.id
def __unicode__(self):
msg = u"Task(id=%r, proc=%r, runtime=%r, status=%r, " \
u"priority=%r, host=%r, sequenceid=%r"
return msg % (self.id, self.proc, self.runtime, self.status,
self.priority, self.host, self.sequenceid)
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
return unicode(self)
class BibSched(object):
def __init__(self, debug=False):
self.cycles_count = 0
self.debug = debug
self.hostname = gethostname()
self.helper_modules = CFG_BIBTASK_VALID_TASKS
## All the tasks in the queue that the node is allowed to manipulate
self.node_relevant_bibupload_tasks = ()
self.node_relevant_waiting_tasks = ()
self.node_sleeping_tasks = ()
self.node_active_tasks = ()
## All tasks of all nodes
self.sleeping_tasks_all_nodes = ()
self.waiting_tasks_all_nodes = ()
self.active_tasks_all_nodes = ()
self.mono_tasks_all_nodes = ()
self.allowed_task_types = CFG_BIBSCHED_NODE_TASKS.get(self.hostname, CFG_BIBTASK_VALID_TASKS)
def tie_task_to_host(self, task_id):
"""Sets the hostname of a task to the machine executing this script
@return: True if the scheduling was successful, False otherwise,
e.g. if the task was scheduled concurrently on a different host.
"""
r = run_sql("""UPDATE schTASK SET host=%s, status='SCHEDULED'
WHERE id=%s AND status='WAITING'""",
(self.hostname, task_id))
return bool(r)
def filter_for_allowed_tasks(self):
""" Removes all tasks that are not allowed in this Invenio instance
"""
def relevant_task(task):
procname = task.proc.split(':')[0]
if procname not in self.allowed_task_types:
return False
return True
def filter_tasks(tasks):
return tuple(t for t in tasks if relevant_task(t))
self.node_relevant_bibupload_tasks = filter_tasks(self.node_relevant_bibupload_tasks)
self.node_relevant_waiting_tasks = filter_tasks(self.waiting_tasks_all_nodes)
def is_task_compatible(self, task1, task2):
"""Return True when the two tasks can run concurrently or can run when
the other task is sleeping"""
procname1 = task1.proc.split(':')[0]
procname2 = task2.proc.split(':')[0]
for non_compatible_tasks in CFG_BIBSCHED_INCOMPATIBLE_TASKS:
if (task1.proc in non_compatible_tasks or procname1 in non_compatible_tasks) \
and (task2.proc in non_compatible_tasks or procname2 in non_compatible_tasks):
return False
if task1.proc == task2.proc == 'bibupload':
return True
return task1.proc != task2.proc
def is_task_non_concurrent(self, task1, task2):
for non_concurrent_tasks in CFG_BIBSCHED_NON_CONCURRENT_TASKS:
if (task1.proc.split(':')[0] in non_concurrent_tasks
or task1.proc in non_concurrent_tasks):
if (task2.proc.split(':')[0] in non_concurrent_tasks
or task2.proc in non_concurrent_tasks):
return True
return False
def get_tasks_to_sleep_and_stop(self, task, task_set):
"""Among the task_set, return the list of tasks to stop and the list
of tasks to sleep.
"""
def minimum_priority_task(task_set):
min_task = None
## For all the lower priority tasks...
for t in task_set:
if (min_task is None or t.priority < min_task.priority) \
and t.status != 'SLEEPING' and t.priority < task.priority \
and task.host == t.host:
# We don't put to sleep already sleeping task :-)
# And it only makes sense to free a spot on the local node
min_task = t
return min_task
to_stop = []
to_sleep = []
for t in task_set:
if not self.is_task_compatible(task, t):
to_stop.append(t)
if is_monotask(task.proc):
to_sleep = [t for t in task_set if t.status != 'SLEEPING']
else:
for t in task_set:
if t.status != 'SLEEPING' and self.is_task_non_concurrent(task, t):
to_sleep.append(t)
# Only needed if we are not freeing a spot already
# So to_stop and to_sleep should be empty
if not to_stop and not to_sleep and \
len(self.node_active_tasks) >= CFG_BIBSCHED_MAX_NUMBER_CONCURRENT_TASKS:
min_task = minimum_priority_task(task_set)
if min_task:
to_sleep = [min_task]
return to_stop, to_sleep
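    # Illustrative note (not in the original source): for a candidate task T
    # this is typically called as
    #
    #   to_stop, to_sleep = self.get_tasks_to_sleep_and_stop(
    #       T, self.active_tasks_all_nodes)
    #
    # Tasks incompatible with T end up in to_stop; tasks that only need to
    # pause (monotask handling, non-concurrent tasks, or freeing a slot for a
    # higher priority task) end up in to_sleep.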
def split_active_tasks_by_priority(self, task):
"""Return two lists: the list of task_ids with lower priority and
those with higher or equal priority."""
higher = []
lower = []
for t in self.node_active_tasks:
if task.id == t.id:
continue
if t.priority < task.priority:
lower.append(t)
else:
higher.append(t)
return lower, higher
def handle_task(self, task):
"""Perform needed action of the row representing a task.
Return True when task_status need to be refreshed"""
debug = self.debug
Log(str(task), debug)
# If the task is active, we do scheduling stuff here
if task in self.node_active_tasks:
# For multi-node
# check if we need to sleep ourselves for monotasks
# to be able to run
for t in self.mono_tasks_all_nodes:
# 2 cases here
# If a monotask is running, we want to sleep
# If a monotask is waiting, we want to sleep if our priority
# is inferior
if task.priority < t.priority or t.status in ACTIVE_STATUS:
# Sleep ourselves
if task.status not in ('ABOUT TO STOP', 'ABOUT TO SLEEP', 'SCHEDULED'):
Log("Sleeping ourselves because of a monotask: %s" % t, debug)
sleep_task(task)
return True
else:
Log("A monotask is running but already going to sleep/stop", debug)
return False
# We try to run a task in waiting status here
elif task in self.node_relevant_waiting_tasks:
Log("Trying to run %r" % task, debug)
if task.priority < -10:
Log("Cannot run because priority < -10", debug)
return False
if task.host and task.host != self.hostname:
Log("Cannot run because this task is bound to a different machine", debug)
return False
lower, higher = self.split_active_tasks_by_priority(task)
Log('lower: %r' % lower, debug)
Log('higher: %r' % higher, debug)
for t in self.active_tasks_all_nodes:
if task.id != t.id and not self.is_task_compatible(task, t):
### !!! WE NEED TO CHECK FOR TASKS THAT CAN ONLY BE EXECUTED ON ONE MACHINE AT ONE TIME
### !!! FOR EXAMPLE BIBUPLOADS WHICH NEED TO BE EXECUTED SEQUENTIALLY AND NEVER CONCURRENTLY
## There's at least a higher priority task running that
## cannot run at the same time of the given task.
## We give up
Log("Cannot run because task_id: %s, proc: %s is in the queue and incompatible" % (t.id, t.proc), debug)
return False
if task.sequenceid:
                ## Let's normalize the priority of all tasks in a sequenceid to the
## max priority of the group
max_priority = run_sql("""SELECT MAX(priority) FROM schTASK
WHERE status IN ('WAITING', 'RUNNING',
'SLEEPING', 'ABOUT TO STOP',
'ABOUT TO SLEEP',
'SCHEDULED', 'CONTINUING')
AND sequenceid=%s""",
(task.sequenceid, ))[0][0]
if run_sql("""UPDATE schTASK SET priority=%s
WHERE status IN ('WAITING', 'RUNNING',
'SLEEPING', 'ABOUT TO STOP', 'ABOUT TO SLEEP',
'SCHEDULED', 'CONTINUING') AND sequenceid=%s""",
(max_priority, task.sequenceid)):
Log("Raised all waiting tasks with sequenceid "
"%s to the max priority %s" % (task.sequenceid, max_priority))
                    ## Some priorities were raised
return True
## Let's normalize the runtime of all tasks in a sequenceid to
## the compatible runtime.
current_runtimes = run_sql("""SELECT id, runtime FROM schTASK WHERE sequenceid=%s AND status='WAITING' ORDER by id""", (task.sequenceid, ))
runtimes_adjusted = False
if current_runtimes:
last_runtime = current_runtimes[0][1]
for the_task_id, runtime in current_runtimes:
if runtime < last_runtime:
run_sql("""UPDATE schTASK SET runtime=%s WHERE id=%s""", (last_runtime, the_task_id))
Log("Adjusted runtime of task_id %s to %s in order to be executed in the correct sequenceid order" % (the_task_id, last_runtime), debug)
runtimes_adjusted = True
runtime = last_runtime
last_runtime = runtime
if runtimes_adjusted:
                    ## Some runtimes have been adjusted
return True
if task.sequenceid is not None:
for t in chain(self.active_tasks_all_nodes,
self.waiting_tasks_all_nodes):
if task.sequenceid == t.sequenceid and task.id > t.id:
                        Log('Task %s needs to run after task %s since they have the same sequence id: %s' % (task.id, t.id, task.sequenceid), debug)
## If there is a task with same sequence number then do not run the current task
return False
if is_monotask(task.proc) and higher:
## This is a monotask
Log("Cannot run because this is a monotask and there are higher priority tasks: %s" % (higher, ), debug)
return False
## Check for monotasks wanting to run
for t in self.mono_tasks_all_nodes:
if task.priority < t.priority:
Log("Cannot run because there is a monotask with higher priority: %s %s" % (t.id, t.proc), debug)
return False
## We check if it is necessary to stop/put to sleep some lower priority
## task.
tasks_to_stop, tasks_to_sleep = self.get_tasks_to_sleep_and_stop(task, self.active_tasks_all_nodes)
Log('tasks_to_stop: %s' % tasks_to_stop, debug)
Log('tasks_to_sleep: %s' % tasks_to_sleep, debug)
if tasks_to_stop and task.priority < 100:
## Only tasks with priority higher than 100 have the power
## to put task to stop.
Log("Cannot run because there are task to stop: %s and priority < 100" % tasks_to_stop, debug)
return False
for t in tasks_to_sleep:
if not t.priority < task.priority:
Log("Cannot run because #%s with priority %s cannot be slept by this task" % (t.id, t.priority), debug)
return False
procname = task.proc.split(':')[0]
if not tasks_to_stop and not tasks_to_sleep:
if is_monotask(task.proc) and self.active_tasks_all_nodes:
Log("Cannot run because this is a monotask and there are other tasks running: %s" % (self.active_tasks_all_nodes, ), debug)
return False
if task.proc not in CFG_BIBTASK_FIXEDTIMETASKS and len(self.node_active_tasks) >= CFG_BIBSCHED_MAX_NUMBER_CONCURRENT_TASKS:
Log("Cannot run because all resources (%s) are used (%s), active: %s" % (CFG_BIBSCHED_MAX_NUMBER_CONCURRENT_TASKS, len(self.node_active_tasks), self.node_active_tasks), debug)
return False
for t in self.waiting_tasks_all_nodes:
if self.is_task_non_concurrent(task, t) and task.priority < t.priority:
Log("Cannot run because %s is non-concurrent and has higher priority" % t, debug)
return False
if task.status in ("SCHEDULED",):
Log("Task is already scheduled", debug)
return False
elif task.status in ("SLEEPING", "ABOUT TO SLEEP"):
if task.host != self.hostname:
Log("We can't wake up tasks that are not in the same node", debug)
return False
## We can only wake up tasks that are running on our own host
for t in self.node_active_tasks:
                ## But only if there are no other tasks still going to sleep, otherwise
                ## we might end up stealing the slot for a higher priority task.
if t.id != task.id and t.status in ('ABOUT TO SLEEP', 'ABOUT TO STOP'):
Log("Not yet waking up task #%d since there are other tasks (%s #%d) going to sleep (higher priority task incoming?)" % (task.id, t.proc, t.id), debug)
return False
bibsched_set_status(task.id, "CONTINUING", task.status)
if not bibsched_send_signal(task.id, signal.SIGCONT):
bibsched_set_status(task.id, "ERROR", "CONTINUING")
Log("Task #%d (%s) woken up but didn't existed anymore" % (task.id, task.proc))
return True
Log("Task #%d (%s) woken up" % (task.id, task.proc))
return True
elif procname in self.helper_modules:
program = os.path.join(CFG_BINDIR, procname)
## Trick to log in bibsched.log the task exiting
exit_str = '&& echo "`date "+%%Y-%%m-%%d %%H:%%M:%%S"` --> Task #%d (%s) exited" >> %s' % (task.id, task.proc, os.path.join(CFG_LOGDIR, 'bibsched.log'))
command = "%s %s %s" % (program, str(task.id), exit_str)
### Set the task to scheduled and tie it to this host
if self.tie_task_to_host(task.id):
Log("Task #%d (%s) started" % (task.id, task.proc))
### Relief the lock for the BibTask, it is safe now to do so
spawn_task(command, wait=is_monotask(task.proc))
count = 10
while run_sql("""SELECT status FROM schTASK
WHERE id=%s AND status='SCHEDULED'""",
(task.id, )):
## Polling to wait for the task to really start,
## in order to avoid race conditions.
if count <= 0:
Log("Process %s (task_id: %s) was launched but seems not to be able to reach RUNNING status." % (task.proc, task.id))
bibsched_set_status(task.id, "ERROR", "SCHEDULED")
return True
time.sleep(CFG_BIBSCHED_REFRESHTIME)
count -= 1
return True
else:
raise StandardError("%s is not in the allowed modules" % procname)
else:
## It's not still safe to run the task.
## We first need to stop task that should be stopped
## and to put to sleep task that should be put to sleep
changes = False
for t in tasks_to_stop:
if t.status not in ('ABOUT TO STOP', 'SCHEDULED'):
changes = True
stop_task(t)
else:
Log("Cannot run because we are waiting for #%s to stop" % t.id, debug)
for t in tasks_to_sleep:
if t.status not in ('ABOUT TO SLEEP', 'SCHEDULED', 'ABOUT TO STOP'):
changes = True
sleep_task(t)
else:
Log("Cannot run because we are waiting for #%s to sleep" % t.id, debug)
if changes:
time.sleep(CFG_BIBSCHED_REFRESHTIME)
return changes
def check_errors(self):
errors = run_sql("""SELECT id,proc,status FROM schTASK
WHERE status = 'ERROR'
OR status = 'DONE WITH ERRORS'
OR status = 'CERROR'""")
if errors:
error_msgs = []
error_recoverable = True
for e_id, e_proc, e_status in errors:
if run_sql("""UPDATE schTASK
SET status='ERRORS REPORTED'
WHERE id = %s AND (status='CERROR'
OR status='ERROR'
OR status='DONE WITH ERRORS')""", [e_id]):
msg = " #%s %s -> %s" % (e_id, e_proc, e_status)
error_msgs.append(msg)
if e_status in ('ERROR', 'DONE WITH ERRORS'):
error_recoverable = False
if error_msgs:
msg = "BibTask with ERRORS:\n%s" % '\n'.join(error_msgs)
if error_recoverable or CFG_BIBSCHED_NEVER_STOPS:
raise RecoverableError(msg)
else:
raise StandardError(msg)
def calculate_rows(self):
"""Return all the node relevant tasks for the algorithm to work on."""
if not CFG_INSPIRE_SITE:
max_bibupload_priority, min_bibupload_priority = run_sql(
"""SELECT MAX(priority), MIN(priority)
FROM schTASK
WHERE status IN ('WAITING', 'RUNNING', 'SLEEPING',
'ABOUT TO STOP', 'ABOUT TO SLEEP',
'SCHEDULED', 'CONTINUING')
AND proc = 'bibupload'
AND runtime <= NOW()""")[0]
if max_bibupload_priority > min_bibupload_priority:
run_sql(
"""UPDATE schTASK SET priority = %s
WHERE status IN ('WAITING', 'RUNNING', 'SLEEPING',
'ABOUT TO STOP', 'ABOUT TO SLEEP',
'SCHEDULED', 'CONTINUING')
AND proc = 'bibupload'
AND runtime <= NOW()
AND priority < %s""", (max_bibupload_priority,
max_bibupload_priority))
# The bibupload tasks are sorted by id,
# which means by the order they were scheduled
self.node_relevant_bibupload_tasks = Task.from_resultset(run_sql(
"""SELECT id, proc, runtime, status, priority, host, sequenceid
FROM schTASK WHERE status IN ('WAITING', 'SLEEPING')
AND proc = 'bibupload'
AND runtime <= NOW()
ORDER BY FIELD(status, 'SLEEPING', 'WAITING'),
id ASC LIMIT 1""", n=1))
## The other tasks are sorted by priority
self.waiting_tasks_all_nodes = Task.from_resultset(run_sql(
"""SELECT id, proc, runtime, status, priority, host, sequenceid
FROM schTASK WHERE (status = 'WAITING' AND runtime <= NOW())
OR status = 'SLEEPING'
ORDER BY priority DESC, runtime ASC, id ASC"""))
self.sleeping_tasks_all_nodes = Task.from_resultset(run_sql(
"""SELECT id, proc, runtime, status, priority, host, sequenceid
FROM schTASK WHERE status = 'SLEEPING'
ORDER BY priority DESC, runtime ASC, id ASC"""))
self.active_tasks_all_nodes = Task.from_resultset(run_sql(
"""SELECT id, proc, runtime, status, priority, host, sequenceid
FROM schTASK WHERE status IN ('RUNNING', 'CONTINUING',
'SCHEDULED', 'ABOUT TO STOP',
'ABOUT TO SLEEP')"""))
self.mono_tasks_all_nodes = tuple(t for t in
chain(self.waiting_tasks_all_nodes, self.active_tasks_all_nodes)
if is_monotask(t.proc))
## Remove tasks that can not be executed on this host
def filter_by_host(tasks):
return tuple(t for t in tasks if t.host == self.hostname or not t.host)
self.node_active_tasks = filter_by_host(self.active_tasks_all_nodes)
self.node_sleeping_tasks = filter_by_host(self.sleeping_tasks_all_nodes)
self.filter_for_allowed_tasks()
def check_auto_mode(self):
"""Check if the queue is in automatic or manual mode"""
r = run_sql('SELECT value FROM schSTATUS WHERE name = "auto_mode"')
try:
status = int(r[0][0])
except (ValueError, IndexError):
# We insert the missing configuration variable in the DB
run_sql('INSERT INTO schSTATUS (name, value) VALUES ("auto_mode", "1")')
status = 1
if not status:
r = run_sql('SELECT value FROM schSTATUS WHERE name = "resume_after"')
try:
date_string = r[0][0]
except IndexError:
pass
else:
if date_string:
resume_after = datetime(*(time.strptime(date_string, "%Y-%m-%d %H:%M:%S")[0:6]))
if datetime.now() > resume_after:
run_sql('UPDATE schSTATUS SET value = "" WHERE name = "resume_after"')
run_sql('UPDATE schSTATUS SET value = "1" WHERE name = "auto_mode"')
status = 1
return status
def check_for_crashed_tasks(self):
for task in self.node_active_tasks:
Log('Checking %s' % task.id)
pid = get_task_pid(task.id)
if not pid:
Log('Task crashed %s' % task.id)
run_sql("""UPDATE schTASK SET status = 'CERROR'
WHERE id = %%s AND status IN (%s)"""
% ','.join("'%s'" % s for s in ACTIVE_STATUS),
[task.id])
def check_debug_mode(self):
debug_mode = fetch_debug_mode()
if debug_mode and not self.debug:
Log('Switching to debug mode')
elif self.debug and not debug_mode:
Log('Switching out of debug mode')
self.debug = debug_mode
def tick(self):
Log("New bibsched cycle", self.debug)
self.cycles_count += 1
self.check_debug_mode()
if self.cycles_count % 50 == 0:
self.check_for_crashed_tasks()
try:
self.check_errors()
except RecoverableError, msg:
register_emergency('Light emergency from %s: BibTask failed: %s'
% (CFG_SITE_URL, msg))
# Update our tasks list (to know who is running, sleeping, etc.)
self.calculate_rows()
# Let's first handle running tasks running on this node.
for task in self.node_active_tasks:
if self.handle_task(task):
break
else:
# If nothing has changed we can go on to run tasks.
for task in self.node_relevant_waiting_tasks:
if task.proc == 'bibupload' \
and self.node_relevant_bibupload_tasks:
## We switch in bibupload serial mode!
## which means we execute the first next bibupload.
if self.handle_task(self.node_relevant_bibupload_tasks[0]):
## Something has changed
break
elif self.handle_task(task):
## Something has changed
break
else:
time.sleep(CFG_BIBSCHED_REFRESHTIME)
def watch_loop(self):
## Cleaning up scheduled task not run because of bibsched being
## interrupted in the middle.
run_sql("""UPDATE schTASK
SET status = 'WAITING'
WHERE status = 'SCHEDULED'
AND host = %s""", (self.hostname, ))
try:
while True:
auto_mode = self.check_auto_mode()
if auto_mode:
self.tick()
else:
time.sleep(CFG_BIBSCHED_REFRESHTIME)
except Exception, err:
register_exception(alert_admin=True)
try:
register_emergency('Emergency from %s: BibSched halted: %s'
% (CFG_SITE_URL, err))
except NotImplementedError:
pass
raise
def Log(message, debug=None):
if debug is False:
return
log = open(CFG_LOGDIR + "/bibsched.log", "a")
log.write(time.strftime("%Y-%m-%d %H:%M:%S --> ", time.localtime()))
log.write(message)
log.write("\n")
log.close()
def redirect_stdout_and_stderr():
"This function redirects stdout and stderr to bibsched.log and bibsched.err file."
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(CFG_LOGDIR + "/bibsched.log", "a")
sys.stderr = open(CFG_LOGDIR + "/bibsched.err", "a")
return old_stdout, old_stderr
def restore_stdout_and_stderr(stdout, stderr):
sys.stdout = stdout
sys.stderr = stderr
def usage(exitcode=1, msg=""):
"""Prints usage info."""
if msg:
sys.stderr.write("Error: %s.\n" % msg)
sys.stderr.write("""\
Usage: %s [options] [start|stop|restart|monitor|status]
The following commands are available for bibsched:
start start bibsched in background
stop stop running bibtasks and the bibsched daemon safely
halt halt running bibsched while keeping bibtasks running
restart restart running bibsched
monitor enter the interactive monitor
status get report about current status of the queue
purge purge the scheduler queue from old tasks
General options:
-h, --help \t Print this help.
-V, --version \t Print version information.
-q, --quiet \t Quiet mode
-d, --debug \t Write debugging information in bibsched.log
Status options:
-s, --status=LIST\t Which BibTask status should be considered (default is Running,waiting)
-S, --since=TIME\t Since how long time to consider tasks e.g.: 30m, 2h, 1d (default
is all)
-t, --tasks=LIST\t Comma separated list of BibTask to consider (default
\t is all)
Purge options:
-s, --status=LIST\t Which BibTask status should be considered (default is DONE)
-S, --since=TIME\t Since how long time to consider tasks e.g.: 30m, 2h, 1d (default
is %s days)
-t, --tasks=LIST\t Comma separated list of BibTask to consider (default
\t is %s)
""" % (sys.argv[0], CFG_BIBSCHED_GC_TASKS_OLDER_THAN, ','.join(CFG_BIBSCHED_GC_TASKS_TO_REMOVE + CFG_BIBSCHED_GC_TASKS_TO_ARCHIVE)))
sys.exit(exitcode)
pidfile = os.path.join(CFG_PREFIX, 'var', 'run', 'bibsched.pid')
def error(msg):
print >> sys.stderr, "error: %s" % msg
sys.exit(1)
def warning(msg):
print >> sys.stderr, "warning: %s" % msg
def server_pid(ping_the_process=True, check_is_really_bibsched=True):
# The pid must be stored on the filesystem
try:
pid = int(open(pidfile).read())
except IOError:
return None
if ping_the_process:
# Even if the pid is available, we check if it corresponds to an
# actual process, as it might have been killed externally
try:
os.kill(pid, signal.SIGCONT)
except OSError:
warning("pidfile %s found referring to pid %s which is not running" % (pidfile, pid))
return None
if check_is_really_bibsched:
output = run_shell_command("ps p %s -o args=", (str(pid), ))[1]
        if 'bibsched' not in output:
warning("pidfile %s found referring to pid %s which does not correspond to bibsched: cmdline is %s" % (pidfile, pid, output))
return None
return pid
def write_server_pid(pid):
open(pidfile, 'w').write('%d' % pid)
def start(verbose=True, debug=False):
""" Fork this process in the background and start processing
requests. The process PID is stored in a pid file, so that it can
be stopped later on."""
if verbose:
sys.stdout.write("starting bibsched: ")
sys.stdout.flush()
pid = server_pid(ping_the_process=False)
if pid:
pid2 = server_pid()
if pid2:
error("another instance of bibsched (pid %d) is running" % pid2)
else:
warning("%s exist but the corresponding bibsched (pid %s) seems not be running" % (pidfile, pid))
warning("erasing %s and continuing..." % (pidfile, ))
os.remove(pidfile)
if debug:
pid = os.getpid()
write_server_pid(pid)
else:
# start the child process using the "double fork" technique
pid = os.fork()
if pid > 0:
sys.exit(0)
os.setsid()
os.chdir('/')
pid = os.fork()
if pid > 0:
if verbose:
sys.stdout.write('pid %d\n' % pid)
Log("daemon started (pid %d)" % pid)
write_server_pid(pid)
return
sys.stdin.close()
redirect_stdout_and_stderr()
sched = BibSched(debug=debug)
try:
sched.watch_loop()
finally:
try:
os.remove(pidfile)
except OSError:
pass
def halt(verbose=True, soft=False, debug=False): # pylint: disable=W0613
pid = server_pid()
if not pid:
if soft:
print >> sys.stderr, 'bibsched seems not to be running.'
return
else:
error('bibsched seems not to be running.')
try:
os.kill(pid, signal.SIGKILL)
except OSError:
print >> sys.stderr, 'no bibsched process found'
Log("daemon stopped (pid %d)" % pid)
if verbose:
print "stopping bibsched: pid %d" % pid
os.unlink(pidfile)
def write_message(msg, stream=None, verbose=1): # pylint: disable=W0613
"""Write message and flush output stream (may be sys.stdout or sys.stderr).
Useful for debugging stuff."""
if stream is None:
stream = sys.stdout
if msg:
if stream == sys.stdout or stream == sys.stderr:
stream.write(time.strftime("%Y-%m-%d %H:%M:%S --> ",
time.localtime()))
try:
stream.write("%s\n" % msg)
except UnicodeEncodeError:
stream.write("%s\n" % msg.encode('ascii', 'backslashreplace'))
stream.flush()
else:
sys.stderr.write("Unknown stream %s. [must be sys.stdout or sys.stderr]\n" % stream)
def report_queue_status(verbose=True, status=None, since=None, tasks=None): # pylint: disable=W0613
"""
Report about the current status of BibSched queue on standard output.
"""
def report_about_processes(status='RUNNING', since=None, tasks=None):
"""
Helper function to report about processes with the given status.
"""
if tasks is None:
task_query = ''
else:
task_query = 'AND proc IN (%s)' % (
','.join([repr(real_escape_string(task)) for task in tasks]))
if since is None:
since_query = ''
else:
            # We're not interested in future tasks
if since.startswith('+') or since.startswith('-'):
since = since[1:]
since = '-' + since
since_query = "AND runtime >= '%s'" % get_datetime(since)
res = run_sql("""SELECT id, proc, runtime, status, priority, host,
sequenceid
FROM schTASK WHERE status=%%s %(task_query)s
%(since_query)s ORDER BY id ASC""" % {
'task_query': task_query,
'since_query' : since_query},
(status,))
write_message("%s processes: %d" % (status, len(res)))
for t in Task.from_resultset(res):
write_message(' * %s' % t)
return
write_message("BibSched queue status report for %s:" % gethostname())
daemon_status = server_pid() and "UP" or "DOWN"
write_message("BibSched daemon status: %s" % daemon_status)
if run_sql("show tables like 'schSTATUS'"):
r = run_sql('SELECT value FROM schSTATUS WHERE name = "auto_mode"')
try:
mode = bool(int(r[0][0]))
except (ValueError, IndexError):
mode = True
else:
mode = False
mode_str = mode and 'AUTOMATIC' or 'MANUAL'
write_message("BibSched queue running mode: %s" % mode_str)
if status is None:
report_about_processes('Running', since, tasks)
report_about_processes('Waiting', since, tasks)
else:
for state in status:
report_about_processes(state, since, tasks)
write_message("Done.")
def restart(verbose=True, debug=False):
halt(verbose, soft=True, debug=debug)
start(verbose, debug=debug)
def stop(verbose=True, debug=False):
"""
* Stop bibsched
* Send stop signal to all the running tasks
* wait for all the tasks to stop
* return
"""
if verbose:
print "Stopping BibSched if running"
halt(verbose, soft=True, debug=debug)
run_sql("UPDATE schTASK SET status='WAITING' WHERE status='SCHEDULED'")
res = run_sql("""SELECT id, status FROM schTASK
WHERE status NOT LIKE 'DONE'
AND status NOT LIKE '%_DELETED'
AND (status='RUNNING'
OR status='ABOUT TO STOP'
OR status='ABOUT TO SLEEP'
OR status='SLEEPING'
OR status='CONTINUING')""")
if verbose:
print "Stopping all running BibTasks"
for task_id, status in res:
if status == 'SLEEPING':
bibsched_send_signal(task_id, signal.SIGCONT)
time.sleep(CFG_BIBSCHED_REFRESHTIME)
bibsched_set_status(task_id, 'ABOUT TO STOP')
while run_sql("""SELECT id FROM schTASK
WHERE status NOT LIKE 'DONE'
AND status NOT LIKE '%_DELETED'
AND (status='RUNNING'
OR status='ABOUT TO STOP'
OR status='ABOUT TO SLEEP'
OR status='SLEEPING'
OR status='CONTINUING')"""):
if verbose:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(CFG_BIBSCHED_REFRESHTIME)
if verbose:
print "\nStopped"
Log("BibSched and all BibTasks stopped")
def main():
from invenio.bibsched_monitor import monitor
from invenio.bibtask import check_running_process_user
check_running_process_user()
verbose = True
status = None
since = None
tasks = None
debug = False
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "hVdqS:s:t:", [
"help", "version", "debug", "quiet", "since=", "status=", "task="])
except getopt.GetoptError, err:
Log("Error: %s" % err)
usage(1, err)
for opt, arg in opts:
if opt in ["-h", "--help"]:
usage(0)
elif opt in ["-V", "--version"]:
print CFG_VERSION
sys.exit(0)
elif opt in ['-q', '--quiet']:
verbose = False
elif opt in ['-s', '--status']:
status = arg.split(',')
elif opt in ['-S', '--since']:
since = arg
elif opt in ['-t', '--task']:
tasks = arg.split(',')
elif opt in ['-d', '--debug']:
debug = True
else:
usage(1)
try:
cmd = args[0]
except IndexError:
cmd = 'monitor'
try:
if cmd in ('status', 'purge'):
{'status' : report_queue_status,
'purge' : gc_tasks}[cmd](verbose, status, since, tasks)
else:
{'start': start,
'halt': halt,
'stop': stop,
'restart': restart,
'monitor': monitor}[cmd](verbose=verbose, debug=debug)
except KeyError:
        usage(1, 'unknown command: %s' % cmd)
if __name__ == '__main__':
main()
| gpl-2.0 |
nhicher/ansible | lib/ansible/modules/windows/win_template.py | 52 | 4306 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a virtual module that is entirely implemented server side
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_template
version_added: "1.9.2"
short_description: Templates a file out to a remote server
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
template file and the owner uid, C(template_host) contains the node name of
the template's machine, C(template_uid) the owner, C(template_path) the
absolute path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
a string that uses a date in the template will result in the template being marked 'changed'
each time."
options:
src:
description:
- Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path.
required: yes
dest:
description:
- Location to render the template to on the remote machine.
required: yes
newline_sequence:
description:
- Specify the newline sequence to use for templating files.
choices: [ '\n', '\r', '\r\n' ]
default: '\r\n'
version_added: '2.4'
block_start_string:
description:
- The string marking the beginning of a block.
default: '{%'
version_added: '2.4'
block_end_string:
description:
- The string marking the end of a block.
default: '%}'
version_added: '2.4'
variable_start_string:
description:
- The string marking the beginning of a print statement.
default: '{{'
version_added: '2.4'
variable_end_string:
description:
- The string marking the end of a print statement.
default: '}}'
version_added: '2.4'
trim_blocks:
description:
- If this is set to C(yes) the first newline after a block is removed (block, not variable tag!).
type: bool
default: 'no'
version_added: '2.4'
force:
description:
- If C(yes), will replace the remote file when contents are different
from the source.
- If C(no), the file will only be transferred if the destination does
not exist.
type: bool
default: 'yes'
version_added: '2.4'
notes:
- For other platforms you can use M(template) which uses '\n' as C(newline_sequence).
- Templates are loaded with C(trim_blocks=True).
- Beware fetching files from windows machines when creating templates
because certain tools, such as Powershell ISE, and regedit's export facility
add a Byte Order Mark as the first character of the file, which can cause tracebacks.
- To find Byte Order Marks in files, use C(Format-Hex <file> -Count 16) on Windows, and use C(od -a -t x1 -N 16 <file>) on Linux.
- "Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: no)
which changes the variable interpolation markers to [% var %] instead of {{ var }}.
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated."
author:
- Jon Hawkesworth (@jhawkesworth)
'''
EXAMPLES = r'''
- name: Create a file from a Jinja2 template
win_template:
src: /mytemplates/file.conf.j2
dest: C:\Temp\file.conf
- name: Create a Unix-style file from a Jinja2 template
win_template:
src: unix/config.conf.j2
dest: C:\share\unix\config.conf
newline_sequence: '\n'
'''
| gpl-3.0 |
Kitware/girder | girder/models/assetstore.py | 2 | 6444 | # -*- coding: utf-8 -*-
import datetime
from .model_base import Model
from girder.constants import AssetstoreType, SortDir
from girder.exceptions import ValidationException, GirderException, NoAssetstoreAdapter
from girder.utility import assetstore_utilities
from girder.utility.abstract_assetstore_adapter import AbstractAssetstoreAdapter
class Assetstore(Model):
"""
This model represents an assetstore, an abstract repository of Files.
"""
def initialize(self):
self.name = 'assetstore'
def validate(self, doc):
# Ensure no duplicate names
q = {'name': doc['name']}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
duplicate = self.findOne(q, fields=['_id'])
if duplicate is not None:
raise ValidationException('An assetstore with that name already '
'exists.', 'name')
# Name must not be empty
if not doc['name']:
raise ValidationException('Name must not be empty.', 'name')
# Adapter classes validate each type internally
adapter = assetstore_utilities.getAssetstoreAdapter(doc, instance=False)
adapter.validateInfo(doc)
# If no current assetstore exists yet, set this one as the current.
current = self.findOne({'current': True}, fields=['_id'])
if current is None:
doc['current'] = True
if 'current' not in doc:
doc['current'] = False
# If we are setting this as current, we should unmark all other
# assetstores that have the current flag.
if doc['current'] is True:
self.update({'current': True}, {'$set': {'current': False}})
return doc
def remove(self, assetstore, **kwargs):
"""
Delete an assetstore. If there are any files within this assetstore,
a validation exception is raised.
:param assetstore: The assetstore document to delete.
:type assetstore: dict
"""
from .file import File
files = File().findOne({'assetstoreId': assetstore['_id']})
if files is not None:
raise ValidationException('You may not delete an assetstore that contains files.')
# delete partial uploads before we delete the store.
try:
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
adapter.untrackedUploads([], delete=True)
except (NoAssetstoreAdapter, ValidationException):
# this assetstore is currently unreachable, so skip this step
pass
# now remove the assetstore
super().remove(assetstore)
# If after removal there is no current assetstore, then pick a
# different assetstore to be the current one.
current = self.findOne({'current': True})
if current is None:
first = self.findOne(sort=[('created', SortDir.DESCENDING)])
if first is not None:
first['current'] = True
self.save(first)
def list(self, limit=0, offset=0, sort=None):
"""
List all assetstores.
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
        :returns: List of assetstores.
"""
cursor = self.find({}, limit=limit, offset=offset, sort=sort)
for assetstore in cursor:
self.addComputedInfo(assetstore)
yield assetstore
def addComputedInfo(self, assetstore):
"""
Add all runtime-computed properties about an assetstore to its document.
:param assetstore: The assetstore object.
:type assetstore: dict
"""
from .file import File
try:
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
except NoAssetstoreAdapter:
# If the adapter doesn't exist, use the abstract adapter, since
# this will just give the default capacity information
adapter = AbstractAssetstoreAdapter(assetstore)
assetstore['capacity'] = adapter.capacityInfo()
assetstore['hasFiles'] = File().findOne({'assetstoreId': assetstore['_id']}) is not None
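    # Illustrative usage sketch (not part of the original model; the root
    # path and the capacity keys shown are assumptions):
    #
    #   store = Assetstore().createFilesystemAssetstore(
    #       name='local', root='/data/assetstore')
    #   Assetstore().addComputedInfo(store)
    #   store['capacity']   # e.g. {'free': ..., 'total': ...}
    #   store['hasFiles']   # False until a file is saved into it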
def createFilesystemAssetstore(self, name, root, perms=None):
return self.save({
'type': AssetstoreType.FILESYSTEM,
'created': datetime.datetime.utcnow(),
'name': name,
'root': root,
'perms': perms
})
def createGridFsAssetstore(self, name, db, mongohost=None,
replicaset=None):
return self.save({
'type': AssetstoreType.GRIDFS,
'created': datetime.datetime.utcnow(),
'name': name,
'db': db,
'mongohost': mongohost,
'replicaset': replicaset
})
def createS3Assetstore(self, name, bucket, accessKeyId, secret, prefix='',
service='', readOnly=False, region=None, inferCredentials=False,
serverSideEncryption=False):
return self.save({
'type': AssetstoreType.S3,
'created': datetime.datetime.utcnow(),
'name': name,
'accessKeyId': accessKeyId,
'secret': secret,
'readOnly': readOnly,
'prefix': prefix,
'bucket': bucket,
'service': service,
'region': region,
'inferCredentials': inferCredentials,
'serverSideEncryption': serverSideEncryption
})
def getCurrent(self):
"""
Returns the current assetstore. If none exists, this will raise a 500
exception.
"""
current = self.findOne({'current': True})
if current is None:
raise GirderException(
'No current assetstore is set.',
'girder.model.assetstore.no-current-assetstore')
return current
def importData(self, assetstore, parent, parentType, params, progress,
user, **kwargs):
"""
Calls the importData method of the underlying assetstore adapter.
"""
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
return adapter.importData(
parent=parent, parentType=parentType, params=params,
progress=progress, user=user, **kwargs)
| apache-2.0 |
jmvrbanac/Specter | tests/test_util.py | 1 | 1929 | from unittest import TestCase
from specter import util, spec, metadata, skip
class TestSpecterUtil(TestCase):
def test_convert_camelcase_error(self):
result = util.convert_camelcase(None)
self.assertEqual(result, '')
def test_get_numbered_source_error(self):
result = util.get_numbered_source(None, 1)
self.assertIn('Error finding traceback!', result)
def test_find_by_metadata(self):
wrap1 = spec.CaseWrapper(None, None, metadata={'test': 'smoke'})
wrap2 = spec.CaseWrapper(None, None, metadata={'test': 'bam'})
test_list = {wrap1.id: wrap1, wrap2.id: wrap2}
found = util.find_by_metadata({'test': 'smoke'}, test_list)
self.assertEqual(len(found), 1)
self.assertIn(wrap1.id, found)
def test_extract_metadata(self):
@metadata(type='testing')
def sample_func():
pass
func, meta = util.extract_metadata(sample_func)
self.assertEqual(func.__name__, 'sample_func')
self.assertEqual(meta.get('type'), 'testing')
def test_extract_metadata_w_skip_before(self):
@skip('testing_skip')
@metadata(type='testing')
def sample_func():
pass
func, meta = util.extract_metadata(sample_func)
self.assertEqual(meta.get('type'), 'testing')
def test_extract_metadata_w_skip_after(self):
@metadata(type='testing')
@skip('testing_skip')
def sample_func():
pass
func, meta = util.extract_metadata(sample_func)
self.assertEqual(meta.get('type'), 'testing')
def test_get_real_last_traceback_w_exception_in_old_style_class(self):
class OldStyleClass:
def throw(self):
raise Exception('exception in OldStyleClass')
try:
OldStyleClass().throw()
except Exception as e:
util.get_real_last_traceback(e)
| mit |
cfg2015/EPT-2015-2 | addons/hr_timesheet/wizard/__init__.py | 381 | 1079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_sign_in_out
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ztane/jaspyx | jaspyx/visitor/base_visitor.py | 1 | 4005 | from __future__ import absolute_import, division, print_function
import ast
from jaspyx.context.module import ModuleContext
from jaspyx.compat import basestring
class BaseVisitor(ast.NodeVisitor):
def __init__(self, path, registry, indent=0):
self.path = path
self.registry = registry
self.default_indent = indent
self.stack = []
self.module = None
def push(self, context):
"""
Push a new context on the stack.
:param context: An instance of one of the available context from
the jaspyx.context package.
"""
self.stack.append(context)
def pop(self):
"""
Pop the current context from the stack and append it to the previous
context as content.
"""
self.stack[-2].add(self.stack.pop())
def output(self, s):
"""
Append literal output to the current context.
This will also automatically prepend indention.
"""
self.stack[-1].add(s)
def indent(self):
self.output(' ' * (self.stack[-1].indent + 2))
def finish(self):
self.output(';\n')
def group(self, values, prefix='(', infix=' ', infix_node=None, suffix=')'):
"""
Append a group of values with a configurable prefix, suffix and infix
to the output buffer. This is used to render a list of AST nodes with
fixed surroundings.
:param values: A list of AST nodes.
:param prefix: Text to prepend before the output.
:param infix: Text to put between the rendered AST nodes. If
infix_node is also specified, infix_node will be
surrounded by infix.
:param infix_node: An AST node to render in between the values.
:param suffix: Text to append after the output.
"""
self.output(prefix)
first = True
for value in values:
if not first:
if infix:
self.output(infix)
                if infix_node is not None:
                    self.visit(infix_node)
                    if infix:
                        self.output(infix)
else:
first = False
if isinstance(value, basestring):
self.output(value)
else:
self.visit(value)
self.output(suffix)
def block(self, nodes, context=None):
"""
Process a block of AST nodes and treat all of them as statements. It
will also control automatic indention and appending semicolons and
carriage returns to the output. Can optionally push a context on the
stack before processing and pop it after it's done.
:param nodes: A list of AST nodes to render.
:param context: An optional context to push / pop.
"""
if context is not None:
self.push(context)
for node in nodes:
self.visit(node)
if context is not None:
self.pop()
def visit_Module(self, node):
"""
Handler for top-level AST nodes. Sets this visitor's module
attribute to a newly generated ModuleContext.
:param node: The current AST node being visited.
"""
self.module = ModuleContext()
self.module.indent = self.default_indent
self.push(self.module)
self.block(node.body)
def visit_Expr(self, node):
self.indent()
self.visit(node.value)
self.finish()
def visit_Pass(self, node):
pass
def visit_NameConstant(self, node):
self.visit(ast.Name(str(node.value), ast.Load()))
def generic_visit(self, node):
"""
        Generic AST node handler. Raises an exception. This is called by
ast.NodeVisitor when no suitable visit_<name> method is found.
:param node: The current AST node being visited.
"""
raise NotImplementedError('Unsupported AST node %s' % node)
| mit |
grpc/grpc | src/python/grpcio_tests/tests/csds/test_csds.py | 5 | 4318 | # Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple test to ensure that the Python wrapper can get xDS config."""
import logging
import os
import time
from six.moves import queue
import unittest
from concurrent.futures import ThreadPoolExecutor
import grpc
import grpc_csds
from google.protobuf import json_format
try:
from envoy.service.status.v3 import csds_pb2, csds_pb2_grpc
except ImportError:
from src.proto.grpc.testing.xds.v3 import csds_pb2, csds_pb2_grpc
_DUMMY_XDS_ADDRESS = 'xds:///foo.bar'
_DUMMY_BOOTSTRAP_FILE = """
{
\"xds_servers\": [
{
\"server_uri\": \"fake:///xds_server\",
\"channel_creds\": [
{
\"type\": \"fake\"
}
],
\"server_features\": [\"xds_v3\"]
}
],
\"node\": {
\"id\": \"python_test_csds\",
\"cluster\": \"test\",
\"metadata\": {
\"foo\": \"bar\"
},
\"locality\": {
\"region\": \"corp\",
\"zone\": \"svl\",
\"sub_zone\": \"mp3\"
}
}
}\
"""
class TestCsds(unittest.TestCase):
def setUp(self):
os.environ['GRPC_XDS_BOOTSTRAP_CONFIG'] = _DUMMY_BOOTSTRAP_FILE
self._server = grpc.server(ThreadPoolExecutor())
port = self._server.add_insecure_port('localhost:0')
grpc_csds.add_csds_servicer(self._server)
self._server.start()
self._channel = grpc.insecure_channel('localhost:%s' % port)
self._stub = csds_pb2_grpc.ClientStatusDiscoveryServiceStub(
self._channel)
def tearDown(self):
self._channel.close()
self._server.stop(0)
os.environ.pop('GRPC_XDS_BOOTSTRAP_CONFIG', None)
def get_xds_config_dump(self):
return self._stub.FetchClientStatus(csds_pb2.ClientStatusRequest())
def test_has_node(self):
resp = self.get_xds_config_dump()
self.assertEqual(1, len(resp.config))
self.assertEqual(4, len(resp.config[0].xds_config))
self.assertEqual('python_test_csds', resp.config[0].node.id)
self.assertEqual('test', resp.config[0].node.cluster)
def test_no_lds_found(self):
dummy_channel = grpc.insecure_channel(_DUMMY_XDS_ADDRESS)
# Force the XdsClient to initialize and request a resource
with self.assertRaises(grpc.RpcError) as rpc_error:
dummy_channel.unary_unary('')(b'', wait_for_ready=False)
self.assertEqual(grpc.StatusCode.UNAVAILABLE,
rpc_error.exception.code())
# The resource request will fail with DOES_NOT_EXIST (after 15s)
while True:
resp = self.get_xds_config_dump()
config = json_format.MessageToDict(resp)
ok = False
try:
for xds_config in config["config"][0]["xdsConfig"]:
if "listenerConfig" in xds_config:
listener = xds_config["listenerConfig"][
"dynamicListeners"][0]
if listener['clientStatus'] == 'DOES_NOT_EXIST':
ok = True
break
except KeyError as e:
logging.debug("Invalid config: %s\n%s: %s", config, type(e), e)
pass
if ok:
break
time.sleep(1)
dummy_channel.close()
class TestCsdsStream(TestCsds):
def get_xds_config_dump(self):
if not hasattr(self, 'request_queue'):
request_queue = queue.Queue()
response_iterator = self._stub.StreamClientStatus(
iter(request_queue.get, None))
request_queue.put(csds_pb2.ClientStatusRequest())
return next(response_iterator)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
| apache-2.0 |
ekmartin/abakaffe-irc | abakaffe.py | 1 | 4298 | import irc.bot
import irc.strings
import os
import urllib2
import simplejson
from urlparse import urljoin
from datetime import datetime
class AbakusCoffeeBot(irc.bot.SingleServerIRCBot):
API_URL = "http://kaffe.abakus.no/api/"
def __init__(self, channelList, nickname, server, port=6667):
irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname.lower())
self.channelList = channelList
def get_file(self, api_base, api_module):
try:
url = urljoin(api_base, api_module)
req = urllib2.Request(url)
opener = urllib2.build_opener()
f = opener.open(req)
return f
except:
return ""
def get_status(self, time_delta):
try:
message = ""
if int(time_delta.days):
message += "Det er ingen som har traktet kaffe i dag."
else:
hours = time_delta.seconds // (60 * 60)
minutes = (time_delta.seconds // 60) % 60
if not hours and not minutes:
return "Kaffen ble nettopp traktet!"
message += "Kaffen ble sist traktet for "
if hours:
if hours == 1:
message += "en time"
else:
message += str(hours) + " timer"
if hours and minutes:
message += " og "
if minutes:
if minutes == 1:
message += "ett minutt "
else:
message += str(minutes) + " minutter "
message += "siden."
return message
except:
return ""
def print_kaffe(self, target):
try:
connection = self.connection
f = self.get_file(self.API_URL, 'status')
status_json = simplejson.load(f)
coffee = status_json['coffee']
on = coffee['status']
last_start = coffee['last_start']
last_start = datetime.strptime(last_start, "%Y-%m-%d %H:%M")
time_delta = datetime.now() - last_start
if on:
connection.privmsg(target, "Kaffetrakteren er startet!")
connection.privmsg(target, self.get_status(time_delta))
except:
pass
def on_nicknameinuse(self, connection, event):
connection.nick(connection.get_nickname() + "_")
def on_privmsg(self, connection, event):
command = event.arguments[0].split()
if command[0] == "!join":
if len(command) > 1:
connection.join(command[1])
elif command[0] == "!kaffe":
self.print_kaffe(event.target)
def on_welcome(self, connection, event):
for chan in self.channelList:
try:
connection.join(chan)
except:
pass
def on_pubmsg(self, connection, event):
command = event.arguments[0].split()
if command[0] == "!kaffe":
self.print_kaffe(event.target)
elif command[0] == "!join":
if len(command) > 1:
connection.join(command[1])
return
def main():
while True:
try:
import sys
if len(sys.argv) != 4:
print("Usage: python abacoffee.py <server[:port]> <channel1,channel2,channel3..> <nickname>")
sys.exit(1)
s = sys.argv[1].split(":", 1)
channelList = sys.argv[2].split(",")
server = s[0]
if len(s) == 2:
try:
port = int(s[1])
except ValueError:
print("Error: Erroneous port.")
sys.exit(1)
else:
port = 6667
nickname = sys.argv[3]
for i,chan in enumerate(channelList):
channelList[i] = '#'+chan
print channelList, nickname, server,
bot = AbakusCoffeeBot(channelList, nickname, server, port)
bot.start()
except KeyboardInterrupt:
print "Exiting."
raise
except:
pass
if __name__ == "__main__":
main() | mit |
sbidoul/odoo | addons/l10n_cn/__init__.py | 339 | 1061 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2007-2014 Jeff Wang(<http://[email protected]>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
LUTAN/tensorflow | tensorflow/contrib/tfprof/python/tools/tfprof/print_model_analysis_test.py | 25 | 7217 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""print_model_analysis test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.tools.tfprof import tfprof_options_pb2
from tensorflow.tools.tfprof import tfprof_output_pb2
# XXX: this depends on pywrap_tensorflow and must come later
from tensorflow.contrib.tfprof.python.tools.tfprof import pywrap_tensorflow_print_model_analysis_lib as print_mdl
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
TEST_OPTIONS = {
'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'device_regexes': ['.*'],
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'viz': False
}
# pylint: enable=bad-whitespace
# pylint: enable=bad-continuation
class PrintModelAnalysisTest(test.TestCase):
def _BuildSmallModel(self):
image = array_ops.zeros([2, 6, 6, 3])
kernel = variable_scope.get_variable(
'DW', [6, 6, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
return x
def testPrintModelAnalysis(self):
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = TEST_OPTIONS['max_depth']
opts.min_bytes = TEST_OPTIONS['min_bytes']
opts.min_micros = TEST_OPTIONS['min_micros']
opts.min_params = TEST_OPTIONS['min_params']
opts.min_float_ops = TEST_OPTIONS['min_float_ops']
for p in TEST_OPTIONS['device_regexes']:
opts.device_regexes.append(p)
opts.order_by = TEST_OPTIONS['order_by']
for p in TEST_OPTIONS['account_type_regexes']:
opts.account_type_regexes.append(p)
for p in TEST_OPTIONS['start_name_regexes']:
opts.start_name_regexes.append(p)
for p in TEST_OPTIONS['trim_name_regexes']:
opts.trim_name_regexes.append(p)
for p in TEST_OPTIONS['show_name_regexes']:
opts.show_name_regexes.append(p)
for p in TEST_OPTIONS['hide_name_regexes']:
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = TEST_OPTIONS['account_displayed_op_only']
for p in TEST_OPTIONS['select']:
opts.select.append(p)
opts.viz = TEST_OPTIONS['viz']
with session.Session() as sess, ops.device('/cpu:0'):
_ = self._BuildSmallModel()
tfprof_pb = tfprof_output_pb2.TFProfNode()
tfprof_pb.ParseFromString(
print_mdl.PrintModelAnalysis(sess.graph.as_graph_def(
).SerializeToString(), b'', b'', b'scope', opts.SerializeToString()))
expected_pb = tfprof_output_pb2.TFProfNode()
text_format.Merge(r"""name: "_TFProfRoot"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
children {
name: "Conv2D"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
children {
name: "DW"
exec_micros: 0
requested_bytes: 0
parameters: 648
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
device: "/device:CPU:0"
children {
name: "DW/Assign"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
children {
name: "DW/Initializer/random_normal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
children {
name: "DW/Initializer/random_normal/RandomStandardNormal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mean"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mul"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/shape"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/stddev"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/read"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "zeros"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
device: "/device:CPU:0"
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0""", expected_pb)
self.assertEqual(expected_pb, tfprof_pb)
if __name__ == '__main__':
test.main()
| apache-2.0 |
Arc-Team/android_kernel_samsung_afyonlte | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the function names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
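# Illustrative note (not part of the original script): a typical function
# tracer line accepted by parseLine() looks like
#   bash-16939 [000]  6075.461561: mutex_unlock <-rb_simple_write
# and is parsed into ("6075.461561", "mutex_unlock", "rb_simple_write"),
# i.e. (calltime, callee, caller).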
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
fooelisa/ansible-modules-extras | notification/slack.py | 1 | 7642 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ramon de la Fuente <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: slack
short_description: Send Slack notifications
description:
- The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
version_added: 1.6
author: Ramon de la Fuente <[email protected]>
options:
domain:
description:
- Slack (sub)domain for your environment without protocol. (i.e.
C(future500.slack.com)) In 1.8 and beyond, this is deprecated and may
be ignored. See token documentation for information.
required: false
token:
description:
- Slack integration token. This authenticates you to the slack service.
Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
1.8 and above, ansible adapts to the new slack API where tokens look
like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
are in the new format then slack will ignore any value of domain. If
the token is in the old format the domain is required. Ansible has no
control of when slack will get rid of the old API. When slack does
that the old format will stop working.
required: true
msg:
description:
- Message to send.
required: true
channel:
description:
- Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
required: false
username:
description:
- This is the sender of the message.
required: false
default: ansible
icon_url:
description:
- Url for the message sender's icon (default C(http://www.ansible.com/favicon.ico))
required: false
icon_emoji:
description:
- Emoji for the message sender. See Slack documentation for options.
(if I(icon_emoji) is set, I(icon_url) will not be used)
required: false
link_names:
description:
- Automatically create links for channels and usernames in I(msg).
required: false
default: 1
choices:
- 1
- 0
parse:
description:
- Setting for the message parser at Slack
required: false
choices:
- 'full'
- 'none'
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices:
- 'yes'
- 'no'
color:
version_added: 2.0
description:
- Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message
required: false
default: 'normal'
choices:
- 'normal'
- 'good'
- 'warning'
- 'danger'
"""
EXAMPLES = """
- name: Send notification message via Slack
local_action:
module: slack
domain: future500.slack.com
token: thetokengeneratedbyslack
msg: "{{ inventory_hostname }} completed"
- name: Send notification message via Slack all options
local_action:
module: slack
domain: future500.slack.com
token: thetokengeneratedbyslack
msg: "{{ inventory_hostname }} completed"
channel: "#ansible"
username: "Ansible on {{ inventory_hostname }}"
icon_url: "http://www.example.com/some-image-file.png"
link_names: 0
parse: 'none'
- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
slack:
domain: future500.slack.com
token: thetokengeneratedbyslack
msg: "{{ inventory_hostname }} is alive!"
color: good
username: ""
icon_url: ""
"""
OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color):
if color == 'normal':
payload = dict(text=text)
else:
payload = dict(attachments=[dict(text=text, color=color)])
if channel is not None:
        if (channel[0] == '#') or (channel[0] == '@'):
            payload['channel'] = channel
        else:
            payload['channel'] = '#'+channel
if username is not None:
payload['username'] = username
if icon_emoji is not None:
payload['icon_emoji'] = icon_emoji
else:
payload['icon_url'] = icon_url
if link_names is not None:
payload['link_names'] = link_names
if parse is not None:
payload['parse'] = parse
payload="payload=" + module.jsonify(payload)
return payload
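# Rough sketch of the result (illustrative only; key order depends on the JSON
# serializer): build_payload_for_slack(module, 'hi', 'ansible', 'Ansible',
# 'http://www.ansible.com/favicon.ico', None, 1, None, 'normal') returns a
# string along the lines of
#   payload={"text": "hi", "channel": "#ansible", "username": "Ansible",
#            "icon_url": "http://www.ansible.com/favicon.ico", "link_names": 1}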
def do_notify_slack(module, domain, token, payload):
if token.count('/') >= 2:
# New style token
slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token)
else:
if not domain:
module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form XXXX/YYYY/ZZZZ in your playbook")
slack_incoming_webhook = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
response, info = fetch_url(module, slack_incoming_webhook, data=payload)
if info['status'] != 200:
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg']))
def main():
module = AnsibleModule(
argument_spec = dict(
domain = dict(type='str', required=False, default=None),
token = dict(type='str', required=True),
msg = dict(type='str', required=True),
channel = dict(type='str', default=None),
username = dict(type='str', default='Ansible'),
icon_url = dict(type='str', default='http://www.ansible.com/favicon.ico'),
icon_emoji = dict(type='str', default=None),
link_names = dict(type='int', default=1, choices=[0,1]),
parse = dict(type='str', default=None, choices=['none', 'full']),
validate_certs = dict(default='yes', type='bool'),
color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger'])
)
)
domain = module.params['domain']
token = module.params['token']
text = module.params['msg']
channel = module.params['channel']
username = module.params['username']
icon_url = module.params['icon_url']
icon_emoji = module.params['icon_emoji']
link_names = module.params['link_names']
parse = module.params['parse']
color = module.params['color']
payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color)
do_notify_slack(module, domain, token, payload)
module.exit_json(msg="OK")
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
munhanha/mtg-random | django/middleware/http.py | 154 | 1696 | from django.core.exceptions import MiddlewareNotUsed
from django.utils.http import http_date, parse_http_date_safe
class ConditionalGetMiddleware(object):
"""
Handles conditional GET operations. If the response has a ETag or
Last-Modified header, and the request has If-None-Match or
If-Modified-Since, the response is replaced by an HttpNotModified.
Also sets the Date and Content-Length response-headers.
"""
def process_response(self, request, response):
response['Date'] = http_date()
if not response.has_header('Content-Length'):
response['Content-Length'] = str(len(response.content))
if response.has_header('ETag'):
if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
if if_none_match == response['ETag']:
# Setting the status is enough here. The response handling path
# automatically removes content for this status code (in
# http.conditional_content_removal()).
response.status_code = 304
if response.has_header('Last-Modified'):
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if if_modified_since is not None:
if_modified_since = parse_http_date_safe(if_modified_since)
if if_modified_since is not None:
last_modified = parse_http_date_safe(response['Last-Modified'])
if last_modified is not None and last_modified <= if_modified_since:
# Setting the status code is enough here (same reasons as
# above).
response.status_code = 304
return response
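# Usage sketch (assuming a typical settings module for this Django version):
# the middleware is enabled by adding its dotted path to MIDDLEWARE_CLASSES,
# e.g.
#
#   MIDDLEWARE_CLASSES = (
#       'django.middleware.http.ConditionalGetMiddleware',
#       # ... other middleware ...
#   )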
| bsd-3-clause |
miconof/CouchPotatoServer | couchpotato/core/downloaders/blackhole.py | 41 | 7378 | from __future__ import with_statement
import os
import traceback
from couchpotato.core._base.downloader.main import DownloaderBase
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import getDownloadDir
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
log = CPLog(__name__)
autoload = 'Blackhole'
class Blackhole(DownloaderBase):
protocol = ['nzb', 'torrent', 'torrent_magnet']
status_support = False
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and sent to this function.
            This is done so failure checking happens before the downloader is used,
            meaning the downloader doesn't need to worry about it.
        :return: boolean
            A failed download returns false, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
# The folder needs to exist
if not directory or not os.path.isdir(directory):
log.error('No directory set for blackhole %s download.', data.get('protocol'))
else:
try:
                # Filedata can be empty, which probably means it's a magnet link
if not filedata or len(filedata) < 50:
try:
if data.get('protocol') == 'torrent_magnet':
filedata = self.magnetToTorrent(data.get('url'))
data['protocol'] = 'torrent'
except:
log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
# If it's still empty, don't know what to do!
if not filedata or len(filedata) < 50:
log.error('No nzb/torrent available: %s', data.get('url'))
return False
# Create filename with imdb id and other nice stuff
file_name = self.createFileName(data, filedata, media)
full_path = os.path.join(directory, file_name)
                # People want things nice and tidy, create a subdir
if self.conf('create_subdir'):
try:
new_path = os.path.splitext(full_path)[0]
if not os.path.exists(new_path):
os.makedirs(new_path)
full_path = os.path.join(new_path, file_name)
except:
                        log.error("Couldn't create sub dir, reverting to old one: %s", full_path)
try:
# Make sure the file doesn't exist yet, no need in overwriting it
if not os.path.isfile(full_path):
log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
with open(full_path, 'wb') as f:
f.write(filedata)
os.chmod(full_path, Env.getPermission('file'))
return self.downloadReturnId('')
else:
log.info('File %s already exists.', full_path)
return self.downloadReturnId('')
except:
log.error('Failed to download to blackhole %s', traceback.format_exc())
pass
except:
log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc()))
return False
return False
def test(self):
""" Test and see if the directory is writable
:return: boolean
"""
directory = self.conf('directory')
if directory and os.path.isdir(directory):
test_file = sp(os.path.join(directory, 'couchpotato_test.txt'))
# Check if folder is writable
self.createFile(test_file, 'This is a test file')
if os.path.isfile(test_file):
os.remove(test_file)
return True
return False
def getEnabledProtocol(self):
""" What protocols is this downloaded used for
:return: list with protocols
"""
if self.conf('use_for') == 'both':
return super(Blackhole, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent':
return ['torrent', 'torrent_magnet']
else:
return ['nzb']
def isEnabled(self, manual = False, data = None):
""" Check if protocol is used (and enabled)
:param manual: The user has clicked to download a link through the webUI
:param data: dict returned from provider
Contains the release information
:return: boolean
"""
if not data: data = {}
for_protocol = ['both']
if data and 'torrent' in data.get('protocol'):
for_protocol.append('torrent')
elif data:
for_protocol.append(data.get('protocol'))
return super(Blackhole, self).isEnabled(manual, data) and \
((self.conf('use_for') in for_protocol))
config = [{
'name': 'blackhole',
'order': 30,
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'blackhole',
'label': 'Black hole',
'description': 'Download the NZB/Torrent to a specific folder. <em>Note: Seeding and copying/linking features do <strong>not</strong> work with Black hole</em>.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': True,
'type': 'enabler',
'radio_group': 'nzb,torrent',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Directory where the .nzb (or .torrent) file is saved to.',
'default': getDownloadDir()
},
{
'name': 'use_for',
'label': 'Use for',
'default': 'both',
'type': 'dropdown',
'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
},
{
'name': 'create_subdir',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Create a sub directory when saving the .nzb (or .torrent).',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
],
}
],
}]
| gpl-3.0 |
kilon/sverchok | old_nodes/circle.py | 4 | 4642 | from math import sin, cos, pi, degrees, radians
import bpy
from bpy.props import BoolProperty, IntProperty, FloatProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (fullList, match_long_repeat, updateNode,
SvSetSocketAnyType, SvGetSocketAnyType)
class CircleNode(bpy.types.Node, SverchCustomTreeNode):
''' Circle '''
bl_idname = 'CircleNode'
bl_label = 'Circle'
bl_icon = 'OUTLINER_OB_EMPTY'
rad_ = FloatProperty(name='rad_', description='Radius',
default=1.0, options={'ANIMATABLE'},
update=updateNode)
vert_ = IntProperty(name='vert_', description='Vertices',
default=24, min=3, options={'ANIMATABLE'},
update=updateNode)
degr_ = FloatProperty(name='degr_', description='Degrees',
default=360, min=0, max=360,
options={'ANIMATABLE'}, update=updateNode)
mode_ = BoolProperty(name='mode_', description='Mode',
default=0, options={'ANIMATABLE'},
update=updateNode)
def sv_init(self, context):
self.inputs.new('StringsSocket', "Radius", "Radius")
self.inputs.new('StringsSocket', "Nº Vertices", "Nº Vertices")
self.inputs.new('StringsSocket', "Degrees", "Degrees")
self.outputs.new('VerticesSocket', "Vertices", "Vertices")
self.outputs.new('StringsSocket', "Edges", "Edges")
self.outputs.new('StringsSocket', "Polygons", "Polygons")
def draw_buttons(self, context, layout):
layout.prop(self, "rad_", text="Radius")
layout.prop(self, "vert_", text="Nº Vert")
layout.prop(self, "degr_", text="Degrees")
layout.prop(self, "mode_", text="Mode")
def make_verts(self, Angle, Vertices, Radius):
if Angle < 360:
theta = Angle/(Vertices-1)
else:
theta = Angle/Vertices
listVertX = []
listVertY = []
for i in range(Vertices):
listVertX.append(Radius*cos(radians(theta*i)))
listVertY.append(Radius*sin(radians(theta*i)))
if Angle < 360 and self.mode_ == 0:
sigma = radians(Angle)
listVertX[-1] = Radius*cos(sigma)
listVertY[-1] = Radius*sin(sigma)
elif Angle < 360 and self.mode_ == 1:
listVertX.append(0.0)
listVertY.append(0.0)
X = listVertX
Y = listVertY
Z = [0.0]
max_num = max(len(X), len(Y), len(Z))
fullList(X, max_num)
fullList(Y, max_num)
fullList(Z, max_num)
points = list(zip(X, Y, Z))
return points
def make_edges(self, Vertices, Angle):
listEdg = [(i, i+1) for i in range(Vertices-1)]
if Angle < 360 and self.mode_ == 1:
listEdg.append((0, Vertices))
listEdg.append((Vertices-1, Vertices))
else:
listEdg.append((0, Vertices-1))
return listEdg
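    # Illustrative example (not executed): make_edges(4, 360) yields
    # [(0, 1), (1, 2), (2, 3), (0, 3)], i.e. a closed loop over four vertices.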
def make_faces(self, Angle, Vertices):
listPlg = list(range(Vertices))
if Angle < 360 and self.mode_ == 1:
listPlg.insert(0, Vertices)
return [listPlg]
def process(self):
# inputs
if self.inputs['Radius'].links:
Radius = SvGetSocketAnyType(self, self.inputs['Radius'])[0]
else:
Radius = [self.rad_]
if self.inputs['Nº Vertices'].links:
Vertices = SvGetSocketAnyType(self, self.inputs['Nº Vertices'])[0]
Vertices = list(map(lambda x: max(3, int(x)), Vertices))
else:
Vertices = [self.vert_]
if self.inputs['Degrees'].links:
Angle = SvGetSocketAnyType(self, self.inputs['Degrees'])[0]
Angle = list(map(lambda x: min(360, max(0, x)), Angle))
else:
Angle = [self.degr_]
parameters = match_long_repeat([Angle, Vertices, Radius])
if self.outputs['Vertices'].links:
points = [self.make_verts(a, v, r) for a, v, r in zip(*parameters)]
SvSetSocketAnyType(self, 'Vertices', points)
if self.outputs['Edges'].links:
edg = [self.make_edges(v, a) for a, v, r in zip(*parameters)]
SvSetSocketAnyType(self, 'Edges', edg)
if self.outputs['Polygons'].links:
plg = [self.make_faces(a, v) for a, v, r in zip(*parameters)]
SvSetSocketAnyType(self, 'Polygons', plg)
def register():
bpy.utils.register_class(CircleNode)
def unregister():
bpy.utils.unregister_class(CircleNode)
| gpl-3.0 |
franklinsales/udacity-data-analyst-nanodegree | project3/class-works/data-wrangling/data-quality/location.py | 1 | 2265 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat May 20 13:42:35 2017
@author: franklin
"""
"""
In this problem set you work with cities infobox data, audit it, come up with a
cleaning idea and then clean it up.
If you look at the full city data, you will notice that there are a couple of
values that seem to provide the same information in different formats: "point"
seems to be the combination of "wgs84_pos#lat" and "wgs84_pos#long". However,
we do not know if that is the case and should check if they are equivalent.
Finish the function check_loc(). It will receive 3 strings: first, the combined
value of "point" followed by the separate "wgs84_pos#" values. You have to
extract the lat and long values from the "point" argument and compare them to
the "wgs84_pos# values, returning True or False.
Note that you do not have to fix the values, only determine if they are
consistent. To fix them in this case you would need more information. Feel free
to discuss possible strategies for fixing this on the discussion forum.
The rest of the code is just an example on how this function can be used.
Changes to "process_file" function will not be taken into account for grading.
"""
import csv
import pprint
CITIES = 'data/cities.csv'
def check_loc(point, lat, longi):
contentPoint = point.split(" ")
if contentPoint[0] == lat and contentPoint[1] == longi:
return True
else:
return False
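# For example (the same values used by the asserts in test() below):
#   check_loc("33.08 75.28", "33.08", "75.28") -> True
#   check_loc("44.57833333333333 -91.21833333333333", "44.5783", "-91.2183") -> False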
def process_file(filename):
data = []
with open(filename, "r") as f:
reader = csv.DictReader(f)
#skipping the extra matadata
for i in range(3):
l = reader.next()
# processing file
for line in reader:
# calling your function to check the location
result = check_loc(line["point"], line["wgs84_pos#lat"], line["wgs84_pos#long"])
if not result:
print "{}: {} != {} {}".format(line["name"], line["point"], line["wgs84_pos#lat"], line["wgs84_pos#long"])
data.append(line)
return data
def test():
assert check_loc("33.08 75.28", "33.08", "75.28") == True
assert check_loc("44.57833333333333 -91.21833333333333", "44.5783", "-91.2183") == False
if __name__ == "__main__":
test() | mit |
dpyro/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/treebuilders/_base.py | 715 | 13699 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
listElementsMap = {
None: (frozenset(scopingElements), False),
"button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
"list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")])), False),
"table": (frozenset([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select": (frozenset([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
class Node(object):
def __init__(self, name):
"""Node representing an item in the tree.
name - The tag name associated with the node
parent - The parent of the current node (or None for the document node)
value - The value of the current node (applies to text nodes and
            comments)
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a list of child nodes of the current node. This must
include all elements but not necessarily other node types
_flags - A list of miscellaneous flags that can be set on the node
"""
self.name = name
self.parent = None
self.value = None
self.attributes = {}
self.childNodes = []
self._flags = []
def __str__(self):
attributesStr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in
self.attributes.items()])
if attributesStr:
return "<%s %s>" % (self.name, attributesStr)
else:
return "<%s>" % (self.name)
def __repr__(self):
return "<%s>" % (self.name)
def appendChild(self, node):
"""Insert node as a child of the current node
"""
raise NotImplementedError
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
raise NotImplementedError
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
raise NotImplementedError
def removeChild(self, node):
"""Remove node from the children of the current node
"""
raise NotImplementedError
def reparentChildren(self, newParent):
"""Move all the children of the current node to newParent.
This is needed so that trees that don't store text as nodes move the
text in the correct way
"""
# XXX - should this method be made more general?
for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = []
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text, false otherwise
"""
raise NotImplementedError
class ActiveFormattingElements(list):
def append(self, node):
equalCount = 0
if node != Marker:
for element in self[::-1]:
if element == Marker:
break
if self.nodesEqual(element, node):
equalCount += 1
if equalCount == 3:
self.remove(element)
break
list.append(self, node)
def nodesEqual(self, node1, node2):
if not node1.nameTuple == node2.nameTuple:
return False
if not node1.attributes == node2.attributes:
return False
return True
class TreeBuilder(object):
"""Base treebuilder implementation
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
"""
# Document class
documentClass = None
# The class to use for creating a node
elementClass = None
# The class to use for creating comments
commentClass = None
# The class to use for creating doctypes
doctypeClass = None
# Fragment class
fragmentClass = None
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
self.defaultNamespace = "http://www.w3.org/1999/xhtml"
else:
self.defaultNamespace = None
self.reset()
def reset(self):
self.openElements = []
self.activeFormattingElements = ActiveFormattingElements()
# XXX - rename these to headElement, formElement
self.headPointer = None
self.formPointer = None
self.insertFromTable = False
self.document = self.documentClass()
def elementInScope(self, target, variant=None):
# If we pass a node in we match that. if we pass a string
# match any node with that name
exactNode = hasattr(target, "nameTuple")
listElements, invert = listElementsMap[variant]
for node in reversed(self.openElements):
if (node.name == target and not exactNode or
node == target and exactNode):
return True
elif (invert ^ (node.nameTuple in listElements)):
return False
assert False # We should never reach this point
def reconstructActiveFormattingElements(self):
# Within this algorithm the order of steps described in the
# specification is not quite the same as the order of steps in the
# code. It should still do the same though.
# Step 1: stop the algorithm when there's nothing to do.
if not self.activeFormattingElements:
return
# Step 2 and step 3: we start with the last element. So i is -1.
i = len(self.activeFormattingElements) - 1
entry = self.activeFormattingElements[i]
if entry == Marker or entry in self.openElements:
return
# Step 6
while entry != Marker and entry not in self.openElements:
if i == 0:
# This will be reset to 0 below
i = -1
break
i -= 1
# Step 5: let entry be one earlier in the list.
entry = self.activeFormattingElements[i]
while True:
# Step 7
i += 1
# Step 8
entry = self.activeFormattingElements[i]
clone = entry.cloneNode() # Mainly to get a new copy of the attributes
# Step 9
element = self.insertElement({"type": "StartTag",
"name": clone.name,
"namespace": clone.namespace,
"data": clone.attributes})
# Step 10
self.activeFormattingElements[i] = element
# Step 11
if element == self.activeFormattingElements[-1]:
break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
def insertRoot(self, token):
element = self.createElement(token)
self.openElements.append(element)
self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
def _getInsertFromTable(self):
return self._insertFromTable
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert isinstance(name, text_type), "Element %s not unicode" % name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
and name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
def getDocument(self):
"Return the final tree"
return self.document
def getFragment(self):
"Return the final fragment"
# assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
def testSerializer(self, node):
"""Serialize the subtree of node in the format required by unit tests
node - the node from which to start serializing"""
raise NotImplementedError
| mpl-2.0 |
VanirAOSP/external_chromium_org | tools/clang/scripts/run_tool.py | 72 | 10556 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper script to help run clang tools across Chromium code.
How to use this tool:
If you want to run the tool across all Chromium code:
run_tool.py <tool> <path/to/compiledb>
If you want to run the tool across just chrome/browser and content/browser:
run_tool.py <tool> <path/to/compiledb> chrome/browser content/browser
Please see https://code.google.com/p/chromium/wiki/ClangToolRefactoring for more
information, which documents the entire automated refactoring flow in Chromium.
Why use this tool:
The clang tool implementation doesn't take advantage of multiple cores, and if
it fails mysteriously in the middle, all the generated replacements will be
lost.
Unfortunately, if the work is simply sharded across multiple cores by running
multiple RefactoringTools, problems arise when they attempt to rewrite a file at
the same time. To work around that, clang tools that are run using this tool
should output edits to stdout in the following format:
==== BEGIN EDITS ====
r:<file path>:<offset>:<length>:<replacement text>
r:<file path>:<offset>:<length>:<replacement text>
...etc...
==== END EDITS ====
Any generated edits are applied once the clang tool has finished running
across Chromium, regardless of whether some instances failed or not.
"""
import collections
import functools
import multiprocessing
import os.path
import subprocess
import sys
Edit = collections.namedtuple(
'Edit', ('edit_type', 'offset', 'length', 'replacement'))
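# Illustrative sketch (not executed by this script): one edit line in the
# format documented above, e.g.
#   line = 'r:chrome/browser/foo.cc:42:7:NewName'
#   edit_type, path, offset, length, replacement = line.split(':', 4)
# maps to Edit('r', 42, 7, 'NewName') for the (made-up) file
# 'chrome/browser/foo.cc' once offset and length are converted to int
# (see _ExtractEditsFromStdout below).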
def _GetFilesFromGit(paths = None):
"""Gets the list of files in the git repository.
Args:
paths: Prefix filter for the returned paths. May contain multiple entries.
"""
args = ['git', 'ls-files']
if paths:
args.extend(paths)
command = subprocess.Popen(args, stdout=subprocess.PIPE)
output, _ = command.communicate()
return output.splitlines()
def _ExtractEditsFromStdout(build_directory, stdout):
"""Extracts generated list of edits from the tool's stdout.
The expected format is documented at the top of this file.
Args:
build_directory: Directory that contains the compile database. Used to
normalize the filenames.
stdout: The stdout from running the clang tool.
Returns:
A dictionary mapping filenames to the associated edits.
"""
lines = stdout.splitlines()
start_index = lines.index('==== BEGIN EDITS ====')
end_index = lines.index('==== END EDITS ====')
edits = collections.defaultdict(list)
for line in lines[start_index + 1:end_index]:
try:
edit_type, path, offset, length, replacement = line.split(':', 4)
# Normalize the file path emitted by the clang tool to be relative to the
# current working directory.
path = os.path.relpath(os.path.join(build_directory, path))
edits[path].append(Edit(edit_type, int(offset), int(length), replacement))
except ValueError:
print 'Unable to parse edit: %s' % line
return edits
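# Illustrative sketch (hypothetical path and offsets, not output from a real run):
# a stdout payload containing
#   ==== BEGIN EDITS ====
#   r:foo/bar.cc:12:3:Baz
#   ==== END EDITS ====
# parses into {'foo/bar.cc': [Edit(edit_type='r', offset=12, length=3, replacement='Baz')]},
# with the path normalized relative to the current working directory.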
def _ExecuteTool(toolname, build_directory, filename):
"""Executes the tool.
This is defined outside the class so it can be pickled for the multiprocessing
module.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filename: The file to run the tool over.
Returns:
A dictionary that must contain the key "status" and a boolean value
associated with it.
If status is True, then the generated edits are stored with the key "edits"
in the dictionary.
Otherwise, the filename and the output from stderr are associated with the
keys "filename" and "stderr" respectively.
"""
command = subprocess.Popen((toolname, '-p', build_directory, filename),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = command.communicate()
if command.returncode != 0:
return {'status': False, 'filename': filename, 'stderr': stderr}
else:
return {'status': True,
'edits': _ExtractEditsFromStdout(build_directory, stdout)}
class _CompilerDispatcher(object):
"""Multiprocessing controller for running clang tools in parallel."""
def __init__(self, toolname, build_directory, filenames):
"""Initializer method.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filenames: The files to run the tool over.
"""
self.__toolname = toolname
self.__build_directory = build_directory
self.__filenames = filenames
self.__success_count = 0
self.__failed_count = 0
self.__edits = collections.defaultdict(list)
@property
def edits(self):
return self.__edits
@property
def failed_count(self):
return self.__failed_count
def Run(self):
"""Does the grunt work."""
pool = multiprocessing.Pool()
result_iterator = pool.imap_unordered(
functools.partial(_ExecuteTool, self.__toolname,
self.__build_directory),
self.__filenames)
for result in result_iterator:
self.__ProcessResult(result)
sys.stdout.write('\n')
sys.stdout.flush()
def __ProcessResult(self, result):
"""Handles result processing.
Args:
result: The result dictionary returned by _ExecuteTool.
"""
if result['status']:
self.__success_count += 1
for k, v in result['edits'].iteritems():
self.__edits[k].extend(v)
else:
self.__failed_count += 1
sys.stdout.write('\nFailed to process %s\n' % result['filename'])
sys.stdout.write(result['stderr'])
sys.stdout.write('\n')
percentage = (
float(self.__success_count + self.__failed_count) /
len(self.__filenames)) * 100
sys.stdout.write('Succeeded: %d, Failed: %d [%.2f%%]\r' % (
self.__success_count, self.__failed_count, percentage))
sys.stdout.flush()
def _ApplyEdits(edits, clang_format_diff_path):
"""Apply the generated edits.
Args:
edits: A dict mapping filenames to Edit instances that apply to that file.
clang_format_diff_path: Path to the clang-format-diff.py helper to help
automatically reformat diffs to avoid style violations. Pass None if the
clang-format step should be skipped.
"""
edit_count = 0
for k, v in edits.iteritems():
# Sort the edits and iterate through them in reverse order. Sorting allows
# duplicate edits to be quickly skipped, while reversing means that
# subsequent edits don't need to have their offsets updated with each edit
# applied.
v.sort()
last_edit = None
with open(k, 'rb+') as f:
contents = bytearray(f.read())
for edit in reversed(v):
if edit == last_edit:
continue
last_edit = edit
contents[edit.offset:edit.offset + edit.length] = edit.replacement
if not edit.replacement:
_ExtendDeletionIfElementIsInList(contents, edit.offset)
edit_count += 1
f.seek(0)
f.truncate()
f.write(contents)
if clang_format_diff_path:
if subprocess.call('git diff -U0 %s | python %s -style=Chromium' % (
k, clang_format_diff_path), shell=True) != 0:
print 'clang-format failed for %s' % k
print 'Applied %d edits to %d files' % (edit_count, len(edits))
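# Why reverse order: with sorted edits for one file, applying the edit at the largest
# offset first leaves every earlier offset untouched. E.g. (made-up offsets) for edits
# at offsets 10 and 50, applying the offset-50 edit first means offset 10 still points
# at the intended bytes; ascending order would require shifting every later offset by
# each edit's size delta.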
_WHITESPACE_BYTES = frozenset((ord('\t'), ord('\n'), ord('\r'), ord(' ')))
def _ExtendDeletionIfElementIsInList(contents, offset):
"""Extends the range of a deletion if the deleted element was part of a list.
This rewriter helper makes it easy for refactoring tools to remove elements
from a list. Even if a matcher callback knows that it is removing an element
from a list, it may not have enough information to accurately remove the list
element; for example, another matcher callback may end up removing an adjacent
list element, or all the list elements may end up being removed.
With this helper, refactoring tools can simply remove the list element and not
worry about having to include the comma in the replacement.
Args:
contents: A bytearray with the deletion already applied.
offset: The offset in the bytearray where the deleted range used to be.
"""
char_before = char_after = None
left_trim_count = 0
for byte in reversed(contents[:offset]):
left_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte in (ord(','), ord(':'), ord('('), ord('{')):
char_before = chr(byte)
break
right_trim_count = 0
for byte in contents[offset:]:
right_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte == ord(','):
char_after = chr(byte)
break
if char_before:
if char_after:
del contents[offset:offset + right_trim_count]
elif char_before in (',', ':'):
del contents[offset - left_trim_count:offset]
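# Worked example (offsets hypothetical): after a tool deletes "b" from "f(a, b, c)" the
# buffer reads "f(a, , c)"; this helper sees a ',' on both sides of the hole and removes
# the trailing comma, leaving "f(a,  c)" (leftover whitespace is normally cleaned up by
# the optional clang-format step). For a trailing element, "f(a, b)" -> "f(a, )" becomes
# "f(a)" because the preceding ', ' is trimmed instead.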
def main(argv):
if len(argv) < 2:
print 'Usage: run_tool.py <clang tool> <compile DB> <path 1> <path 2> ...'
print ' <clang tool> is the clang tool that should be run.'
print ' <compile db> is the directory that contains the compile database'
print ' <path 1> <path2> ... can be used to filter what files are edited'
return 1
clang_format_diff_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../../third_party/llvm/tools/clang/tools/clang-format',
'clang-format-diff.py')
# TODO(dcheng): Allow this to be controlled with a flag as well.
if not os.path.isfile(clang_format_diff_path):
clang_format_diff_path = None
filenames = frozenset(_GetFilesFromGit(argv[2:]))
# Filter out files that aren't C/C++/Obj-C/Obj-C++.
extensions = frozenset(('.c', '.cc', '.m', '.mm'))
dispatcher = _CompilerDispatcher(argv[0], argv[1],
[f for f in filenames
if os.path.splitext(f)[1] in extensions])
dispatcher.Run()
# Filter out edits to files that aren't in the git repository, since it's not
# useful to modify files that aren't under source control--typically, these
# are generated files or files in a git submodule that's not part of Chromium.
_ApplyEdits({k : v for k, v in dispatcher.edits.iteritems()
if k in filenames},
clang_format_diff_path)
if dispatcher.failed_count != 0:
return 2
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
google/contentbox | third_party/django/contrib/gis/maps/google/gmap.py | 174 | 9102 | from django.conf import settings
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six.moves import xrange
from django.contrib.gis.maps.google.overlays import GPolygon, GPolyline, GMarker
class GoogleMapException(Exception):
pass
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
GOOGLE_MAPS_URL='http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
"A class for generating Google Maps JavaScript."
# String constants
onunload = mark_safe('onunload="GUnload()"') # Cleans up after Google Maps
vml_css = mark_safe('v\:* {behavior:url(#default#VML);}') # CSS for IE VML
xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"') # XML Namespace (for IE VML).
def __init__(self, key=None, api_url=None, version=None,
center=None, zoom=None, dom_id='map',
kml_urls=[], polylines=None, polygons=None, markers=None,
template='gis/google/google-map.js',
js_module='geodjango',
extra_context={}):
# The Google Maps API Key defined in the settings will be used
# if not passed in as a parameter. The use of an API key is
# _required_.
if not key:
try:
self.key = settings.GOOGLE_MAPS_API_KEY
except AttributeError:
raise GoogleMapException('Google Maps API Key not found (try adding GOOGLE_MAPS_API_KEY to your settings).')
else:
self.key = key
# Getting the Google Maps API version, defaults to using the latest ("2.x"),
# this is not necessarily the most stable.
if not version:
self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
else:
self.version = version
# Can specify the API URL in the `api_url` keyword.
if not api_url:
self.api_url = getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version
else:
self.api_url = api_url
# Setting the DOM id of the map, the load function, the JavaScript
# template, and the KML URLs array.
self.dom_id = dom_id
self.extra_context = extra_context
self.js_module = js_module
self.template = template
self.kml_urls = kml_urls
# Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
overlay_info = [[GMarker, markers, 'markers'],
[GPolygon, polygons, 'polygons'],
[GPolyline, polylines, 'polylines']]
for overlay_class, overlay_list, varname in overlay_info:
setattr(self, varname, [])
if overlay_list:
for overlay in overlay_list:
if isinstance(overlay, overlay_class):
getattr(self, varname).append(overlay)
else:
getattr(self, varname).append(overlay_class(overlay))
# If GMarker, GPolygons, and/or GPolylines are used the zoom will be
# automatically calculated via the Google Maps API. If both a zoom
# level and a center coordinate are provided with polygons/polylines,
# no automatic determination will occur.
self.calc_zoom = False
if self.polygons or self.polylines or self.markers:
if center is None or zoom is None:
self.calc_zoom = True
# Defaults for the zoom level and center coordinates if the zoom
# is not automatically calculated.
if zoom is None: zoom = 4
self.zoom = zoom
if center is None: center = (0, 0)
self.center = center
def render(self):
"""
Generates the JavaScript necessary for displaying this Google Map.
"""
params = {'calc_zoom' : self.calc_zoom,
'center' : self.center,
'dom_id' : self.dom_id,
'js_module' : self.js_module,
'kml_urls' : self.kml_urls,
'zoom' : self.zoom,
'polygons' : self.polygons,
'polylines' : self.polylines,
'icons': self.icons,
'markers' : self.markers,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def body(self):
"Returns HTML body tag for loading and unloading Google Maps javascript."
return format_html('<body {0} {1}>', self.onload, self.onunload)
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
return format_html('onload="{0}.{1}_load()"', self.js_module, self.dom_id)
@property
def api_script(self):
"Returns the <script> tag for the Google Maps API javascript."
return format_html('<script src="{0}{1}" type="text/javascript"></script>',
self.api_url, self.key)
@property
def js(self):
"Returns only the generated Google Maps JavaScript (no <script> tags)."
return self.render()
@property
def scripts(self):
"Returns all <script></script> tags required with Google Maps JavaScript."
return format_html('{0}\n <script type="text/javascript">\n//<![CDATA[\n{1}//]]>\n </script>',
self.api_script, mark_safe(self.js))
@property
def style(self):
"Returns additional CSS styling needed for Google Maps on IE."
return format_html('<style type="text/css">{0}</style>', self.vml_css)
@property
def xhtml(self):
"Returns XHTML information needed for IE VML overlays."
return format_html('<html xmlns="http://www.w3.org/1999/xhtml" {0}>', self.xmlns)
@property
def icons(self):
"Returns a sequence of GIcon objects in this map."
return set([marker.icon for marker in self.markers if marker.icon])
class GoogleMapSet(GoogleMap):
def __init__(self, *args, **kwargs):
"""
A class for generating sets of Google Maps that will be shown on the
same page together.
Example:
gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
gmapset = GoogleMapSet( [ gmap1, gmap2] )
"""
# The `google-multi.js` template is used instead of `google-single.js`
# by default.
template = kwargs.pop('template', 'gis/google/google-multi.js')
# This is the template used to generate the GMap load JavaScript for
# each map in the set.
self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')
# Running GoogleMap.__init__(), and resetting the template
# value with default obtained above.
super(GoogleMapSet, self).__init__(**kwargs)
self.template = template
# If a tuple/list passed in as first element of args, then assume
if isinstance(args[0], (tuple, list)):
self.maps = args[0]
else:
self.maps = args
# Generating DOM ids for each of the maps in the set.
self.dom_ids = ['map%d' % i for i in xrange(len(self.maps))]
def load_map_js(self):
"""
Returns JavaScript containing all of the loading routines for each
map in this set.
"""
result = []
for dom_id, gmap in zip(self.dom_ids, self.maps):
# Backup copies the GoogleMap DOM id and template attributes.
# They are overridden on each GoogleMap instance in the set so
# that only the loading JavaScript (and not the header variables)
# is used with the generated DOM ids.
tmp = (gmap.template, gmap.dom_id)
gmap.template = self.map_template
gmap.dom_id = dom_id
result.append(gmap.js)
# Restoring the backup values.
gmap.template, gmap.dom_id = tmp
return mark_safe(''.join(result))
def render(self):
"""
Generates the JavaScript for the collection of Google Maps in
this set.
"""
params = {'js_module' : self.js_module,
'dom_ids' : self.dom_ids,
'load_map_js' : self.load_map_js(),
'icons' : self.icons,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
# Overloaded to use the `load` function defined in the
# `google-multi.js`, which calls the load routines for
# each one of the individual maps in the set.
return mark_safe('onload="%s.load()"' % self.js_module)
@property
def icons(self):
"Returns a sequence of all icons in each map of the set."
icons = set()
for map in self.maps: icons |= map.icons
return icons
| apache-2.0 |
robbiet480/home-assistant | homeassistant/components/konnected/config_flow.py | 5 | 33122 | """Config flow for konnected.io integration."""
import asyncio
import copy
import logging
import random
import string
from urllib.parse import urlparse
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_DOOR,
DEVICE_CLASSES_SCHEMA,
)
from homeassistant.components.ssdp import ATTR_UPNP_MANUFACTURER, ATTR_UPNP_MODEL_NAME
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_BINARY_SENSORS,
CONF_HOST,
CONF_ID,
CONF_NAME,
CONF_PORT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_TYPE,
CONF_ZONE,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from .const import (
CONF_ACTIVATION,
CONF_API_HOST,
CONF_BLINK,
CONF_DEFAULT_OPTIONS,
CONF_DISCOVERY,
CONF_INVERSE,
CONF_MODEL,
CONF_MOMENTARY,
CONF_PAUSE,
CONF_POLL_INTERVAL,
CONF_REPEAT,
DOMAIN,
STATE_HIGH,
STATE_LOW,
ZONES,
)
from .errors import CannotConnect
from .panel import KONN_MODEL, KONN_MODEL_PRO, get_status
_LOGGER = logging.getLogger(__name__)
ATTR_KONN_UPNP_MODEL_NAME = "model_name" # standard upnp is modelName
CONF_IO = "io"
CONF_IO_DIS = "Disabled"
CONF_IO_BIN = "Binary Sensor"
CONF_IO_DIG = "Digital Sensor"
CONF_IO_SWI = "Switchable Output"
CONF_MORE_STATES = "more_states"
CONF_YES = "Yes"
CONF_NO = "No"
CONF_OVERRIDE_API_HOST = "override_api_host"
KONN_MANUFACTURER = "konnected.io"
KONN_PANEL_MODEL_NAMES = {
KONN_MODEL: "Konnected Alarm Panel",
KONN_MODEL_PRO: "Konnected Alarm Panel Pro",
}
OPTIONS_IO_ANY = vol.In([CONF_IO_DIS, CONF_IO_BIN, CONF_IO_DIG, CONF_IO_SWI])
OPTIONS_IO_INPUT_ONLY = vol.In([CONF_IO_DIS, CONF_IO_BIN, CONF_IO_DIG])
OPTIONS_IO_OUTPUT_ONLY = vol.In([CONF_IO_DIS, CONF_IO_SWI])
# Config entry schemas
IO_SCHEMA = vol.Schema(
{
vol.Optional("1", default=CONF_IO_DIS): OPTIONS_IO_ANY,
vol.Optional("2", default=CONF_IO_DIS): OPTIONS_IO_ANY,
vol.Optional("3", default=CONF_IO_DIS): OPTIONS_IO_ANY,
vol.Optional("4", default=CONF_IO_DIS): OPTIONS_IO_ANY,
vol.Optional("5", default=CONF_IO_DIS): OPTIONS_IO_ANY,
vol.Optional("6", default=CONF_IO_DIS): OPTIONS_IO_ANY,
vol.Optional("7", default=CONF_IO_DIS): OPTIONS_IO_ANY,
vol.Optional("8", default=CONF_IO_DIS): OPTIONS_IO_ANY,
vol.Optional("9", default=CONF_IO_DIS): OPTIONS_IO_INPUT_ONLY,
vol.Optional("10", default=CONF_IO_DIS): OPTIONS_IO_INPUT_ONLY,
vol.Optional("11", default=CONF_IO_DIS): OPTIONS_IO_INPUT_ONLY,
vol.Optional("12", default=CONF_IO_DIS): OPTIONS_IO_INPUT_ONLY,
vol.Optional("out", default=CONF_IO_DIS): OPTIONS_IO_OUTPUT_ONLY,
vol.Optional("alarm1", default=CONF_IO_DIS): OPTIONS_IO_OUTPUT_ONLY,
vol.Optional("out1", default=CONF_IO_DIS): OPTIONS_IO_OUTPUT_ONLY,
vol.Optional("alarm2_out2", default=CONF_IO_DIS): OPTIONS_IO_OUTPUT_ONLY,
}
)
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE): vol.In(ZONES),
vol.Required(CONF_TYPE, default=DEVICE_CLASS_DOOR): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_INVERSE, default=False): cv.boolean,
}
)
SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE): vol.In(ZONES),
vol.Required(CONF_TYPE, default="dht"): vol.All(
vol.Lower, vol.In(["dht", "ds18b20"])
),
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_POLL_INTERVAL, default=3): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
)
SWITCH_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE): vol.In(ZONES),
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ACTIVATION, default=STATE_HIGH): vol.All(
vol.Lower, vol.In([STATE_HIGH, STATE_LOW])
),
vol.Optional(CONF_MOMENTARY): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(CONF_PAUSE): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(CONF_REPEAT): vol.All(vol.Coerce(int), vol.Range(min=-1)),
}
)
OPTIONS_SCHEMA = vol.Schema(
{
vol.Required(CONF_IO): IO_SCHEMA,
vol.Optional(CONF_BINARY_SENSORS): vol.All(
cv.ensure_list, [BINARY_SENSOR_SCHEMA]
),
vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA]),
vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCH_SCHEMA]),
vol.Optional(CONF_BLINK, default=True): cv.boolean,
vol.Optional(CONF_API_HOST, default=""): vol.Any("", cv.url),
vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
},
extra=vol.REMOVE_EXTRA,
)
CONFIG_ENTRY_SCHEMA = vol.Schema(
{
vol.Required(CONF_ID): cv.matches_regex("[0-9a-f]{12}"),
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_MODEL): vol.Any(*KONN_PANEL_MODEL_NAMES),
vol.Required(CONF_ACCESS_TOKEN): cv.matches_regex("[a-zA-Z0-9]+"),
vol.Required(CONF_DEFAULT_OPTIONS): OPTIONS_SCHEMA,
},
extra=vol.REMOVE_EXTRA,
)
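# A hypothetical payload that would satisfy CONFIG_ENTRY_SCHEMA (values invented for
# illustration only):
#
#   {
#       "id": "aabbccddeeff",                     # 12 hex chars
#       "host": "192.168.1.10",
#       "port": 9123,
#       "model": KONN_MODEL,                      # any key of KONN_PANEL_MODEL_NAMES
#       "access_token": "ABC123TOKEN",
#       "default_options": OPTIONS_SCHEMA({CONF_IO: {}}),
#   }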
class KonnectedFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for NEW_NAME."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
# class variable to store/share discovered host information
discovered_hosts = {}
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
def __init__(self):
"""Initialize the Konnected flow."""
self.data = {}
self.options = OPTIONS_SCHEMA({CONF_IO: {}})
async def async_gen_config(self, host, port):
"""Populate self.data based on panel status.
This will raise CannotConnect if an error occurs
"""
self.data[CONF_HOST] = host
self.data[CONF_PORT] = port
try:
status = await get_status(self.hass, host, port)
self.data[CONF_ID] = status.get("chipId", status["mac"].replace(":", ""))
except (CannotConnect, KeyError):
raise CannotConnect
else:
self.data[CONF_MODEL] = status.get("model", KONN_MODEL)
self.data[CONF_ACCESS_TOKEN] = "".join(
random.choices(f"{string.ascii_uppercase}{string.digits}", k=20)
)
async def async_step_import(self, device_config):
"""Import a configuration.yaml config.
This flow is triggered by `async_setup` for configured panels.
"""
_LOGGER.debug(device_config)
# save the data and confirm connection via user step
await self.async_set_unique_id(device_config["id"])
self.options = device_config[CONF_DEFAULT_OPTIONS]
# config schema ensures we have port if we have host
if device_config.get(CONF_HOST):
# automatically connect if we have host info
return await self.async_step_user(
user_input={
CONF_HOST: device_config[CONF_HOST],
CONF_PORT: device_config[CONF_PORT],
}
)
# if we have no host info wait for it or abort if previously configured
self._abort_if_unique_id_configured()
return await self.async_step_import_confirm()
async def async_step_import_confirm(self, user_input=None):
"""Confirm the user wants to import the config entry."""
if user_input is None:
return self.async_show_form(
step_id="import_confirm",
description_placeholders={"id": self.unique_id},
)
# if we have ssdp discovered applicable host info use it
if KonnectedFlowHandler.discovered_hosts.get(self.unique_id):
return await self.async_step_user(
user_input={
CONF_HOST: KonnectedFlowHandler.discovered_hosts[self.unique_id][
CONF_HOST
],
CONF_PORT: KonnectedFlowHandler.discovered_hosts[self.unique_id][
CONF_PORT
],
}
)
return await self.async_step_user()
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered konnected panel.
This flow is triggered by the SSDP component. It will check if the
device is already configured and attempt to finish the config if not.
"""
_LOGGER.debug(discovery_info)
try:
if discovery_info[ATTR_UPNP_MANUFACTURER] != KONN_MANUFACTURER:
return self.async_abort(reason="not_konn_panel")
if not any(
name in discovery_info[ATTR_UPNP_MODEL_NAME]
for name in KONN_PANEL_MODEL_NAMES
):
_LOGGER.warning(
"Discovered unrecognized Konnected device %s",
discovery_info.get(ATTR_UPNP_MODEL_NAME, "Unknown"),
)
return self.async_abort(reason="not_konn_panel")
# If MAC is missing it is a bug in the device fw but we'll guard
# against it since the field is so vital
except KeyError:
_LOGGER.error("Malformed Konnected SSDP info")
else:
# extract host/port from ssdp_location
netloc = urlparse(discovery_info["ssdp_location"]).netloc.split(":")
return await self.async_step_user(
user_input={CONF_HOST: netloc[0], CONF_PORT: int(netloc[1])}
)
return self.async_abort(reason="unknown")
async def async_step_user(self, user_input=None):
"""Connect to panel and get config."""
errors = {}
if user_input:
# build config info and wait for user confirmation
self.data[CONF_HOST] = user_input[CONF_HOST]
self.data[CONF_PORT] = user_input[CONF_PORT]
            # brief delay to allow processing of recent status request
await asyncio.sleep(0.1)
try:
status = await get_status(
self.hass, self.data[CONF_HOST], self.data[CONF_PORT]
)
except CannotConnect:
errors["base"] = "cannot_connect"
else:
self.data[CONF_ID] = status.get(
"chipId", status["mac"].replace(":", "")
)
self.data[CONF_MODEL] = status.get("model", KONN_MODEL)
# save off our discovered host info
KonnectedFlowHandler.discovered_hosts[self.data[CONF_ID]] = {
CONF_HOST: self.data[CONF_HOST],
CONF_PORT: self.data[CONF_PORT],
}
return await self.async_step_confirm()
return self.async_show_form(
step_id="user",
description_placeholders={
"host": self.data.get(CONF_HOST, "Unknown"),
"port": self.data.get(CONF_PORT, "Unknown"),
},
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=self.data.get(CONF_HOST)): str,
vol.Required(CONF_PORT, default=self.data.get(CONF_PORT)): int,
}
),
errors=errors,
)
async def async_step_confirm(self, user_input=None):
"""Attempt to link with the Konnected panel.
Given a configured host, will ask the user to confirm and finalize
the connection.
"""
if user_input is None:
# abort and update an existing config entry if host info changes
await self.async_set_unique_id(self.data[CONF_ID])
self._abort_if_unique_id_configured(updates=self.data)
return self.async_show_form(
step_id="confirm",
description_placeholders={
"model": KONN_PANEL_MODEL_NAMES[self.data[CONF_MODEL]],
"id": self.unique_id,
"host": self.data[CONF_HOST],
"port": self.data[CONF_PORT],
},
)
# Create access token, attach default options and create entry
self.data[CONF_DEFAULT_OPTIONS] = self.options
self.data[CONF_ACCESS_TOKEN] = self.hass.data.get(DOMAIN, {}).get(
CONF_ACCESS_TOKEN
) or "".join(random.choices(f"{string.ascii_uppercase}{string.digits}", k=20))
return self.async_create_entry(
title=KONN_PANEL_MODEL_NAMES[self.data[CONF_MODEL]], data=self.data,
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Return the Options Flow."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for a Konnected Panel."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.entry = config_entry
self.model = self.entry.data[CONF_MODEL]
self.current_opt = self.entry.options or self.entry.data[CONF_DEFAULT_OPTIONS]
# as config proceeds we'll build up new options and then replace what's in the config entry
self.new_opt = {CONF_IO: {}}
self.active_cfg = None
self.io_cfg = {}
self.current_states = []
self.current_state = 1
@callback
def get_current_cfg(self, io_type, zone):
"""Get the current zone config."""
return next(
(
cfg
for cfg in self.current_opt.get(io_type, [])
if cfg[CONF_ZONE] == zone
),
{},
)
async def async_step_init(self, user_input=None):
"""Handle options flow."""
return await self.async_step_options_io()
async def async_step_options_io(self, user_input=None):
"""Configure legacy panel IO or first half of pro IO."""
errors = {}
current_io = self.current_opt.get(CONF_IO, {})
if user_input is not None:
# strip out disabled io and save for options cfg
for key, value in user_input.items():
if value != CONF_IO_DIS:
self.new_opt[CONF_IO][key] = value
return await self.async_step_options_io_ext()
if self.model == KONN_MODEL:
return self.async_show_form(
step_id="options_io",
data_schema=vol.Schema(
{
vol.Required(
"1", default=current_io.get("1", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"2", default=current_io.get("2", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"3", default=current_io.get("3", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"4", default=current_io.get("4", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"5", default=current_io.get("5", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"6", default=current_io.get("6", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"out", default=current_io.get("out", CONF_IO_DIS)
): OPTIONS_IO_OUTPUT_ONLY,
}
),
description_placeholders={
"model": KONN_PANEL_MODEL_NAMES[self.model],
"host": self.entry.data[CONF_HOST],
},
errors=errors,
)
# configure the first half of the pro board io
if self.model == KONN_MODEL_PRO:
return self.async_show_form(
step_id="options_io",
data_schema=vol.Schema(
{
vol.Required(
"1", default=current_io.get("1", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"2", default=current_io.get("2", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"3", default=current_io.get("3", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"4", default=current_io.get("4", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"5", default=current_io.get("5", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"6", default=current_io.get("6", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"7", default=current_io.get("7", CONF_IO_DIS)
): OPTIONS_IO_ANY,
}
),
description_placeholders={
"model": KONN_PANEL_MODEL_NAMES[self.model],
"host": self.entry.data[CONF_HOST],
},
errors=errors,
)
return self.async_abort(reason="not_konn_panel")
async def async_step_options_io_ext(self, user_input=None):
"""Allow the user to configure the extended IO for pro."""
errors = {}
current_io = self.current_opt.get(CONF_IO, {})
if user_input is not None:
# strip out disabled io and save for options cfg
for key, value in user_input.items():
if value != CONF_IO_DIS:
self.new_opt[CONF_IO].update({key: value})
self.io_cfg = copy.deepcopy(self.new_opt[CONF_IO])
return await self.async_step_options_binary()
if self.model == KONN_MODEL:
self.io_cfg = copy.deepcopy(self.new_opt[CONF_IO])
return await self.async_step_options_binary()
if self.model == KONN_MODEL_PRO:
return self.async_show_form(
step_id="options_io_ext",
data_schema=vol.Schema(
{
vol.Required(
"8", default=current_io.get("8", CONF_IO_DIS)
): OPTIONS_IO_ANY,
vol.Required(
"9", default=current_io.get("9", CONF_IO_DIS)
): OPTIONS_IO_INPUT_ONLY,
vol.Required(
"10", default=current_io.get("10", CONF_IO_DIS)
): OPTIONS_IO_INPUT_ONLY,
vol.Required(
"11", default=current_io.get("11", CONF_IO_DIS)
): OPTIONS_IO_INPUT_ONLY,
vol.Required(
"12", default=current_io.get("12", CONF_IO_DIS)
): OPTIONS_IO_INPUT_ONLY,
vol.Required(
"alarm1", default=current_io.get("alarm1", CONF_IO_DIS)
): OPTIONS_IO_OUTPUT_ONLY,
vol.Required(
"out1", default=current_io.get("out1", CONF_IO_DIS)
): OPTIONS_IO_OUTPUT_ONLY,
vol.Required(
"alarm2_out2",
default=current_io.get("alarm2_out2", CONF_IO_DIS),
): OPTIONS_IO_OUTPUT_ONLY,
}
),
description_placeholders={
"model": KONN_PANEL_MODEL_NAMES[self.model],
"host": self.entry.data[CONF_HOST],
},
errors=errors,
)
return self.async_abort(reason="not_konn_panel")
async def async_step_options_binary(self, user_input=None):
"""Allow the user to configure the IO options for binary sensors."""
errors = {}
if user_input is not None:
zone = {"zone": self.active_cfg}
zone.update(user_input)
self.new_opt[CONF_BINARY_SENSORS] = self.new_opt.get(
CONF_BINARY_SENSORS, []
) + [zone]
self.io_cfg.pop(self.active_cfg)
self.active_cfg = None
if self.active_cfg:
current_cfg = self.get_current_cfg(CONF_BINARY_SENSORS, self.active_cfg)
return self.async_show_form(
step_id="options_binary",
data_schema=vol.Schema(
{
vol.Required(
CONF_TYPE,
default=current_cfg.get(CONF_TYPE, DEVICE_CLASS_DOOR),
): DEVICE_CLASSES_SCHEMA,
vol.Optional(
CONF_NAME, default=current_cfg.get(CONF_NAME, vol.UNDEFINED)
): str,
vol.Optional(
CONF_INVERSE, default=current_cfg.get(CONF_INVERSE, False)
): bool,
}
),
description_placeholders={
"zone": f"Zone {self.active_cfg}"
if len(self.active_cfg) < 3
                else self.active_cfg.upper()
},
errors=errors,
)
# find the next unconfigured binary sensor
for key, value in self.io_cfg.items():
if value == CONF_IO_BIN:
self.active_cfg = key
current_cfg = self.get_current_cfg(CONF_BINARY_SENSORS, self.active_cfg)
return self.async_show_form(
step_id="options_binary",
data_schema=vol.Schema(
{
vol.Required(
CONF_TYPE,
default=current_cfg.get(CONF_TYPE, DEVICE_CLASS_DOOR),
): DEVICE_CLASSES_SCHEMA,
vol.Optional(
CONF_NAME,
default=current_cfg.get(CONF_NAME, vol.UNDEFINED),
): str,
vol.Optional(
CONF_INVERSE,
default=current_cfg.get(CONF_INVERSE, False),
): bool,
}
),
description_placeholders={
"zone": f"Zone {self.active_cfg}"
if len(self.active_cfg) < 3
                    else self.active_cfg.upper()
},
errors=errors,
)
return await self.async_step_options_digital()
async def async_step_options_digital(self, user_input=None):
"""Allow the user to configure the IO options for digital sensors."""
errors = {}
if user_input is not None:
zone = {"zone": self.active_cfg}
zone.update(user_input)
self.new_opt[CONF_SENSORS] = self.new_opt.get(CONF_SENSORS, []) + [zone]
self.io_cfg.pop(self.active_cfg)
self.active_cfg = None
if self.active_cfg:
current_cfg = self.get_current_cfg(CONF_SENSORS, self.active_cfg)
return self.async_show_form(
step_id="options_digital",
data_schema=vol.Schema(
{
vol.Required(
CONF_TYPE, default=current_cfg.get(CONF_TYPE, "dht")
): vol.All(vol.Lower, vol.In(["dht", "ds18b20"])),
vol.Optional(
CONF_NAME, default=current_cfg.get(CONF_NAME, vol.UNDEFINED)
): str,
vol.Optional(
CONF_POLL_INTERVAL,
default=current_cfg.get(CONF_POLL_INTERVAL, 3),
): vol.All(vol.Coerce(int), vol.Range(min=1)),
}
),
description_placeholders={
"zone": f"Zone {self.active_cfg}"
if len(self.active_cfg) < 3
else self.active_cfg.upper()
},
errors=errors,
)
# find the next unconfigured digital sensor
for key, value in self.io_cfg.items():
if value == CONF_IO_DIG:
self.active_cfg = key
current_cfg = self.get_current_cfg(CONF_SENSORS, self.active_cfg)
return self.async_show_form(
step_id="options_digital",
data_schema=vol.Schema(
{
vol.Required(
CONF_TYPE, default=current_cfg.get(CONF_TYPE, "dht")
): vol.All(vol.Lower, vol.In(["dht", "ds18b20"])),
vol.Optional(
CONF_NAME,
default=current_cfg.get(CONF_NAME, vol.UNDEFINED),
): str,
vol.Optional(
CONF_POLL_INTERVAL,
default=current_cfg.get(CONF_POLL_INTERVAL, 3),
): vol.All(vol.Coerce(int), vol.Range(min=1)),
}
),
description_placeholders={
"zone": f"Zone {self.active_cfg}"
if len(self.active_cfg) < 3
else self.active_cfg.upper()
},
errors=errors,
)
return await self.async_step_options_switch()
async def async_step_options_switch(self, user_input=None):
"""Allow the user to configure the IO options for switches."""
errors = {}
if user_input is not None:
zone = {"zone": self.active_cfg}
zone.update(user_input)
del zone[CONF_MORE_STATES]
self.new_opt[CONF_SWITCHES] = self.new_opt.get(CONF_SWITCHES, []) + [zone]
# iterate through multiple switch states
if self.current_states:
self.current_states.pop(0)
# only go to next zone if all states are entered
self.current_state += 1
if user_input[CONF_MORE_STATES] == CONF_NO:
self.io_cfg.pop(self.active_cfg)
self.active_cfg = None
if self.active_cfg:
current_cfg = next(iter(self.current_states), {})
return self.async_show_form(
step_id="options_switch",
data_schema=vol.Schema(
{
vol.Optional(
CONF_NAME, default=current_cfg.get(CONF_NAME, vol.UNDEFINED)
): str,
vol.Optional(
CONF_ACTIVATION,
default=current_cfg.get(CONF_ACTIVATION, STATE_HIGH),
): vol.All(vol.Lower, vol.In([STATE_HIGH, STATE_LOW])),
vol.Optional(
CONF_MOMENTARY,
default=current_cfg.get(CONF_MOMENTARY, vol.UNDEFINED),
): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(
CONF_PAUSE,
default=current_cfg.get(CONF_PAUSE, vol.UNDEFINED),
): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(
CONF_REPEAT,
default=current_cfg.get(CONF_REPEAT, vol.UNDEFINED),
): vol.All(vol.Coerce(int), vol.Range(min=-1)),
vol.Required(
CONF_MORE_STATES,
default=CONF_YES
if len(self.current_states) > 1
else CONF_NO,
): vol.In([CONF_YES, CONF_NO]),
}
),
description_placeholders={
"zone": f"Zone {self.active_cfg}"
if len(self.active_cfg) < 3
else self.active_cfg.upper(),
"state": str(self.current_state),
},
errors=errors,
)
# find the next unconfigured switch
for key, value in self.io_cfg.items():
if value == CONF_IO_SWI:
self.active_cfg = key
self.current_states = [
cfg
for cfg in self.current_opt.get(CONF_SWITCHES, [])
if cfg[CONF_ZONE] == self.active_cfg
]
current_cfg = next(iter(self.current_states), {})
self.current_state = 1
return self.async_show_form(
step_id="options_switch",
data_schema=vol.Schema(
{
vol.Optional(
CONF_NAME,
default=current_cfg.get(CONF_NAME, vol.UNDEFINED),
): str,
vol.Optional(
CONF_ACTIVATION,
default=current_cfg.get(CONF_ACTIVATION, STATE_HIGH),
): vol.In(["low", "high"]),
vol.Optional(
CONF_MOMENTARY,
default=current_cfg.get(CONF_MOMENTARY, vol.UNDEFINED),
): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(
CONF_PAUSE,
default=current_cfg.get(CONF_PAUSE, vol.UNDEFINED),
): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(
CONF_REPEAT,
default=current_cfg.get(CONF_REPEAT, vol.UNDEFINED),
): vol.All(vol.Coerce(int), vol.Range(min=-1)),
vol.Required(
CONF_MORE_STATES,
default=CONF_YES
if len(self.current_states) > 1
else CONF_NO,
): vol.In([CONF_YES, CONF_NO]),
}
),
description_placeholders={
"zone": f"Zone {self.active_cfg}"
if len(self.active_cfg) < 3
else self.active_cfg.upper(),
"state": str(self.current_state),
},
errors=errors,
)
return await self.async_step_options_misc()
async def async_step_options_misc(self, user_input=None):
"""Allow the user to configure the LED behavior."""
errors = {}
if user_input is not None:
# config schema only does basic schema val so check url here
try:
if user_input[CONF_OVERRIDE_API_HOST]:
cv.url(user_input.get(CONF_API_HOST, ""))
else:
user_input[CONF_API_HOST] = ""
except vol.Invalid:
errors["base"] = "bad_host"
else:
# no need to store the override - can infer
del user_input[CONF_OVERRIDE_API_HOST]
self.new_opt.update(user_input)
return self.async_create_entry(title="", data=self.new_opt)
return self.async_show_form(
step_id="options_misc",
data_schema=vol.Schema(
{
vol.Required(
CONF_BLINK, default=self.current_opt.get(CONF_BLINK, True)
): bool,
vol.Required(
CONF_OVERRIDE_API_HOST,
default=bool(self.current_opt.get(CONF_API_HOST)),
): bool,
vol.Optional(
CONF_API_HOST, default=self.current_opt.get(CONF_API_HOST, "")
): str,
}
),
errors=errors,
)
| apache-2.0 |
vaygr/ansible | lib/ansible/module_utils/openstack.py | 20 | 6346 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from ansible.module_utils.six import iteritems
def openstack_argument_spec():
# DEPRECATED: This argument spec is only used for the deprecated old
# OpenStack modules. It turns out that modern OpenStack auth is WAY
# more complex than this.
# Consume standard OpenStack environment variables.
    # This is mainly useful for ad-hoc command line operation, since in
    # playbooks one would expect these variables to be set appropriately.
OS_AUTH_URL = os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
OS_PASSWORD = os.environ.get('OS_PASSWORD', None)
OS_REGION_NAME = os.environ.get('OS_REGION_NAME', None)
OS_USERNAME = os.environ.get('OS_USERNAME', 'admin')
OS_TENANT_NAME = os.environ.get('OS_TENANT_NAME', OS_USERNAME)
spec = dict(
login_username=dict(default=OS_USERNAME),
auth_url=dict(default=OS_AUTH_URL),
region_name=dict(default=OS_REGION_NAME),
availability_zone=dict(),
)
if OS_PASSWORD:
spec['login_password'] = dict(default=OS_PASSWORD)
else:
spec['login_password'] = dict(required=True)
if OS_TENANT_NAME:
spec['login_tenant_name'] = dict(default=OS_TENANT_NAME)
else:
spec['login_tenant_name'] = dict(required=True)
return spec
def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
ret = []
for (k, v) in iteritems(addresses):
if key_name and k == key_name:
ret.extend([addrs['addr'] for addrs in v])
else:
for interface_spec in v:
if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
ret.append(interface_spec['addr'])
return ret
def openstack_full_argument_spec(**kwargs):
spec = dict(
cloud=dict(default=None, type='raw'),
auth_type=dict(default=None),
auth=dict(default=None, type='dict', no_log=True),
region_name=dict(default=None),
availability_zone=dict(default=None),
verify=dict(default=None, type='bool', aliases=['validate_certs']),
cacert=dict(default=None),
cert=dict(default=None),
key=dict(default=None, no_log=True),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
api_timeout=dict(default=None, type='int'),
interface=dict(
default='public', choices=['public', 'internal', 'admin'],
aliases=['endpoint_type']),
)
spec.update(kwargs)
return spec
def openstack_module_kwargs(**kwargs):
ret = {}
for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
if key in kwargs:
if key in ret:
ret[key].extend(kwargs[key])
else:
ret[key] = kwargs[key]
return ret
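# Usage sketch (illustrative, not taken from a specific module): a cloud module would
# typically build its spec and kwargs from these helpers, e.g.
#
#   argument_spec = openstack_full_argument_spec(name=dict(required=True))
#   module = AnsibleModule(argument_spec, **openstack_module_kwargs())
#   shade, cloud = openstack_cloud_from_module(module)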
def openstack_cloud_from_module(module, min_version=None):
from distutils.version import StrictVersion
try:
import shade
except ImportError:
module.fail_json(msg='shade is required for this module')
if min_version:
if StrictVersion(shade.__version__) < StrictVersion(min_version):
module.fail_json(
msg="To utilize this module, the installed version of"
"the shade library MUST be >={min_version}".format(
min_version=min_version))
cloud_config = module.params.pop('cloud', None)
if isinstance(cloud_config, dict):
fail_message = (
"A cloud config dict was provided to the cloud parameter"
" but also a value was provided for {param}. If a cloud"
" config dict is provided, {param} should be"
" excluded.")
for param in (
'auth', 'region_name', 'verify',
'cacert', 'key', 'api_timeout', 'interface'):
if module.params[param] is not None:
module.fail_json(fail_message.format(param=param))
if module.params['auth_type'] != 'password':
module.fail_json(fail_message.format(param='auth_type'))
return shade, shade.operator_cloud(**cloud_config)
else:
return shade, shade.operator_cloud(
cloud=cloud_config,
auth_type=module.params['auth_type'],
auth=module.params['auth'],
region_name=module.params['region_name'],
verify=module.params['verify'],
cacert=module.params['cacert'],
key=module.params['key'],
api_timeout=module.params['api_timeout'],
interface=module.params['interface'],
)
| gpl-3.0 |
zaffra/Donate | django/contrib/localflavor/pt/forms.py | 309 | 1561 | """
PT-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^(\d{9}|(00|\+)\d*)$')
class PTZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXX-XXX.'),
}
def __init__(self, *args, **kwargs):
super(PTZipCodeField, self).__init__(r'^(\d{4}-\d{3}|\d{7})$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
cleaned = super(PTZipCodeField, self).clean(value)
if len(cleaned) == 7:
return u'%s-%s' % (cleaned[:4],cleaned[4:])
else:
return cleaned
class PTPhoneNumberField(Field):
"""
    Validate a local Portuguese phone number (international numbers are also accepted).
    It should have 9 digits (spaces are allowed) or start with 00 or + (international).
"""
default_error_messages = {
'invalid': _('Phone numbers must have 9 digits, or start by + or 00.'),
}
def clean(self, value):
super(PTPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub('(\.|\s)', '', smart_unicode(value))
m = phone_digits_re.search(value)
if m:
return u'%s' % value
raise ValidationError(self.error_messages['invalid'])
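# Illustrative inputs: "917845189", "91 784 5189" and "+351 91 784 5189" all validate
# (dots and spaces are stripped before matching); "91784518" fails because only
# 9-digit numbers, or numbers starting with 00 or +, match phone_digits_re.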
| bsd-3-clause |
allanm84/linux-imx | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
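# Illustrative lookup (addresses invented): with kallsyms == [(0x1000, "func_a"), (0x2000, "func_b")],
# get_sym(0x1800) returns ("func_a", 0x800) -- the binary search picks the last symbol
# whose start address is <= the queried location.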
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
jrg365/gpytorch | gpytorch/utils/contour_integral_quad.py | 1 | 5563 | import math
import warnings
import torch
from .. import settings
from .broadcasting import _mul_broadcast_shape
from .linear_cg import linear_cg
from .minres import minres
from .warnings import NumericalWarning
def contour_integral_quad(
lazy_tensor,
rhs,
inverse=False,
weights=None,
shifts=None,
max_lanczos_iter=20,
num_contour_quadrature=None,
shift_offset=0,
):
r"""
    Performs :math:`\mathbf K^{1/2} \mathbf b` or :math:`\mathbf K^{-1/2} \mathbf b`
using contour integral quadrature.
:param gpytorch.lazy.LazyTensor lazy_tensor: LazyTensor representing :math:`\mathbf K`
:param torch.Tensor rhs: Right hand side tensor :math:`\mathbf b`
:param bool inverse: (default False) whether to compute :math:`\mathbf K^{1/2} \mathbf b` (if False)
        or :math:`\mathbf K^{-1/2} \mathbf b` (if True)
    :param int max_lanczos_iter: (default 20) Number of Lanczos iterations to run (to estimate eigenvalues)
:param int num_contour_quadrature: How many quadrature samples to use for approximation. Default is in settings.
:rtype: torch.Tensor
:return: Approximation to :math:`\mathbf K^{1/2} \mathbf b` or :math:`\mathbf K^{-1/2} \mathbf b`.
"""
import numpy as np
from scipy.special import ellipj, ellipk
if num_contour_quadrature is None:
num_contour_quadrature = settings.num_contour_quadrature.value()
output_batch_shape = _mul_broadcast_shape(lazy_tensor.batch_shape, rhs.shape[:-2])
preconditioner, preconditioner_lt, _ = lazy_tensor._preconditioner()
def sqrt_precond_matmul(rhs):
if preconditioner_lt is not None:
solves, weights, _, _ = contour_integral_quad(preconditioner_lt, rhs, inverse=False)
return (solves * weights).sum(0)
else:
return rhs
# if not inverse:
rhs = sqrt_precond_matmul(rhs)
if shifts is None:
# Determine if init_vecs has extra_dimensions
num_extra_dims = max(0, rhs.dim() - lazy_tensor.dim())
lanczos_init = rhs.__getitem__(
(*([0] * num_extra_dims), Ellipsis, slice(None, None, None), slice(None, 1, None))
).expand(*lazy_tensor.shape[:-1], 1)
with warnings.catch_warnings(), torch.no_grad():
warnings.simplefilter("ignore", NumericalWarning) # Supress CG stopping warning
_, lanczos_mat = linear_cg(
lambda v: lazy_tensor._matmul(v),
rhs=lanczos_init,
n_tridiag=1,
max_iter=max_lanczos_iter,
tolerance=1e-5,
max_tridiag_iter=max_lanczos_iter,
preconditioner=preconditioner,
)
"""
K^{-1/2} b = 2/pi \int_0^\infty (K - t^2 I)^{-1} dt
We'll approximate this integral as a sum using quadrature
We'll determine the appropriate values of t, as well as their weights using elliptical integrals
"""
# Compute an approximate condition number
# We'll do this with Lanczos
try:
approx_eigs = lanczos_mat.symeig()[0]
if approx_eigs.min() <= 0:
raise RuntimeError
except RuntimeError:
approx_eigs = lazy_tensor.diag()
max_eig = approx_eigs.max(dim=-1)[0]
min_eig = approx_eigs.min(dim=-1)[0]
k2 = (min_eig / max_eig).squeeze(-1)
# Compute the shifts needed for the contour
flat_shifts = torch.zeros(num_contour_quadrature + 1, k2.numel(), dtype=k2.dtype, device=k2.device)
flat_weights = torch.zeros(num_contour_quadrature, k2.numel(), dtype=k2.dtype, device=k2.device)
# For loop because numpy
for i, (sub_k2, sub_min_eig) in enumerate(zip(k2.flatten().tolist(), min_eig.flatten().tolist())):
# Compute shifts
Kp = ellipk(1 - sub_k2) # Elliptical integral of the first kind
N = num_contour_quadrature
t = 1j * (np.arange(1, N + 1) - 0.5) * Kp / N
sn, cn, dn, _ = ellipj(np.imag(t), 1 - sub_k2) # Jacobi elliptic functions
cn = 1.0 / cn
dn = dn * cn
sn = 1j * sn * cn
w = np.sqrt(sub_min_eig) * sn
w_pow2 = np.real(np.power(w, 2))
sub_shifts = torch.tensor(w_pow2, dtype=rhs.dtype, device=rhs.device)
# Compute weights
constant = -2 * Kp * np.sqrt(sub_min_eig) / (math.pi * N)
dzdt = torch.tensor(cn * dn, dtype=rhs.dtype, device=rhs.device)
dzdt.mul_(constant)
sub_weights = dzdt
# Store results
flat_shifts[1:, i].copy_(sub_shifts)
flat_weights[:, i].copy_(sub_weights)
weights = flat_weights.view(num_contour_quadrature, *k2.shape, 1, 1)
shifts = flat_shifts.view(num_contour_quadrature + 1, *k2.shape)
shifts.sub_(shift_offset)
# Make sure we have the right shape
if k2.shape != output_batch_shape:
weights = torch.stack([w.expand(*output_batch_shape, 1, 1) for w in weights], 0)
shifts = torch.stack([s.expand(output_batch_shape) for s in shifts], 0)
# Compute the solves at the given shifts
# Do one more matmul if we don't want to include the inverse
with torch.no_grad():
solves = minres(lambda v: lazy_tensor._matmul(v), rhs, value=-1, shifts=shifts, preconditioner=preconditioner)
no_shift_solves = solves[0]
solves = solves[1:]
if not inverse:
solves = lazy_tensor._matmul(solves)
return solves, weights, no_shift_solves, shifts
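# Usage sketch (illustrative): callers combine the quadrature pieces themselves, e.g.
#
#   solves, weights, _, _ = contour_integral_quad(lazy_tensor, rhs, inverse=False)
#   root_mvm = (solves * weights).sum(0)   # approximates K^{1/2} @ rhs
#
# which is the same reduction sqrt_precond_matmul above applies to the preconditioner.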
| mit |
willcodefortea/wagtail | wagtail/wagtailsearch/management/commands/update_index.py | 3 | 2495 | from django.core.management.base import BaseCommand
from django.db import models
from wagtail.wagtailsearch.indexed import Indexed
from wagtail.wagtailsearch.backends import get_search_backend
class Command(BaseCommand):
def handle(self, **options):
# Print info
self.stdout.write("Getting object list")
# Get list of indexed models
indexed_models = [model for model in models.get_models() if issubclass(model, Indexed)]
# Object set
object_set = {}
# Add all objects to object set and detect any duplicates
# Duplicates are caused when both a model and a derived model are indexed
# Eg, if BlogPost inherits from Page and both of these models are indexed
# If we were to add all objects from both models into the index, all the BlogPosts will have two entries
for model in indexed_models:
# Get toplevel content type
toplevel_content_type = model.indexed_get_toplevel_content_type()
# Loop through objects
for obj in model.get_indexed_objects():
# Get key for this object
key = toplevel_content_type + ':' + str(obj.pk)
# Check if this key already exists
if key in object_set:
# Conflict, work out who should get this space
# The object with the longest content type string gets the space
# Eg, "wagtailcore.Page-myapp.BlogPost" kicks out "wagtailcore.Page"
if len(obj.indexed_get_content_type()) > len(object_set[key].indexed_get_content_type()):
# Take the spot
object_set[key] = obj
else:
# Space free, take it
object_set[key] = obj
# Search backend
if 'backend' in options:
s = options['backend']
else:
s = get_search_backend()
# Reset the index
self.stdout.write("Reseting index")
s.reset_index()
# Add types
self.stdout.write("Adding types")
for model in indexed_models:
s.add_type(model)
# Add objects to index
self.stdout.write("Adding objects")
for result in s.add_bulk(object_set.values()):
self.stdout.write(result[0] + ' ' + str(result[1]))
# Refresh index
self.stdout.write("Refreshing index")
s.refresh_index()
| bsd-3-clause |
PowerShellEmpire/Empire | lib/modules/python/collection/osx/native_screenshot_mss.py | 9 | 3876 | import base64
import os
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'NativeScreenshotMSS',
# list of one or more authors for the module
'Author': ['@xorrior'],
# more verbose multi-line description of the module
'Description': ('Takes a screenshot of an OSX desktop using the Python mss module. The python-mss module utilizes ctypes and the CoreFoundation library.'),
# True if the module needs to run in the background
'Background': False,
# File extension to save the file as
'OutputExtension': "png",
# if the module needs administrative privileges
'NeedsAdmin': False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe': False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': []
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent': {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'SavePath': {
# The 'Agent' option is the only one that MUST be in a module
                'Description' : 'Path on the target where the screenshot is temporarily saved.',
'Required' : True,
'Value' : '/tmp/debug.png'
},
'Monitor': {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Monitor to obtain a screenshot. -1 represents all.',
'Required' : True,
'Value' : '-1'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
path = self.mainMenu.installPath + "data/misc/python_modules/mss.zip"
filename = os.path.basename(path).rstrip('.zip')
open_file = open(path, 'rb')
module_data = open_file.read()
open_file.close()
module_data = base64.b64encode(module_data)
script = """
import os
import base64
data = "%s"
def run(data):
rawmodule = base64.b64decode(data)
zf = zipfile.ZipFile(io.BytesIO(rawmodule), "r")
if "mss" not in moduleRepo.keys():
moduleRepo["mss"] = zf
install_hook("mss")
from mss import mss
m = mss()
file = m.shot(mon=%s,output='%s')
raw = open(file, 'rb').read()
run_command('rm -f %%s' %% (file))
print raw
run(data)
""" % (module_data, self.options['Monitor']['Value'], self.options['SavePath']['Value'])
return script
| bsd-3-clause |
linas/atomspace | examples/python/values.py | 3 | 1134 | #! /usr/bin/env python
#
# values.py
#
"""
An example of using values via Python API
"""
from opencog.atomspace import AtomSpace, TruthValue
from opencog.type_constructors import *
from opencog.scheme_wrapper import scheme_eval_v
atomspace = AtomSpace()
set_default_atomspace(atomspace)
a = FloatValue([1.0, 2.0, 3.0])
b = FloatValue([1.0, 2.0, 3.0])
c = FloatValue(1.0)
print('{} == {}: {}'.format(a, b, a == b))
print('{} == {}: {}'.format(a, c, a == c))
featureValue = FloatValue([1.0, 2])
print('new value created: {}'.format(featureValue))
boundingBox = ConceptNode('boundingBox')
featureKey = PredicateNode('features')
boundingBox.set_value(featureKey, featureValue)
print('set value to atom: {}'.format(boundingBox))
value = boundingBox.get_value(featureKey)
print('get value from atom: {}'.format(value))
list = value.to_list()
print('get python list from value: {}'.format(list))
value = scheme_eval_v(atomspace, '(ValueOf (ConceptNode "boundingBox") '
'(PredicateNode "features"))')
value = boundingBox.get_value(featureKey)
print('get value from atom using Scheme program: {}'.format(value))
| agpl-3.0 |
openiitbombayx/edx-platform | common/lib/xmodule/xmodule/partitions/tests/test_partitions.py | 46 | 16043 | """
Test the partitions and partitions service
"""
from unittest import TestCase
from mock import Mock
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from stevedore.extension import Extension, ExtensionManager
from xmodule.partitions.partitions import (
Group, UserPartition, UserPartitionError, NoSuchUserPartitionGroupError, USER_PARTITION_SCHEME_NAMESPACE
)
from xmodule.partitions.partitions_service import PartitionService
class TestGroup(TestCase):
"""Test constructing groups"""
def test_construct(self):
test_id = 10
name = "Grendel"
group = Group(test_id, name)
self.assertEqual(group.id, test_id) # pylint: disable=no-member
self.assertEqual(group.name, name)
def test_string_id(self):
test_id = "10"
name = "Grendel"
group = Group(test_id, name)
self.assertEqual(group.id, 10) # pylint: disable=no-member
def test_to_json(self):
test_id = 10
name = "Grendel"
group = Group(test_id, name)
jsonified = group.to_json()
act_jsonified = {
"id": test_id,
"name": name,
"version": group.VERSION
}
self.assertEqual(jsonified, act_jsonified)
def test_from_json(self):
test_id = 5
name = "Grendel"
jsonified = {
"id": test_id,
"name": name,
"version": Group.VERSION
}
group = Group.from_json(jsonified)
self.assertEqual(group.id, test_id) # pylint: disable=no-member
self.assertEqual(group.name, name)
def test_from_json_broken(self):
test_id = 5
name = "Grendel"
# Bad version
jsonified = {
"id": test_id,
"name": name,
"version": 9001
}
with self.assertRaisesRegexp(TypeError, "has unexpected version"):
Group.from_json(jsonified)
# Missing key "id"
jsonified = {
"name": name,
"version": Group.VERSION
}
with self.assertRaisesRegexp(TypeError, "missing value key 'id'"):
Group.from_json(jsonified)
# Has extra key - should not be a problem
jsonified = {
"id": test_id,
"name": name,
"version": Group.VERSION,
"programmer": "Cale"
}
group = Group.from_json(jsonified)
self.assertNotIn("programmer", group.to_json())
class MockUserPartitionScheme(object):
"""
Mock user partition scheme
"""
def __init__(self, name="mock", current_group=None, **kwargs):
super(MockUserPartitionScheme, self).__init__(**kwargs)
self.name = name
self.current_group = current_group
def get_group_for_user(self, course_id, user, user_partition, assign=True, track_function=None): # pylint: disable=unused-argument
"""
Returns the current group if set, else the first group from the specified user partition.
"""
if self.current_group:
return self.current_group
groups = user_partition.groups
if not groups or len(groups) == 0:
return None
return groups[0]
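# Illustrative behaviour of the mock scheme (hypothetical snippet, mirroring the
# docstring above): with current_group unset, the first group of the partition
# is returned.
#
#   scheme = MockUserPartitionScheme()
#   scheme.get_group_for_user(course_id, user, partition)  # -> partition.groups[0]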
class PartitionTestCase(TestCase):
"""Base class for test cases that require partitions"""
TEST_ID = 0
TEST_NAME = "Mock Partition"
TEST_DESCRIPTION = "for testing purposes"
TEST_GROUPS = [Group(0, 'Group 1'), Group(1, 'Group 2')]
TEST_SCHEME_NAME = "mock"
def setUp(self):
super(PartitionTestCase, self).setUp()
# Set up two user partition schemes: mock and random
self.non_random_scheme = MockUserPartitionScheme(self.TEST_SCHEME_NAME)
self.random_scheme = MockUserPartitionScheme("random")
extensions = [
Extension(
self.non_random_scheme.name, USER_PARTITION_SCHEME_NAMESPACE, self.non_random_scheme, None
),
Extension(
self.random_scheme.name, USER_PARTITION_SCHEME_NAMESPACE, self.random_scheme, None
),
]
UserPartition.scheme_extensions = ExtensionManager.make_test_instance(
extensions, namespace=USER_PARTITION_SCHEME_NAMESPACE
)
# Create a test partition
self.user_partition = UserPartition(
self.TEST_ID,
self.TEST_NAME,
self.TEST_DESCRIPTION,
self.TEST_GROUPS,
extensions[0].plugin
)
# Make sure the names are set on the schemes (which happens normally in code, but may not happen in tests).
self.user_partition.get_scheme(self.non_random_scheme.name)
self.user_partition.get_scheme(self.random_scheme.name)
class TestUserPartition(PartitionTestCase):
"""Test constructing UserPartitions"""
def test_construct(self):
user_partition = UserPartition(
self.TEST_ID, self.TEST_NAME, self.TEST_DESCRIPTION, self.TEST_GROUPS, MockUserPartitionScheme()
)
self.assertEqual(user_partition.id, self.TEST_ID) # pylint: disable=no-member
self.assertEqual(user_partition.name, self.TEST_NAME)
self.assertEqual(user_partition.description, self.TEST_DESCRIPTION) # pylint: disable=no-member
self.assertEqual(user_partition.groups, self.TEST_GROUPS) # pylint: disable=no-member
self.assertEquals(user_partition.scheme.name, self.TEST_SCHEME_NAME) # pylint: disable=no-member
def test_string_id(self):
user_partition = UserPartition(
"70", self.TEST_NAME, self.TEST_DESCRIPTION, self.TEST_GROUPS
)
self.assertEqual(user_partition.id, 70) # pylint: disable=no-member
def test_to_json(self):
jsonified = self.user_partition.to_json()
act_jsonified = {
"id": self.TEST_ID,
"name": self.TEST_NAME,
"description": self.TEST_DESCRIPTION,
"groups": [group.to_json() for group in self.TEST_GROUPS],
"version": self.user_partition.VERSION,
"scheme": self.TEST_SCHEME_NAME
}
self.assertEqual(jsonified, act_jsonified)
def test_from_json(self):
jsonified = {
"id": self.TEST_ID,
"name": self.TEST_NAME,
"description": self.TEST_DESCRIPTION,
"groups": [group.to_json() for group in self.TEST_GROUPS],
"version": UserPartition.VERSION,
"scheme": "mock",
}
user_partition = UserPartition.from_json(jsonified)
self.assertEqual(user_partition.id, self.TEST_ID) # pylint: disable=no-member
self.assertEqual(user_partition.name, self.TEST_NAME) # pylint: disable=no-member
self.assertEqual(user_partition.description, self.TEST_DESCRIPTION) # pylint: disable=no-member
for act_group in user_partition.groups: # pylint: disable=no-member
self.assertIn(act_group.id, [0, 1])
exp_group = self.TEST_GROUPS[act_group.id]
self.assertEqual(exp_group.id, act_group.id)
self.assertEqual(exp_group.name, act_group.name)
def test_version_upgrade(self):
# Version 1 partitions did not have a scheme specified
jsonified = {
"id": self.TEST_ID,
"name": self.TEST_NAME,
"description": self.TEST_DESCRIPTION,
"groups": [group.to_json() for group in self.TEST_GROUPS],
"version": 1,
}
user_partition = UserPartition.from_json(jsonified)
self.assertEqual(user_partition.scheme.name, "random") # pylint: disable=no-member
def test_from_json_broken(self):
# Missing field
jsonified = {
"name": self.TEST_NAME,
"description": self.TEST_DESCRIPTION,
"groups": [group.to_json() for group in self.TEST_GROUPS],
"version": UserPartition.VERSION,
"scheme": self.TEST_SCHEME_NAME,
}
with self.assertRaisesRegexp(TypeError, "missing value key 'id'"):
UserPartition.from_json(jsonified)
# Missing scheme
jsonified = {
'id': self.TEST_ID,
"name": self.TEST_NAME,
"description": self.TEST_DESCRIPTION,
"groups": [group.to_json() for group in self.TEST_GROUPS],
"version": UserPartition.VERSION,
}
with self.assertRaisesRegexp(TypeError, "missing value key 'scheme'"):
UserPartition.from_json(jsonified)
# Invalid scheme
jsonified = {
'id': self.TEST_ID,
"name": self.TEST_NAME,
"description": self.TEST_DESCRIPTION,
"groups": [group.to_json() for group in self.TEST_GROUPS],
"version": UserPartition.VERSION,
"scheme": "no_such_scheme",
}
with self.assertRaisesRegexp(UserPartitionError, "Unrecognized scheme"):
UserPartition.from_json(jsonified)
        # Wrong version (it's over 9000!)
jsonified = {
'id': self.TEST_ID,
"name": self.TEST_NAME,
"description": self.TEST_DESCRIPTION,
"groups": [group.to_json() for group in self.TEST_GROUPS],
"version": 9001,
"scheme": self.TEST_SCHEME_NAME,
}
with self.assertRaisesRegexp(TypeError, "has unexpected version"):
UserPartition.from_json(jsonified)
# Has extra key - should not be a problem
jsonified = {
'id': self.TEST_ID,
"name": self.TEST_NAME,
"description": self.TEST_DESCRIPTION,
"groups": [group.to_json() for group in self.TEST_GROUPS],
"version": UserPartition.VERSION,
"scheme": "mock",
"programmer": "Cale",
}
user_partition = UserPartition.from_json(jsonified)
self.assertNotIn("programmer", user_partition.to_json())
def test_get_group(self):
"""
UserPartition.get_group correctly returns the group referenced by the
`group_id` parameter, or raises NoSuchUserPartitionGroupError when
the lookup fails.
"""
self.assertEqual(
self.user_partition.get_group(self.TEST_GROUPS[0].id), # pylint: disable=no-member
self.TEST_GROUPS[0]
)
self.assertEqual(
self.user_partition.get_group(self.TEST_GROUPS[1].id), # pylint: disable=no-member
self.TEST_GROUPS[1]
)
with self.assertRaises(NoSuchUserPartitionGroupError):
self.user_partition.get_group(3)
class StaticPartitionService(PartitionService):
"""
Mock PartitionService for testing.
"""
def __init__(self, partitions, **kwargs):
super(StaticPartitionService, self).__init__(**kwargs)
self._partitions = partitions
@property
def course_partitions(self):
return self._partitions
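# Minimal usage sketch (argument values are illustrative; _create_service in the
# test case below builds the real instances):
#
#   svc = StaticPartitionService([partition], user=Mock(), course_id=course_key,
#                                track_function=Mock(), cache={})
#   svc.get_group(partition)  # group chosen by the partition's scheme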
class TestPartitionService(PartitionTestCase):
"""
Test getting a user's group out of a partition
"""
def setUp(self):
super(TestPartitionService, self).setUp()
self.course = Mock(id=SlashSeparatedCourseKey('org_0', 'course_0', 'run_0'))
self.partition_service = self._create_service("ma")
def _create_service(self, username, cache=None):
"""Convenience method to generate a StaticPartitionService for a user."""
# Derive a "user_id" from the username, just so we don't have to add an
# extra param to this method. Just has to be unique per user.
user_id = abs(hash(username))
return StaticPartitionService(
[self.user_partition],
user=Mock(
username=username, email='{}@edx.org'.format(username), is_staff=False, is_active=True, id=user_id
),
course_id=self.course.id,
track_function=Mock(),
cache=cache
)
def test_get_user_group_id_for_partition(self):
# assign the first group to be returned
user_partition_id = self.user_partition.id # pylint: disable=no-member
groups = self.user_partition.groups # pylint: disable=no-member
self.user_partition.scheme.current_group = groups[0] # pylint: disable=no-member
# get a group assigned to the user
group1_id = self.partition_service.get_user_group_id_for_partition(user_partition_id)
self.assertEqual(group1_id, groups[0].id) # pylint: disable=no-member
# switch to the second group and verify that it is returned for the user
self.user_partition.scheme.current_group = groups[1] # pylint: disable=no-member
group2_id = self.partition_service.get_user_group_id_for_partition(user_partition_id)
self.assertEqual(group2_id, groups[1].id) # pylint: disable=no-member
def test_caching(self):
username = "psvc_cache_user"
user_partition_id = self.user_partition.id # pylint: disable=no-member
shared_cache = {}
# Two StaticPartitionService objects that share the same cache:
ps_shared_cache_1 = self._create_service(username, shared_cache)
ps_shared_cache_2 = self._create_service(username, shared_cache)
# A StaticPartitionService with its own local cache
ps_diff_cache = self._create_service(username, {})
# A StaticPartitionService that never uses caching.
ps_uncached = self._create_service(username)
# Set the group we expect users to be placed into
first_group = self.user_partition.groups[0]
self.user_partition.scheme.current_group = first_group # pylint: disable=no-member
# Make sure our partition services all return the right thing, but skip
# ps_shared_cache_2 so we can see if its cache got updated anyway.
for part_svc in [ps_shared_cache_1, ps_diff_cache, ps_uncached]:
self.assertEqual(
first_group.id,
part_svc.get_user_group_id_for_partition(user_partition_id)
)
# Now select a new target group
second_group = self.user_partition.groups[1]
self.user_partition.scheme.current_group = second_group
# Both of the shared cache entries should return the old value, even
# ps_shared_cache_2, which was never asked for the value the first time
# Likewise, our separately cached piece should return the original answer
for part_svc in [ps_shared_cache_1, ps_shared_cache_2, ps_diff_cache]:
self.assertEqual(
first_group.id,
part_svc.get_user_group_id_for_partition(user_partition_id)
)
# Our uncached service should be accurate.
self.assertEqual(
second_group.id,
ps_uncached.get_user_group_id_for_partition(user_partition_id)
)
# And a newly created service should see the right thing
ps_new_cache = self._create_service(username, {})
self.assertEqual(
second_group.id,
ps_new_cache.get_user_group_id_for_partition(user_partition_id)
)
def test_get_group(self):
"""
Test that a partition group is assigned to a user.
"""
groups = self.user_partition.groups # pylint: disable=no-member
# assign first group and verify that it is returned for the user
self.user_partition.scheme.current_group = groups[0] # pylint: disable=no-member
group1 = self.partition_service.get_group(self.user_partition)
self.assertEqual(group1, groups[0]) # pylint: disable=no-member
# switch to the second group and verify that it is returned for the user
self.user_partition.scheme.current_group = groups[1] # pylint: disable=no-member
group2 = self.partition_service.get_group(self.user_partition)
self.assertEqual(group2, groups[1]) # pylint: disable=no-member
| agpl-3.0 |