Dataset columns (name: type, observed range):

- repo_name: string, lengths 5-92
- path: string, lengths 4-221
- copies: string, 19 classes
- size: string, lengths 4-6
- content: string, lengths 766-896k
- license: string, 15 classes
- hash: int64, -9,223,277,421,539,062,000 to 9,223,102,107B
- line_mean: float64, 6.51-99.9
- line_max: int64, 32-997
- alpha_frac: float64, 0.25-0.96
- autogenerated: bool, 1 class
- ratio: float64, 1.5-13.6
- config_test: bool, 2 classes
- has_no_keywords: bool, 2 classes
- few_assignments: bool, 1 class

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
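The records that follow use this schema, one pipe-separated row per source file with the file's text in the content column. As a minimal illustration of working with rows of this shape, the sketch below filters them by the columns above; the `rows` variable and the loading step are assumptions and are not part of this dump:

# 'rows' is assumed to be an iterable of dicts keyed by the column names above.
hand_written = [r for r in rows if not r["autogenerated"] and not r["config_test"]]
large_sources = [r["content"] for r in hand_written if int(r["size"]) > 5000]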
berserkerbernhard/Lidskjalv | code/networkmonitor/modules/groups/grouplistmenu.py | 1 | 2968 |
import os
import time
import dialog
from modules.sitegrouphosttools import SiteGroupHostTools
from modules.groups.group import Group
from modules.groups.groupform import GroupForm
from modules.groups.groupmenu import GroupMenu
from modules.sitegrouphosttools import get_group_members
class GroupListMenu(SiteGroupHostTools):
def __init__(self):
self.d = dialog.Dialog(dialog="dialog")
self.storage_path = os.path.expanduser("~/LidskjalvData")
self.g = Group()
self.gf = GroupForm()
self.gm = GroupMenu()
self.sght = SiteGroupHostTools()
def show_menu(self, site):
while True:
menu = self.build_menu(site)
sz = os.get_terminal_size()
s = "Select a group or action in site '%s'" % site
code, tag = self.d.menu(s,
title="Site: '%s' - Groups menu" % site,
height=sz.lines - 5,
width=sz.columns - 8,
menu_height=sz.lines - 15,
backtitle="Lidskjalv",
choices=menu)
if code == self.d.OK:
r = self.process_menu(site, tag)
if r is None:
break
else:
break
def build_menu(self, site):
sp = self.storage_path
if not self.g.group_exist(site, 'Cisco Switches'):
self.g.create_group(site,
'Cisco Switches',
"",
int(time.time()),
[])
if not self.g.group_exist(site, 'MAC exempt'):
self.g.create_group(site,
'MAC exempt',
"",
int(time.time()),
[])
if not self.g.group_exist(site, 'Nagios'):
self.g.create_group(site,
'Nagios',
"",
int(time.time()),
[])
listofgroups = self.g.list_of_groups_by_name(site)
menu = []
menu.append(["AG", "Add group"])
menu.append(["Q", "Quit"])
menu.append(["", " "])
for group in listofgroups:
memberslist = get_group_members(sp, site, group)
ml = len(memberslist)
gd = self.g.get_group_description(site, group)
d = "%s Member(s) - %s" % (str(ml).rjust(3), gd)
menu.append([group, d])
return menu
def process_menu(self, site, tag):
if tag == "Q":
return None
if tag == "AG":
self.gf.group_form(site, None)
if tag in self.g.list_of_groups(site):
self.gm.show_menu(site, tag)
return True
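# Example usage (illustrative sketch, not part of the original module; the
# site name below is made up):
#
#     GroupListMenu().show_menu("main-site")
#
# show_menu() keeps redrawing the dialog menu for that site until the user
# picks Quit, auto-creating the 'Cisco Switches', 'MAC exempt' and 'Nagios'
# groups on first use via build_menu().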
| gpl-3.0 | -3,861,360,141,004,099,600 | 36.56962 | 76 | 0.453504 | false | 4.332847 | false | false | false |
bielawb/PSConfAsia17-Linux | Scripts/httpsWinRM.py | 1 | 2125 |
#!/usr/bin/env python
# coding: utf-8
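# English glossary for the Polish strings used below: 'Sposób użycia' = 'Usage',
# 'polecenie' = 'command', 'Podaj hasło' = 'Enter password',
# 'Polecenie zwróciło kod 0' = 'the command returned exit code 0',
# 'PowerShell zwrócił błąd' = 'PowerShell returned an error', 'Wyjątek' = 'exception',
# 'Błąd wskazuje na konieczność zalogowania' = 'the error indicates you need to log in'.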
import getpass
from re import search
from subprocess import Popen, PIPE
from winrm import Session
from sys import exit, argv
if len(argv) < 2 :
exit('Sposób użycia: %s <polecenie>' % argv[0])
polecenie = " ".join(argv[1:])
exitCode = 0
class PowerShellError(Exception):
pass
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def uruchom_ps(polecenie):
sesja = Session(
'https://jumpbox.monad.net:5986',
auth = (None, None),
transport = 'kerberos',
kerberos_delegation = True,
server_cert_validation = 'ignore'
)
try:
wynik = sesja.run_ps(polecenie)
print wynik.std_out
if wynik.status_code > 0:
raise PowerShellError(wynik.std_err)
else:
print "%sPolecenie zwróciło kod 0 %s" % (bcolors.OKGREEN, bcolors.ENDC)
except:
raise
def zaloguj():
login = "%[email protected]" % getpass.getuser()
kinit = Popen(['kinit', login, '-l', '1h', '-f'], stdin = PIPE, stdout = PIPE, stderr = PIPE)
kinit.stdin.write('%s\n' % getpass.getpass('Podaj hasło: '))
kinit.wait()
try:
uruchom_ps(polecenie)
except PowerShellError as pse:
print "PowerShell zwrócił błąd:\n%s%s%s" % (bcolors.FAIL, pse, bcolors.ENDC)
exitCode = 1
except Exception as e:
print "Wyjątek:\n%s%s%s" % (bcolors.FAIL, e, bcolors.ENDC)
if search('No Kerberos credentials available', e.message):
print "Błąd wskazuje na konieczność zalogowania..."
try:
zaloguj()
uruchom_ps(polecenie)
except Exception as e:
print "%sNie udało się uruchomić polecenia '%s'. Prawdopodobnie podano nieprawidłowe hasło, bądź użytkownik nie ma odpowiednich uprawnień." % (bcolors.FAIL, polecenie)
print "Błąd: %s %s" % (e, bcolors.ENDC)
exitCode = 2
else:
exitCode = 3
finally:
exit(exitCode)
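# Minimal usage sketch (not part of the original script; the PowerShell
# command is illustrative):
#
#     ./httpsWinRM.py Get-Process
#
# The script joins argv[1:] into one PowerShell command, runs it on
# https://jumpbox.monad.net:5986 over WinRM with Kerberos auth, and falls back
# to kinit (prompting for a password) when no valid ticket is available.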
| mit | 7,049,195,507,617,604,000 | 28.577465 | 179 | 0.603333 | false | 2.720207 | false | false | false |
adityagilra/2015_spiking_population_response | ExcInhNetflex.py | 1 | 5096 |
# -*- coding: utf-8 -*-
"""
Spiking neural net of LIF/SRM neurons with AI firing
written by Aditya Gilra (c) July 2015.
"""
from brian2 import * # also does 'from pylab import *'
from embedded_consts import *
import random
## Cannot make this network a Class,
## since brian standalone mode wants all Brian objects to be in the same scope.
###### neuronal constants
#nrn_type = 'LIF' # Leaky Integrate-and-Fire
#nrn_type = 'SRM' # Spike Response Model
nrn_type = 'SRM0' # Spike Response Model exact renewal
R = 1.0e8*ohm
tausynE = 100.0*ms # synaptic tau exc->exc
tausyn = 10.0*ms # synaptic tau for all else
tau0 = 20.0*ms # membrane tau
tau0SI = tau0/second
noise = 20.0*mV
uth = 10.0*mV
uth_base = 0.0*mV
refrT = 0.5*ms
###### network constants
C = 100 # Number of incoming connections on each neuron (exc or inh)
fC = fexc # fraction fC incoming connections are exc, rest inhibitory
excC = int(fC*C) # number of exc incoming connections
if nrn_type == "LIF":
I0base = 10.5*mV/R # base current to all neurons at all times
J = 0.8*mV/R*(10*ms/tausynE)
else:
I0base = 0.0*mV/R # base current to all neurons at all times
J = 0.8*mV/R*(10*ms/tausynE)
# exc strength is J (/R as we multiply by R in eqn)
# Critical J (for LIF network with delta synapses) is
# ~ 0.45e-3 V in paper for N = 10000, C = 1000
# Note individual rate fluctuations
# for J = 0.2e-3 V vs J = 0.8e-3 V
# For SRM/SRM0, synaptic filtering but no u integration
# In Ostojic 2014 / Brunel 2000, u integration,
# but no synaptic filtering.
# Both are equivalent if tausyn and membrane tau are same.
# But LIF with synaptic filtering is different
g = 5.0*tausynE/tausyn # if all exc syns have tausynE
#g = 5.0*(tausynE/tausyn)**2 # if only exc->exc syns have tausynE, but exc->inh is tausyn
# -gJ is the inh strength. For exc-inh balance g >~ f(1-f)=4
# a tausynE/tausyn factor is also needed to compensate tau-s
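# Worked example of the weights above (illustrative only, derived from the
# constants in this file): with tausynE = 100 ms and tausyn = 10 ms,
#   J = 0.8 mV/R * (10 ms / 100 ms) = 0.08 mV/R
#   g = 5.0 * (100 ms / 10 ms)      = 50
# so each inhibitory synapse contributes -g*J = -4 mV/R, i.e. 50 times the
# excitatory weight, compensating for the slower excitatory synaptic tau.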
# ###########################################
# Brian network creation
# ###########################################
# reset eta acts as a threshold increase
if nrn_type == "LIF": # LIF
model_eqns = """
du/dt = 1/tau0*(-u + (Ibase + KE + K) * R + deltaItimed( t, i )) : volt
Ibase : amp
dKE/dt = -KE/tausynE : amp
dK/dt = -K/tausyn : amp
"""
threshold_eqns = "u>=uth"
reset_eqns = "u=0*mV"
else: # SRM
model_eqns = """
u = (Ibase + KE + K) * R + deltaItimed( t, i ): volt
Ibase : amp
deta/dt = -eta/tau0 : volt
dKE/dt = -KE/tausynE : amp
dK/dt = -K/tausyn : amp
"""
threshold_eqns = "rand()<=1.0/tau0*exp((u-(eta+uth_base))/noise)*tstep"
if nrn_type == "SRM0": # SRM0 (exact renewal process)
reset_eqns = "eta=uth"
else: # usual SRM (approx as quasi-renewal process)
reset_eqns = "eta+=uth"
# the hazard function rho is the firing rate,
# in time dt the probability to fire is rho*dt.
# noise below is only the output noise,
# input spiking noise comes from spiking during the simulation
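# Worked example of the hazard above (illustrative; tstep comes from the star
# imports and is assumed here to be the common Brian default of 0.1 ms):
#   p_fire = tstep/tau0 * exp((u - (eta + uth_base))/noise)
# e.g. with u - (eta + uth_base) = noise = 20 mV this gives
#   p_fire = 0.1/20 * e ~ 0.0136 per time step.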
Nrns = NeuronGroup(Nbig, model_eqns, \
threshold=threshold_eqns,\
reset=reset_eqns,
refractory = refrT)
Nrns.Ibase = I0base # constant input to all inputs
# there is also transient input above
if nrn_type == 'LIF':
Nrns.u = uniform(0.0,uth/volt,size=Nbig)*volt
# for LIF, u is distibuted
else:
Nrns.eta = uth # initially, all SRM neurons are as if just reset
# brain2 code to make, connect and weight the background synapses
con = Synapses(Nrns,Nrns,'''w : amp
useSynE : 1''',\
pre='KE += useSynE*w; K += (1-useSynE)*w')
## Connections from some Exc/Inh neurons to each neuron
random.seed(100) # set seed for reproducibility of simulations
seed(100)
conn_i = []
conn_j = []
for jidx in range(0,Nbig):
## draw excC number of neuron indices out of NmaxExc neurons
preIdxsE = random.sample(range(NEbig),excC)
## draw inhC=C-excC number of neuron indices out of inhibitory neurons
preIdxsI = random.sample(range(NEbig,Nbig),C-excC)
## connect these presynaptically to i-th post-synaptic neuron
## choose the synapses object based on whether post-syn nrn is exc or inh
conn_i += preIdxsE
conn_j += [jidx]*excC
conn_i += preIdxsI
conn_j += [jidx]*(C-excC)
con.connect(conn_i,conn_j)
con.delay = syndelay
con.useSynE['i<NEbig'] = 1.0
con.w['i<NEbig'] = J
con.w['i>=NEbig'] = -g*J
#con.w = -g*J # kind of winner take all, gives switching
| gpl-3.0 | 5,565,042,228,597,522,000 | 39.768 | 89 | 0.562991 | false | 3.132145 | false | false | false |
HazenBabcock/brigl | test/driver.py | 1 | 2249 |
#!/usr/bin/env python
"""
This returns an automated web browser to use for automated testing. It also
includes some utility functions.
https://www.seleniumhq.org/
http://selenium-python.readthedocs.io/
"""
import time
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# Web browser interface.
def getDriver():
"""
This is configured to use Chrome, change as desired.
"""
desired = DesiredCapabilities.CHROME
desired['loggingPrefs'] = {'browser' : 'ALL'}
options = webdriver.ChromeOptions()
options.add_argument("--window-size=1000,1000")
driver = webdriver.Chrome(executable_path="./chromedriver",
desired_capabilities = desired,
chrome_options = options)
return driver
# Utility functions.
class BRIGLTestException(Exception):
pass
def noSevereErrors(driver, ignore_404 = []):
"""
ignore_404 - A list of files for which it is okay if they are missing.
"""
ignore_404.append("favicon.ico")
log_data = driver.get_log('browser')
severe_errors = parseLog(log_data)
if (len(severe_errors) > 0):
is_severe = False
for s_err in severe_errors:
is_ignored = False
for i_404 in ignore_404:
if (i_404 in s_err['message']):
is_ignored = True
break
if not is_ignored:
is_severe = True
break
if is_severe:
print("Severe error(s) detected:")
for elt in severe_errors:
print(elt)
raise BRIGLTestException("Severe error(s) detected.")
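# Usage sketch (illustrative; the URL and file names are made up):
#
#     driver = getDriver()
#     driver.get("http://localhost:8000/some_test_page.html")
#     noSevereErrors(driver, ignore_404 = ["optional_texture.png"])
#     driver.close()
#
# noSevereErrors() raises BRIGLTestException if the browser console contains
# any SEVERE message that is not a 404 for an ignored file.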
def parseLog(log_data, level = 'SEVERE'):
"""
Return only those messages with the specified level.
"""
temp = []
for elt in log_data:
if (elt['level'] == level):
temp.append(elt)
return temp
def pprintLog(log_data):
"""
Pretty print log messages.
"""
for elt in log_data:
print(elt)
if (__name__ == "__main__"):
driver = getDriver()
print("version is", driver.capabilities['version'])
driver.close()
| gpl-3.0 | -7,560,753,491,243,775,000 | 24.850575 | 80 | 0.582926 | false | 4.044964 | false | false | false |
ctogle/dilapidator | test/geometry/quat_tests.py | 1 | 6809 |
from dilap.geometry.quat import quat
from dilap.geometry.vec3 import vec3
import dilap.geometry.tools as dpr
import matplotlib.pyplot as plt
import unittest,numpy,math
import pdb
#python3 -m unittest discover -v ./ "*tests.py"
class test_quat(unittest.TestCase):
def test_av(self):
a = 3*dpr.PI4
u1,u2,u3 = vec3(1,0,0),vec3(0,-1,0),vec3(0,0,1)
q1,q2 = quat(0,0,0,0).av(a,u1),quat(0,0,0,0).av(a,u2)
q3,q4 = quat(0,0,0,0).av(-a,u3),quat(0,0,0,0).av(-a,u2)
self.assertTrue(q1.w > 0.1)
self.assertTrue(q1.x > 0.1)
self.assertTrue(dpr.isnear(q1.y,0))
self.assertTrue(dpr.isnear(q1.z,0))
self.assertTrue(q2.w > 0.1)
self.assertTrue(dpr.isnear(q2.x,0))
self.assertTrue(q2.y < -0.1)
self.assertTrue(dpr.isnear(q2.z,0))
self.assertTrue(q3.w > 0.1)
self.assertTrue(dpr.isnear(q3.x,0))
self.assertTrue(dpr.isnear(q3.y,0))
self.assertTrue(q3.z < -0.1)
self.assertFalse(q2 == q4.cp().flp())
self.assertTrue(q2 == q4.cnj())
def test_uu(self):
u1,u2,u3 = vec3(1,0,0),vec3(0,-1,0),vec3(0,0,1)
q1,q2 = quat(0,0,0,0).uu(u1,u2),quat(0,0,0,0).uu(u1,u3)
q3,q4 = quat(0,0,0,0).uu(u2,u3),quat(0,0,0,0).uu(u3,u2)
self.assertTrue(q1.w > 0.1)
self.assertTrue(dpr.isnear(q1.x,0))
self.assertTrue(dpr.isnear(q1.y,0))
self.assertTrue(q1.z < -0.1)
self.assertTrue(q2.w > 0.1)
self.assertTrue(dpr.isnear(q2.x,0))
self.assertTrue(q2.y < -0.1)
self.assertTrue(dpr.isnear(q2.z,0))
self.assertTrue(q3 == q4.cnj())
def test_toxy(self):
q1 = quat(0,0,0,0).toxy(vec3(0,0,-1))
#print('toxy\v\t',q1)
self.assertEqual(q1.w,0)
self.assertEqual(q1.x,1)
def test_cp(self):
q1 = quat(1,2,3,4)
self.assertTrue(q1 is q1)
self.assertFalse(q1 is q1.cp())
self.assertTrue(q1 == q1.cp())
#def test_cpf(self):
def test_isnear(self):
q1,q2 = quat(1,1,1,0),quat(1,1,1,0.1)
q3,q4 = quat(1,1,1,1),quat(1,1.000001,1,1)
self.assertEqual(q1.isnear(q1),1)
self.assertEqual(q3.isnear(q3),1)
self.assertEqual(q1.isnear(q2),0)
self.assertEqual(q2.isnear(q1),0)
self.assertEqual(q1.isnear(q3),0)
self.assertEqual(q2.isnear(q3),0)
self.assertEqual(q2.isnear(q4),0)
self.assertEqual(q3.isnear(q4),1)
def test_mag2(self):
q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11)
self.assertEqual(dpr.isnear(q1.mag2(),1),1)
self.assertEqual(dpr.isnear(q2.mag2(),3),1)
self.assertEqual(dpr.isnear(q3.mag2(),150),1)
def test_mag(self):
q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11)
self.assertEqual(dpr.isnear(q1.mag(),1),1)
self.assertEqual(dpr.isnear(q2.mag(),math.sqrt(3)),1)
self.assertEqual(dpr.isnear(q3.mag(),math.sqrt(150)),1)
def test_nrm(self):
q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11)
self.assertEqual(dpr.isnear(q1.cp().nrm().mag(),1),1)
self.assertEqual(dpr.isnear(q2.cp().nrm().mag(),1),1)
self.assertEqual(dpr.isnear(q3.cp().nrm().mag(),1),1)
self.assertTrue(q1.cp().nrm().mag() == q1.mag())
self.assertTrue(q1.nrm() is q1)
self.assertFalse(q2.cp().nrm().mag() == q2.mag())
self.assertTrue(q2.nrm() is q2)
self.assertFalse(q3.cp().nrm().mag() == q3.mag())
self.assertTrue(q3.nrm() is q3)
def test_flp(self):
q1,q2 = quat(1,0,0,0),quat(1,1,1,0)
q3,q4 = quat(0,2,5,11),quat(-1,1,1,0)
self.assertFalse(q1.cp().flp() == q1)
self.assertFalse(q2.cp().flp() == q2)
self.assertTrue(q3.cp().flp() == q3)
self.assertFalse(q4.cp().flp() == q4)
self.assertTrue(q2.cp().flp() == q4)
self.assertTrue(q1.flp() is q1)
self.assertTrue(q2.flp() is q2)
self.assertTrue(q3.flp() is q3)
self.assertTrue(q4.flp() is q4)
def test_uscl(self):
q1,q2 = quat(1,0,0,0),quat(1,1,1,0)
q3,q4 = quat(0,2,5,11),quat(0,1,2.5,5.5)
self.assertTrue(q1.cp().uscl(1) == q1)
self.assertFalse(q1.cp().uscl(3) == q1)
self.assertTrue(q2.cp().uscl(1) == q2)
self.assertFalse(q2.cp().uscl(3) == q2)
self.assertTrue(q3.cp().uscl(0.5) == q4)
self.assertTrue(q1.uscl(1) is q1)
def test_cnj(self):
q1,q2 = quat(1,0,0,0),quat(1,1,1,0)
q3,q4 = quat(-1,2,5,11),quat(1,-2,-5,-11)
self.assertTrue(q1.cp().cnj() == q1)
self.assertTrue(q1.cnj() is q1)
self.assertFalse(q2.cp().cnj() == q2)
self.assertFalse(q3.cnj() == q4)
def test_inv(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.threePI4,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a2,v2)
self.assertEqual(q1.cp().cnj(),q1.inv())
self.assertEqual(q2.cp().cnj(),q2.inv())
self.assertFalse(q1.inv() is q1)
def test_add(self):
q1,q2 = quat(0.5,0.3,-2.2,3),quat(1,1.1,2,-0.5)
q3 = quat(1.5,1.4,-0.2,2.5)
self.assertEqual(q1.add(q2),q3)
self.assertFalse(q1.add(q2) is q1)
def test_sub(self):
q1,q2 = quat(0.5,0.3,-2.2,3),quat(1,1.1,2,-0.5)
q3 = quat(-0.5,-0.8,-4.2,3.5)
self.assertEqual(q1.sub(q2),q3)
self.assertFalse(q1.sub(q2) is q1)
def test_mul(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.threePI4,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a2,v1)
q3 = quat(0,1,0,0).av(a1+a2,v2)
self.assertTrue(q1.mul(q2) == q3)
self.assertFalse(q1.mul(q2) is q1)
def test_rot(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.PI2,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a1,v1)
q3 = quat(0,1,0,0).av(a2,v2)
self.assertTrue(q1.rot(q2) == q3)
self.assertTrue(q1.rot(q2) is q1)
#def test_rotps(self):
def test_dot(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.PI2,vec3(0,1,0)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a1,v1)
q3 = quat(0,1,0,0).av(a2,v2)
q4 = quat(0,1,0,0).av(0,v1)
self.assertTrue(dpr.isnear(q1.dot(q2),q1.mag2()))
self.assertFalse(dpr.isnear(q1.dot(q3),0))
self.assertTrue(dpr.isnear(q3.dot(q4),q3.w))
def test_slerp(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.PI,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(0,v1),quat(1,1,1,0).av(a1,v1)
q3 = quat(0,1,0,0).av(a2,v2)
self.assertEqual(q1.slerp(q3,0.25),q2)
self.assertFalse(q1.slerp(q3,0.25) is q1)
if __name__ == '__main__':
unittest.main()
| mit | -8,675,543,976,464,371,000 | 33.21608 | 63 | 0.545455 | false | 2.185875 | true | false | false |
MrSwiss/SpockBot | spock/plugins/core/auth.py | 1 | 5050 |
"""
Provides authorization functions for Mojang's login and session servers
"""
import hashlib
import json
# This is for python2 compatibility
try:
import urllib.request as request
from urllib.error import URLError
except ImportError:
import urllib2 as request
from urllib2 import URLError
import logging
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from spock.mcp import yggdrasil
from spock.plugins.base import PluginBase
from spock.utils import pl_announce
logger = logging.getLogger('spock')
backend = default_backend()
# This function courtesy of barneygale
def java_hex_digest(digest):
d = int(digest.hexdigest(), 16)
if d >> 39 * 4 & 0x8:
d = "-%x" % ((-d) & (2 ** (40 * 4) - 1))
else:
d = "%x" % d
return d
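# Usage sketch (illustrative; mirrors how handle_encryption_request() below
# builds the session hash, with made-up input names):
#
#     digest = hashlib.sha1()
#     digest.update(server_id_bytes + shared_secret + public_key_bytes)
#     server_hash = java_hex_digest(digest)
#
# Digests whose top bit is set come back as a negative, Java-style hex string
# with a leading '-'.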
class AuthCore(object):
def __init__(self, authenticated, event):
self.event = event
self.authenticated = authenticated
self.username = None
self.selected_profile = None
self.shared_secret = None
self.ygg = yggdrasil.YggAuth()
def start_session(self, username, password=''):
rep = {}
if self.authenticated:
logger.info("AUTHCORE: Attempting login with username: %s",
username)
rep = self.ygg.authenticate(username, password)
if rep is None or 'error' in rep:
logger.error('AUTHCORE: Login Unsuccessful, Response: %s', rep)
self.event.emit('AUTH_ERR')
return rep
if 'selectedProfile' in rep:
self.selected_profile = rep['selectedProfile']
self.username = rep['selectedProfile']['name']
logger.info("AUTHCORE: Logged in as: %s", self.username)
logger.info("AUTHCORE: Selected Profile: %s",
self.selected_profile)
else:
self.username = username
else:
self.username = username
return rep
def gen_shared_secret(self):
self.shared_secret = os.urandom(16)
return self.shared_secret
@pl_announce('Auth')
class AuthPlugin(PluginBase):
requires = ('Event', 'Net')
defaults = {
'authenticated': True,
'auth_quit': True,
'sess_quit': True,
}
events = {
'AUTH_ERR': 'handle_auth_error',
'SESS_ERR': 'handle_session_error',
'LOGIN<Encryption Request': 'handle_encryption_request',
}
def __init__(self, ploader, settings):
super(AuthPlugin, self).__init__(ploader, settings)
self.authenticated = self.settings['authenticated']
self.auth_quit = self.settings['auth_quit']
self.sess_quit = self.settings['sess_quit']
self.auth = AuthCore(self.authenticated, self.event)
self.auth.gen_shared_secret()
ploader.provides('Auth', self.auth)
def handle_auth_error(self, name, data):
if self.auth_quit:
self.event.kill()
def handle_session_error(self, name, data):
if self.sess_quit:
self.event.kill()
# Encryption Key Request - Request for client to start encryption
def handle_encryption_request(self, name, packet):
pubkey_raw = packet.data['public_key']
if self.authenticated:
serverid = java_hex_digest(hashlib.sha1(
packet.data['server_id'].encode('ascii') +
self.auth.shared_secret +
pubkey_raw
))
logger.info(
"AUTHPLUGIN: Attempting to authenticate session with "
"sessionserver.mojang.com")
url = "https://sessionserver.mojang.com/session/minecraft/join"
data = json.dumps({
'accessToken': self.auth.ygg.access_token,
'selectedProfile': self.auth.selected_profile,
'serverId': serverid,
}).encode('utf-8')
headers = {'Content-Type': 'application/json'}
req = request.Request(url, data, headers)
try:
rep = request.urlopen(req).read().decode('ascii')
except URLError:
rep = 'Couldn\'t connect to sessionserver.mojang.com'
if rep != "":
logger.warning("AUTHPLUGIN: %s", rep)
self.event.emit('SESS_ERR')
else:
logger.info("AUTHPLUGIN: Session authentication successful")
pubkey = serialization.load_der_public_key(pubkey_raw, backend)
def encrypt(data):
return pubkey.encrypt(data, padding.PKCS1v15())
self.net.push_packet(
'LOGIN>Encryption Response',
{
'shared_secret': encrypt(self.auth.shared_secret),
'verify_token': encrypt(packet.data['verify_token']),
}
)
self.net.enable_crypto(self.auth.shared_secret)
| mit | -2,816,174,595,662,378,500 | 33.121622 | 79 | 0.588515 | false | 4.129191 | false | false | false |
britcey/ansible | lib/ansible/modules/network/junos/junos_config.py | 1 | 12228 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: junos_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage configuration on devices running Juniper JUNOS
description:
- This module provides an implementation for working with the active
configuration running on Juniper JUNOS devices. It provides a set
of arguments for loading configuration, performing rollback operations
and zeroing the active configuration on the device.
extends_documentation_fragment: junos
options:
lines:
description:
- This argument takes a list of C(set) or C(delete) configuration
lines to push into the remote device. Each line must start with
either C(set) or C(delete). This argument is mutually exclusive
with the I(src) argument.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) argument.
required: false
default: null
version_added: "2.2"
src_format:
description:
- The I(src_format) argument specifies the format of the configuration
        found in I(src). If the I(src_format) argument is not provided,
the module will attempt to determine the format of the configuration
file specified in I(src).
required: false
default: null
choices: ['xml', 'set', 'text', 'json']
version_added: "2.2"
rollback:
description:
- The C(rollback) argument instructs the module to rollback the
current configuration to the identifier specified in the
argument. If the specified rollback identifier does not
exist on the remote device, the module will fail. To rollback
to the most recent commit, set the C(rollback) argument to 0.
required: false
default: null
zeroize:
description:
- The C(zeroize) argument is used to completely sanitize the
remote device configuration back to initial defaults. This
argument will effectively remove all current configuration
statements on the remote device.
required: false
default: null
confirm:
description:
- The C(confirm) argument will configure a time out value for
the commit to be confirmed before it is automatically
rolled back. If the C(confirm) argument is set to False, this
argument is silently ignored. If the value for this argument
is set to 0, the commit is confirmed immediately.
required: false
default: 0
comment:
description:
- The C(comment) argument specifies a text string to be used
when committing the configuration. If the C(confirm) argument
is set to False, this argument is silently ignored.
required: false
default: configured by junos_config
replace:
description:
- The C(replace) argument will instruct the remote device to
replace the current configuration hierarchy with the one specified
in the corresponding hierarchy of the source configuration loaded
from this module.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the I(update) argument to C(replace). This argument
        will be removed in a future release. The C(replace) and C(update)
        arguments are mutually exclusive.
required: false
choices: ['yes', 'no']
default: false
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
update:
description:
- This argument will decide how to load the configuration
        data particularly when the candidate configuration and loaded
configuration contain conflicting statements. Following are
accepted values.
C(merge) combines the data in the loaded configuration with the
candidate configuration. If statements in the loaded configuration
conflict with statements in the candidate configuration, the loaded
statements replace the candidate ones.
C(override) discards the entire candidate configuration and replaces
it with the loaded configuration.
C(replace) substitutes each hierarchy level in the loaded configuration
for the corresponding level.
required: false
default: merge
choices: ['merge', 'override', 'replace']
version_added: "2.3"
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Loading JSON-formatted configuration I(json) is supported
starting in Junos OS Release 16.1 onwards.
"""
EXAMPLES = """
- name: load configure file into device
junos_config:
src: srx.cfg
comment: update config
provider: "{{ netconf }}"
- name: load configure lines into device
junos_config:
lines:
- set interfaces ge-0/0/1 unit 0 description "Test interface"
- set vlans vlan01 description "Test vlan"
comment: update config
provider: "{{ netconf }}"
- name: rollback the configuration to id 10
junos_config:
rollback: 10
provider: "{{ netconf }}"
- name: zero out the current configuration
junos_config:
zeroize: yes
provider: "{{ netconf }}"
- name: confirm a previous commit
junos_config:
provider: "{{ netconf }}"
"""
RETURN = """
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34
"""
import re
import json
import sys
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.junos import get_diff, load_config, get_configuration
from ansible.module_utils.junos import junos_argument_spec
from ansible.module_utils.junos import check_args as junos_check_args
from ansible.module_utils.netconf import send_request
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text, to_native
if sys.version_info < (2, 7):
from xml.parsers.expat import ExpatError
ParseError = ExpatError
else:
ParseError = ElementTree.ParseError
USE_PERSISTENT_CONNECTION = True
DEFAULT_COMMENT = 'configured by junos_config'
def check_args(module, warnings):
junos_check_args(module, warnings)
if module.params['replace'] is not None:
module.fail_json(msg='argument replace is deprecated, use update')
zeroize = lambda x: send_request(x, ElementTree.Element('request-system-zeroize'))
rollback = lambda x: get_diff(x)
def guess_format(config):
try:
json.loads(config)
return 'json'
except ValueError:
pass
try:
ElementTree.fromstring(config)
return 'xml'
except ParseError:
pass
if config.startswith('set') or config.startswith('delete'):
return 'set'
return 'text'
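# For example (values illustrative; the results follow from the checks above):
#   guess_format('{"system": {"host-name": "r1"}}')  -> 'json'
#   guess_format('set system host-name r1')          -> 'set'
#   guess_format('system { host-name r1; }')         -> 'text'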
def filter_delete_statements(module, candidate):
reply = get_configuration(module, format='set')
match = reply.find('.//configuration-set')
if match is None:
# Could not find configuration-set in reply, perhaps device does not support it?
return candidate
config = to_native(match.text, encoding='latin1')
modified_candidate = candidate[:]
for index, line in reversed(list(enumerate(candidate))):
if line.startswith('delete'):
newline = re.sub('^delete', 'set', line)
if newline not in config:
del modified_candidate[index]
return modified_candidate
def configure_device(module, warnings):
candidate = module.params['lines'] or module.params['src']
kwargs = {
'comment': module.params['comment'],
'commit': not module.check_mode
}
if module.params['confirm'] > 0:
kwargs.update({
'confirm': True,
'confirm_timeout': module.params['confirm']
})
config_format = None
if module.params['src']:
config_format = module.params['src_format'] or guess_format(str(candidate))
if config_format == 'set':
kwargs.update({'format': 'text', 'action': 'set'})
else:
kwargs.update({'format': config_format, 'action': module.params['update']})
if isinstance(candidate, string_types):
candidate = candidate.split('\n')
# this is done to filter out `delete ...` statements which map to
# nothing in the config as that will cause an exception to be raised
if any((module.params['lines'], config_format == 'set')):
candidate = filter_delete_statements(module, candidate)
kwargs['format'] = 'text'
kwargs['action'] = 'set'
return load_config(module, candidate, warnings, **kwargs)
def main():
""" main entry point for module execution
"""
argument_spec = dict(
lines=dict(type='list'),
src=dict(type='path'),
src_format=dict(choices=['xml', 'text', 'set', 'json']),
# update operations
update=dict(default='merge', choices=['merge', 'override', 'replace', 'update']),
# deprecated replace in Ansible 2.3
replace=dict(type='bool'),
confirm=dict(default=0, type='int'),
comment=dict(default=DEFAULT_COMMENT),
# config operations
backup=dict(type='bool', default=False),
rollback=dict(type='int'),
zeroize=dict(default=False, type='bool'),
)
argument_spec.update(junos_argument_spec)
mutually_exclusive = [('lines', 'src', 'rollback', 'zeroize')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
if module.params['backup']:
for conf_format in ['set', 'text']:
reply = get_configuration(module, format=conf_format)
match = reply.find('.//configuration-%s' % conf_format)
if match is not None:
break
else:
module.fail_json(msg='unable to retrieve device configuration')
result['__backup__'] = match.text.strip()
if module.params['rollback']:
if not module.check_mode:
diff = rollback(module)
if module._diff:
result['diff'] = {'prepared': diff}
result['changed'] = True
elif module.params['zeroize']:
if not module.check_mode:
zeroize(module)
result['changed'] = True
else:
diff = configure_device(module, warnings)
if diff:
if module._diff:
result['diff'] = {'prepared': diff}
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -4,767,133,261,227,598,000 | 32.966667 | 89 | 0.66454 | false | 4.343872 | true | false | false |
Alwnikrotikz/paimei | console/modules/_PAIMEIdiff/DiffModules/crc.py | 1 | 2243 |
#
# $Id$
#
from defines import *
class crc:
def __init__(self, parent=None):
self.attributes = {} # initialize attributes
self.attributes["Match"] = 1 # Match attribute set to 1 tells the main program we can be used to match
self.attributes["Diff"] = 1 # Diff attribute set to 1 tells the main program we can be used to diff
self.attributes["Level"] = FUNCTION_LEVEL | BASIC_BLOCK_LEVEL # these flags indicated we can diff/match both functions and basic blocks
self.parent = parent # set up the parent
self.module_name = "CRC" # give the module a name
self.author = "Peter Silberman" # author name
self.description = "CRC module uses the crc signature"
self.date = "09/22/06"
self.homepage = "http://www.openrce.org"
self.contact = "[email protected]"
self.accuracy = ACCURACY_HIGH
self.parent.register_match_function( self.match_function_by_crc, self ) # register a function matching routine
self.parent.register_match_basic_block( self.match_basic_block_by_crc, self ) # register a basic block matching routine
self.parent.register_diff_function( self.diff_function_by_crc, self ) # register a function diffing routine
self.parent.register_module(self) # register our module in the module table
def match_function_by_crc(self, function_a, function_b):
if function_a.ext["PAIMEIDiffFunction"].crc == function_b.ext["PAIMEIDiffFunction"].crc:
return 1
else:
return 0
def match_basic_block_by_crc(self, bb_a, bb_b):
if bb_a.ext["PAIMEIDiffBasicBlock"].crc == bb_b.ext["PAIMEIDiffBasicBlock"].crc:
return 1
else:
return 0
def diff_function_by_crc(self, function_a, function_b):
if function_a.ext["PAIMEIDiffFunction"].crc != function_b.ext["PAIMEIDiffFunction"].crc:
return 0
else:
return 0
| gpl-2.0 | -5,157,040,256,780,206,000 | 46.76087 | 145 | 0.57111 | false | 4.07078 | false | false | false |
legoktm/legobot-old | toolserver/pywp/timedate.py | 1 | 2209 |
#!/usr/bin/python
# (C) Legoktm 2008-2011, MIT License
import time, datetime
"""
Not to be run as a file
Contains lists and dictionaries to help with dates
Only for English Language, however translations are welcome.
"""
MonthNames = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December' ]
def monthname(number):
"""
Returns the month name
for the given integer.
"""
return MonthNames[int(number)-1]
days_in_month = {
1: 31,
2: 29,
3: 31,
4: 30,
5: 31,
6: 30,
7: 31,
8: 31,
9: 30,
10: 31,
11: 30,
12: 31
}
num_to_month = {
1:'January',
2:'February',
3:'March',
4:'April',
5:'May',
6:'June',
7:'July',
8:'August',
9:'September',
10:'October',
11:'November',
12:'December',
}
month_to_num = {
'January': 1,
'February': 2,
'March': 3,
'April': 4,
'May': 5,
'June': 6,
'July': 7,
'August': 8,
'September': 9,
'October': 10,
'November': 11,
'December': 12,
}
def daysinmonth(var):
"""
Returns the number of days in a month.
var = month name or number
"""
try:
int(var)
num = True
except ValueError:
num = False
if num:
return days_in_month[int(var)]
number = month_to_num[var]
return days_in_month[number]
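# For example: daysinmonth(2) == 29 and daysinmonth('February') == 29, since
# the table above always gives February 29 days regardless of year.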
def currtime():
"""
Returns a time.time() object
"""
return time.time()
def currentmonth():
"""
Returns the integer of the current month.
To get the current month name, use monthname(currentmonth())
"""
return time.gmtime(currtime()).tm_mon
def currentyear():
return time.gmtime(currtime()).tm_year
def numwithzero(num):
	"""
	Returns num as a str, zero-padded to two digits.
	"""
	num = int(num)
	if num >= 10:
		return str(num)
	else:
		return '0' + str(num)
def monthname(num):
"""
Returns the name of the month based on the integer.
"""
return num_to_month[int(num)]
def convertts(ts):
"""
Converts MediaWiki timestamps (ISO 8601)
to a human readable one.
"""
epochts = int(time.mktime(time.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')))
st = time.gmtime(epochts)
year = str(st.tm_year)
hour = str(st.tm_hour)
min = str(st.tm_min)
monthname1 = monthname(st.tm_mon)
day = str(st.tm_mday)
return '%s:%s, %s %s %s' %(hour, min, day, monthname1, year)
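# Usage sketch (illustrative timestamp):
#
#     convertts('2011-01-15T12:30:00Z')
#
# returns a string of the form 'HH:MM, 15 January 2011'. Note that
# time.mktime() interprets the parsed struct as local time, so the hour can be
# shifted by the local UTC offset.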
| mit | 8,878,393,515,859,586,000 | 17.720339 | 139 | 0.634224 | false | 2.44629 | false | false | false |
vmuriart/grako | grako/contexts.py | 1 | 22161 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import functools
from collections import namedtuple
from contextlib import contextmanager
from grako.util import notnone, ustr, prune_dict, is_list, info, safe_name
from grako.ast import AST
from grako import buffering
from grako import color
from grako.exceptions import (
FailedCut,
FailedLeftRecursion,
FailedLookahead,
FailedParse,
FailedPattern,
FailedSemantics,
FailedKeywordSemantics,
FailedToken,
OptionSucceeded
)
__all__ = ['ParseInfo', 'ParseContext']
ParseInfo = namedtuple(
'ParseInfo',
[
'buffer',
'rule',
'pos',
'endpos'
]
)
# decorator for rule implementation methods
def graken(*params, **kwparams):
def decorator(rule):
@functools.wraps(rule)
def wrapper(self):
name = rule.__name__
# remove the single leading and trailing underscore
# that the parser generator added
name = name[1:-1]
return self._call(rule, name, params, kwparams)
return wrapper
return decorator
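# Sketch of how this decorator is used by generated parsers (hypothetical rule
# name; the generated method is assumed to follow the naming convention the
# wrapper strips, i.e. one leading and one trailing underscore):
#
#     class MyLanguageParser(Parser):
#         @graken()
#         def _number_(self):
#             self._pattern(r'\d+')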
class Closure(list):
pass
class ParseContext(object):
def __init__(self,
semantics=None,
parseinfo=False,
trace=False,
encoding='utf-8',
comments_re=None,
eol_comments_re=None,
whitespace=None,
ignorecase=False,
nameguard=None,
memoize_lookaheads=True,
left_recursion=True,
trace_length=72,
trace_separator=':',
trace_filename=False,
colorize=False,
keywords=None,
namechars='',
**kwargs):
super(ParseContext, self).__init__()
self._buffer = None
self.semantics = semantics
self.encoding = encoding
self.parseinfo = parseinfo
self.trace = trace
self.trace_length = trace_length
self.trace_separator = trace_separator
self.trace_filename = trace_filename
self.comments_re = comments_re
self.eol_comments_re = eol_comments_re
self.whitespace = whitespace
self.ignorecase = ignorecase
self.nameguard = nameguard
self.memoize_lookaheads = memoize_lookaheads
self.left_recursion = left_recursion
self.namechars = namechars
self._ast_stack = [AST()]
self._concrete_stack = [None]
self._rule_stack = []
self._cut_stack = [False]
self._memoization_cache = dict()
self._last_node = None
self._state = None
self._lookahead = 0
self._recursive_results = dict()
self._recursive_eval = []
self._recursive_head = []
self.colorize = colorize
self.keywords = set(keywords or [])
def _reset(self,
text=None,
filename=None,
semantics=None,
trace=None,
comments_re=None,
eol_comments_re=None,
whitespace=None,
ignorecase=None,
nameguard=None,
memoize_lookaheads=None,
left_recursion=None,
colorize=False,
namechars='',
**kwargs):
if ignorecase is None:
ignorecase = self.ignorecase
if nameguard is None:
nameguard = self.nameguard
if memoize_lookaheads is not None:
self.memoize_lookaheads = memoize_lookaheads
if left_recursion is not None:
self.left_recursion = left_recursion
if trace is not None:
self.trace = trace
if semantics is not None:
self.semantics = semantics
if colorize is not None:
self.colorize = colorize
if self.colorize:
color.init()
if isinstance(text, buffering.Buffer):
buffer = text
else:
buffer = buffering.Buffer(
text,
filename=filename,
comments_re=comments_re or self.comments_re,
eol_comments_re=eol_comments_re or self.eol_comments_re,
whitespace=notnone(whitespace, default=self.whitespace),
ignorecase=ignorecase,
nameguard=nameguard,
namechars=namechars or self.namechars,
**kwargs)
self._buffer = buffer
self._ast_stack = [AST()]
self._concrete_stack = [None]
self._rule_stack = []
self._cut_stack = [False]
self._memoization_cache = dict()
self._last_node = None
self._state = None
self._lookahead = 0
self._recursive_results = dict()
self._recursive_eval = []
self._recursive_head = []
def parse(self,
text,
rule_name='start',
filename=None,
semantics=None,
trace=False,
whitespace=None,
**kwargs):
try:
self.parseinfo = kwargs.pop('parseinfo', self.parseinfo)
self._reset(
text=text,
filename=filename,
semantics=semantics,
trace=trace or self.trace,
whitespace=whitespace if whitespace is not None else self.whitespace,
**kwargs
)
rule = self._find_rule(rule_name)
result = rule()
self.ast[rule_name] = result
return result
except FailedCut as e:
raise e.nested
finally:
self._clear_cache()
def goto(self, pos):
self._buffer.goto(pos)
@property
def last_node(self):
return self._last_node
@last_node.setter
def last_node(self, value):
self._last_node = value
@property
def _pos(self):
return self._buffer.pos
def _clear_cache(self):
self._memoization_cache = dict()
self._recursive_results = dict()
def _goto(self, pos):
self._buffer.goto(pos)
def _next_token(self):
self._buffer.next_token()
@property
def ast(self):
return self._ast_stack[-1]
@ast.setter
def ast(self, value):
self._ast_stack[-1] = value
def name_last_node(self, name):
self.ast[name] = self.last_node
def add_last_node_to_name(self, name):
self.ast.setlist(name, self.last_node)
def _push_ast(self):
self._push_cst()
self._ast_stack.append(AST())
def _pop_ast(self):
self._pop_cst()
return self._ast_stack.pop()
@property
def cst(self):
return self._concrete_stack[-1]
@cst.setter
def cst(self, value):
self._concrete_stack[-1] = value
def _push_cst(self):
self._concrete_stack.append(None)
def _pop_cst(self):
return self._concrete_stack.pop()
def _add_cst_node(self, node):
if node is None:
return
previous = self.cst
if previous is None:
self.cst = self._copy_node(node)
elif is_list(previous):
previous.append(node)
else:
self.cst = [previous, node]
def _extend_cst(self, node):
if node is None:
return
previous = self.cst
if previous is None:
self.cst = self._copy_node(node)
elif is_list(node):
if is_list(previous):
previous.extend(node)
else:
self.cst = [previous] + node
elif is_list(previous):
previous.append(node)
else:
self.cst = [previous, node]
def _copy_node(self, node):
if node is None:
return None
elif is_list(node):
return node[:]
else:
return node
def _is_cut_set(self):
return self._cut_stack[-1]
def _cut(self):
self._cut_stack[-1] = True
# Kota Mizushima et al say that we can throw away
# memos for previous positions in the buffer under
# certain circumstances, without affecting the linearity
# of PEG parsing.
# http://goo.gl/VaGpj
#
# We adopt the heuristic of always dropping the cache for
# positions less than the current cut position. It remains to
# be proven if doing it this way affects linearity. Empirically,
# it hasn't.
cutpos = self._pos
def prune_cache(cache):
prune_dict(cache, lambda k, _: k[0] < cutpos)
prune_cache(self._memoization_cache)
prune_cache(self._recursive_results)
def _push_cut(self):
self._cut_stack.append(False)
def _pop_cut(self):
return self._cut_stack.pop()
def _enter_lookahead(self):
self._lookahead += 1
def _leave_lookahead(self):
self._lookahead -= 1
def _memoization(self):
return self.memoize_lookaheads or self._lookahead == 0
def _rulestack(self):
stack = self.trace_separator.join(self._rule_stack)
if len(stack) > self.trace_length:
stack = '...' + stack[-self.trace_length:].lstrip(self.trace_separator)
return stack
def _find_rule(self, name):
return None
def _find_semantic_rule(self, name):
if self.semantics is None:
return None, None
postproc = getattr(self.semantics, '_postproc', None)
if not callable(postproc):
postproc = None
rule = getattr(self.semantics, safe_name(name), None)
if callable(rule):
return rule, postproc
rule = getattr(self.semantics, '_default', None)
if callable(rule):
return rule, postproc
return None, postproc
def _trace(self, msg, *params):
if self.trace:
msg = msg % params
info(ustr(msg), file=sys.stderr)
def _trace_event(self, event):
if self.trace:
fname = ''
if self.trace_filename:
fname = self._buffer.line_info().filename + '\n'
self._trace('%s \n%s%s \n',
event + ' ' + self._rulestack(),
color.Style.DIM + fname,
color.Style.NORMAL + self._buffer.lookahead().rstrip('\r\n')
)
def _trace_match(self, token, name=None, failed=False):
if self.trace:
fname = ''
if self.trace_filename:
fname = self._buffer.line_info().filename + '\n'
name = '/%s/' % name if name else ''
fgcolor = color.Fore.GREEN + '< 'if not failed else color.Fore.RED + '! '
self._trace(
color.Style.BRIGHT + fgcolor + '"%s" %s\n%s%s\n',
token,
name,
color.Style.DIM + fname,
color.Style.NORMAL + self._buffer.lookahead().rstrip('\r\n')
)
def _error(self, item, etype=FailedParse):
raise etype(
self._buffer,
list(reversed(self._rule_stack[:])),
item
)
def _fail(self):
self._error('fail')
def _get_parseinfo(self, node, name, start):
return ParseInfo(
self._buffer,
name,
start,
self._pos
)
def _call(self, rule, name, params, kwparams):
self._rule_stack.append(name)
pos = self._pos
try:
self._trace_event(color.Fore.YELLOW + color.Style.BRIGHT + '>')
self._last_node = None
node, newpos, newstate = self._invoke_rule(rule, name, params, kwparams)
self._goto(newpos)
self._state = newstate
self._trace_event(color.Fore.GREEN + color.Style.BRIGHT + '<')
self._add_cst_node(node)
self._last_node = node
return node
except FailedPattern:
self._error('Expecting <%s>' % name)
except FailedParse:
self._trace_event(color.Fore.RED + color.Style.BRIGHT + '!')
self._goto(pos)
raise
finally:
self._rule_stack.pop()
def _invoke_rule(self, rule, name, params, kwparams):
cache = self._memoization_cache
pos = self._pos
key = (pos, rule, self._state)
if key in cache:
memo = cache[key]
memo = self._left_recursion_check(name, key, memo)
if isinstance(memo, Exception):
raise memo
return memo
self._set_left_recursion_guard(name, key)
self._push_ast()
try:
if name[0].islower():
self._next_token()
rule(self)
node = self.ast
if not node:
node = self.cst
elif '@' in node:
node = node['@'] # override the AST
elif self.parseinfo:
node._parseinfo = self._get_parseinfo(
node,
name,
pos
)
node = self._invoke_semantic_rule(name, node, params, kwparams)
result = (node, self._pos, self._state)
result = self._left_recurse(rule, name, pos, key, result, params, kwparams)
if self._memoization() and not self._in_recursive_loop():
cache[key] = result
return result
except FailedParse as e:
if self._memoization():
cache[key] = e
raise
finally:
self._pop_ast()
def _set_left_recursion_guard(self, name, key):
exception = FailedLeftRecursion(
self._buffer,
list(reversed(self._rule_stack[:])),
name
)
# Alessandro Warth et al say that we can deal with
# direct and indirect left-recursion by seeding the
# memoization cache with a parse failure.
#
# http://www.vpri.org/pdf/tr2007002_packrat.pdf
#
if self._memoization():
self._memoization_cache[key] = exception
def _left_recursion_check(self, name, key, memo):
if isinstance(memo, FailedLeftRecursion) and self.left_recursion:
# At this point we know we've already seen this rule
# at this position. Either we've got a potential
# result from a previous pass that we can return, or
# we make a note of the rule so that we can take
# action as we unwind the rule stack.
if key in self._recursive_results:
memo = self._recursive_results[key]
else:
self._recursive_head.append(name)
return memo
def _in_recursive_loop(self):
head = self._recursive_head
return head and head[-1] in self._rule_stack
def _left_recurse(self, rule, name, pos, key, result, params, kwparams):
if self._memoization():
self._recursive_results[key] = result
# If the current name is in the head, then we've just
# unwound to the highest rule in the recursion
cache = self._memoization_cache
last_pos = pos
if (
[name] == self._recursive_head[-1:] and
self._recursive_head[-1:] != self._recursive_eval[-1:]
):
# Repeatedly apply the rule until it can't consume any
# more. We store the last good result each time. Prior
# to doing so we reset the position and remove any
# failures from the cache.
last_result = result
self._recursive_eval.append(name)
while self._pos > last_pos:
last_result = result
last_pos = self._pos
self._goto(pos)
prune_dict(cache, lambda _, v: isinstance(v, FailedParse))
try:
result = self._invoke_rule(rule, name, params, kwparams)
except FailedParse:
pass
result = last_result
self._recursive_results = dict()
self._recursive_head.pop()
self._recursive_eval.pop()
return result
def _invoke_semantic_rule(self, name, node, params, kwparams):
semantic_rule, postproc = self._find_semantic_rule(name)
try:
if semantic_rule:
node = semantic_rule(node, *(params or ()), **(kwparams or {}))
if postproc is not None:
postproc(self, node)
return node
except FailedSemantics as e:
self._error(str(e), FailedParse)
def _token(self, token):
self._next_token()
if self._buffer.match(token) is None:
self._trace_match(token, failed=True)
self._error(token, etype=FailedToken)
self._trace_match(token)
self._add_cst_node(token)
self._last_node = token
return token
def _constant(self, literal):
self._next_token()
self._trace_match(literal)
self._add_cst_node(literal)
self._last_node = literal
return literal
def _pattern(self, pattern):
token = self._buffer.matchre(pattern)
if token is None:
self._trace_match('', pattern, failed=True)
self._error(pattern, etype=FailedPattern)
self._trace_match(token, pattern)
self._add_cst_node(token)
self._last_node = token
return token
def _eof(self):
return self._buffer.atend()
def _eol(self):
return self._buffer.ateol()
def _check_eof(self):
self._next_token()
if not self._buffer.atend():
self._error('Expecting end of text.')
@contextmanager
def _try(self):
p = self._pos
s = self._state
ast_copy = self.ast.copy()
self._push_ast()
self.last_node = None
try:
self.ast = ast_copy
yield
ast = self.ast
cst = self.cst
except:
self._goto(p)
self._state = s
raise
finally:
self._pop_ast()
self.ast = ast
self._extend_cst(cst)
self.last_node = cst
@contextmanager
def _option(self):
self.last_node = None
self._push_cut()
try:
with self._try():
yield
raise OptionSucceeded()
except FailedCut:
raise
except FailedParse as e:
if self._is_cut_set():
raise FailedCut(e)
finally:
self._pop_cut()
@contextmanager
def _choice(self):
self.last_node = None
with self._try():
try:
yield
except OptionSucceeded:
pass
@contextmanager
def _optional(self):
self.last_node = None
with self._choice():
with self._option():
yield
@contextmanager
def _group(self):
self._push_cst()
try:
yield
cst = self.cst
finally:
self._pop_cst()
self._extend_cst(cst)
self.last_node = cst
@contextmanager
def _if(self):
p = self._pos
s = self._state
self._push_ast()
self._enter_lookahead()
try:
yield
finally:
self._leave_lookahead()
self._goto(p)
self._state = s
self._pop_ast() # simply discard
@contextmanager
def _ifnot(self):
try:
with self._if():
yield
except FailedParse:
pass
else:
self._error('', etype=FailedLookahead)
@contextmanager
def _ignore(self):
self._push_cst()
try:
self.cst = None
yield
finally:
self._pop_cst()
def _repeater(self, block, prefix=None):
while True:
self._push_cut()
self._push_cst()
try:
p = self._pos
with self._try():
if prefix:
with self._ignore():
prefix()
self._cut()
block()
cst = self.cst
if self._pos == p:
self._error('empty closure')
except FailedCut:
raise
except FailedParse as e:
if self._is_cut_set():
raise FailedCut(e)
break
finally:
self._pop_cst()
self._pop_cut()
self._add_cst_node(cst)
def _closure(self, block):
self._push_cst()
try:
self.cst = []
self._repeater(block)
cst = Closure(self.cst)
finally:
self._pop_cst()
self._add_cst_node(cst)
self.last_node = cst
return cst
def _positive_closure(self, block, prefix=None):
self._push_cst()
try:
self.cst = None
with self._try():
block()
self.cst = [self.cst]
self._repeater(block, prefix=prefix)
cst = Closure(self.cst)
finally:
self._pop_cst()
self._add_cst_node(cst)
self.last_node = cst
return cst
def _empty_closure(self):
cst = Closure([])
self._add_cst_node(cst)
self.last_node = cst
return cst
def _check_name(self):
name = self.last_node
if (self.ignorecase and name.upper() or name) in self.keywords:
raise FailedKeywordSemantics('"%s" is a reserved word' % name)
| bsd-2-clause | 3,773,346,253,559,332,400 | 27.930809 | 87 | 0.515184 | false | 4.216324 | false | false | false |
meejah/txtorcon | txtorcon/socks.py | 1 | 23368 |
# in-progress; implementing SOCKS5 client-side stuff as extended by
# tor because txsocksx will not be getting Python3 support any time
# soon, and its underlying dependency (Parsely) also doesn't support
# Python3. Also, Tor's SOCKS5 implementation is especially simple,
# since it doesn't do BIND or UDP ASSOCIATE.
from __future__ import print_function
import six
import struct
from socket import inet_pton, inet_ntoa, inet_aton, AF_INET6, AF_INET
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
from twisted.internet.protocol import Protocol, Factory
from twisted.internet.address import IPv4Address, IPv6Address, HostnameAddress
from twisted.python.failure import Failure
from twisted.protocols import portforward
from twisted.protocols import tls
from twisted.internet.interfaces import IStreamClientEndpoint
from zope.interface import implementer
import ipaddress
import automat
from txtorcon import util
__all__ = (
'resolve',
'resolve_ptr',
'SocksError',
'GeneralServerFailureError',
'ConnectionNotAllowedError',
'NetworkUnreachableError',
'HostUnreachableError',
'ConnectionRefusedError',
'TtlExpiredError',
'CommandNotSupportedError',
'AddressTypeNotSupportedError',
'TorSocksEndpoint',
)
def _create_ip_address(host, port):
if not isinstance(host, six.text_type):
raise ValueError(
"'host' must be {}, not {}".format(six.text_type, type(host))
)
try:
a = ipaddress.ip_address(host)
except ValueError:
a = None
if isinstance(a, ipaddress.IPv4Address):
return IPv4Address('TCP', host, port)
if isinstance(a, ipaddress.IPv6Address):
return IPv6Address('TCP', host, port)
addr = HostnameAddress(host, port)
addr.host = host
return addr
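# For example (values illustrative): _create_ip_address(u'10.0.0.1', 9050)
# yields IPv4Address('TCP', u'10.0.0.1', 9050), u'::1' yields an IPv6Address,
# and anything else, e.g. u'torproject.org', falls through to a
# HostnameAddress with .host patched onto it.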
class _SocksMachine(object):
"""
trying to prototype the SOCKS state-machine in automat
This is a SOCKS state machine to make a single request.
"""
_machine = automat.MethodicalMachine()
SUCCEEDED = 0x00
REPLY_IPV4 = 0x01
REPLY_HOST = 0x03
REPLY_IPV6 = 0x04
# XXX address = (host, port) instead
def __init__(self, req_type, host,
port=0,
on_disconnect=None,
on_data=None,
create_connection=None):
if req_type not in self._dispatch:
raise ValueError(
"Unknown request type '{}'".format(req_type)
)
if req_type == 'CONNECT' and create_connection is None:
raise ValueError(
"create_connection function required for '{}'".format(
req_type
)
)
if not isinstance(host, (bytes, str, six.text_type)):
raise ValueError(
"'host' must be text (not {})".format(type(host))
)
# XXX what if addr is None?
self._req_type = req_type
self._addr = _create_ip_address(six.text_type(host), port)
self._data = b''
self._on_disconnect = on_disconnect
self._create_connection = create_connection
# XXX FIXME do *one* of these:
self._on_data = on_data
self._outgoing_data = []
# the other side of our proxy
self._sender = None
self._when_done = util.SingleObserver()
def when_done(self):
"""
Returns a Deferred that fires when we're done
"""
return self._when_done.when_fired()
def _data_to_send(self, data):
if self._on_data:
self._on_data(data)
else:
self._outgoing_data.append(data)
def send_data(self, callback):
"""
drain all pending data by calling `callback()` on it
"""
# a "for x in self._outgoing_data" would potentially be more
# efficient, but then there's no good way to bubble exceptions
# from callback() out without lying about how much data we
# processed .. or eat the exceptions in here.
while len(self._outgoing_data):
data = self._outgoing_data.pop(0)
callback(data)
def feed_data(self, data):
# I feel like maybe i'm doing all this buffering-stuff
# wrong. but I also don't want a bunch of "received 1 byte"
# etc states hanging off everything that can "get data"
self._data += data
self.got_data()
@_machine.output()
def _parse_version_reply(self):
"waiting for a version reply"
if len(self._data) >= 2:
reply = self._data[:2]
self._data = self._data[2:]
(version, method) = struct.unpack('BB', reply)
if version == 5 and method in [0x00, 0x02]:
self.version_reply(method)
else:
if version != 5:
self.version_error(SocksError(
"Expected version 5, got {}".format(version)))
else:
self.version_error(SocksError(
"Wanted method 0 or 2, got {}".format(method)))
def _parse_ipv4_reply(self):
if len(self._data) >= 10:
addr = inet_ntoa(self._data[4:8])
port = struct.unpack('H', self._data[8:10])[0]
self._data = self._data[10:]
if self._req_type == 'CONNECT':
self.reply_ipv4(addr, port)
else:
self.reply_domain_name(addr)
def _parse_ipv6_reply(self):
if len(self._data) >= 22:
addr = self._data[4:20]
port = struct.unpack('H', self._data[20:22])[0]
self._data = self._data[22:]
self.reply_ipv6(addr, port)
def _parse_domain_name_reply(self):
assert len(self._data) >= 8 # _parse_request_reply checks this
addrlen = struct.unpack('B', self._data[4:5])[0]
# may simply not have received enough data yet...
if len(self._data) < (5 + addrlen + 2):
return
addr = self._data[5:5 + addrlen]
# port = struct.unpack('H', self._data[5 + addrlen:5 + addrlen + 2])[0]
self._data = self._data[5 + addrlen + 2:]
self.reply_domain_name(addr)
@_machine.output()
def _parse_request_reply(self):
"waiting for a reply to our request"
# we need at least 6 bytes of data: 4 for the "header", such
# as it is, and 2 more if it's DOMAINNAME (for the size) or 4
# or 16 more if it's an IPv4/6 address reply. plus there's 2
# bytes on the end for the bound port.
if len(self._data) < 8:
return
msg = self._data[:4]
# not changing self._data yet, in case we've not got
# enough bytes so far.
(version, reply, _, typ) = struct.unpack('BBBB', msg)
if version != 5:
self.reply_error(SocksError(
"Expected version 5, got {}".format(version)))
return
if reply != self.SUCCEEDED:
self.reply_error(_create_socks_error(reply))
return
reply_dispatcher = {
self.REPLY_IPV4: self._parse_ipv4_reply,
self.REPLY_HOST: self._parse_domain_name_reply,
self.REPLY_IPV6: self._parse_ipv6_reply,
}
try:
method = reply_dispatcher[typ]
except KeyError:
self.reply_error(SocksError(
"Unexpected response type {}".format(typ)))
return
method()
@_machine.output()
def _make_connection(self, addr, port):
"make our proxy connection"
sender = self._create_connection(addr, port)
# XXX look out! we're depending on this "sender" implementing
# certain Twisted APIs, and the state-machine shouldn't depend
# on that.
# XXX also, if sender implements producer/consumer stuff, we
# should register ourselves (and implement it to) -- but this
# should really be taking place outside the state-machine in
# "the I/O-doing" stuff
self._sender = sender
self._when_done.fire(sender)
@_machine.output()
def _domain_name_resolved(self, domain):
self._when_done.fire(domain)
@_machine.input()
def connection(self):
"begin the protocol (i.e. connection made)"
@_machine.input()
def disconnected(self, error):
"the connection has gone away"
@_machine.input()
def got_data(self):
"we recevied some data and buffered it"
@_machine.input()
def version_reply(self, auth_method):
"the SOCKS server replied with a version"
@_machine.input()
def version_error(self, error):
"the SOCKS server replied, but we don't understand"
@_machine.input()
def reply_error(self, error):
"the SOCKS server replied with an error"
@_machine.input()
def reply_ipv4(self, addr, port):
"the SOCKS server told me an IPv4 addr, port"
@_machine.input()
def reply_ipv6(self, addr, port):
"the SOCKS server told me an IPv6 addr, port"
@_machine.input()
def reply_domain_name(self, domain):
"the SOCKS server told me a domain-name"
@_machine.input()
def answer(self):
"the SOCKS server replied with an answer"
@_machine.output()
def _send_version(self):
"sends a SOCKS version reply"
self._data_to_send(
# for anonymous(0) *and* authenticated (2): struct.pack('BBBB', 5, 2, 0, 2)
struct.pack('BBB', 5, 1, 0)
)
@_machine.output()
def _disconnect(self, error):
"done"
if self._on_disconnect:
self._on_disconnect(str(error))
if self._sender:
self._sender.connectionLost(Failure(error))
self._when_done.fire(Failure(error))
@_machine.output()
def _send_request(self, auth_method):
"send the request (connect, resolve or resolve_ptr)"
assert auth_method == 0x00 # "no authentication required"
return self._dispatch[self._req_type](self)
@_machine.output()
def _relay_data(self):
"relay any data we have"
if self._data:
d = self._data
self._data = b''
# XXX this is "doing I/O" in the state-machine and it
# really shouldn't be ... probably want a passed-in
# "relay_data" callback or similar?
self._sender.dataReceived(d)
def _send_connect_request(self):
"sends CONNECT request"
# XXX needs to support v6 ... or something else does
host = self._addr.host
port = self._addr.port
if isinstance(self._addr, (IPv4Address, IPv6Address)):
is_v6 = isinstance(self._addr, IPv6Address)
self._data_to_send(
struct.pack(
                    '!BBBB16sH' if is_v6 else '!BBBB4sH',  # 16-byte addr for IPv6, 4 for IPv4
5, # version
0x01, # command
0x00, # reserved
0x04 if is_v6 else 0x01,
inet_pton(AF_INET6 if is_v6 else AF_INET, host),
port,
)
)
else:
host = host.encode('ascii')
self._data_to_send(
struct.pack(
'!BBBBB{}sH'.format(len(host)),
5, # version
0x01, # command
0x00, # reserved
0x03,
len(host),
host,
port,
)
)
@_machine.output()
def _send_resolve_request(self):
"sends RESOLVE_PTR request (Tor custom)"
host = self._addr.host.encode()
self._data_to_send(
struct.pack(
'!BBBBB{}sH'.format(len(host)),
5, # version
0xF0, # command
0x00, # reserved
0x03, # DOMAINNAME
len(host),
host,
0, # self._addr.port?
)
)
@_machine.output()
def _send_resolve_ptr_request(self):
"sends RESOLVE_PTR request (Tor custom)"
addr_type = 0x04 if isinstance(self._addr, ipaddress.IPv4Address) else 0x01
encoded_host = inet_aton(self._addr.host)
self._data_to_send(
struct.pack(
'!BBBB4sH',
5, # version
0xF1, # command
0x00, # reserved
addr_type,
encoded_host,
0, # port; unused? SOCKS is fun
)
)
@_machine.state(initial=True)
def unconnected(self):
"not yet connected"
@_machine.state()
def sent_version(self):
"we've sent our version request"
@_machine.state()
def sent_request(self):
"we've sent our stream/etc request"
@_machine.state()
def relaying(self):
"received our response, now we can relay"
@_machine.state()
def abort(self, error_message):
"we've encountered an error"
@_machine.state()
def done(self):
"operations complete"
unconnected.upon(
connection,
enter=sent_version,
outputs=[_send_version],
)
sent_version.upon(
got_data,
enter=sent_version,
outputs=[_parse_version_reply],
)
sent_version.upon(
version_error,
enter=abort,
outputs=[_disconnect],
)
sent_version.upon(
version_reply,
enter=sent_request,
outputs=[_send_request],
)
sent_version.upon(
disconnected,
enter=unconnected,
outputs=[_disconnect]
)
sent_request.upon(
got_data,
enter=sent_request,
outputs=[_parse_request_reply],
)
sent_request.upon(
reply_ipv4,
enter=relaying,
outputs=[_make_connection],
)
sent_request.upon(
reply_ipv6,
enter=relaying,
outputs=[_make_connection],
)
# XXX this isn't always a _domain_name_resolved -- if we're a
# req_type CONNECT then it's _make_connection_domain ...
sent_request.upon(
reply_domain_name,
enter=done,
outputs=[_domain_name_resolved],
)
sent_request.upon(
reply_error,
enter=abort,
outputs=[_disconnect],
)
# XXX FIXME this needs a test
sent_request.upon(
disconnected,
enter=abort,
outputs=[_disconnect], # ... or is this redundant?
)
relaying.upon(
got_data,
enter=relaying,
outputs=[_relay_data],
)
relaying.upon(
disconnected,
enter=done,
outputs=[_disconnect],
)
abort.upon(
got_data,
enter=abort,
outputs=[],
)
abort.upon(
disconnected,
enter=abort,
outputs=[],
)
done.upon(
disconnected,
enter=done,
outputs=[],
)
_dispatch = {
'CONNECT': _send_connect_request,
'RESOLVE': _send_resolve_request,
'RESOLVE_PTR': _send_resolve_ptr_request,
}
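# A minimal driving sketch for _SocksMachine (illustration only -- the
# `transport` name is assumed; in practice _TorSocksProtocol below does this
# wiring against a real Twisted transport):
#
#     machine = _SocksMachine('RESOLVE', u'torproject.org')
#     machine.connection()
#     machine.send_data(transport.write)   # drain the version negotiation
#     machine.feed_data(reply_bytes)       # bytes read back from the SOCKS port
#     machine.send_data(transport.write)   # drain the RESOLVE request
#     d = machine.when_done()              # fires with the resolved address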
class _TorSocksProtocol(Protocol):
def __init__(self, host, port, socks_method, factory):
self._machine = _SocksMachine(
req_type=socks_method,
host=host, # noqa unicode() on py3, py2? we want idna, actually?
port=port,
on_disconnect=self._on_disconnect,
on_data=self._on_data,
create_connection=self._create_connection,
)
self._factory = factory
def when_done(self):
return self._machine.when_done()
def connectionMade(self):
self._machine.connection()
        # we notify via the factory that we have the
# locally-connecting host -- this is e.g. used by the "stream
# over one particular circuit" code to determine the local
# port that "our" SOCKS connection went to
self.factory._did_connect(self.transport.getHost())
def connectionLost(self, reason):
self._machine.disconnected(SocksError(reason))
def dataReceived(self, data):
self._machine.feed_data(data)
def _on_data(self, data):
self.transport.write(data)
def _create_connection(self, addr, port):
addr = IPv4Address('TCP', addr, port)
sender = self._factory.buildProtocol(addr)
client_proxy = portforward.ProxyClient()
sender.makeConnection(self.transport)
# portforward.ProxyClient is going to call setPeer but this
# probably doesn't have it...
setattr(sender, 'setPeer', lambda _: None)
client_proxy.setPeer(sender)
self._sender = sender
return sender
def _on_disconnect(self, error_message):
self.transport.loseConnection()
# self.transport.abortConnection()#SocksError(error_message)) ?
class _TorSocksFactory(Factory):
protocol = _TorSocksProtocol
# XXX should do validation on this stuff so we get errors before
# building the protocol
def __init__(self, *args, **kw):
self._args = args
self._kw = kw
self._host = None
self._when_connected = util.SingleObserver()
def _get_address(self):
"""
Returns a Deferred that fires with the transport's getHost()
when this SOCKS protocol becomes connected.
"""
return self._when_connected.when_fired()
def _did_connect(self, host):
self._host = host
self._when_connected.fire(host)
def buildProtocol(self, addr):
p = self.protocol(*self._args, **self._kw)
p.factory = self
return p
class SocksError(Exception):
code = None
message = ''
def __init__(self, message='', code=None):
super(SocksError, self).__init__(message or self.message)
self.message = message or self.message
self.code = code or self.code
class GeneralServerFailureError(SocksError):
code = 0x01
message = 'general SOCKS server failure'
class ConnectionNotAllowedError(SocksError):
code = 0x02
message = 'connection not allowed by ruleset'
class NetworkUnreachableError(SocksError):
code = 0x03
message = 'Network unreachable'
class HostUnreachableError(SocksError):
code = 0x04
message = 'Host unreachable'
class ConnectionRefusedError(SocksError):
code = 0x05
message = 'Connection refused'
class TtlExpiredError(SocksError):
code = 0x06
message = 'TTL expired'
class CommandNotSupportedError(SocksError):
code = 0x07
message = 'Command not supported'
class AddressTypeNotSupportedError(SocksError):
code = 0x08
message = 'Address type not supported'
_socks_errors = {cls.code: cls for cls in SocksError.__subclasses__()}
def _create_socks_error(code):
try:
return _socks_errors[code]()
except KeyError:
return SocksError("Unknown SOCKS error-code {}".format(code),
code=code)
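# For illustration: _create_socks_error(0x05) returns a ConnectionRefusedError
# instance, while an unrecognised code falls back to a plain SocksError that
# carries the raw code.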
@inlineCallbacks
def resolve(tor_endpoint, hostname):
"""
This is easier to use via :meth:`txtorcon.Tor.dns_resolve`
:param tor_endpoint: the Tor SOCKS endpoint to use.
:param hostname: the hostname to look up.
"""
if six.PY2 and isinstance(hostname, str):
hostname = unicode(hostname) # noqa
elif six.PY3 and isinstance(hostname, bytes):
hostname = hostname.decode('ascii')
factory = _TorSocksFactory(
hostname, 0, 'RESOLVE', None,
)
proto = yield tor_endpoint.connect(factory)
result = yield proto.when_done()
returnValue(result)
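# Usage sketch (assumes Tor's SOCKS port listens on 127.0.0.1:9050 and that
# this runs inside an inlineCallbacks-decorated function with the reactor
# running):
#
#     socks_ep = TCP4ClientEndpoint(reactor, '127.0.0.1', 9050)
#     ip = yield resolve(socks_ep, u'torproject.org')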
@inlineCallbacks
def resolve_ptr(tor_endpoint, ip):
"""
This is easier to use via :meth:`txtorcon.Tor.dns_resolve_ptr`
:param tor_endpoint: the Tor SOCKS endpoint to use.
:param ip: the IP address to look up.
"""
if six.PY2 and isinstance(ip, str):
ip = unicode(ip) # noqa
elif six.PY3 and isinstance(ip, bytes):
ip = ip.decode('ascii')
factory = _TorSocksFactory(
ip, 0, 'RESOLVE_PTR', None,
)
proto = yield tor_endpoint.connect(factory)
result = yield proto.when_done()
returnValue(result)
@implementer(IStreamClientEndpoint)
class TorSocksEndpoint(object):
"""
Represents an endpoint which will talk to a Tor SOCKS port.
These should usually not be instantiated directly, instead use
:meth:`txtorcon.TorConfig.socks_endpoint`.
"""
# XXX host, port args should be (host, port) tuple, or
# IAddress-implementer?
def __init__(self, socks_endpoint, host, port, tls=False):
self._proxy_ep = socks_endpoint # can be Deferred
assert self._proxy_ep is not None
if six.PY2 and isinstance(host, str):
host = unicode(host) # noqa
if six.PY3 and isinstance(host, bytes):
host = host.decode('ascii')
self._host = host
self._port = port
self._tls = tls
self._socks_factory = None
self._when_address = util.SingleObserver()
def _get_address(self):
"""
Returns a Deferred that fires with the source IAddress of the
underlying SOCKS connection (i.e. usually a
twisted.internet.address.IPv4Address)
circuit.py uses this; better suggestions welcome!
"""
return self._when_address.when_fired()
@inlineCallbacks
def connect(self, factory):
# further wrap the protocol if we're doing TLS.
# "pray i do not wrap the protocol further".
if self._tls:
# XXX requires Twisted 14+
from twisted.internet.ssl import optionsForClientTLS
if self._tls is True:
context = optionsForClientTLS(self._host)
else:
context = self._tls
tls_factory = tls.TLSMemoryBIOFactory(context, True, factory)
socks_factory = _TorSocksFactory(
self._host, self._port, 'CONNECT', tls_factory,
)
else:
socks_factory = _TorSocksFactory(
self._host, self._port, 'CONNECT', factory,
)
self._socks_factory = socks_factory
# forward our address (when we get it) to any listeners
self._socks_factory._get_address().addBoth(self._when_address.fire)
# XXX isn't this just maybeDeferred()
if isinstance(self._proxy_ep, Deferred):
proxy_ep = yield self._proxy_ep
if not IStreamClientEndpoint.providedBy(proxy_ep):
raise ValueError(
"The Deferred provided as 'socks_endpoint' must "
"resolve to an IStreamClientEndpoint provider (got "
"{})".format(type(proxy_ep).__name__)
)
else:
proxy_ep = self._proxy_ep
# socks_proto = yield proxy_ep.connect(socks_factory)
proto = yield proxy_ep.connect(socks_factory)
wrapped_proto = yield proto.when_done()
if self._tls:
returnValue(wrapped_proto.wrappedProtocol)
else:
returnValue(wrapped_proto)
| mit | 3,089,544,794,076,965,400 | 29.910053 | 87 | 0.572663 | false | 4.009609 | false | false | false |
appi147/Jarvis | jarviscli/plugin.py | 1 | 6525 | from inspect import cleandoc, isclass
import pluginmanager
from requests import ConnectionError
# Constants
# platform
MACOS = "MACOS"
LINUX = "LINUX"
WINDOWS = "WINDOWS"
# Shortcut for MACOS + LINUX
UNIX = "UNIX"
def plugin(name):
"""
Convert function in Plugin Class
@python(platform=LINUX, native="ap-hotspot")
def hotspot_start(jarvis, s):
system("sudo ap-hotspot start")
"""
def create_plugin(run):
plugin_class = type(
run.__name__, Plugin.__bases__, dict(
Plugin.__dict__))
plugin_class.__doc__ = run.__doc__
if isclass(run):
# class -> object
run = run()
# create class
plugin_class._require = []
plugin_class._complete = []
plugin_class._alias = []
plugin_class._name = name
plugin_class._backend = (run,)
plugin_class._backend_instance = run
return plugin_class
return create_plugin
def require(network=None, platform=None, native=None):
require = []
if network is not None:
require.append(('network', network))
if platform is not None:
require.append(('platform', platform))
if native is not None:
require.append(('native', native))
def __require(plugin):
plugin._require.extend(require)
return plugin
return __require
def complete(*complete):
def __complete(plugin):
plugin._complete.extend(complete)
return plugin
return __complete
def alias(*alias):
def __alias(plugin):
plugin._alias.extend(alias)
return plugin
return __alias
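# Combined-usage sketch for the decorators above (plugin name, completions and
# body are made up for illustration; @plugin must sit closest to the function):
#
#     @require(network=True, platform=LINUX)
#     @complete("today", "tomorrow")
#     @plugin("weather")
#     def weather(jarvis, s):
#         jarvis.say("...")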
def _yield_something(values):
for value in values:
yield value
class PluginStorage(object):
def __init__(self):
self._sub_plugins = {}
def add_plugin(self, name, plugin_to_add):
self._sub_plugins[name] = plugin_to_add
def get_plugins(self, name=None):
if name is None:
return self._sub_plugins
if name in self._sub_plugins:
return self._sub_plugins[name]
return None
def change_with(self, plugin_new):
plugin_new._sub_plugins = self._sub_plugins
class Plugin(pluginmanager.IPlugin, PluginStorage):
"""
"""
_backend = None
def __init__(self):
super(pluginmanager.IPlugin, self).__init__()
self._sub_plugins = {}
def init(self, jarvis_api):
"""
Called before Jarvis starts;
Passes jarvis_api object for plugins to do initialization.
(would not be possible with __init__)
"""
if self.is_callable_plugin():
if hasattr(
self._backend[0].__class__,
"init") and callable(
getattr(
self._backend[0].__class__,
"init")):
self._backend[0].init(jarvis_api)
for plugin in self.get_plugins().values():
plugin.init(jarvis_api)
def is_callable_plugin(self):
"""
Return True, if this plugin has a executable implementation (e.g. news)
Return False, if this instance is only used for calling other plugins
(e.g. movie in 'movie search' and 'movie plot')
"""
return self._backend is not None
def get_name(self):
"""Set with @plugin(name)"""
return self._name
def require(self):
"""Set with @require"""
return self._require
def alias(self):
"""Set with @alias"""
return self._alias
def complete(self):
"""Set with @complete"""
# return default complete() if possible
if self.is_callable_plugin():
for complete in self._complete:
yield complete
# yield each sub command
for complete in self.get_plugins().keys():
yield complete
def get_doc(self):
"""Parses plugin doc string"""
doc = ""
examples = ""
extended_doc = ""
# default complete
if self.__doc__ is not None:
default_command_doc = cleandoc(self.__doc__)
default_command_doc = default_command_doc.split("-- Example:")
if len(default_command_doc) > 1:
examples += default_command_doc[1]
default_command_doc = default_command_doc[0]
doc += default_command_doc
if not doc.endswith("\n"):
doc += "\n"
doc += "\nSubcommands:"
# sub command complete
for name, sub_command in self.get_plugins().items():
doc += "\n-> {}: ".format(name)
sub_command_doc = sub_command.get_doc()
sub_command_doc = sub_command_doc.split("-- Example:")
if len(sub_command_doc) > 1:
examples += sub_command_doc[1]
sub_command_doc = sub_command_doc[0]
if '\n' not in sub_command_doc:
doc += sub_command_doc
else:
extended_doc += "\n {}:\n".format(name)
extended_doc += sub_command_doc
if not sub_command_doc.endswith("\n"):
extended_doc += "\n"
if extended_doc != "":
doc += "\n"
doc += extended_doc
if examples != "":
doc += "\n--Examples:"
doc += examples
return doc
def run(self, jarvis, s):
"""Entry point if this plugin is called"""
sub_command = jarvis.find_action(s, self.get_plugins().keys())
        if sub_command == "None":
# run default
if self.is_callable_plugin():
self._backend[0](jarvis.get_api(), s)
else:
jarvis.get_api().say("Sorry, I could not recognise your command. Did you mean:")
for sub_command in self._sub_plugins.keys():
jarvis.get_api().say(" * {} {}".format(self.get_name(), sub_command))
else:
command = sub_command.split()[0]
new_s = " ".join(sub_command.split()[1:])
self.get_plugins(command).run(jarvis, new_s)
def _plugin_run_with_network_error(self, run_func, jarvis, s):
"""
Calls run_func(jarvis, s); try-catch ConnectionError
This method is auto-used if require() yields ("network", True). Do not
        use manually.
"""
try:
run_func(jarvis, s)
except ConnectionError:
jarvis.get_api().connection_error()
| mit | 4,146,784,341,586,826,000 | 27.49345 | 96 | 0.540383 | false | 4.161352 | false | false | false |
brigittebigi/proceed | proceed/scripts/import.py | 1 | 9928 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ ___ ___ ____ ____ __
# | \ | \ | | / | | | \ Automatic
# |__/ |__/ | | | |__ |__ | | Conference
# | |\_ | | | | | | | Proceedings
# | | \ |___| \___ |___ |___ |__/ Generator
# ==========================================================
#
# http://www.lpl-aix.fr/~bigi/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2013-2014 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
__docformat__ = "epytext"
"""
Import abstracts from a conference and save them in a directory,
in the form of one latex file per abstract.
Input can be one of sciencesconf XML file or easychair CSV file.
No options for the output style: use default.
"""
# ---------------------------------------------------------------------------
import sys
import os.path
import getopt
sys.path.append( os.path.join(os.path.dirname(os.path.dirname( os.path.abspath(__file__))), "src") )
from DataIO.Read.reader import Reader
from DataIO.Write.writer import Writer
from structs.prefs import Preferences
from structs.abstracts_themes import all_themes
from term.textprogress import TextProgress
from term.terminalcontroller import TerminalController
from sp_glob import program, author, version, copyright, url
wxop = True
try:
import wx
from wxgui.frames.import_wizard import ImportWizard
except Exception:
wxop = False
# ----------------------------------------------------------------------
# USEFUL FUNCTIONS
# ----------------------------------------------------------------------
def usage(output):
"""
Print the usage of this script on an output.
@param output is a string representing the output (for example: sys.stdout)
"""
output.write('import.py [options] where options are:\n')
output.write(' -i file Input file name [required] \n')
output.write(' -a file Authors Input file name [required if easychair] \n')
output.write(' -o output Output directory [required] \n')
output.write(' -s status Status number (0-4) [default=1=accepted]\n')
output.write(' -r reader name One of: sciencesconf or easychair [default=sciencesconf]\n')
output.write(' -S style name One of: basic, palme, nalte [default=basic]\n')
output.write(' -c compiler One of: pdflatex, xetex [default=pdflatex]\n')
output.write(' --nocsv Do not generate '+program+' CSV files\n')
output.write(' --notex Do not generate LaTeX files\n')
output.write(' --nohtml Do not generate HTML file\n')
output.write(' --help Print this help\n\n')
# End usage
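# Example invocation (sketch; file and directory names are placeholders):
#   python import.py -i abstracts.xml -o /tmp/proceedings -r sciencesconf -S basic -c pdflatex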
# ----------------------------------------------------------------------
def Quit(message=None, status=0, usageoutput=None):
"""
Quit the program with the appropriate exit status.
@param message is a text to communicate to the user on sys.stderr.
@param status is an integer of the status exit value.
@param usageoutput is a file descriptor.
"""
    if message: sys.stderr.write('import.py '+message)
if usageoutput: usage(usageoutput)
sys.exit(status)
# End Quit
# ----------------------------------------------------------------------
# --------------------------------------------------------------------------
# MAIN PROGRAM
# --------------------------------------------------------------------------
if __name__=="__main__":
# ----------------------------------------------------------------------
# Get all arguments, verify inputs.
# ----------------------------------------------------------------------
# Verify the program name and possibly some arguments
if len(sys.argv) == 1:
if not wxop:
# stop the program and print an error message
Quit(status=1, usageoutput=sys.stderr)
else:
app = wx.App(False)
ImportWizard(None)
app.MainLoop()
sys.exit(0)
# Get options (if any...)
try:
opts, args = getopt.getopt(sys.argv[1:], "i:a:o:s:r:S:c:", ["help", "nocsv", "notex", "nohtml"])
except getopt.GetoptError, err:
# Print help information and exit:
Quit(message="Error: "+str(err)+".\nUse option --help for any help.\n", status=1)
fileinput = None
authorsinput = None
output = None
extension = "tex"
status = 1 # only accepted papers
readername = "sciencesconf"
themename = "basic"
compiler = "pdflatex"
exportcsv = True
exporttex= True
exporthtml = True
# Extract options
for o, a in opts:
if o == "-i":
fileinput = a
elif o == "-a":
authorsinput = a
elif o == "-o":
output = a
elif o == "-s":
status = int(a)
elif o == "-r":
readername = a
elif o == "-S":
themename = a
elif o == "-c":
compiler = a
elif o == "--help": # need help
Quit(message='Help', status=0, usageoutput=sys.stdout)
elif o == "--nocsv":
exportcsv = False
elif o == "--notex":
exporttex = False
elif o == "--nohtml":
exporthtml = False
# Verify args
if fileinput is not None:
if not os.path.exists(fileinput):
Quit(message="Error: BAD input file name: "+fileinput+"\n", status=1)
else:
Quit(message="Error: an input is required.\n.", status=1, usageoutput=sys.stderr)
if output is None:
Quit(message="Error: an output is required.\n.", status=1, usageoutput=sys.stderr)
if readername == "easychair" and not authorsinput:
Quit(message="With easychair, an input file with authors is required.", status=1, usageoutput=sys.stderr)
try:
term = TerminalController()
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}')
print term.render('${RED}'+program+' - Version '+version+'${NORMAL}')
print term.render('${BLUE}'+copyright+'${NORMAL}')
print term.render('${BLUE}'+url+'${NORMAL}')
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}\n')
except:
print '-----------------------------------------------------------------------\n'
print program+' - Version '+version
print copyright
print url+'\n'
print '-----------------------------------------------------------------------\n'
# ----------------------------------------------------------------------
p = TextProgress()
# ----------------------------------------------------------------------
# Load input data
# ----------------------------------------------------------------------
arguments = {}
arguments['readername'] = readername
arguments['filename'] = fileinput
arguments['authorsfilename'] = authorsinput
arguments['progress'] = p
reader = Reader( arguments )
# ----------------------------------------------------------------------
# Write output data (with default parameters)
# ----------------------------------------------------------------------
# Create preferences
prefs = Preferences()
theme = all_themes.get_theme(themename.lower())
prefs.SetTheme( theme )
prefs.SetValue('COMPILER', 'str', compiler.strip())
# Create the Writer
writer = Writer( reader.docs )
writer.set_status( status )
writer.set_progress( p )
# Write abstracts as LaTeX
if exporttex:
writer.writeLaTeX_as_Dir( output, prefs, tocompile=True )
# Write proceed native CSV files
if exportcsv:
writer.writeCSV( output )
# Write html file
if exporthtml:
writer.writeHTML( output+".html" )
# Done
try:
term = TerminalController()
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}')
print term.render('${RED}Result is in '+output)
print term.render('${GREEN}Thank you for using '+program+".")
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}\n')
except:
print ('-----------------------------------------------------------------------\n')
print "Result is in "+output+".\nThank you for using "+program+"."
print ('-----------------------------------------------------------------------\n')
# ----------------------------------------------------------------------
| gpl-3.0 | -6,585,036,941,953,271,000 | 36.044776 | 119 | 0.463638 | false | 4.512727 | false | false | false |
zvolsky/muzika | models/menu.py | 1 | 6740 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.logo = A(B('web',SPAN(2),'py'),XML('™ '),
_class="brand",_href="http://www.web2py.com/")
response.title = request.application.replace('_',' ').title()
response.subtitle = ''
## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = 'Your Name <[email protected]>'
response.meta.keywords = 'web2py, python, framework'
response.meta.generator = 'Web2py Web Framework'
## your http://google.com/analytics id
response.google_analytics_id = None
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
(T('Rozpis'), False, URL('default', 'index'), [
(T('Můj rozpis'), False, URL('default', 'index'), []),
]),
]
if auth.has_membership('rozpis'):
response.menu[0][3].append((T('Celkový rozpis'), False, URL('default', 'rozpis'), []))
if auth.has_membership('admin'):
response.menu.append((T('Číselníky'), False, None, [
(T('Práva uživatelů'), False, URL('plugin_manage_groups', 'index'), []),
(T('Muzikanti (uživatelé)'), False, URL('default', 'muzikanti'), []),
(T('Místa'), False, URL('default', 'mista'), []),
]))
DEVELOPMENT_MENU = True
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
def _():
# shortcuts
app = request.application
ctr = request.controller
# useful links to internal and external resources
response.menu += [
(SPAN('web2py', _class='highlighted'), False, 'http://web2py.com', [
(T('My Sites'), False, URL('admin', 'default', 'site')),
(T('This App'), False, URL('admin', 'default', 'design/%s' % app), [
(T('Controller'), False,
URL(
'admin', 'default', 'edit/%s/controllers/%s.py' % (app, ctr))),
(T('View'), False,
URL(
'admin', 'default', 'edit/%s/views/%s' % (app, response.view))),
(T('Layout'), False,
URL(
'admin', 'default', 'edit/%s/views/layout.html' % app)),
(T('Stylesheet'), False,
URL(
'admin', 'default', 'edit/%s/static/css/web2py.css' % app)),
(T('DB Model'), False,
URL(
'admin', 'default', 'edit/%s/models/db.py' % app)),
(T('Menu Model'), False,
URL(
'admin', 'default', 'edit/%s/models/menu.py' % app)),
(T('Database'), False, URL(app, 'appadmin', 'index')),
(T('Errors'), False, URL(
'admin', 'default', 'errors/' + app)),
(T('About'), False, URL(
'admin', 'default', 'about/' + app)),
]),
('web2py.com', False, 'http://www.web2py.com', [
(T('Download'), False,
'http://www.web2py.com/examples/default/download'),
(T('Support'), False,
'http://www.web2py.com/examples/default/support'),
(T('Demo'), False, 'http://web2py.com/demo_admin'),
(T('Quick Examples'), False,
'http://web2py.com/examples/default/examples'),
(T('FAQ'), False, 'http://web2py.com/AlterEgo'),
(T('Videos'), False,
'http://www.web2py.com/examples/default/videos/'),
(T('Free Applications'),
False, 'http://web2py.com/appliances'),
(T('Plugins'), False, 'http://web2py.com/plugins'),
(T('Layouts'), False, 'http://web2py.com/layouts'),
(T('Recipes'), False, 'http://web2pyslices.com/'),
(T('Semantic'), False, 'http://web2py.com/semantic'),
]),
(T('Documentation'), False, 'http://www.web2py.com/book', [
(T('Preface'), False,
'http://www.web2py.com/book/default/chapter/00'),
(T('Introduction'), False,
'http://www.web2py.com/book/default/chapter/01'),
(T('Python'), False,
'http://www.web2py.com/book/default/chapter/02'),
(T('Overview'), False,
'http://www.web2py.com/book/default/chapter/03'),
(T('The Core'), False,
'http://www.web2py.com/book/default/chapter/04'),
(T('The Views'), False,
'http://www.web2py.com/book/default/chapter/05'),
(T('Database'), False,
'http://www.web2py.com/book/default/chapter/06'),
(T('Forms and Validators'), False,
'http://www.web2py.com/book/default/chapter/07'),
(T('Email and SMS'), False,
'http://www.web2py.com/book/default/chapter/08'),
(T('Access Control'), False,
'http://www.web2py.com/book/default/chapter/09'),
(T('Services'), False,
'http://www.web2py.com/book/default/chapter/10'),
(T('Ajax Recipes'), False,
'http://www.web2py.com/book/default/chapter/11'),
(T('Components and Plugins'), False,
'http://www.web2py.com/book/default/chapter/12'),
(T('Deployment Recipes'), False,
'http://www.web2py.com/book/default/chapter/13'),
(T('Other Recipes'), False,
'http://www.web2py.com/book/default/chapter/14'),
(T('Buy this book'), False,
'http://stores.lulu.com/web2py'),
]),
(T('Community'), False, None, [
(T('Groups'), False,
'http://www.web2py.com/examples/default/usergroups'),
(T('Twitter'), False, 'http://twitter.com/web2py'),
(T('Live Chat'), False,
'http://webchat.freenode.net/?channels=web2py'),
]),
(T('Plugins'), False, None, [
('plugin_wiki', False,
'http://web2py.com/examples/default/download'),
(T('Other Plugins'), False,
'http://web2py.com/plugins'),
(T('Layout Plugins'),
False, 'http://web2py.com/layouts'),
])
]
)]
if DEVELOPMENT_MENU: _()
if "auth" in locals(): auth.wikimenu()
| agpl-3.0 | -2,985,467,640,629,348,000 | 43.562914 | 90 | 0.480755 | false | 3.705396 | false | false | false |
dtbcoinlab/dtbcoin | share/qt/make_spinner.py | 1 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
DTC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(DTC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
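# Assemble the frames into a single MNG animation with ImageMagick's convert:
# -delay sets the per-frame delay in ticks (1/100 s by default) and
# -dispose 2 clears each frame to the background before drawing the next one.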
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit | 131,144,194,758,330,020 | 23.069767 | 85 | 0.691787 | false | 2.820163 | false | false | false |
Kapiche/gcloud-datastore-oem | gcloudoem/exceptions.py | 1 | 8700 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# CHANGED BY Kapiche Ltd.
# Copyright 2015 Kapiche Ltd. All rights reserved.
# Based on work by the good folk responsible for gcloud-python. Thanks folks!
#
"""
Custom exceptions.
"""
from collections import defaultdict
import json
import six
_HTTP_CODE_TO_EXCEPTION = {} # populated at end of module
class GCloudError(Exception):
"""Base error class for gcloud errors (abstract).
Each subclass represents a single type of HTTP error response.
"""
code = None
"""HTTP status code. Concrete subclasses *must* define.
See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
"""
def __init__(self, message, errors=()):
super(GCloudError, self).__init__()
# suppress deprecation warning under 2.6.x
self.message = message
self._errors = [error.copy() for error in errors]
def __str__(self):
return '%d %s' % (self.code, self.message)
@property
def errors(self):
"""Detailed error information.
:rtype: list(dict)
:returns: a list of mappings describing each error.
"""
return [error.copy() for error in self._errors]
class Redirection(GCloudError):
"""Base for 3xx responses
This class is abstract.
"""
class MovedPermanently(Redirection):
"""Exception mapping a '301 Moved Permanently' response."""
code = 301
class NotModified(Redirection):
"""Exception mapping a '304 Not Modified' response."""
code = 304
class TemporaryRedirect(Redirection):
"""Exception mapping a '307 Temporary Redirect' response."""
code = 307
class ResumeIncomplete(Redirection):
"""Exception mapping a '308 Resume Incomplete' response."""
code = 308
class ClientError(GCloudError):
"""Base for 4xx responses
This class is abstract
"""
class BadRequest(ClientError):
"""Exception mapping a '400 Bad Request' response."""
code = 400
class Unauthorized(ClientError):
"""Exception mapping a '401 Unauthorized' response."""
code = 401
class Forbidden(ClientError):
"""Exception mapping a '403 Forbidden' response."""
code = 403
class NotFound(ClientError):
"""Exception mapping a '404 Not Found' response."""
code = 404
class MethodNotAllowed(ClientError):
"""Exception mapping a '405 Method Not Allowed' response."""
code = 405
class Conflict(ClientError):
"""Exception mapping a '409 Conflict' response."""
code = 409
class LengthRequired(ClientError):
"""Exception mapping a '411 Length Required' response."""
code = 411
class PreconditionFailed(ClientError):
"""Exception mapping a '412 Precondition Failed' response."""
code = 412
class RequestRangeNotSatisfiable(ClientError):
"""Exception mapping a '416 Request Range Not Satisfiable' response."""
code = 416
class TooManyRequests(ClientError):
"""Exception mapping a '429 Too Many Requests' response."""
code = 429
class ServerError(GCloudError):
"""Base for 5xx responses: (abstract)"""
class InternalServerError(ServerError):
"""Exception mapping a '500 Internal Server Error' response."""
code = 500
class NotImplemented(ServerError):
"""Exception mapping a '501 Not Implemented' response."""
code = 501
class ServiceUnavailable(ServerError):
"""Exception mapping a '503 Service Unavailable' response."""
code = 503
def make_exception(response, content, use_json=True):
"""
Factory: create exception based on HTTP response code.
:type response: :class:`httplib2.Response` or other HTTP response object
:param response: A response object that defines a status code as the status attribute.
:type content: string or dictionary
:param content: The body of the HTTP error response.
:type use_json: boolean
:param use_json: Flag indicating if ``content`` is expected to be JSON.
:rtype: instance of :class:`GCloudError`, or a concrete subclass.
:returns: Exception specific to the error response.
"""
message = content
errors = ()
if isinstance(content, str):
if use_json:
payload = json.loads(content)
else:
payload = {}
else:
payload = content
message = payload.get('message', message)
errors = payload.get('error', {}).get('errors', ())
try:
klass = _HTTP_CODE_TO_EXCEPTION[response.status]
except KeyError:
error = GCloudError(message, errors)
error.code = response.status
else:
error = klass(message, errors)
return error
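# Usage sketch (``response`` is anything exposing a ``status`` attribute, e.g.
# an httplib2.Response; the JSON body below is made up):
#
#     error = make_exception(response, '{"message": "no such key"}')
#     # with response.status == 404 this yields a NotFound instance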
def _walk_subclasses(klass):
"""Recursively walk subclass tree."""
for sub in klass.__subclasses__():
yield sub
for subsub in _walk_subclasses(sub):
yield subsub
# Build the code->exception class mapping.
for eklass in _walk_subclasses(GCloudError):
code = getattr(eklass, 'code', None)
if code is not None:
_HTTP_CODE_TO_EXCEPTION[code] = eklass
class ValidationError(AssertionError):
"""
Validation exception.
May represent an error validating a field or a document containing fields with validation errors.
:ivar errors: A dictionary of errors for fields within this document or list, or None if the error is for an
individual field.
"""
errors = {}
field_name = None
_message = None
def __init__(self, message="", **kwargs):
self.errors = kwargs.get('errors', {})
self.field_name = kwargs.get('field_name', None)
self.message = message
def __str__(self):
return six.text_type(self.message)
def __repr__(self):
return '%s(%s,)' % (self.__class__.__name__, self.message)
def __getattribute__(self, name):
message = super(ValidationError, self).__getattribute__(name)
if name == 'message':
if self.field_name:
message = '%s' % message
if self.errors:
message = '%s(%s)' % (message, self._format_errors())
return message
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
def to_dict(self):
"""
Returns a dictionary of all errors within a entity.
Keys are field names or list indices and values are the validation error messages, or a nested dictionary of
errors for an embedded document or list.
"""
def build_dict(source):
errors_dict = {}
if not source:
return errors_dict
if isinstance(source, dict):
for field_name, error in source.items():
errors_dict[field_name] = build_dict(error)
elif isinstance(source, ValidationError) and source.errors:
return build_dict(source.errors)
else:
return six.text_type(source)
return errors_dict
if not self.errors:
return {}
return build_dict(self.errors)
def _format_errors(self):
"""Returns a string listing all errors within a document"""
def generate_key(value, prefix=''):
if isinstance(value, list):
value = ' '.join([generate_key(k) for k in value])
if isinstance(value, dict):
value = ' '.join(
[generate_key(v, k) for k, v in value.items()])
results = "%s.%s" % (prefix, value) if prefix else value
return results
error_dict = defaultdict(list)
for k, v in self.to_dict().items():
error_dict[generate_key(v)].append(k)
return ' '.join(["%s: %s" % (k, v) for k, v in error_dict.items()])
class InvalidQueryError(Exception):
"""Invalid Datastore query."""
pass
class EnvironmentError(Exception):
"""Generally means that connect() wasn't called."""
pass
class DoesNotExist(Exception):
pass
class MultipleObjectsReturned(Exception):
pass
class ConnectionError(Exception):
pass
| apache-2.0 | -6,143,890,875,748,414,000 | 25.934985 | 116 | 0.63931 | false | 4.270987 | false | false | false |
llekn/ffado | admin/pyuic4.py | 1 | 1532 | #!/usr/bin/python
#
# Copyright (C) 2007-2008 Arnold Krille
#
# This file is part of FFADO
# FFADO = Free Firewire (pro-)audio drivers for linux
#
# FFADO is based upon FreeBoB.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import imp
def pyuic4_action( target, source, env ):
env.Execute( "pyuic4 " + str( source[0] ) + " > " + str( target[0] ) )
return 0
def pyuic4_string( target, source, env ):
return "building '%s' from '%s'" % ( str(target[0]), str( source[0] ) )
def PyQt4Check( context ):
context.Message( "Checking for pyuic4 (by checking for the python module pyqtconfig) " )
ret = True
try:
imp.find_module( "pyqtconfig" )
except ImportError:
ret = False
context.Result( ret )
return ret
def generate( env, **kw ):
env['BUILDERS']['PyUIC4'] = env.Builder( action=pyuic4_action, src_suffix=".ui", single_source=True )
env['PYUIC4_TESTS'] = { "PyQt4Check" : PyQt4Check }
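# Typical use from an SConscript once the tool is loaded (sketch; the .ui file
# name is a placeholder): env.PyUIC4('mywidget.ui') runs pyuic4 over the Qt
# Designer file and captures the generated Python code as the target.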
def exists( env ):
return 1
| gpl-2.0 | -256,141,468,672,272,600 | 29.64 | 102 | 0.706266 | false | 3.171843 | false | false | false |
moto-timo/ironpython3 | Src/Scripts/generate_calls.py | 1 | 26135 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
import sys
from generate import generate
MAX_ARGS = 16
def make_params(nargs, *prefix):
params = ["object arg%d" % i for i in range(nargs)]
return ", ".join(list(prefix) + params)
def make_params1(nargs, prefix=("CodeContext context",)):
params = ["object arg%d" % i for i in range(nargs)]
return ", ".join(list(prefix) + params)
def make_args(nargs, *prefix):
params = ["arg%d" % i for i in range(nargs)]
return ", ".join(list(prefix) + params)
def make_args1(nargs, prefix, start=0):
args = ["arg%d" % i for i in range(start, nargs)]
return ", ".join(list(prefix) + args)
def make_calltarget_type_args(nargs):
return ', '.join(['PythonFunction'] + ['object'] * (nargs + 1))
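# For illustration: make_calltarget_type_args(2) yields
# 'PythonFunction, object, object, object' -- the function, two arguments and
# the object return type used in the generated Func<> signatures.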
def gen_args_comma(nparams, comma):
args = ""
for i in range(nparams):
args = args + comma + ("object arg%d" % i)
comma = ", "
return args
def gen_args(nparams):
return gen_args_comma(nparams, "")
def gen_args_call(nparams, *prefix):
args = ""
comma = ""
for i in range(nparams):
args = args + comma +("arg%d" % i)
comma = ", "
if prefix:
if args:
args = prefix[0] + ', ' + args
else:
args = prefix[0]
return args
def gen_args_array(nparams):
args = gen_args_call(nparams)
if args: return "{ " + args + " }"
else: return "{ }"
def gen_callargs(nparams):
args = ""
comma = ""
for i in range(nparams):
args = args + comma + ("callArgs[%d]" % i)
comma = ","
return args
def gen_args_paramscall(nparams):
args = ""
comma = ""
for i in range(nparams):
args = args + comma + ("args[%d]" % i)
comma = ","
return args
method_caller_template = """
class MethodBinding<%(typeParams)s> : BaseMethodBinding {
private CallSite<Func<CallSite, CodeContext, object, object, %(typeParams)s, object>> _site;
public MethodBinding(PythonInvokeBinder binder) {
_site = CallSite<Func<CallSite, CodeContext, object, object, %(typeParams)s, object>>.Create(binder);
}
public object SelfTarget(CallSite site, CodeContext context, object target, %(callParams)s) {
Method self = target as Method;
if (self != null && self._inst != null) {
return _site.Target(_site, context, self._func, self._inst, %(callArgs)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, target, %(callArgs)s);
}
public object SelflessTarget(CallSite site, CodeContext context, object target, object arg0, %(callParamsSelfless)s) {
Method self = target as Method;
if (self != null && self._inst == null) {
return _site.Target(_site, context, self._func, PythonOps.MethodCheckSelf(context, self, arg0), %(callArgsSelfless)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, object, %(typeParams)s, object>>)site).Update(site, context, target, arg0, %(callArgsSelfless)s);
}
public override Delegate GetSelfTarget() {
return new Func<CallSite, CodeContext, object, %(typeParams)s, object>(SelfTarget);
}
public override Delegate GetSelflessTarget() {
return new Func<CallSite, CodeContext, object, object, %(typeParams)s, object>(SelflessTarget);
}
}"""
def method_callers(cw):
for nparams in range(1, MAX_ARGS-3):
cw.write(method_caller_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'callParams': ', '.join(('T%d arg%d' % (d,d) for d in range(nparams))),
'callParamsSelfless': ', '.join(('T%d arg%d' % (d,d+1) for d in range(nparams))),
'callArgsSelfless' : ', '.join(('arg%d' % (d+1) for d in range(nparams))),
'argCount' : nparams,
'callArgs': ', '.join(('arg%d' % d for d in range(nparams))),
'genFuncArgs' : make_calltarget_type_args(nparams),
})
def selfless_method_caller_switch(cw):
cw.enter_block('switch (typeArgs.Length)')
for i in range(1, MAX_ARGS-3):
cw.write('case %d: binding = (BaseMethodBinding)Activator.CreateInstance(typeof(MethodBinding<%s>).MakeGenericType(typeArgs), binder); break;' % (i, ',' * (i-1)))
cw.exit_block()
function_caller_template = """
public sealed class FunctionCaller<%(typeParams)s> : FunctionCaller {
public FunctionCaller(int compat) : base(compat) { }
public object Call%(argCount)d(CallSite site, CodeContext context, object func, %(callParams)s) {
PythonFunction pyfunc = func as PythonFunction;
if (pyfunc != null && pyfunc._compat == _compat) {
return ((Func<%(genFuncArgs)s>)pyfunc.__code__.Target)(pyfunc, %(callArgs)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, func, %(callArgs)s);
}"""
defaults_template = """
public object Default%(defaultCount)dCall%(argCount)d(CallSite site, CodeContext context, object func, %(callParams)s) {
PythonFunction pyfunc = func as PythonFunction;
if (pyfunc != null && pyfunc._compat == _compat) {
int defaultIndex = pyfunc.Defaults.Length - pyfunc.NormalArgumentCount + %(argCount)d;
return ((Func<%(genFuncArgs)s>)pyfunc.__code__.Target)(pyfunc, %(callArgs)s, %(defaultArgs)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, func, %(callArgs)s);
}"""
defaults_template_0 = """
public object Default%(argCount)dCall0(CallSite site, CodeContext context, object func) {
PythonFunction pyfunc = func as PythonFunction;
if (pyfunc != null && pyfunc._compat == _compat) {
int defaultIndex = pyfunc.Defaults.Length - pyfunc.NormalArgumentCount;
return ((Func<%(genFuncArgs)s>)pyfunc.__code__.Target)(pyfunc, %(defaultArgs)s);
}
return ((CallSite<Func<CallSite, CodeContext, object, object>>)site).Update(site, context, func);
}"""
def function_callers(cw):
cw.write('''class FunctionCallerProperties {
internal const int MaxGeneratedFunctionArgs = %d;
}''' % (MAX_ARGS-2))
cw.write('')
for nparams in range(1, MAX_ARGS-2):
cw.write(function_caller_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'callParams': ', '.join(('T%d arg%d' % (d,d) for d in range(nparams))),
'argCount' : nparams,
'callArgs': ', '.join(('arg%d' % d for d in range(nparams))),
'genFuncArgs' : make_calltarget_type_args(nparams),
})
for i in range(nparams + 1, MAX_ARGS - 2):
cw.write(defaults_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'callParams': ', '.join(('T%d arg%d' % (d,d) for d in range(nparams))),
'argCount' : nparams,
'totalParamCount' : i,
'callArgs': ', '.join(('arg%d' % d for d in range(nparams))),
'defaultCount' : i - nparams,
'defaultArgs' : ', '.join(('pyfunc.Defaults[defaultIndex + %d]' % curDefault for curDefault in range(i - nparams))),
'genFuncArgs' : make_calltarget_type_args(i),
})
cw.write('}')
def function_callers_0(cw):
for i in range(1, MAX_ARGS - 2):
cw.write(defaults_template_0 % {
'argCount' : i,
'defaultArgs' : ', '.join(('pyfunc.Defaults[defaultIndex + %d]' % curDefault for curDefault in range(i))),
'genFuncArgs' : make_calltarget_type_args(i),
})
function_caller_switch_template = """case %(argCount)d:
callerType = typeof(FunctionCaller<%(arity)s>).MakeGenericType(typeParams);
mi = callerType.GetMethod(baseName + "Call%(argCount)d");
Debug.Assert(mi != null);
fc = GetFunctionCaller(callerType, funcCompat);
funcType = typeof(Func<,,,,%(arity)s>).MakeGenericType(allParams);
return new Binding.FastBindResult<T>((T)(object)mi.CreateDelegate(funcType, fc), true);"""
def function_caller_switch(cw):
for nparams in range(1, MAX_ARGS-2):
cw.write(function_caller_switch_template % {
'arity' : ',' * (nparams - 1),
'argCount' : nparams,
})
def gen_lazy_call_targets(cw):
for nparams in range(MAX_ARGS):
cw.enter_block("public static object OriginalCallTarget%d(%s)" % (nparams, make_params(nparams, "PythonFunction function")))
cw.write("function.__code__.LazyCompileFirstTarget(function);")
cw.write("return ((Func<%s>)function.__code__.Target)(%s);" % (make_calltarget_type_args(nparams), gen_args_call(nparams, 'function')))
cw.exit_block()
cw.write('')
def gen_recursion_checks(cw):
for nparams in range(MAX_ARGS):
cw.enter_block("internal class PythonFunctionRecursionCheck%d" % (nparams, ))
cw.write("private readonly Func<%s> _target;" % (make_calltarget_type_args(nparams), ))
cw.write('')
cw.enter_block('public PythonFunctionRecursionCheck%d(Func<%s> target)' % (nparams, make_calltarget_type_args(nparams)))
cw.write('_target = target;')
cw.exit_block()
cw.write('')
cw.enter_block('public object CallTarget(%s)' % (make_params(nparams, "PythonFunction/*!*/ function"), ))
cw.write('PythonOps.FunctionPushFrame((PythonContext)function.Context.LanguageContext);')
cw.enter_block('try')
cw.write('return _target(%s);' % (gen_args_call(nparams, 'function'), ))
cw.finally_block()
cw.write('PythonOps.FunctionPopFrame();')
cw.exit_block()
cw.exit_block()
cw.exit_block()
cw.write('')
def gen_recursion_delegate_switch(cw):
for nparams in range(MAX_ARGS):
cw.case_label('case %d:' % nparams)
cw.write('finalTarget = new Func<%s>(new PythonFunctionRecursionCheck%d((Func<%s>)finalTarget).CallTarget);' % (make_calltarget_type_args(nparams), nparams, make_calltarget_type_args(nparams)))
cw.write('break;')
cw.dedent()
def get_call_type(postfix):
if postfix == "": return "CallType.None"
else: return "CallType.ImplicitInstance"
def make_call_to_target(cw, index, postfix, extraArg):
cw.enter_block("public override object Call%(postfix)s(%(params)s)", postfix=postfix,
params=make_params1(index))
cw.write("if (target%(index)d != null) return target%(index)d(%(args)s);", index=index,
args = make_args1(index, extraArg))
cw.write("throw BadArgumentError(%(callType)s, %(nargs)d);", callType=get_call_type(postfix), nargs=index)
cw.exit_block()
def make_call_to_targetX(cw, index, postfix, extraArg):
cw.enter_block("public override object Call%(postfix)s(%(params)s)", postfix=postfix,
params=make_params1(index))
cw.write("return target%(index)d(%(args)s);", index=index, args = make_args1(index, extraArg))
cw.exit_block()
def make_error_calls(cw, index):
cw.enter_block("public override object Call(%(params)s)", params=make_params1(index))
cw.write("throw BadArgumentError(CallType.None, %(nargs)d);", nargs=index)
cw.exit_block()
if index > 0:
cw.enter_block("public override object CallInstance(%(params)s)", params=make_params1(index))
cw.write("throw BadArgumentError(CallType.ImplicitInstance, %(nargs)d);", nargs=index)
cw.exit_block()
def gen_call(nargs, nparams, cw, extra=[]):
args = extra + ["arg%d" % i for i in range(nargs)]
cw.enter_block("public override object Call(%s)" % make_params1(nargs))
# first emit error checking...
ndefaults = nparams-nargs
if nargs != nparams:
cw.write("if (Defaults.Length < %d) throw BadArgumentError(%d);" % (ndefaults,nargs))
# emit the common case of no recursion check
if (nargs == nparams):
cw.write("if (!EnforceRecursion) return target(%s);" % ", ".join(args))
else:
dargs = args + ["Defaults[Defaults.Length - %d]" % i for i in range(ndefaults, 0, -1)]
cw.write("if (!EnforceRecursion) return target(%s);" % ", ".join(dargs))
# emit non-common case of recursion check
cw.write("PushFrame();")
cw.enter_block("try")
# make function body
if (nargs == nparams):
cw.write("return target(%s);" % ", ".join(args))
else:
dargs = args + ["Defaults[Defaults.Length - %d]" % i for i in range(ndefaults, 0, -1)]
cw.write("return target(%s);" % ", ".join(dargs))
cw.finally_block()
cw.write("PopFrame();")
cw.exit_block()
cw.exit_block()
def gen_params_callN(cw, any):
cw.enter_block("public override object Call(CodeContext context, params object[] args)")
cw.write("if (!IsContextAware) return Call(args);")
cw.write("")
cw.enter_block("if (Instance == null)")
cw.write("object[] newArgs = new object[args.Length + 1];")
cw.write("newArgs[0] = context;")
cw.write("Array.Copy(args, 0, newArgs, 1, args.Length);")
cw.write("return Call(newArgs);")
cw.else_block()
# need to call w/ Context, Instance, *args
if any:
cw.enter_block("switch (args.Length)")
for i in range(MAX_ARGS-1):
if i == 0:
cw.write(("case %d: if(target2 != null) return target2(context, Instance); break;") % (i))
else:
cw.write(("case %d: if(target%d != null) return target%d(context, Instance, " + gen_args_paramscall(i) + "); break;") % (i, i+2, i+2))
cw.exit_block()
cw.enter_block("if (targetN != null)")
cw.write("object [] newArgs = new object[args.Length+2];")
cw.write("newArgs[0] = context;")
cw.write("newArgs[1] = Instance;")
cw.write("Array.Copy(args, 0, newArgs, 2, args.Length);")
cw.write("return targetN(newArgs);")
cw.exit_block()
cw.write("throw BadArgumentError(args.Length);")
cw.exit_block()
else:
cw.write("object [] newArgs = new object[args.Length+2];")
cw.write("newArgs[0] = context;")
cw.write("newArgs[1] = Instance;")
cw.write("Array.Copy(args, 0, newArgs, 2, args.Length);")
cw.write("return target(newArgs);")
cw.exit_block()
cw.exit_block()
cw.write("")
CODE = """
public static object Call(%(params)s) {
FastCallable fc = func as FastCallable;
if (fc != null) return fc.Call(%(args)s);
return PythonCalls.Call(func, %(argsArray)s);
}"""
def gen_python_switch(cw):
for nparams in range(MAX_ARGS):
genArgs = make_calltarget_type_args(nparams)
cw.write("""case %d:
originalTarget = (Func<%s>)OriginalCallTarget%d;
return typeof(Func<%s>);""" % (nparams, genArgs, nparams, genArgs))
fast_type_call_template = """
class FastBindingBuilder<%(typeParams)s> : FastBindingBuilderBase {
public FastBindingBuilder(CodeContext context, PythonType type, PythonInvokeBinder binder, Type siteType, Type[] genTypeArgs) :
base(context, type, binder, siteType, genTypeArgs) {
}
protected override Delegate GetNewSiteDelegate(PythonInvokeBinder binder, object func) {
return new Func<%(newInitDlgParams)s>(new NewSite<%(typeParams)s>(binder, func).Call);
}
protected override Delegate MakeDelegate(int version, Delegate newDlg, LateBoundInitBinder initBinder) {
return new Func<%(funcParams)s>(
new FastTypeSite<%(typeParams)s>(
version,
(Func<%(newInitDlgParams)s>)newDlg,
initBinder
).CallTarget
);
}
}
class FastTypeSite<%(typeParams)s> {
private readonly int _version;
private readonly Func<%(newInitDlgParams)s> _new;
private readonly CallSite<Func<%(nestedSlowSiteParams)s>> _initSite;
public FastTypeSite(int version, Func<%(newInitDlgParams)s> @new, LateBoundInitBinder initBinder) {
_version = version;
_new = @new;
_initSite = CallSite<Func<%(nestedSlowSiteParams)s>>.Create(initBinder);
}
public object CallTarget(CallSite site, CodeContext context, object type, %(callTargetArgs)s) {
PythonType pt = type as PythonType;
if (pt != null && pt.Version == _version) {
object res = _new(context, type, %(callTargetPassedArgs)s);
_initSite.Target(_initSite, context, res, %(callTargetPassedArgs)s);
return res;
}
return ((CallSite<Func<%(funcParams)s>>)site).Update(site, context, type, %(callTargetPassedArgs)s);
}
}
class NewSite<%(typeParams)s> {
private readonly CallSite<Func<%(nestedSiteParams)s>> _site;
private readonly object _target;
public NewSite(PythonInvokeBinder binder, object target) {
_site = CallSite<Func<%(nestedSiteParams)s>>.Create(binder);
_target = target;
}
public object Call(CodeContext context, object typeOrInstance, %(callTargetArgs)s) {
return _site.Target(_site, context, _target, typeOrInstance, %(callTargetPassedArgs)s);
}
}
"""
def gen_fast_type_callers(cw):
for nparams in range(1, 6):
funcParams = 'CallSite, CodeContext, object, ' + ', '.join(('T%d' % d for d in range(nparams))) + ', object'
newInitDlgParams = 'CodeContext, object, ' + ', '.join(('T%d' % d for d in range(nparams))) + ', object'
callTargetArgs = ', '.join(('T%d arg%d' % (d, d) for d in range(nparams)))
callTargetPassedArgs = ', '.join(('arg%d' % (d, ) for d in range(nparams)))
nestedSiteParams = 'CallSite, CodeContext, object, object, ' + ', '.join(('T%d' % d for d in range(nparams))) + ', object'
nestedSlowSiteParams = 'CallSite, CodeContext, object, ' + ', '.join(('T%d' % d for d in range(nparams))) + ', object'
cw.write(fast_type_call_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'funcParams' : funcParams,
'newInitDlgParams' : newInitDlgParams,
'callTargetArgs' : callTargetArgs,
'callTargetPassedArgs': callTargetPassedArgs,
'nestedSiteParams' : nestedSiteParams,
'nestedSlowSiteParams' : nestedSlowSiteParams,
})
def gen_fast_type_caller_switch(cw):
for nparams in range(1, 6):
cw.write('case %d: baseType = typeof(FastBindingBuilder<%s>); break;' % (nparams, (',' * (nparams - 1))))
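        # Illustrative note (not in the original source): for nparams == 2 the
        # line above emits
        #   case 2: baseType = typeof(FastBindingBuilder<,>); break;
        # i.e. the generic arity is expressed purely by comma placeholders.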
fast_init_template = """
class FastInitSite<%(typeParams)s> {
private readonly int _version;
private readonly PythonFunction _slot;
private readonly CallSite<Func<CallSite, CodeContext, PythonFunction, object, %(typeParams)s, object>> _initSite;
public FastInitSite(int version, PythonInvokeBinder binder, PythonFunction target) {
_version = version;
_slot = target;
_initSite = CallSite<Func<CallSite, CodeContext, PythonFunction, object, %(typeParams)s, object>>.Create(binder);
}
public object CallTarget(CallSite site, CodeContext context, object inst, %(callParams)s) {
IPythonObject pyObj = inst as IPythonObject;
if (pyObj != null && pyObj.PythonType.Version == _version) {
_initSite.Target(_initSite, context, _slot, inst, %(callArgs)s);
return inst;
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, inst, %(callArgs)s);
}
public object EmptyCallTarget(CallSite site, CodeContext context, object inst, %(callParams)s) {
IPythonObject pyObj = inst as IPythonObject;
if ((pyObj != null && pyObj.PythonType.Version == _version) || DynamicHelpers.GetPythonType(inst).Version == _version) {
return inst;
}
return ((CallSite<Func<CallSite, CodeContext, object, %(typeParams)s, object>>)site).Update(site, context, inst, %(callArgs)s);
}
}
"""
MAX_FAST_INIT_ARGS = 6
def gen_fast_init_callers(cw):
for nparams in range(1, MAX_FAST_INIT_ARGS):
callParams = ', '.join(('T%d arg%d' % (d, d) for d in range(nparams)))
callArgs = ', '.join(('arg%d' % (d, ) for d in range(nparams)))
cw.write(fast_init_template % {
'typeParams' : ', '.join(('T%d' % d for d in range(nparams))),
'callParams' : callParams,
'callArgs': callArgs,
})
def gen_fast_init_switch(cw):
for nparams in range(1, MAX_FAST_INIT_ARGS):
cw.write("case %d: initSiteType = typeof(FastInitSite<%s>); break;" % (nparams, ',' * (nparams-1), ))
def gen_fast_init_max_args(cw):
cw.write("public const int MaxFastLateBoundInitArgs = %d;" % MAX_FAST_INIT_ARGS)
MAX_INSTRUCTION_PROVIDED_CALLS = 7
def gen_call_expression_instruction_switch(cw):
for i in range(MAX_INSTRUCTION_PROVIDED_CALLS):
cw.case_label('case %d:' % i)
cw.write('compiler.Compile(Parent.LocalContext);')
cw.write('compiler.Compile(_target);')
for j in range(i):
cw.write('compiler.Compile(_args[%d].Expression);' % j)
cw.write('compiler.Instructions.Emit(new Invoke%dInstruction(Parent.PyContext));' % i)
cw.write('return;')
cw.dedent()
def gen_call_expression_instructions(cw):
for i in range(MAX_INSTRUCTION_PROVIDED_CALLS):
siteargs = 'object, ' * (i + 1)
argfetch = '\n'.join([' var arg%d = frame.Pop();' % (j-1) for j in range(i, 0, -1)])
callargs = ', '.join(['target'] + ['arg%d' % j for j in range(i)])
cw.write("""
class Invoke%(argcount)dInstruction : InvokeInstruction {
private readonly CallSite<Func<CallSite, CodeContext, %(siteargs)sobject>> _site;
public Invoke%(argcount)dInstruction(PythonContext context) {
_site = context.CallSite%(argcount)d;
}
public override int ConsumedStack {
get {
return %(consumedCount)d;
}
}
public override int Run(InterpretedFrame frame) {
%(argfetch)s
var target = frame.Pop();
frame.Push(_site.Target(_site, (CodeContext)frame.Pop(), %(callargs)s));
return +1;
}
}""" % {'siteargs': siteargs, 'argfetch' : argfetch, 'callargs' : callargs, 'argcount' : i, 'consumedCount' : i + 2 })
def gen_shared_call_sites_storage(cw):
for i in range(MAX_INSTRUCTION_PROVIDED_CALLS):
siteargs = 'object, ' * (i + 1)
cw.writeline('private CallSite<Func<CallSite, CodeContext, %sobject>> _callSite%d;' % (siteargs, i))
def gen_shared_call_sites_properties(cw):
for i in range(MAX_INSTRUCTION_PROVIDED_CALLS):
siteargs = 'object, ' * (i + 1)
cw.enter_block('internal CallSite<Func<CallSite, CodeContext, %sobject>> CallSite%d' % (siteargs, i))
cw.enter_block('get')
cw.writeline('EnsureCall%dSite();' % i)
cw.writeline('return _callSite%d;' % i)
cw.exit_block()
cw.exit_block()
cw.writeline('')
cw.enter_block('private void EnsureCall%dSite()' % i)
cw.enter_block('if (_callSite%d == null)' % i)
cw.writeline('Interlocked.CompareExchange(')
cw.indent()
cw.writeline('ref _callSite%d,' % i)
cw.writeline('CallSite<Func<CallSite, CodeContext, %sobject>>.Create(Invoke(new CallSignature(%d))),' % (siteargs, i))
cw.writeline('null')
cw.dedent()
cw.writeline(');')
cw.exit_block()
cw.exit_block()
cw.writeline('')
def main():
return generate(
("Python Selfless Method Caller Switch", selfless_method_caller_switch),
("Python Method Callers", method_callers),
("Python Shared Call Sites Properties", gen_shared_call_sites_properties),
("Python Shared Call Sites Storage", gen_shared_call_sites_storage),
("Python Call Expression Instructions", gen_call_expression_instructions),
("Python Call Expression Instruction Switch", gen_call_expression_instruction_switch),
("Python Fast Init Max Args", gen_fast_init_max_args),
("Python Fast Init Switch", gen_fast_init_switch),
("Python Fast Init Callers", gen_fast_init_callers),
("Python Fast Type Caller Switch", gen_fast_type_caller_switch),
("Python Fast Type Callers", gen_fast_type_callers),
("Python Recursion Enforcement", gen_recursion_checks),
("Python Recursion Delegate Switch", gen_recursion_delegate_switch),
("Python Lazy Call Targets", gen_lazy_call_targets),
("Python Zero Arg Function Callers", function_callers_0),
("Python Function Callers", function_callers),
("Python Function Caller Switch", function_caller_switch),
("Python Call Target Switch", gen_python_switch),
)
if __name__ == "__main__":
main()
| apache-2.0 | 6,270,476,123,638,950,000 | 42.056013 | 201 | 0.601033 | false | 3.54854 | false | false | false |
waveform80/dbsuite | dbsuite/plugins/db2/zos/parser.py | 1 | 412770 | # vim: set et sw=4 sts=4:
# Copyright 2012 Dave Hughes.
#
# This file is part of dbsuite.
#
# dbsuite is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# dbsuite is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# dbsuite. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division,
)
from collections import namedtuple
from dbsuite.plugins.db2.zos.tokenizer import db2zos_namechars, db2zos_identchars
from dbsuite.parser import BaseParser, ParseError, ParseBacktrack, quote_str
from dbsuite.tokenizer import TokenTypes as TT, Token
# Standard size suffixes and multipliers
SUFFIX_KMG = {
'K': 1024**1,
'M': 1024**2,
'G': 1024**3,
}
# Default sizes for certain datatypes
CHAR_DEFAULT_SIZE = 1
BLOB_DEFAULT_SIZE = 1024*1024
DECIMAL_DEFAULT_SIZE = 5
DECIMAL_DEFAULT_SCALE = 0
DECFLOAT_DEFAULT_SIZE = 34
TIMESTAMP_DEFAULT_SIZE = 6
class DB2ZOSParser(BaseParser):
"""Reformatter which breaks up and re-indents DB2 for LUW's SQL dialect.
This class is, at its core, a full blown SQL language parser that
understands many common SQL DML and DDL commands (from the basic ones like
INSERT, UPDATE, DELETE, SELECT, to the more DB2 specific ones such as
CREATE TABLESPACE, CREATE FUNCTION, and dynamic compound statements).
"""
def __init__(self):
super(DB2ZOSParser, self).__init__()
self.namechars = db2zos_namechars
self.identchars = db2zos_identchars
self.current_schema = None
def _parse_init(self, tokens):
super(DB2ZOSParser, self)._parse_init(tokens)
self.current_schema = None
def _save_state(self):
# Override _save_state to save the current schema
self._states.append((
self._index,
self._level,
len(self._output),
self.current_schema
))
def _restore_state(self):
# Override _restore_state to restore the current schema
(
self._index,
self._level,
output_len,
self.current_schema
) = self._states.pop()
del self._output[output_len:]
def _parse_top(self):
# Override _parse_top to make a 'statement' the top of the parse tree
self._parse_statement()
def _prespace_default(self, template):
# Overridden to include array and set operators, and the specific
# intra-statement terminator used by func/proc definitions
return super(DB2ZOSParser, self)._prespace_default(template) and template not in (
']', '}', ';',
(TT.OPERATOR, ']'),
(TT.OPERATOR, '}'),
(TT.TERMINATOR, ';'),
)
def _postspace_default(self, template):
# Overridden to include array and set operators
return super(DB2ZOSParser, self)._postspace_default(template) and template not in (
'[', '{',
(TT.OPERATOR, '['),
(TT.OPERATOR, '{'),
)
# PATTERNS ###############################################################
def _parse_subrelation_name(self):
"""Parses the (possibly qualified) name of a relation-owned object.
A relation-owned object is either a column or a constraint. This method
parses such a name with up to two optional qualifiers (e.g., it is
possible in a SELECT statement with no table correlation clauses to
specify SCHEMA.TABLE.COLUMN). The method returns the parsed name as a
tuple with 3 elements (None is used for qualifiers which are missing).
"""
token1 = self._expect(TT.IDENTIFIER)
result = (None, None, token1.value)
if self._match('.'):
self._update_output(Token(TT.RELATION, token1.value, token1.source, token1.line, token1.column), -2)
token2 = self._expect(TT.IDENTIFIER)
result = (None, result[2], token2.value)
if self._match('.'):
self._update_output(Token(TT.SCHEMA, token1.value, token1.source, token1.line, token1.column), -4)
self._update_output(Token(TT.RELATION, token2.value, token2.source, token2.line, token2.column), -2)
token3 = self._expect(TT.IDENTIFIER)
result = (result[1], result[2], token3.value)
return result
_parse_column_name = _parse_subrelation_name
_parse_constraint_name = _parse_subrelation_name
# These are cheats; remote object names consist of server.schema.object
# instead of schema.relation.object, and source object names consist of
# schema.package.object, but they'll do
_parse_remote_object_name = _parse_subrelation_name
_parse_source_object_name = _parse_subrelation_name
    # These are also cheats; routine, type and variable names as of 9.7 are
# either [schema.]routine (1 or 2-part) or schema.module.routine (3-part)
_parse_function_name = _parse_subrelation_name
_parse_procedure_name = _parse_subrelation_name
_parse_method_name = _parse_subrelation_name
_parse_type_name = _parse_subrelation_name
_parse_variable_name = _parse_subrelation_name
def _parse_subschema_name(self):
"""Parses the (possibly qualified) name of a schema-owned object.
A schema-owned object is a table, view, index, function, sequence, etc.
This method parses such a name with an optional qualifier (the schema
name). The method returns the parsed name as a tuple with 2 elements
(None is used for the schema qualifier if it is missing).
"""
token1 = self._expect(TT.RELATION)
result = (None, token1.value)
if self._match('.'):
self._update_output(Token(TT.SCHEMA, token1.value, token1.source, token1.line, token1.column), -2)
token2 = self._expect(TT.RELATION)
result = (result[1], token2.value)
return result
_parse_relation_name = _parse_subschema_name
_parse_table_name = _parse_subschema_name
_parse_view_name = _parse_subschema_name
_parse_alias_name = _parse_subschema_name
_parse_nickname_name = _parse_subschema_name
_parse_trigger_name = _parse_subschema_name
_parse_index_name = _parse_subschema_name
_parse_routine_name = _parse_subschema_name
_parse_module_name = _parse_subschema_name
_parse_sequence_name = _parse_subschema_name
# Another cheat; security labels exist within a security policy
_parse_security_label_name = _parse_subschema_name
def _parse_size(self, optional=False, suffix={}):
"""Parses a parenthesized size with an optional scale suffix.
This method parses a parenthesized integer number. The optional
parameter controls whether an exception is raised if an opening
parenthesis is not encountered at the current input position. The
suffix parameter is a dictionary mapping suffix->multiplier. The global
constant SUFFIX_KMG defines a commonly used suffix mapping (K->1024,
M->1024**2, etc.)
"""
if optional:
if not self._match('(', prespace=False):
return None
else:
self._expect('(', prespace=False)
size = self._expect(TT.NUMBER)[1]
if suffix:
suf = self._match_one_of(suffix.keys())
if suf:
size *= suffix[suf[1]]
self._expect(')')
return size
def _parse_special_register(self):
"""Parses a special register (e.g. CURRENT_DATE)"""
if self._match((TT.REGISTER, 'CURRENT')):
if self._match((TT.REGISTER, 'TIMESTAMP')):
if self._match('('):
self._expect_sequence([TT.INTEGER, ')'])
elif self._match_one_of([
(TT.REGISTER, 'CLIENT_ACCTNG'),
(TT.REGISTER, 'CLIENT_APPLNAME'),
(TT.REGISTER, 'CLIENT_USERID'),
(TT.REGISTER, 'CLIENT_WRKSTNNAME'),
(TT.REGISTER, 'DATE'),
(TT.REGISTER, 'DBPARTITIONNUM'),
(TT.REGISTER, 'DEGREE'),
(TT.REGISTER, 'ISOLATION'),
(TT.REGISTER, 'NODE'),
(TT.REGISTER, 'PATH'),
(TT.REGISTER, 'SCHEMA'),
(TT.REGISTER, 'SERVER'),
(TT.REGISTER, 'SQLID'),
(TT.REGISTER, 'TIME'),
(TT.REGISTER, 'TIMEZONE'),
(TT.REGISTER, 'USER'),
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'DECFLOAT'),
(TT.REGISTER, 'ROUNDING'),
(TT.REGISTER, 'MODE')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'DEFAULT'),
(TT.REGISTER, 'TRANSFORM'),
(TT.REGISTER, 'GROUP')
]):
pass
elif self._match((TT.REGISTER, 'EXPLAIN')):
self._expect_one_of([
(TT.REGISTER, 'MODE'),
(TT.REGISTER, 'SNAPSHOT')
])
elif self._match_sequence([
(TT.REGISTER, 'FEDERATED'),
(TT.REGISTER, 'ASYNCHRONY')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'IMPLICIT'),
(TT.REGISTER, 'XMLPARSE'),
(TT.REGISTER, 'OPTION')]
):
pass
elif self._match_sequence([
(TT.REGISTER, 'LOCALE'),
(TT.REGISTER, 'LC_MESSAGES')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'LOCALE'),
(TT.REGISTER, 'LC_TIME')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'LOCK'),
(TT.REGISTER, 'TIMEOUT')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'MAINTAINED'),
(TT.REGISTER, 'TABLE'),
(TT.REGISTER, 'TYPES'),
(TT.REGISTER, 'FOR'),
(TT.REGISTER, 'OPTIMIZATION')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'MDC'),
(TT.REGISTER, 'ROLLOUT'),
(TT.REGISTER, 'MODE')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'OPTIMIZATION'),
(TT.REGISTER, 'PROFILE')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'PACKAGE'),
(TT.REGISTER, 'PATH')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'QUERY'),
(TT.REGISTER, 'OPTIMIZATION')
]):
pass
elif self._match_sequence([
(TT.REGISTER, 'REFRESH'),
(TT.REGISTER, 'AGE')
]):
pass
else:
self._expected((TT.REGISTER,))
elif self._match((TT.REGISTER, 'CLIENT')):
self._expect_one_of([
(TT.REGISTER, 'ACCTNG'),
(TT.REGISTER, 'APPLNAME'),
(TT.REGISTER, 'USERID'),
(TT.REGISTER, 'WRKSTNNAME'),
])
elif self._match((TT.REGISTER, 'CURRENT_TIMESTAMP')):
if self._match('('):
self._expect_sequence([TT.INTEGER, ')'])
else:
self._expect_one_of([
(TT.REGISTER, 'CURRENT_DATE'),
(TT.REGISTER, 'CURRENT_PATH'),
(TT.REGISTER, 'CURRENT_SCHEMA'),
(TT.REGISTER, 'CURRENT_SERVER'),
(TT.REGISTER, 'CURRENT_TIME'),
(TT.REGISTER, 'CURRENT_TIMEZONE'),
(TT.REGISTER, 'CURRENT_USER'),
(TT.REGISTER, 'SESSION_USER'),
(TT.REGISTER, 'SYSTEM_USER'),
(TT.REGISTER, 'USER'),
])
def _parse_datatype(self):
"""Parses a (possibly qualified) data type with optional arguments.
Parses a data type name with an optional qualifier (the schema name).
The method returns a tuple with the following structure:
(schema_name, type_name, size, scale)
If the type has no parameters size and/or scale may be None. If the
schema is not specified, schema_name is None, unless the type is a
builtin type in which case the schema_name will always be 'SYSIBM'
regardless of whether a schema was specified with the type in the
source.
"""
self._save_state()
try:
# Try and parse a built-in type
typeschema = 'SYSIBM'
size = None
scale = None
# Match the optional SYSIBM prefix
if self._match((TT.DATATYPE, 'SYSIBM')):
self._expect('.')
if self._match((TT.DATATYPE, 'SMALLINT')):
typename = 'SMALLINT'
elif self._match_one_of([(TT.DATATYPE, 'INT'), (TT.DATATYPE, 'INTEGER')]):
typename = 'INTEGER'
elif self._match((TT.DATATYPE, 'BIGINT')):
typename = 'BIGINT'
elif self._match((TT.DATATYPE, 'FLOAT')):
size = self._parse_size(optional=True)
if size is None or size > 24:
typename = 'DOUBLE'
else:
typename = 'REAL'
elif self._match((TT.DATATYPE, 'REAL')):
typename = 'REAL'
elif self._match((TT.DATATYPE, 'DOUBLE')):
self._match((TT.DATATYPE, 'PRECISION'))
typename = 'DOUBLE'
elif self._match((TT.DATATYPE, 'DECFLOAT')):
typename = 'DECFLOAT'
                size = self._parse_size(optional=True) or DECFLOAT_DEFAULT_SIZE
elif self._match_one_of([(TT.DATATYPE, 'DEC'), (TT.DATATYPE, 'DECIMAL')]):
typename = 'DECIMAL'
size = DECIMAL_DEFAULT_SIZE
scale = DECIMAL_DEFAULT_SCALE
if self._match('(', prespace=False):
size = self._expect(TT.NUMBER).value
if self._match(','):
scale = self._expect(TT.NUMBER).value
self._expect(')')
elif self._match_one_of([(TT.DATATYPE, 'NUM'), (TT.DATATYPE, 'NUMERIC')]):
typename = 'NUMERIC'
size = DECIMAL_DEFAULT_SIZE
scale = DECIMAL_DEFAULT_SCALE
if self._match('(', prespace=False):
size = self._expect(TT.NUMBER).value
if self._match(','):
scale = self._expect(TT.NUMBER).value
self._expect(')')
elif self._match_one_of([(TT.DATATYPE, 'CHAR'), (TT.DATATYPE, 'CHARACTER')]):
if self._match((TT.DATATYPE, 'VARYING')):
typename = 'VARCHAR'
size = self._parse_size(optional=False, suffix=SUFFIX_KMG)
self._match_sequence(['FOR', 'BIT', 'DATA'])
elif self._match_sequence([(TT.DATATYPE, 'LARGE'), (TT.DATATYPE, 'OBJECT')]):
typename = 'CLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
else:
typename = 'CHAR'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or CHAR_DEFAULT_SIZE
self._match_sequence(['FOR', 'BIT', 'DATA'])
elif self._match((TT.DATATYPE, 'VARCHAR')):
typename = 'VARCHAR'
size = self._parse_size(optional=False, suffix=SUFFIX_KMG)
self._match_sequence(['FOR', 'BIT', 'DATA'])
elif self._match((TT.DATATYPE, 'VARGRAPHIC')):
typename = 'VARGRAPHIC'
size = self._parse_size(optional=False)
elif self._match_sequence([(TT.DATATYPE, 'LONG'), (TT.DATATYPE, 'VARCHAR')]):
typename = 'LONG VARCHAR'
elif self._match_sequence([(TT.DATATYPE, 'LONG'), (TT.DATATYPE, 'VARGRAPHIC')]):
typename = 'LONG VARGRAPHIC'
elif self._match((TT.DATATYPE, 'CLOB')):
typename = 'CLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'BLOB')):
typename = 'BLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
elif self._match_sequence([(TT.DATATYPE, 'BINARY'), (TT.DATATYPE, 'LARGE'), (TT.DATATYPE, 'OBJECT')]):
typename = 'BLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'DBCLOB')):
typename = 'DBCLOB'
size = self._parse_size(optional=True, suffix=SUFFIX_KMG) or BLOB_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'GRAPHIC')):
typename = 'GRAPHIC'
size = self._parse_size(optional=True) or CHAR_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'NCHAR')):
typename = 'NCHAR'
if self._match((TT.DATATYPE, 'VARYING')):
typename = 'NVARCHAR'
size = self._parse_size(optional=False)
else:
typename = 'NCHAR'
size = self._parse_size(optional=True) or CHAR_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'NATIONAL')):
self._expect_one_of([(TT.DATATYPE, 'CHAR'), (TT.DATATYPE, 'CHARACTER')])
if self._match((TT.DATATYPE, 'VARYING')):
typename = 'NVARCHAR'
size = self._parse_size(optional=False)
else:
typename = 'NCHAR'
size = self._parse_size(optional=True) or CHAR_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'DATE')):
typename = 'DATE'
elif self._match((TT.DATATYPE, 'TIME')):
typename = 'TIME'
elif self._match((TT.DATATYPE, 'TIMESTAMP')):
typename = 'TIMESTAMP'
size = self._parse_size(optional=True) or TIMESTAMP_DEFAULT_SIZE
elif self._match((TT.DATATYPE, 'DATALINK')):
typename = 'DATALINK'
size = self._parse_size(optional=True)
elif self._match((TT.DATATYPE, 'XML')):
typename = 'XML'
elif self._match((TT.DATATYPE, 'DB2SECURITYLABEL')):
typeschema = 'SYSPROC'
typename = 'DB2SECURITYLABEL'
elif self._match((TT.DATATYPE, 'BOOLEAN')):
typename = 'BOOLEAN'
elif self._match((TT.DATATYPE, 'CURSOR')):
typename = 'CURSOR'
elif self._match((TT.DATATYPE, 'ARRAY')):
typename = 'ARRAY'
size = self._parse_size(optional=False, suffix=SUFFIX_KMG)
else:
raise ParseBacktrack()
except ParseError:
# If that fails, rewind and parse a user-defined type (user defined
# types do not have a size or scale)
self._restore_state()
typeschema = None
typename = self._expect(TT.DATATYPE).value
if self._match('.'):
typeschema = typename
typename = self._expect(TT.DATATYPE).value
size = None
scale = None
else:
self._forget_state()
return (typeschema, typename, size, scale)
def _parse_ident_list(self, newlines=False):
"""Parses a comma separated list of identifiers.
This is a common pattern in SQL, for example within parentheses on the
left hand side of an assignment in an UPDATE statement, or the INCLUDE
list of a CREATE UNIQUE INDEX statement.
The method returns a list of the identifiers seen (primarily useful for
counting the number of identifiers seen, but has other uses too).
"""
result = []
while True:
ident = self._expect(TT.IDENTIFIER).value
# Parse an optional array element suffix
if self._match('[', prespace=False):
self._parse_expression()
self._expect(']')
result.append(ident)
if not self._match(','):
break
elif newlines:
self._newline()
return result
def _parse_expression_list(self, allowdefault=False, newlines=False):
"""Parses a comma separated list of expressions.
This is a common pattern in SQL, for example the parameter list of
a function, the arguments of an ORDER BY clause, etc. The allowdefault
parameter indicates whether DEFAULT can appear in the list instead
of an expression (useful when parsing the VALUES clause of an INSERT
statement for example).
"""
while True:
if not (allowdefault and self._match('DEFAULT')):
self._parse_expression()
if not self._match(','):
break
elif newlines:
self._newline()
def _parse_datatype_list(self, newlines=False):
"""Parses a comma separated list of data-types.
This is another common pattern in SQL, found when trying to define
the prototype of a function or procedure without using the specific
name (and a few other places).
"""
while True:
self._parse_datatype()
if not self._match(','):
break
elif newlines:
self._newline()
def _parse_ident_type_list(self, newlines=False):
"""Parses a comma separated list of identifiers and data-types.
This is a common pattern in SQL, found in the prototype of SQL
functions, the INCLUDE portion of a SELECT-FROM-DML statement, etc.
"""
while True:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
if not self._match(','):
break
elif newlines:
self._newline()
def _parse_tuple(self, allowdefault=False):
"""Parses a full-select or a tuple (list) of expressions.
This is a common pattern found in SQL, for example on the right hand
side of the IN operator, in an UPDATE statement on the right hand side
        of a parenthesized column list, etc. The ambiguity is resolved by
        peeking at the token following the opening parenthesis: a leading
        SELECT or VALUES keyword indicates a full-select, and anything else is
        parsed as a list of expressions.
        The allowdefault parameter is propagated to _parse_expression_list. See
        _parse_expression_list for more detail.
"""
# Opening parenthesis already matched
if self._peek_one_of(['SELECT', 'VALUES']):
# Parse a full-select
self._indent()
self._parse_full_select()
self._outdent()
else:
# Everything else (including a redundantly parenthesized
# full-select) can be parsed as an expression list
self._parse_expression_list(allowdefault)
# EXPRESSIONS and PREDICATES #############################################
def _parse_search_condition(self, newlines=True):
"""Parse a search condition (as part of WHERE/HAVING/etc.)"""
while True:
self._match('NOT')
# Ambiguity: open parentheses could indicate a parentheiszed search
# condition, or a parenthesized expression within a predicate
self._save_state()
try:
# Attempt to parse a parenthesized search condition
self._expect('(')
self._parse_search_condition(newlines)
self._expect(')')
except ParseError:
# If that fails, rewind and parse a predicate instead (which
# will parse a parenthesized expression)
self._restore_state()
self._parse_predicate()
if self._match('SELECTIVITY'):
self._expect(TT.NUMBER)
else:
self._forget_state()
if self._match_one_of(['AND', 'OR']):
if newlines:
self._newline(-1)
else:
break
def _parse_predicate(self):
"""Parse high precedence predicate operators (BETWEEN, IN, etc.)"""
if self._match('EXISTS'):
self._expect('(')
self._parse_full_select()
self._expect(')')
else:
self._parse_expression()
if self._match('NOT'):
if self._match('LIKE'):
self._parse_expression()
if self._match('ESCAPE'):
self._parse_expression()
elif self._match('BETWEEN'):
self._parse_expression()
self._expect('AND')
self._parse_expression()
elif self._match('IN'):
if self._match('('):
self._parse_tuple()
self._expect(')')
else:
self._parse_expression()
else:
self._expected_one_of(['LIKE', 'BETWEEN', 'IN'])
elif self._match('LIKE'):
self._parse_expression()
if self._match('ESCAPE'):
self._parse_expression()
elif self._match('BETWEEN'):
self._parse_expression()
self._expect('AND')
self._parse_expression()
elif self._match('IN'):
if self._match('('):
self._parse_tuple()
self._expect(')')
else:
self._parse_expression()
elif self._match('IS'):
self._match('NOT')
if self._match('VALIDATED'):
if self._match('ACCORDING'):
self._expect_sequence(['TO', 'XMLSCHEMA'])
if self._match('IN'):
self._expect('(')
while True:
self._parse_xml_schema_identification()
if not self._match(','):
break
self._expect(')')
else:
self._parse_xml_schema_identification()
else:
self._expect_one_of(['NULL', 'VALIDATED'])
elif self._match('XMLEXISTS'):
self._expect('(')
self._expect(TT.STRING)
if self._match('PASSING'):
self._match_sequence(['BY', 'REF'])
while True:
self._parse_expression()
self._expect_sequence(['AS', TT.IDENTIFIER])
self._match_sequence(['BY', 'REF'])
if not self._match(','):
break
self._expect(')')
elif self._match_one_of(['=', '<', '>', '<>', '<=', '>=']):
if self._match_one_of(['SOME', 'ANY', 'ALL']):
self._expect('(')
self._parse_full_select()
self._expect(')')
else:
self._parse_expression()
else:
self._expected_one_of([
'EXISTS',
'NOT',
'LIKE',
'BETWEEN',
'IS',
'IN',
'=',
'<',
'>',
'<>',
'<=',
'>='
])
def _parse_duration_label(self, optional=False):
labels = (
'YEARS',
'YEAR',
'DAYS',
'DAY',
'MONTHS',
'MONTH',
'HOURS',
'HOUR',
'MINUTES',
'MINUTE',
'SECONDS',
'SECOND',
'MICROSECONDS',
'MICROSECOND',
)
if optional:
self._match_one_of(labels)
else:
self._expect_one_of(labels)
def _parse_expression(self):
while True:
self._match_one_of(['+', '-'], postspace=False) # Unary +/-
if self._match('('):
self._parse_tuple()
self._expect(')')
elif self._match('CAST'):
self._parse_cast_expression()
elif self._match('XMLCAST'):
self._parse_cast_expression()
elif self._match('CASE'):
if self._match('WHEN'):
self._parse_searched_case()
else:
self._parse_simple_case()
elif self._match_sequence(['NEXT', 'VALUE', 'FOR']) or self._match_sequence(['NEXTVAL', 'FOR']):
self._parse_sequence_name()
elif self._match_sequence(['PREVIOUS', 'VALUE', 'FOR']) or self._match_sequence(['PREVVAL', 'FOR']):
self._parse_sequence_name()
elif self._match_sequence(['ROW', 'CHANGE']):
self._expect_one_of(['TOKEN', 'TIMESTAMP'])
self._expect('FOR')
self._parse_table_name()
elif self._match_one_of([TT.NUMBER, TT.STRING, TT.PARAMETER, 'NULL']): # Literals
pass
else:
# Ambiguity: an identifier could be a register, a function
# call, a column name, etc.
self._save_state()
try:
self._parse_function_call()
except ParseError:
self._restore_state()
self._save_state()
try:
self._parse_special_register()
except ParseError:
self._restore_state()
self._parse_column_name()
else:
self._forget_state()
else:
self._forget_state()
# Parse an optional array element suffix
if self._match('[', prespace=False):
self._parse_expression()
self._expect(']')
# Parse an optional interval suffix
self._parse_duration_label(optional=True)
if not self._match_one_of(['+', '-', '*', '/', '||', 'CONCAT']): # Binary operators
break
def _parse_function_call(self):
"""Parses a function call of various types"""
# Ambiguity: certain functions have "abnormal" internal syntaxes (extra
# keywords, etc). The _parse_scalar_function_call method is used to
# handle all "normal" syntaxes. Special methods are tried first for
# everything else
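        # Illustrative example (not from the original source): a call such as
        # XMLELEMENT(NAME "foo", C1) is claimed by _parse_xml_function_call,
        # whereas LENGTH(C1) falls through to _parse_scalar_function_call.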
self._save_state()
try:
self._parse_aggregate_function_call()
except ParseError:
self._restore_state()
self._save_state()
try:
self._parse_olap_function_call()
except ParseError:
self._restore_state()
self._save_state()
try:
self._parse_xml_function_call()
except ParseError:
self._restore_state()
self._save_state()
try:
self._parse_sql_function_call()
except ParseError:
self._restore_state()
self._parse_scalar_function_call()
else:
self._forget_state()
else:
self._forget_state()
else:
self._forget_state()
else:
self._forget_state()
def _parse_aggregate_function_call(self):
"""Parses an aggregate function with it's optional arg-prefix"""
# Parse the optional SYSIBM schema prefix
if self._match('SYSIBM'):
self._expect('.')
# Although CORRELATION and GROUPING are aggregate functions they're not
# included here as their syntax is entirely compatible with "ordinary"
# functions so _parse_scalar_function_call will handle them
aggfunc = self._expect_one_of([
'ARRAY_AGG',
'COUNT',
'COUNT_BIG',
'AVG',
'MAX',
'MIN',
'STDDEV',
'SUM',
'VARIANCE',
'VAR',
]).value
self._expect('(', prespace=False)
if aggfunc in ('COUNT', 'COUNT_BIG') and self._match('*'):
# COUNT and COUNT_BIG can take '*' as a sole parameter
pass
elif aggfunc == 'ARRAY_AGG':
self._parse_expression()
if self._match_sequence(['ORDER', 'BY']):
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
else:
# The aggregation functions handled by this method have an optional
# ALL/DISTINCT argument prefix
self._match_one_of(['ALL', 'DISTINCT'])
# And only take a single expression as an argument
self._parse_expression()
self._expect(')')
# Parse an OLAP suffix if one exists
if self._match('OVER'):
self._parse_olap_window_clause()
def _parse_olap_function_call(self):
"""Parses an OLAP function call (some of which have non-standard internal syntax)"""
if self._match('SYSIBM'):
self._expect('.')
olapfunc = self._expect_one_of([
'ROW_NUMBER',
'RANK',
'DENSE_RANK',
'LAG',
'LEAD',
'FIRST_VALUE',
'LAST_VALUE',
]).value
self._expect('(', prespace=False)
if olapfunc in ('LAG', 'LEAD'):
self._parse_expression()
if self._match(','):
self._expect(TT.NUMBER)
                if self._match(','):
self._parse_expression()
if self._match(','):
self._expect_one_of([(TT.STRING, 'RESPECT NULLS'), (TT.STRING, 'IGNORE NULLS')])
elif olapfunc in ('FIRST_VALUE', 'LAST_VALUE'):
self._parse_expression()
if self._match(','):
self._expect_one_of([(TT.STRING, 'RESPECT NULLS'), (TT.STRING, 'IGNORE NULLS')])
self._expect(')')
self._expect('OVER')
self._parse_olap_window_clause()
def _parse_xml_function_call(self):
"""Parses an XML function call (which has non-standard internal syntax)"""
# Parse the optional SYSIBM schema prefix
if self._match('SYSIBM'):
self._expect('.')
# Note that XML2CLOB (compatibility), XMLCOMMENT, XMLCONCAT,
# XMLDOCUMENT, XMLTEXT, and XMLXSROBJECTID aren't handled by this
# method as their syntax is "normal" so _parse_scalar_function_call
# will handle them
xmlfunc = self._expect_one_of([
'XMLAGG',
'XMLATTRIBUTES',
'XMLELEMENT',
'XMLFOREST',
'XMLGROUP',
'XMLNAMESPACES',
'XMLPARSE',
'XMLPI',
'XMLQUERY',
'XMLROW',
'XMLSERIALIZE',
'XMLVALIDATE',
'XMLTABLE',
'XMLTRANSFORM',
]).value
self._expect('(', prespace=False)
if xmlfunc == 'XMLAGG':
self._parse_expression()
if self._match_sequence(['ORDER', 'BY']):
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
elif xmlfunc == 'XMLATTRIBUTES':
while True:
self._parse_expression()
if self._match('AS'):
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
elif xmlfunc == 'XMLELEMENT':
self._expect('NAME')
self._expect(TT.IDENTIFIER)
if self._match(','):
# XXX We're not specifically checking for namespaces and
# attributes calls as we should here (although expression_list
# will parse them just fine)
self._parse_expression_list()
if self._match('OPTION'):
self._parse_xml_value_option()
elif xmlfunc == 'XMLFOREST':
while True:
# XXX We're not specifically checking for a namespaces call as
# we should here (although expression will parse it just fine)
self._parse_expression()
self._match_sequence(['AS', TT.IDENTIFIER])
if not self._match(','):
break
if self._match('OPTION'):
self._parse_xml_value_option()
elif xmlfunc == 'XMLGROUP':
while True:
self._parse_expression()
if self._match('AS'):
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
if self._match_sequence(['ORDER', 'BY']):
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
if self._match('OPTION'):
self._parse_xml_row_option(allowroot=True)
elif xmlfunc == 'XMLNAMESPACES':
while True:
if self._match('DEFAULT'):
self._expect(TT.STRING)
elif self._match('NO'):
self._expect_sequence(['DEFAULT', TT.STRING])
else:
self._expect_sequence([TT.STRING, 'AS', TT.IDENTIFIER])
if not self._match(','):
break
elif xmlfunc == 'XMLPARSE':
self._expect_sequence(['DOCUMENT', TT.STRING])
if self._match_one_of(['STRIP', 'PRESERVE']):
self._expect('WHITESPACE')
elif xmlfunc == 'XMLPI':
self._expect_sequence(['NAME', TT.IDENTIFIER])
if self._match(','):
self._expect(TT.STRING)
elif xmlfunc == 'XMLQUERY':
self._expect(TT.STRING)
if self._match('PASSING'):
self._match_sequence(['BY', 'REF'])
while True:
self._parse_expression()
self._expect_sequence(['AS', TT.IDENTIFIER])
self._match_sequence(['BY', 'REF'])
if not self._match(','):
break
if self._match('RETURNING'):
self._expect('SEQUENCE')
self._match_sequence(['BY', 'REF'])
self._match_sequence(['EMPTY', 'ON', 'EMPTY'])
elif xmlfunc == 'XMLROW':
while True:
self._parse_expression()
self._match_sequence(['AS', TT.IDENTIFIER])
if not self._match(','):
break
if self._match('OPTION'):
self._parse_xml_row_option(allowroot=False)
elif xmlfunc == 'XMLSERIALIZE':
self._match('CONTENT')
self._parse_expression()
self._expect('AS')
# XXX Data type can only be CHAR/VARCHAR/CLOB
self._parse_datatype()
valid = set(['VERSION', 'INCLUDING', 'EXCLUDING'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'VERSION':
self._expect(TT.STRING)
elif t == 'INCLUDING':
valid.remove('EXCLUDING')
self._expect('XMLDECLARATION')
elif t == 'EXCLUDING':
valid.remove('INCLUDING')
self._expect('XMLDECLARATION')
elif xmlfunc == 'XMLVALIDATE':
self._match('DOCUMENT')
self._parse_expression()
if self._match('ACCORDING'):
self._expect_sequence(['TO', 'XMLSCHEMA'])
self._parse_xml_schema_identification()
if self._match('NAMESPACE'):
self._expect(TT.STRING)
elif self._match('NO'):
self._expect('NAMESPACE')
self._match_sequence(['ELEMENT', TT.IDENTIFIER])
elif xmlfunc == 'XMLTABLE':
self._parse_expression()
if self._match(','):
self._expect(TT.STRING)
if self._match('PASSING'):
self._match_sequence(['BY', 'REF'])
while True:
self._parse_expression()
self._expect_sequence(['AS', TT.IDENTIFIER])
self._match_sequence(['BY', 'REF'])
if not self._match(','):
break
if self._match('COLUMNS'):
while True:
self._expect(TT.IDENTIFIER)
if not self._match_sequence(['FOR', 'ORDINALITY']):
self._parse_datatype()
self._match_sequence(['BY', 'REF'])
if self._match('DEFAULT'):
self._parse_expression()
if self._match('PATH'):
self._expect(TT.STRING)
if not self._match(','):
break
elif xmlfunc == 'XMLTRANSFORM':
self._parse_expression()
self._expect('USING')
self._parse_expression()
if self._match('WITH'):
self._parse_expression()
if self._match('AS'):
self._parse_datatype()
self._expect(')')
def _parse_xml_schema_identification(self):
"""Parses an identifier for an XML schema"""
# ACCORDING TO XMLSCHEMA already matched
if self._match('ID'):
self._parse_subschema_name()
else:
if self._match('URI'):
self._expect(TT.STRING)
elif self._match('NO'):
self._expect('NAMESPACE')
else:
self._expected_one_of(['ID', 'URI', 'NO'])
self._match_sequence(['LOCATION', TT.STRING])
def _parse_xml_row_option(self, allowroot=False):
"""Parses an XML OPTION suffix for rows in certain XML function calls"""
# OPTION already matched
valid = set(['ROW', 'AS'])
if allowroot:
valid.add('ROOT')
while valid:
            t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t in ('ROW', 'ROOT'):
self._expect(TT.IDENTIFIER)
elif t == 'AS':
self._expect('ATTRIBUTES')
def _parse_xml_value_option(self):
"""Parses an XML OPTION suffix for scalar values in certain XML function calls"""
# OPTION already matched
valid = set(['EMPTY', 'NULL', 'XMLBINARY'])
while valid:
            t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'EMPTY':
valid.remove('NULL')
self._expect_sequence(['ON', 'NULL'])
elif t == 'NULL':
valid.remove('EMPTY')
self._expect_sequence(['ON', 'NULL'])
elif t == 'XMLBINARY':
self._match('USING')
self._expect_one_of(['BASE64', 'HEX'])
def _parse_sql_function_call(self):
"""Parses scalar function calls with abnormal internal syntax (usually as dictated by the SQL standard)"""
# Parse the optional SYSIBM schema prefix
if self._match('SYSIBM'):
self._expect('.')
# Note that only the "special" syntax of functions is handled here.
# Most of these functions will also accept "normal" syntax. In that
# case, this method will raise a parse error and the caller will
# backtrack to handle the function as normal with
# _parse_scalar_function_call
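        # Illustrative example (not from the original source):
        # SUBSTRING(C1 FROM 2 FOR 3 USING OCTETS) is parsed here, whereas
        # SUBSTRING(C1, 2, 3) raises a ParseError and is re-parsed by
        # _parse_scalar_function_call via the caller's backtracking.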
sqlfunc = self._expect_one_of([
'CHAR_LENGTH',
'CHARACTER_LENGTH',
'OVERLAY',
'POSITION',
'SUBSTRING',
'TRIM',
]).value
self._expect('(', prespace=False)
if sqlfunc in ('CHAR_LENGTH', 'CHARACTER_LENGTH'):
self._parse_expression()
if self._match('USING'):
self._expect_one_of(['CODEUNITS16', 'CODEUNITS32', 'OCTETS'])
elif sqlfunc == 'OVERLAY':
self._parse_expression()
self._expect('PLACING')
self._parse_expression()
self._expect('FROM')
self._parse_expression()
if self._match('FOR'):
self._parse_expression()
self._expect('USING')
self._expect_one_of(['CODEUNITS16', 'CODEUNITS32', 'OCTETS'])
elif sqlfunc == 'POSITION':
self._parse_expression()
self._expect('IN')
self._parse_expression()
self._expect('USING')
self._expect_one_of(['CODEUNITS16', 'CODEUNITS32', 'OCTETS'])
elif sqlfunc == 'SUBSTRING':
self._parse_expression()
self._expect('FROM')
self._parse_expression()
if self._match('FOR'):
self._parse_expression()
self._expect('USING')
self._expect_one_of(['CODEUNITS16', 'CODEUNITS32', 'OCTETS'])
elif sqlfunc == 'TRIM':
if self._match_one_of(['BOTH', 'B', 'LEADING', 'L', 'TRAILING', 'T']):
self._match(TT.STRING)
self._expect('FROM')
self._parse_expression()
self._expect(')')
def _parse_scalar_function_call(self):
"""Parses a scalar function call with all its arguments"""
self._parse_function_name()
self._expect('(', prespace=False)
if not self._match(')'):
self._parse_expression_list()
self._expect(')')
def _parse_olap_range(self, optional):
"""Parses a ROWS or RANGE specification in an OLAP-function call"""
# [ROWS|RANGE] already matched
if self._match('CURRENT'):
self._expect('ROW')
elif self._match_one_of(['UNBOUNDED', TT.NUMBER]):
self._expect_one_of(['PRECEDING', 'FOLLOWING'])
elif not optional:
self._expected_one_of(['CURRENT', 'UNBOUNDED', TT.NUMBER])
else:
return False
return True
def _parse_olap_window_clause(self):
"""Parses the aggregation suffix in an OLAP-function call"""
# OVER already matched
self._expect('(')
if not self._match(')'):
self._indent()
if self._match('PARTITION'):
self._expect('BY')
self._parse_expression_list()
if self._match('ORDER'):
self._newline(-1)
self._expect('BY')
while True:
if self._match('ORDER'):
self._expect('OF')
self._parse_table_name()
else:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if self._match('NULLS'):
self._expect_one_of(['FIRST', 'LAST'])
if not self._match(','):
break
if self._match_one_of(['ROWS', 'RANGE']):
if not self._parse_olap_range(True):
self._expect('BETWEEN')
self._parse_olap_range(False)
self._expect('AND')
self._parse_olap_range(False)
self._outdent()
self._expect(')')
def _parse_cast_expression(self):
"""Parses a CAST() expression"""
# CAST already matched
self._expect('(', prespace=False)
self._parse_expression()
self._expect('AS')
self._parse_datatype()
if self._match('SCOPE'):
self._parse_relation_name()
self._expect(')')
def _parse_searched_case(self):
"""Parses a searched CASE expression (CASE WHEN expression...)"""
# CASE WHEN already matched
# Parse all WHEN cases
self._indent(-1)
while True:
self._parse_search_condition(newlines=False) # WHEN Search condition
self._expect('THEN')
self._parse_expression() # THEN Expression
if self._match('WHEN'):
self._newline(-1)
elif self._match('ELSE'):
self._newline(-1)
break
elif self._match('END'):
self._outdent(-1)
return
else:
self._expected_one_of(['WHEN', 'ELSE', 'END'])
# Parse the optional ELSE case
self._parse_expression() # ELSE Expression
self._outdent()
self._expect('END')
def _parse_simple_case(self):
"""Parses a simple CASE expression (CASE expression WHEN value...)"""
# CASE already matched
# Parse the CASE Expression
self._parse_expression() # CASE Expression
# Parse all WHEN cases
self._indent()
self._expect('WHEN')
while True:
self._parse_expression() # WHEN Expression
self._expect('THEN')
self._parse_expression() # THEN Expression
if self._match('WHEN'):
self._newline(-1)
elif self._match('ELSE'):
self._newline(-1)
break
elif self._match('END'):
self._outdent(-1)
return
else:
self._expected_one_of(['WHEN', 'ELSE', 'END'])
# Parse the optional ELSE case
self._parse_expression() # ELSE Expression
self._outdent()
self._expect('END')
def _parse_column_expression(self):
"""Parses an expression representing a column in a SELECT expression"""
if not self._match_sequence([TT.IDENTIFIER, '.', '*']):
self._parse_expression()
# Parse optional column alias
if self._match('AS'):
self._expect(TT.IDENTIFIER)
# Ambiguity: FROM and INTO can legitimately appear in this
# position as a KEYWORD (which the IDENTIFIER match below would
# accept)
elif not self._peek_one_of(['FROM', 'INTO']):
self._match(TT.IDENTIFIER)
def _parse_grouping_expression(self):
"""Parses a grouping-expression in a GROUP BY clause"""
if not self._match_sequence(['(', ')']):
self._parse_expression()
def _parse_super_group(self):
"""Parses a super-group in a GROUP BY clause"""
# [ROLLUP|CUBE] already matched
self._expect('(')
self._indent()
while True:
if self._match('('):
self._parse_expression_list()
self._expect(')')
else:
self._parse_expression()
if not self._match(','):
break
else:
self._newline()
self._outdent()
self._expect(')')
def _parse_grouping_sets(self):
"""Parses a GROUPING SETS expression in a GROUP BY clause"""
# GROUPING SETS already matched
self._expect('(')
self._indent()
while True:
if self._match('('):
while True:
if self._match_one_of(['ROLLUP', 'CUBE']):
self._parse_super_group()
else:
self._parse_grouping_expression()
if not self._match(','):
break
self._expect(')')
elif self._match_one_of(['ROLLUP', 'CUBE']):
self._parse_super_group()
else:
self._parse_grouping_expression()
if not self._match(','):
break
else:
self._newline()
self._outdent()
self._expect(')')
def _parse_group_by(self):
"""Parses the grouping-expression-list of a GROUP BY clause"""
# GROUP BY already matched
alt_syntax = True
while True:
if self._match('GROUPING'):
self._expect('SETS')
self._parse_grouping_sets()
alt_syntax = False
elif self._match_one_of(['ROLLUP', 'CUBE']):
self._parse_super_group()
alt_syntax = False
else:
self._parse_grouping_expression()
if not self._match(','):
break
else:
self._newline()
# Ambiguity: the WITH used in the alternate syntax for super-groups
# can be mistaken for the WITH defining isolation level at the end
# of a query. Hence we must use a sequence match here...
if alt_syntax:
if not self._match_sequence(['WITH', 'ROLLUP']):
self._match_sequence(['WITH', 'CUBE'])
def _parse_sub_select(self, allowinto=False):
"""Parses a sub-select expression"""
# SELECT already matched
self._match_one_of(['ALL', 'DISTINCT'])
if not self._match('*'):
self._indent()
while True:
self._parse_column_expression()
if not self._match(','):
break
else:
self._newline()
self._outdent()
if allowinto and self._match('INTO'):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect('FROM')
self._indent()
while True:
self._parse_join_expression()
if not self._match(','):
break
else:
self._newline()
self._outdent()
if self._match('WHERE'):
self._indent()
self._parse_search_condition()
self._outdent()
if self._match_sequence(['GROUP', 'BY']):
self._indent()
self._parse_group_by()
self._outdent()
if self._match('HAVING'):
self._indent()
self._parse_search_condition()
self._outdent()
if self._match_sequence(['ORDER', 'BY']):
self._indent()
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
else:
self._newline()
self._outdent()
if self._match_sequence(['FETCH', 'FIRST']):
self._match(TT.NUMBER) # Row count is optional (defaults to 1)
self._expect_one_of(['ROW', 'ROWS'])
self._expect('ONLY')
def _parse_table_correlation(self, optional=True):
"""Parses a table correlation clause (with optional column alias list)"""
if optional:
# An optional table correlation is almost always ambiguous given
# that it can start with just about any identifier (the AS is
# always optional)
self._save_state()
try:
# Call ourselves recursively to try and parse the correlation
self._parse_table_correlation(False)
except ParseError:
# If it fails, rewind and return
self._restore_state()
else:
self._forget_state()
else:
if self._match('AS'):
self._expect(TT.IDENTIFIER)
# Ambiguity: Several KEYWORDs can legitimately appear in this
# position. XXX This is horrible - there /must/ be a cleaner way of
# doing this with states and backtracking
elif not self._peek_one_of([
'DO',
'EXCEPT',
'MINUS',
'FETCH',
'GROUP',
'HAVING',
'CROSS',
'LEFT',
'RIGHT',
'FULL',
'INNER',
'JOIN',
'NATURAL',
'INTERSECT',
'ON',
'ORDER',
'SET',
'TABLESAMPLE',
'UNION',
'USING',
'WHERE',
'WITH',
]):
self._expect(TT.IDENTIFIER)
# Parse optional column aliases
if self._match('('):
self._parse_ident_list()
self._expect(')')
def _parse_values_expression(self, allowdefault=False, allowinto=False):
"""Parses a VALUES expression"""
# VALUES already matched
self._indent()
while True:
if self._match('('):
self._parse_expression_list(allowdefault)
self._expect(')')
else:
if not (allowdefault and self._match('DEFAULT')):
self._parse_expression()
if self._match(','):
self._newline()
else:
break
self._outdent()
if allowinto and self._match('INTO'):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
def _parse_join_expression(self):
"""Parses join operators in a table-reference"""
self._parse_table_ref()
while True:
if self._match('CROSS'):
self._newline(-1)
self._expect('JOIN')
self._parse_table_ref()
elif self._match('INNER'):
self._newline(-1)
self._expect('JOIN')
self._parse_table_ref()
self._parse_join_condition()
elif self._match_one_of(['LEFT', 'RIGHT', 'FULL']):
self._newline(-1)
self._match('OUTER')
self._expect('JOIN')
self._parse_table_ref()
self._parse_join_condition()
elif self._match('JOIN'):
self._newline(-1)
self._parse_table_ref()
self._parse_join_condition()
else:
break
def _parse_lateral_options(self):
"""Parses the RETURN DATA UNTIL options of a LATERAL/TABLE reference"""
if self._match_sequence(['RETURN', 'DATA', 'UNTIL']):
while True:
self._expect_sequence(['FEDERATED', 'SQLSTATE'])
self._match('VALUE')
self._expect(TT.STRING)
if self._match('SQLCODE'):
while True:
self._expect(TT.NUMBER)
if not self._match(','):
break
if not self._match(','):
break
return True
else:
return False
def _parse_table_ref(self):
"""Parses literal table references or functions in a table-reference"""
# Ambiguity: A table or schema can be named TABLE, FINAL, OLD, etc.
reraise = False
self._save_state()
try:
if self._match('('):
# Ambiguity: Open-parenthesis could indicate a full-select or a
# join expression
self._save_state()
try:
# Try and parse a full-select
self._parse_full_select()
reraise = True
self._expect(')')
self._parse_table_correlation(optional=True)
except ParseError:
# If it fails, rewind and try a join expression instead
self._restore_state()
if reraise: raise
self._parse_join_expression()
self._expect(')')
else:
self._forget_state()
elif self._match('LATERAL'):
self._parse_lateral_options()
self._expect('(', prespace=False)
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_table_correlation(optional=True)
elif self._match('TABLE'):
lateral = self._parse_lateral_options()
self._expect('(', prespace=False)
# Ambiguity: TABLE() can indicate a table-function call or a
# nested table expression
self._save_state()
try:
# Try and parse a full-select
self._indent()
self._parse_full_select()
self._outdent()
except ParseError:
# If it fails, rewind and try a function call instead
self._restore_state()
if lateral: raise
self._parse_function_call()
else:
self._forget_state()
reraise = True
self._expect(')')
self._parse_table_correlation(optional=True)
elif self._match_one_of(['FINAL', 'NEW']):
self._expect('TABLE')
self._expect('(', prespace=False)
self._indent()
if self._expect_one_of(['INSERT', 'UPDATE']).value == 'INSERT':
self._parse_insert_statement()
else:
self._parse_update_statement()
reraise = True
self._outdent()
self._expect(')')
self._parse_table_correlation(optional=True)
elif self._match('OLD'):
self._expect('TABLE')
self._expect('(', prespace=False)
self._indent()
if self._expect_one_of(['UPDATE', 'DELETE']).value == 'DELETE':
self._parse_delete_statement()
else:
self._parse_update_statement()
reraise = True
self._outdent()
self._expect(')')
self._parse_table_correlation(optional=True)
elif self._match('UNNEST'):
self._expect('(', prespace=False)
self._indent()
while True:
if self._match('CAST'):
self._parse_cast_expression()
else:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
self._outdent()
self._expect(')')
self._parse_table_correlation(optional=False)
elif self._peek('XMLTABLE'):
# Bizarrely, the XMLTABLE table function can be used outside a
# TABLE() reference...
self._parse_xml_function_call()
else:
raise ParseBacktrack()
except ParseError:
# If the above fails, rewind and try a simple table reference
self._restore_state()
if reraise: raise
self._parse_table_name()
self._parse_table_correlation(optional=True)
if self._match('TABLESAMPLE'):
self._expect_one_of(['BERNOULLI', 'SYSTEM'])
self._expect('(')
self._parse_expression()
self._expect(')')
if self._match('REPEATABLE'):
self._expect('(')
self._parse_expression()
self._expect(')')
else:
self._forget_state()
def _parse_join_condition(self):
"""Parses the condition on an SQL-92 style join"""
        # This method can be extended to support USING(ident-list) if this
        # is ever added to DB2 (see PostgreSQL)
self._indent()
self._expect('ON')
self._parse_search_condition()
self._outdent()
def _parse_full_select(self, allowdefault=False, allowinto=False):
"""Parses set operators (low precedence) in a full-select expression"""
self._parse_relation(allowdefault, allowinto)
while True:
if self._match_one_of(['UNION', 'INTERSECT', 'EXCEPT', 'MINUS']):
self._newline(-1)
self._newline(-1, allowempty=True)
self._match('ALL')
self._newline()
self._newline(allowempty=True)
# No need to include allowinto here (it's only permitted in a
# top-level subselect)
self._parse_relation(allowdefault)
else:
break
if self._match('ORDER'):
self._expect('BY')
while True:
self._parse_expression()
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
if self._match('FETCH'):
self._expect('FIRST')
self._match(TT.NUMBER) # Row count is optional (defaults to 1)
self._expect_one_of(['ROW', 'ROWS'])
self._expect('ONLY')
def _parse_relation(self, allowdefault=False, allowinto=False):
"""Parses relation generators (high precedence) in a full-select expression"""
# XXX Add support for the TABLE statement from the SQL standard
if self._match('('):
self._indent()
# No need to include allowinto here (it's only permitted in a
# top-level subselect)
self._parse_full_select(allowdefault)
self._outdent()
self._expect(')')
elif self._match('SELECT'):
self._parse_sub_select(allowinto)
elif self._match('VALUES'):
self._parse_values_expression(allowdefault, allowinto)
else:
self._expected_one_of(['SELECT', 'VALUES', '('])
def _parse_query(self, allowdefault=False, allowinto=False):
"""Parses a full-select with optional common-table-expression"""
# Parse the optional common-table-expression
if self._match('WITH'):
while True:
self._expect(TT.IDENTIFIER)
# Parse the optional column-alias list
if self._match('('):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
self._expect('AS')
self._expect('(')
self._indent()
# No need to include allowdefault or allowinto here. Neither
# are ever permitted in a CTE
self._parse_full_select()
self._outdent()
self._expect(')')
if not self._match(','):
break
else:
self._newline()
self._newline()
# Parse the actual full-select. DEFAULT may be permitted here if the
# full-select turns out to be a VALUES statement
self._parse_full_select(allowdefault, allowinto)
# CLAUSES ################################################################
def _parse_assignment_clause(self, allowdefault):
"""Parses a SET clause"""
# SET already matched
while True:
if self._match('('):
# Parse tuple assignment
while True:
self._parse_subrelation_name()
if not self._match(','):
break
self._expect_sequence([')', '=', '('])
self._parse_tuple(allowdefault=True)
self._expect(')')
else:
# Parse simple assignment
self._parse_subrelation_name()
if self._match('['):
self._parse_expression()
self._expect(']')
if self._match('.'):
self._expect(TT.IDENTIFIER)
self._expect('=')
if self._match('ARRAY'):
self._expect('[', prespace=False)
# Ambiguity: Expression list vs. select-statement
self._save_state()
try:
self._parse_expression_list()
except ParseError:
self._restore_state()
self._parse_full_select()
else:
self._forget_state()
self._expect(']')
elif not (allowdefault and self._match('DEFAULT')):
self._parse_expression()
if not self._match(','):
break
else:
self._newline()
def _parse_identity_options(self, alter=None):
"""Parses options for an IDENTITY column"""
# AS IDENTITY already matched
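        # Illustrative options (invented values) of the sort parsed below:
        #   (START WITH 1 INCREMENT BY 1 CACHE 20 NO CYCLE)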
# Build a couple of lists of options which have not yet been seen
validno = [
'MINVALUE',
'MAXVALUE',
'CACHE',
'CYCLE',
'ORDER',
]
valid = validno + ['INCREMENT', 'NO']
if alter is None:
valid = valid + ['START']
elif alter == 'SEQUENCE':
valid = valid + ['RESTART']
# XXX Allow backward compatibility options here? Backward
# compatibility options include comma separation of arguments, and
# NOMINVALUE instead of NO MINVALUE, etc.
while valid:
if alter == 'COLUMN':
if self._match('RESTART'):
if self._match('WITH'):
self._expect(TT.NUMBER)
continue
elif self._match('SET'):
t = self._expect_one_of(valid).value
if t != 'NO': valid.remove(t)
if t in validno: validno.remove(t)
else:
break
else:
t = self._match_one_of(valid)
if t:
t = t.value
if t != 'NO': valid.remove(t)
if t in validno: validno.remove(t)
else:
break
if t == 'START':
self._expect_sequence(['WITH', TT.NUMBER])
elif t == 'RESTART':
if self._match('WITH'):
self._expect(TT.NUMBER)
elif t == 'INCREMENT':
self._expect_sequence(['BY', TT.NUMBER])
elif t in ('MINVALUE', 'MAXVALUE', 'CACHE'):
self._expect(TT.NUMBER)
elif t in ('CYCLE', 'ORDER'):
pass
elif t == 'NO':
t = self._expect_one_of(validno).value
validno.remove(t)
valid.remove(t)
def _parse_column_definition(self, aligntypes=False, alignoptions=False, federated=False):
"""Parses a column definition in a CREATE TABLE statement"""
# Parse a column definition
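        # Illustrative column definition (invented names) of the sort parsed
        # here:
        #   ORDER_DATE DATE NOT NULL WITH DEFAULT CURRENT DATE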
self._expect(TT.IDENTIFIER)
if aligntypes:
self._valign()
self._parse_datatype()
if alignoptions and not self._peek_one_of([',', ')']):
self._valign()
# Parse column options
while True:
if self._match('NOT'):
self._expect_one_of(['NULL', 'LOGGED', 'COMPACT', 'HIDDEN'])
elif self._match('LOGGED'):
pass
elif self._match('COMPACT'):
pass
elif self._match('WITH'):
self._expect('DEFAULT')
self._save_state()
try:
self._parse_expression()
except ParseError:
self._restore_state()
else:
self._forget_state()
elif self._match('DEFAULT'):
self._save_state()
try:
self._parse_expression()
except ParseError:
self._restore_state()
else:
self._forget_state()
elif self._match('GENERATED'):
if self._expect_one_of(['ALWAYS', 'BY']).value == 'BY':
self._expect('DEFAULT')
if self._match('AS'):
if self._match('IDENTITY'):
if self._match('('):
self._parse_identity_options()
self._expect(')')
elif self._match('('):
self._parse_expression()
self._expect(')')
else:
self._expected_one_of(['IDENTITY', '('])
else:
self._expect_sequence(['FOR', 'EACH', 'ROW', 'ON', 'UPDATE', 'AS', 'ROW', 'CHANGE', 'TIMESTAMP'])
elif self._match('INLINE'):
self._expect_sequence(['LENGTH', TT.NUMBER])
elif self._match('COMPRESS'):
self._expect_sequence(['SYSTEM', 'DEFAULT'])
elif self._match('COLUMN'):
self._expect_sequence(['SECURED', 'WITH', TT.IDENTIFIER])
elif self._match('SECURED'):
self._expect_sequence(['WITH', TT.IDENTIFIER])
elif self._match('IMPLICITLY'):
self._expect('HIDDEN')
elif federated and self._match('OPTIONS'):
self._parse_federated_options()
else:
self._save_state()
try:
self._parse_column_constraint()
except ParseError:
self._restore_state()
break
else:
self._forget_state()
def _parse_column_constraint(self):
"""Parses a constraint attached to a specific column in a CREATE TABLE statement"""
# Parse the optional constraint name
if self._match('CONSTRAINT'):
self._expect(TT.IDENTIFIER)
# Parse the constraint definition
if self._match('PRIMARY'):
self._expect('KEY')
elif self._match('UNIQUE'):
pass
elif self._match('REFERENCES'):
self._parse_table_name()
if self._match('(', prespace=False):
self._expect(TT.IDENTIFIER)
self._expect(')')
t = ['DELETE', 'UPDATE']
for i in xrange(2):
if self._match('ON'):
t.remove(self._expect_one_of(t).value)
if self._match('NO'):
self._expect('ACTION')
elif self._match('SET'):
self._expect('NULL')
elif self._match_one_of(['RESTRICT', 'CASCADE']):
pass
else:
self._expected_one_of([
'RESTRICT',
'CASCADE',
'NO',
'SET'
])
else:
break
elif self._match('CHECK'):
self._expect('(')
# Ambiguity: check constraint can be a search condition or a
# functional dependency. Try the search condition first
self._save_state()
try:
self._parse_search_condition()
except ParseError:
self._restore_state()
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
self._expect_sequence(['DETERMINED', 'BY'])
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
else:
self._forget_state()
self._expect(')')
else:
self._expected_one_of([
'CONSTRAINT',
'PRIMARY',
'UNIQUE',
'REFERENCES',
'CHECK'
])
def _parse_table_constraint(self):
"""Parses a constraint attached to a table in a CREATE TABLE statement"""
if self._match('CONSTRAINT'):
self._expect(TT.IDENTIFIER)
if self._match('PRIMARY'):
self._expect('KEY')
self._expect('(')
self._parse_ident_list()
self._expect(')')
elif self._match('UNIQUE'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
elif self._match('FOREIGN'):
self._expect('KEY')
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._expect('REFERENCES')
self._parse_subschema_name()
self._expect('(', prespace=False)
self._parse_ident_list()
self._expect(')')
t = ['DELETE', 'UPDATE']
for i in xrange(2):
if self._match('ON'):
t.remove(self._expect_one_of(t).value)
if self._match('NO'):
self._expect('ACTION')
elif self._match('SET'):
self._expect('NULL')
elif self._match_one_of(['RESTRICT', 'CASCADE']):
pass
else:
self._expected_one_of([
'RESTRICT',
'CASCADE',
'NO',
'SET'
])
else:
break
elif self._match('CHECK'):
self._expect('(')
# Ambiguity: check constraint can be a search condition or a
# functional dependency. Try the search condition first
self._save_state()
try:
self._parse_search_condition(newlines=False)
except ParseError:
self._restore_state()
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
self._expect_sequence(['DETERMINED', 'BY'])
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
else:
self._forget_state()
self._expect(')')
else:
self._expected_one_of([
'CONSTRAINT',
'PRIMARY',
'UNIQUE',
'FOREIGN',
'CHECK'
])
def _parse_table_definition(self, aligntypes=False, alignoptions=False, federated=False):
"""Parses a table definition (list of columns and constraints)"""
self._expect('(')
self._indent()
while True:
self._save_state()
try:
# Try parsing a table constraint definition
self._parse_table_constraint()
except ParseError:
# If that fails, rewind and try and parse a column definition
self._restore_state()
self._parse_column_definition(aligntypes=aligntypes, alignoptions=alignoptions, federated=federated)
else:
self._forget_state()
if not self._match(','):
break
else:
self._newline()
if aligntypes:
self._vapply()
if alignoptions:
self._vapply()
self._outdent()
self._expect(')')
def _parse_constraint_alteration(self):
"""Parses a constraint-alteration in an ALTER TABLE statement"""
# FOREIGN KEY/CHECK already matched
self._expect(TT.IDENTIFIER)
if self._match_one_of(['ENABLE', 'DISABLE']):
self._expect_sequence(['QUERY', 'OPTIMIZATION'])
else:
self._match('NOT')
self._expect('ENFORCED')
def _parse_column_alteration(self):
"""Parses a column-alteration in an ALTER TABLE statement"""
self._expect(TT.IDENTIFIER)
if self._match('DROP'):
if self._match('NOT'):
self._expect('NULL')
elif self._match('COLUMN'):
self._expect('SECURITY')
else:
self._expect_one_of([
'NOT',
'COLUMN',
'IDENTITY',
'DEFAULT',
'EXPRESSION'
])
elif self._match('COMPRESS'):
if self._match('SYSTEM'):
self._expect('DEFAULT')
else:
self._expect('OFF')
elif self._match('SECURED'):
self._expect_sequence(['WITH', TT.IDENTIFIER])
else:
# Ambiguity: SET can introduce several different alterations
self._save_state()
try:
# Try and parse SET (DATA TYPE | EXPRESSION | INLINE LENGTH | GENERATED)
self._expect('SET')
if self._match('DATA'):
self._expect('TYPE')
self._parse_datatype()
elif self._match('EXPRESSION'):
self._expect('AS')
self._expect('(')
self._parse_expression()
self._expect(')')
elif self._match('INLINE'):
self._expect_sequence(['LENGTH', TT.NUMBER])
elif self._match('GENERATED'):
                    if self._expect_one_of(['ALWAYS', 'BY']).value == 'BY':
self._expect('DEFAULT')
self._expect('AS')
if self._match('IDENTITY'):
if self._match('('):
self._parse_identity_options()
self._expect(')')
elif self._match('('):
self._parse_expression()
self._expect(')')
else:
self._expected_one_of(['IDENTITY', '('])
elif self._match('NOT'):
self._expect('NULL')
else:
raise ParseBacktrack()
except ParseBacktrack:
# NOTE: This exception block is only called on a ParseBacktrack
                # error. Other parse errors will propagate outward. If the
# above SET clauses didn't match, try an identity-alteration.
self._restore_state()
self._parse_identity_options(alter='COLUMN')
else:
self._forget_state()
def _parse_federated_column_alteration(self):
"""Parses a column-alteration in an ALTER NICKNAME statement"""
self._expect(TT.IDENTIFIER)
while True:
if self._match('LOCAL'):
if self._match('NAME'):
self._expect(TT.IDENTIFIER)
elif self._match('TYPE'):
self._parse_datatype()
elif self._match('OPTIONS'):
self._parse_federated_options(alter=True)
if not self._match(','):
break
def _parse_auth_list(self):
"""Parses an authorization list in a GRANT or REVOKE statement"""
# [TO|FROM] already matched
while True:
if not self._match('PUBLIC'):
self._match_one_of(['USER', 'GROUP', 'ROLE'])
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
def _parse_grant_revoke(self, grant):
"""Parses the body of a GRANT or REVOKE statement"""
# [GRANT|REVOKE] already matched
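        # Illustrative statements (invented names) of the sort parsed here:
        #   GRANT SELECT, UPDATE (col1) ON TABLE corp.orders TO USER fred
        #       WITH GRANT OPTION
        #   REVOKE EXECUTE ON PROCEDURE corp.cleanup FROM PUBLIC RESTRICT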
# Parse any preamble
seclabel = False
if self._match('ROLE'):
pass
elif self._match_sequence(['SECURITY', 'LABEL']):
seclabel = grant
# Parse the privilege list
while True:
priv = self._expect(TT.IDENTIFIER)
if priv.value in ('REFERENCES', 'UPDATE'):
if self._match('('):
self._parse_ident_list()
self._expect(')')
elif priv.value == 'DBADM':
while self._match_one_of(['WITH', 'WITHOUT']):
self._expect_one_of(['DATAACCESS', 'ACCESSCTRL'])
elif priv.value == 'DB2LBACWRITEARRAY':
self._expect_one_of(['WRITEDOWN', 'WRITEUP'])
elif priv.value == 'ALL':
self._match('PRIVILEGES')
break
if not self._match(','):
break
# Parse the target list
if self._match('OF'):
self._expect_sequence(['TABLESPACE', TT.IDENTIFIER])
elif self._match('ON'):
while True:
if self._match('DATABASE'):
break
elif self._match('RULE'):
if self._expect_one_of([
'DB2LBACREADARRAY',
'DB2LBACREADSET',
'DB2LBACREADTREE',
'DB2LBACWRITEARRAY',
'DB2LBACWRITESET',
'DB2LBACWRITETREE',
'ALL'
]).value == 'DB2LBACWRITEARRAY':
self._expect_one_of(['WRITEDOWN', 'WRITEUP'])
self._expect_sequence(['FOR', TT.IDENTIFIER])
break
elif self._match('VARIABLE'):
self._parse_variable_name()
break
elif self._match('INDEX'):
self._parse_index_name()
break
elif self._match('MODULE'):
self._parse_module_name()
break
elif self._match_one_of(['PROGRAM', 'PACKAGE']):
self._parse_subschema_name()
break
elif self._match_one_of(['FUNCTION', 'PROCEDURE']):
# Ambiguity: Can use schema.* or schema.name(prototype) here
if not self._match('*') and not self._match_sequence([TT.IDENTIFIER, '.', '*']):
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
break
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
break
elif self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
break
elif self._match('SEQUENCE'):
self._parse_sequence_name()
break
elif self._match('SERVER'):
self._expect(TT.IDENTIFIER)
break
elif self._match('USER'):
self._expect(TT.IDENTIFIER)
elif self._match('PUBLIC'):
pass
elif self._match('TABLE'):
self._parse_table_name()
break
elif self._match('WORKLOAD'):
self._expect(TT.IDENTIFIER)
break
elif self._match('XSROBJECT'):
self._parse_subschema_name()
break
else:
self._parse_table_name()
break
if not self._match(','):
break
# Parse the grantee(s)
# XXX The following is a bit lax, but again, adhering strictly to the
# syntax results in a ridiculously complex syntax
self._expect(['FROM', 'TO'][grant])
self._parse_auth_list()
if seclabel:
if self._match('FOR'):
self._expect_one_of(['ALL', 'READ', 'WRITE'])
self._expect('ACCESS')
elif grant:
if self._match('WITH'):
self._expect_one_of(['GRANT', 'ADMIN'])
self._expect('OPTION')
else:
self._match_sequence(['BY', 'ALL'])
self._match('RESTRICT')
def _parse_tablespace_size_attributes(self):
"""Parses DMS size attributes in a CREATE TABLESPACE statement"""
if self._match('AUTORESIZE'):
self._expect_one_of(['NO', 'YES'])
        if self._match('INITIALSIZE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G'])
if self._match('INCREASESIZE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G', 'PERCENT'])
if self._match('MAXSIZE'):
if not self._match('NONE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G'])
def _parse_database_container_clause(self, size=True):
"""Parses a container clause for a DMS tablespace"""
self._expect('(')
while True:
self._expect_one_of(['FILE', 'DEVICE'])
self._expect(TT.STRING)
if size:
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
if not self._match(','):
break
self._expect(')')
def _parse_system_container_clause(self):
"""Parses a container clause for an SMS tablespace"""
self._expect('(')
while True:
self._expect(TT.STRING)
if not self._match(','):
break
self._expect(')')
def _parse_db_partition_clause(self):
"""Parses a DBPARTITIONNUM clause in various statements"""
if not self._match('GLOBAL'):
if self._match('AT'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
def _parse_db_partition_list_clause(self, size=False):
"""Parses an DBPARTITIONNUM clause in various statements"""
self._expect_one_of([
'DBPARTITIONNUM',
'DBPARTITIONNUMS',
'NODE', # compatibility option
'NODES', # compatibility option
])
self._expect('(')
while True:
self._expect(TT.NUMBER)
self._match_sequence(['TO', TT.NUMBER])
if size:
self._expect_sequence(['SIZE', TT.NUMBER])
if not self._match(','):
break
self._expect(')')
def _parse_db_partitions_clause(self):
"""Parses a DBPARTITIONNUM list clause in various statements"""
if self._match('ON'):
if self._match('ALL'):
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
if self._match('EXCEPT'):
self._parse_db_partition_list_clause(size=False)
else:
self._parse_db_partition_list_clause(size=False)
def _parse_function_predicates_clause(self):
"""Parses the PREDICATES clause in a CREATE FUNCTION statement"""
# PREDICATES already matched
# The surrounding parentheses seem to be optional (although the syntax
# diagram in the DB2 Info Center implies otherwise)
parens = self._match('(')
self._expect('WHEN')
self._match_one_of(['=', '<>', '<', '>', '<=', '>='])
if self._match('EXPRESSION'):
self._expect_sequence(['AS', TT.IDENTIFIER])
else:
self._parse_expression()
valid = ['SEARCH', 'FILTER']
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'SEARCH':
self._expect('BY')
self._match('EXACT')
self._expect('INDEX')
self._expect('EXTENSION')
self._parse_index_name()
self._expect('WHEN')
while True:
self._expect_sequence(['KEY', '(', TT.IDENTIFIER, ')', 'USE', TT.IDENTIFIER, '('])
self._parse_ident_list()
self._expect(')')
if not self._match('WHEN'):
break
elif t == 'FILTER':
self._expect('USING')
if self._match('CASE'):
if self._match('WHEN'):
self._parse_searched_case()
else:
self._parse_simple_case()
else:
self._parse_scalar_function_call()
if parens:
self._expect(')')
def _parse_federated_options(self, alter=False):
"""Parses an OPTIONS list for a federated object"""
# OPTIONS already matched
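        # Illustrative clause (option names invented for illustration only) of
        # the sort parsed here:
        #   OPTIONS (ADD remote_host 'db2srv1', SET password_required 'Y')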
self._expect('(')
while True:
if alter and self._match('DROP'):
self._expect(TT.IDENTIFIER)
else:
if alter:
                    self._match_one_of(['ADD', 'SET'])
else:
self._match('ADD')
self._expect(TT.IDENTIFIER)
self._expect(TT.STRING)
if not self._match(','):
break
self._expect(')')
def _parse_remote_server(self):
"""Parses a remote server specification"""
# SERVER already matched
if self._match('TYPE'):
self._expect(TT.IDENTIFIER)
if self._match('VERSION'):
self._parse_server_version()
if self._match('WRAPPER'):
self._expect(TT.IDENTIFIER)
else:
self._expect(TT.IDENTIFIER)
if self._match('VERSION'):
self._parse_server_version()
def _parse_server_version(self):
"""Parses a federated server version"""
# VERSION already matched
if self._match(TT.NUMBER):
if self._match('.'):
self._expect(TT.NUMBER)
if self._match('.'):
self._expect(TT.NUMBER)
elif self._match(TT.STRING):
pass
else:
self._expected_one_of([TT.NUMBER, TT.STRING])
def _parse_partition_boundary(self):
"""Parses a partition boundary in a PARTITION clause"""
if self._match('STARTING'):
self._match('FROM')
if self._match('('):
while True:
self._expect_one_of([TT.NUMBER, 'MINVALUE', 'MAXVALUE'])
if not self._match(','):
break
self._expect(')')
else:
self._expect_one_of([TT.NUMBER, 'MINVALUE', 'MAXVALUE'])
self._match_one_of(['INCLUSIVE', 'EXCLUSIVE'])
self._expect('ENDING')
self._match('AT')
if self._match('('):
while True:
self._expect_one_of([TT.NUMBER, 'MINVALUE', 'MAXVALUE'])
if not self._match(','):
break
self._expect(')')
else:
self._expect_one_of([TT.NUMBER, 'MINVALUE', 'MAXVALUE'])
self._match_one_of(['INCLUSIVE', 'EXCLUSIVE'])
def _parse_copy_options(self):
"""Parse copy options for CREATE TABLE... LIKE statements"""
# XXX Tidy this up (shouldn't just be a 2-time loop)
for i in xrange(2):
if self._match_one_of(['INCLUDING', 'EXCLUDING']):
if self._match('COLUMN'):
self._expect('DEFAULTS')
elif self._match('DEFAULTS'):
pass
elif self._match('IDENTITY'):
self._match_sequence(['COLUMN', 'ATTRIBUTES'])
def _parse_refreshable_table_options(self, alter=False):
"""Parses refreshable table options in a materialized query definition"""
if not alter and self._match('WITH'):
self._expect_sequence(['NO', 'DATA'])
self._parse_copy_options()
else:
valid = [
'DATA',
'REFRESH',
'ENABLE',
'DISABLE',
'MAINTAINED',
]
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'DATA':
self._expect_sequence(['INITIALLY', 'DEFERRED'])
elif t == 'REFRESH':
self._expect_one_of(['DEFERRED', 'IMMEDIATE'])
elif t in ('ENABLE', 'DISABLE'):
self._expect_sequence(['QUERY', 'OPTIMIZATION'])
if t == 'ENABLE':
valid.remove('DISABLE')
else:
valid.remove('ENABLE')
elif t == 'MAINTAINED':
self._expect('BY')
self._expect_one_of(['SYSTEM', 'USER', 'FEDERATED_TOOL'])
def _parse_action_types_clause(self):
"""Parses an action types clause in a WORK ACTION"""
if self._match('MAP'):
self._expect('ACTIVITY')
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('NESTED')
self._expect('TO')
self._expect(TT.IDENTIFIER)
elif self._match('WHEN'):
self._parse_threshold_predicate()
self._parse_threshold_exceeded_actions()
elif self._match('PREVENT'):
self._expect('EXECUTION')
elif self._match('COUNT'):
self._expect('ACTIVITY')
elif self._match('COLLECT'):
if self._match('ACTIVITY'):
self._expect('DATA')
self._parse_collect_activity_data_clause()
elif self._match('AGGREGATE'):
self._expect_sequence(['ACTIVITY', 'DATA'])
self._match_one_of(['BASE', 'EXTENDED'])
else:
self._expected_one_of(['MAP', 'WHEN', 'PREVENT', 'COUNT', 'COLLECT'])
def _parse_threshold_predicate(self):
"""Parses a threshold predicate in a WORK ACTION"""
if self._match_one_of([
'TOTALDBPARTITIONCONNECTIONS',
'CONCURRENTWORKLOADOCCURRENCES',
'CONCURRENTWORKLOADACTIVITIES',
'ESTIMATEDSQLCOST',
'SQLROWSRETURNED',
]):
self._expect_sequence(['>', TT.NUMBER])
elif self._match('TOTALSCPARTITIONCONNECTIONS'):
self._expect_sequence(['>', TT.NUMBER])
if self._match('QUEUEDCONNECTIONS'):
if self._match('>'):
self._expect(TT.NUMBER)
elif self._match('UNBOUNDED'):
pass
else:
self._expected_one_of(['>', 'UNBOUNDED'])
elif self._match('CONCURRENTDBCOORDACTIVITIES'):
self._expect_sequence(['>', TT.NUMBER])
if self._match('QUEUEDACTIVITIES'):
if self._match('>'):
self._expect(TT.NUMBER)
elif self._match('UNBOUNDED'):
pass
else:
self._expected_one_of(['>', 'UNBOUNDED'])
elif self._match_one_of([
'CONNECTIONIDLETIME',
'ACTIVITYTOTALTIME',
]):
self._expect_sequence(['>', TT.NUMBER])
self._expect_one_of([
'DAY',
'DAYS',
'HOUR',
'HOURS',
'MINUTE',
'MINUTES'
])
elif self._match('SQLTEMPSPACE'):
self._expect_sequence(['>', TT.NUMBER])
self._expect_one_of(['K', 'M', 'G'])
def _parse_threshold_exceeded_actions(self):
"""Parses a threshold exceeded actions clause in a WORK ACTION"""
if self._match_sequence(['COLLECT', 'ACTIVITY', 'DATA']):
self._parse_collect_activity_data_clause(alter=True)
if self._match('STOP'):
self._expect('EXECUTION')
elif not self._match('CONTINUE'):
self._expected_one_of(['STOP', 'CONTINUE'])
def _parse_collect_activity_data_clause(self, alter=False):
"""Parses a COLLECT ACTIVITY clause in an action clause"""
# COLLECT ACTIVITY DATA already matched
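        # Illustrative clause of the sort parsed here:
        #   COLLECT ACTIVITY DATA ON COORDINATOR DATABASE PARTITION
        #       WITH DETAILS AND VALUES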
if not (alter and self._match('NONE')):
self._expect('ON')
if self._match('ALL'):
self._match_sequence(['DATABASE', 'PARTITIONS'])
elif self._match('COORDINATOR'):
self._match_sequence(['DATABASE', 'PARTITION'])
else:
self._expected_one_of(['ALL', 'COORDINATOR'])
if self._match('WITHOUT'):
self._expect('DETAILS')
elif self._match('WITH'):
self._expect('DETAILS')
if self._match('AND'):
self._expect('VALUES')
else:
self._expected_one_of(['WITHOUT', 'WITH'])
def _parse_histogram_template_clause(self):
"""Parses a history template clause in a WORK ACTION"""
if self._match('ACTIVITY'):
            self._expect_one_of(['LIFETIME', 'QUEUETIME', 'EXECUTETIME', 'ESTIMATEDCOST', 'INTERARRIVALTIME'])
self._expect_sequence(['HISTOGRAM', 'TEMPLATE'])
self._expect_one_of(['SYSDEFAULTHISTOGRAM', TT.IDENTIFIER])
def _parse_work_attributes(self):
"""Parses a work attributes clause in a WORK CLASS"""
self._expect_sequence(['WORK', 'TYPE'])
if self._match_one_of(['READ', 'WRITE', 'DML']):
self._parse_for_from_to_clause()
elif self._match('ALL'):
if self._match('FOR'):
self._parse_for_from_to_clause()
if self._match('ROUTINES'):
self._parse_routines_in_schema_clause()
elif self._match('CALL'):
if self._match('ROUTINES'):
self._parse_routines_in_schema_clause()
elif not self._match_one_of(['DDL', 'LOAD']):
self._expected_one_of(['READ', 'WRITE', 'DML', 'DDL', 'LOAD', 'ALL', 'CALL'])
def _parse_for_from_to_clause(self, alter=False):
"""Parses a FOR .. FROM .. TO clause in a WORK CLASS definition"""
# FOR already matched
if alter and self._match('ALL'):
self._expect_sequence(['UNITS', 'UNBOUNDED'])
else:
self._expect_one_of(['TIMERONCOST', 'CARDINALITY'])
self._expect_sequence(['FROM', TT.NUMBER])
if self._match('TO'):
self._expect_one_of(['UNBOUNDED', TT.NUMBER])
def _parse_routines_in_schema_clause(self, alter=False):
"""Parses a schema clause in a WORK CLASS definition"""
# ROUTINES already matched
if alter and self._match('ALL'):
pass
else:
self._expect_sequence(['IN', 'SCHEMA', TT.IDENTIFIER])
def _parse_position_clause(self):
"""Parses a POSITION clause in a WORK CLASS definition"""
# POSITION already matched
if self._match('AT'):
self._expect(TT.NUMBER)
elif self._match_one_of(['BEFORE', 'AFTER']):
self._expect(TT.IDENTIFIER)
elif self._match('LAST'):
pass
else:
self._expected_one_of(['AT', 'BEFORE', 'AFTER', 'LAST'])
def _parse_connection_attributes(self):
"""Parses connection attributes in a WORKLOAD"""
if self._match_one_of([(TT.REGISTER, 'APPLNAME'), (TT.REGISTER, 'SYSTEM_USER')]):
pass
elif self._match((TT.REGISTER, 'SESSION_USER')):
self._match('GROUP')
elif self._match('CURRENT'):
self._expect_one_of([
(TT.REGISTER, 'CLIENT_USERID'),
(TT.REGISTER, 'CLIENT_APPLNAME'),
(TT.REGISTER, 'CLIENT_WRKSTNNAME'),
(TT.REGISTER, 'CLIENT_ACCTNG')
])
else:
self._expected_one_of(['APPLNAME', 'SYSTEM_USER', 'SESSION_USER', 'CURRENT'])
self._expect('(')
while True:
if not self._match(TT.STRING):
self._expect(')')
break
def _parse_audit_policy(self, alter=False):
"""Parses an AUDIT POLICY definition"""
valid = set(['CATEGORIES', 'ERROR'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'CATEGORIES':
while True:
if self._expect_one_of([
'ALL',
'AUDIT',
'CHECKING',
'CONTEXT',
'EXECUTE',
'OBJMAINT',
'SECMAINT',
'SYSADMIN',
'VALIDATE'
]).value == 'EXECUTE':
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('DATA')
self._expect('STATUS')
self._expect_one_of(['BOTH', 'FAILURE', 'NONE', 'SUCCESS'])
if not self._match(','):
break
elif t == 'ERROR':
self._expect('TYPE')
self._expect_one_of(['NORMAL', 'AUDIT'])
# If we're defining a new policy, ensure both terms are specified
if not alter and valid:
self._expected(valid.pop())
def _parse_evm_group(self):
"""Parses an event monitor group in a non-wlm event monitor definition"""
while True:
self._expect(TT.IDENTIFIER)
if self._match('('):
valid = set(['TABLE', 'IN', 'PCTDEACTIVATE', 'TRUNC', 'INCLUDES', 'EXCLUDES'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'TABLE':
self._parse_table_name()
elif t == 'IN':
self._expect(TT.IDENTIFIER)
elif t == 'PCTDEACTIVATE':
self._expect(TT.NUMBER)
elif t == 'TRUNC':
pass
elif t == 'INCLUDES' or t == 'EXCLUDES':
self._expect('(')
while True:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
self._expect(')')
self._expect(')')
if not self._match(','):
break
def _parse_evm_write_to(self):
"""Parses a WRITE TO clause in an event monitor definition"""
# WRITE TO already matched
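        # Illustrative clauses (invented values) of the sort parsed here:
        #   WRITE TO TABLE BUFFERSIZE 4 BLOCKED
        #   WRITE TO FILE '/tmp/evmon' MAXFILES 10 MAXFILESIZE 1024 APPEND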
if self._match('TABLE'):
valid = set(['BUFFERSIZE', 'BLOCKED', 'NONBLOCKED', 'evm-group'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
elif 'evm-group' in valid:
self._save_state()
try:
self._parse_evm_group()
valid.remove('evm-group')
except ParseError:
self._restore_state()
break
else:
self._forget_state()
else:
break
if t == 'BUFFERSIZE':
self._expect(TT.NUMBER)
elif t == 'BLOCKED':
valid.remove('NONBLOCKED')
elif t == 'NONBLOCKED':
valid.remove('BLOCKED')
elif self._match('PIPE'):
self._expect(TT.STRING)
elif self._match('FILE'):
self._expect(TT.STRING)
valid = set(['MAXFILES', 'MAXFILESIZE', 'BUFFERSIZE', 'BLOCKED', 'NONBLOCKED', 'APPEND', 'REPLACE'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'MAXFILES' or t == 'MAXFILESIZE':
self._expect_one_of(['NONE', TT.NUMBER])
elif t == 'BLOCKED':
valid.remove('NONBLOCKED')
elif t == 'NONBLOCKED':
valid.remove('BLOCKED')
                elif t == 'APPEND':
valid.remove('REPLACE')
elif t == 'REPLACE':
valid.remove('APPEND')
else:
self._expected_one_of(['TABLE', 'PIPE', 'FILE'])
def _parse_evm_options(self):
"""Parses the options after an event monitor definition"""
valid = set(['WRITE', 'AUTOSTART', 'MANUALSTART', 'ON', 'LOCAL', 'GLOBAL'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'WRITE':
self._expect('TO')
self._parse_evm_write_to()
elif t == 'AUTOSTART':
valid.remove('MANUALSTART')
elif t == 'MANUALSTART':
valid.remove('AUTOSTART')
elif t == 'ON':
self._expect_one_of(['NODE', 'DBPARTITIONNUM'])
self._expect(TT.NUMBER)
elif t == 'LOCAL':
valid.remove('GLOBAL')
elif t == 'GLOBAL':
valid.remove('LOCAL')
def _parse_nonwlm_event_monitor(self):
"""Parses a non-wlm event monitor definition"""
while True:
if self._match_one_of(['DATABASE', 'TABLES', 'BUFFERPOOLS', 'TABLESPACES']):
pass
elif self._match('DEADLOCKS'):
if self._match_sequence(['WITH', 'DETAILS']):
if self._match('HISTORY'):
self._match('VALUES')
elif self._match_one_of(['CONNECTIONS', 'STATEMENTS', 'TRANSACTIONS']):
if self._match('WHERE'):
self._parse_search_condition()
else:
self._expected_one_of([
'DATABASE',
'TABLES',
'BUFFERPOOLS',
'TABLESPACES',
'DEADLOCKS',
'CONNECTIONS',
'STATEMENTS',
'TRANSACTIONS',
])
if not self._match(','):
break
self._parse_evm_options()
def _parse_wlm_event_monitor(self):
"""Parses a wlm event monitor definition"""
if self._expect_one_of(['ACTIVITIES', 'STATISTICS', 'THRESHOLD']).value == 'THRESHOLD':
self._expect('VIOLATIONS')
self._parse_evm_options()
# STATEMENTS #############################################################
def _parse_allocate_cursor_statement(self):
"""Parses an ALLOCATE CURSOR statement in a procedure"""
# ALLOCATE already matched
self._expect_sequence([TT.IDENTIFIER, 'CURSOR', 'FOR', 'RESULT', 'SET', TT.IDENTIFIER])
def _parse_alter_audit_policy_statement(self):
"""Parses an ALTER AUDIT POLICY statement"""
# ALTER AUDIT POLICY already matched
        self._expect(TT.IDENTIFIER)
self._parse_audit_policy(alter=True)
def _parse_alter_bufferpool_statement(self):
"""Parses an ALTER BUFFERPOOL statement"""
# ALTER BUFFERPOOL already matched
self._expect(TT.IDENTIFIER)
if self._match('ADD'):
if self._expect_one_of(['NODEGROUP', 'DATABASE']).value == 'DATABASE':
self._expect_sequence(['PARTITION', 'GROUP'])
self._expect(TT.IDENTIFIER)
elif self._match('NUMBLOCKPAGES'):
self._expect(TT.NUMBER)
if self._match('BLOCKSIZE'):
self._expect(TT.NUMBER)
elif self._match('BLOCKSIZE'):
self._expect(TT.NUMBER)
elif self._match('NOT'):
self._expect_sequence(['EXTENDED', 'STORAGE'])
elif self._match('EXTENDED'):
self._expect('STORAGE')
else:
self._match_one_of(['IMMEDIATE', 'DEFERRED'])
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
self._expect('SIZE')
if self._match(TT.NUMBER):
self._match('AUTOMATIC')
else:
self._expect_one_of([TT.NUMBER, 'AUTOMATIC'])
def _parse_alter_database_statement(self):
"""Parses an ALTER DATABASE statement"""
# ALTER DATABASE already matched
if not self._match('ADD'):
self._expect(TT.IDENTIFIER)
self._expect('ADD')
self._expect_sequence(['STORAGE', 'ON'])
while True:
self._expect(TT.STRING)
if not self._match(','):
break
def _parse_alter_function_statement(self, specific):
"""Parses an ALTER FUNCTION statement"""
# ALTER [SPECIFIC] FUNCTION already matched
self._parse_function_name()
if not specific and self._match('(', prespace=False):
if not self._match(')'):
self._parse_datatype_list()
self._expect(')')
first = True
while True:
if self._match('EXTERNAL'):
self._expect('NAME')
self._expect_one_of([TT.STRING, TT.IDENTIFIER])
elif self._match('NOT'):
self._expect_one_of(['FENCED', 'THREADSAFE'])
elif self._match_one_of(['FENCED', 'THREADSAFE']):
pass
elif first:
self._expected_one_of([
'EXTERNAL',
'NOT',
'FENCED',
'THREADSAFE',
])
else:
break
first = False
def _parse_alter_partition_group_statement(self):
"""Parses an ALTER DATABASE PARTITION GROUP statement"""
# ALTER [DATABASE PARTITION GROUP|NODEGROUP] already matched
self._expect(TT.IDENTIFIER)
while True:
if self._match('ADD'):
self._parse_db_partition_list_clause(size=False)
if self._match('LIKE'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
elif self._match('WITHOUT'):
self._expect('TABLESPACES')
elif self._match('DROP'):
self._parse_db_partition_list_clause(size=False)
else:
self._expected_one_of(['ADD', 'DROP'])
if not self._match(','):
break
def _parse_alter_histogram_template_statement(self):
"""Parses an ALTER HISTOGRAM TEMPLATE statement"""
# ALTER HISTOGRAM TEMPLATE already matched
self._expect_sequence([TT.IDENTIFIER, 'HIGH', 'BIN', 'VALUE', TT.NUMBER])
def _parse_alter_module_statement(self):
"""Parses an ALTER MODULE statement"""
# ALTER MODULE already matched
self._parse_module_name()
if self._match_one_of(['ADD', 'PUBLISH']):
self._match_sequence(['OR', 'REPLACE'])
if self._match('CONDITION'):
self._expect(TT.IDENTIFIER)
if self._match('FOR'):
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect(TT.STRING)
elif self._match('FUNCTION'):
self._parse_create_function_statement()
elif self._match('PROCEDURE'):
self._parse_create_procedure_statement()
elif self._match('TYPE'):
self._parse_create_type_statement()
elif self._match('VARIABLE'):
self._parse_create_variable_statement()
elif self._match('DROP'):
if not self._match('BODY'):
if self._match('CONDITION'):
self._expect(TT.IDENTIFIER)
elif self._match_one_of(['FUNCTION', 'PROCEDURE']):
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
elif self._match('TYPE'):
self._parse_type_name()
elif self._match('VARIABLE'):
self._parse_variable_name()
else:
self._expected_one_of([
'BODY',
'CONDITION',
'FUNCTION',
'PROCEDURE',
'SPECIFIC',
'TYPE',
'VARIABLE',
])
else:
self._expected_one_of(['ADD', 'DROP', 'PUBLISH'])
def _parse_alter_nickname_statement(self):
"""Parses an ALTER NICKNAME statement"""
# ALTER NICKNAME already matched
self._parse_nickname_name()
if self._match('OPTIONS'):
self._parse_federated_options(alter=True)
while True:
if self._match('ADD'):
self._parse_table_constraint()
elif self._match('ALTER'):
if self._match('FOREIGN'):
self._expect('KEY')
self._parse_constraint_alteration()
elif self._match('CHECK'):
self._parse_constraint_alteration()
else:
# Ambiguity: A column can be called COLUMN
self._save_state()
try:
self._match('COLUMN')
self._parse_federated_column_alteration()
except ParseError:
self._restore_state()
self._parse_federated_column_alteration()
else:
self._forget_state()
elif self._match('DROP'):
if self._match('PRIMARY'):
self._expect('KEY')
elif self._match('FOREIGN'):
self._expect_sequence(['KEY', TT.IDENTIFIER])
elif self._match_one_of(['UNIQUE', 'CHECK', 'CONSTRAINT']):
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of(['PRIMARY', 'FOREIGN', 'CHECK', 'CONSTRAINT'])
elif self._match_one_of(['ALLOW', 'DISALLOW']):
self._expect('CACHING')
else:
break
self._newline()
def _parse_alter_procedure_statement(self, specific):
"""Parses an ALTER PROCEDURE statement"""
# ALTER [SPECIFIC] PROCEDURE already matched
self._parse_procedure_name()
if not specific and self._match('(', prespace=False):
if not self._match(')'):
self._parse_datatype_list()
self._expect(')')
first = True
while True:
if self._match('EXTERNAL'):
if self._match('NAME'):
                    self._expect_one_of([TT.STRING, TT.IDENTIFIER])
elif self._match('ACTION'):
pass
else:
self._expected_one_of(['NAME', 'ACTION'])
elif self._match('NOT'):
self._expect_one_of(['FENCED', 'THREADSAFE'])
elif self._match_one_of(['FENCED', 'THREADSAFE']):
pass
elif self._match('NO'):
self._expect_sequence(['EXTERNAL', 'ACTION'])
elif self._match('NEW'):
self._expect_sequence(['SAVEPOINT', 'LEVEL'])
elif self._match('ALTER'):
self._expect_sequence(['PARAMETER', TT.IDENTIFIER, 'SET', 'DATA', 'TYPE'])
self._parse_datatype()
elif first:
                self._expected_one_of([
                    'EXTERNAL',
                    'NOT',
                    'FENCED',
                    'THREADSAFE',
                    'NO',
                    'NEW',
                    'ALTER',
                ])
else:
break
first = False
def _parse_alter_security_label_component_statement(self):
"""Parses an ALTER SECURITY LABEL COMPONENT statement"""
# ALTER SECURITY LABEL COMPONENT already matched
        self._expect_sequence([TT.IDENTIFIER, 'ADD', 'ELEMENT', TT.STRING])
if self._match_one_of(['BEFORE', 'AFTER']):
self._expect(TT.STRING)
elif self._match('ROOT'):
pass
elif self._match('UNDER'):
self._expect(TT.STRING)
if self._match('OVER'):
while True:
self._expect(TT.STRING)
if not self._match(','):
break
self._expect('OVER')
def _parse_alter_security_policy_statement(self):
"""Parses an ALTER SECURITY POLICY statement"""
# ALTER SECURITY POLICY
self._expect(TT.IDENTIFIER)
while True:
if self._match('ADD'):
self._expect_sequence(['SECURITY', 'LABEL', 'COMPONENT', TT.IDENTIFIER])
elif self._match_one_of(['OVERRIDE', 'RESTRICT']):
self._expect_sequence(['NOT', 'AUTHORIZED', 'WRITE', 'SECURITY', 'LABEL'])
elif self._match_one_of(['USE', 'IGNORE']):
self._expect_one_of(['GROUP', 'ROLE'])
self._expect('AUTHORIZATIONS')
else:
break
def _parse_alter_sequence_statement(self):
"""Parses an ALTER SEQUENCE statement"""
# ALTER SEQUENCE already matched
self._parse_sequence_name()
self._parse_identity_options(alter='SEQUENCE')
def _parse_alter_server_statement(self):
"""Parses an ALTER SERVER statement"""
# ALTER SERVER already matched
self._parse_remote_server()
if self._match('OPTIONS'):
self._parse_federated_options(alter=True)
def _parse_alter_service_class_statement(self):
"""Parses an ALTER SERVICE CLASS statement"""
# ALTER SERVICE CLASS already matched
self._expect(TT.IDENTIFIER)
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('AGENT'):
self._expect('PRIORITY')
self._expect_one_of(['DEFAULT', TT.NUMBER])
elif self._match('PREFETCH'):
self._expect('PRIORITY')
self._expect_one_of(['LOW', 'MEDIUM', 'HIGH', 'DEFAULT'])
elif self._match('OUTBOUND'):
self._expect('CORRELATOR')
self._expect_one_of(['NONE', TT.STRING])
elif self._match('COLLECT'):
if self._match('ACTIVITY'):
self._expect('DATA')
if self._match('ON'):
if self._match('ALL'):
self._match_sequence(['DATABASE', 'PARTITIONS'])
elif self._match('COORDINATOR'):
self._match_sequence(['DATABASE', 'PARTITION'])
else:
self._expected_one_of(['ALL', 'COORDINATOR'])
self._expect_one_of(['WITH', 'WITHOUT'])
self._expect('DETAILS')
self._match_sequence(['AND', 'VALUES'])
elif self._match('NONE'):
pass
else:
self._expected_one_of(['ON', 'NONE'])
elif self._match('AGGREGATE'):
if self._match('ACTIVITY'):
self._expect('DATA')
self._match_one_of(['BASE', 'EXTENDED', 'NONE'])
elif self._match('REQUEST'):
self._expect('DATA')
self._match_one_of(['BASE', 'NONE'])
else:
self._expected_one_of(['ACTIVITY', 'REQUEST'])
else:
self._expected_one_of(['ACTIVITY', 'AGGREGATE'])
elif self._match('ACTIVITY'):
self._expect_one_of(['LIFETIME', 'QUEUETIME', 'EXECUTETIME', 'ESTIMATEDCOST', 'INTERARRIVALTIME'])
self._expect_sequence(['HISTOGRAM', 'TEMPLATE', TT.IDENTIFIER])
elif self._match('REQUEST'):
self._expect_sequence(['EXECUTETIME', 'HISTOGRAM', 'TEMPLATE', TT.IDENTIFIER])
elif self._match_one_of(['ENABLE', 'DISABLE']):
pass
elif not first:
break
else:
self._expected_one_of([
'AGENT',
'PREFETCH',
'OUTBOUND',
'COLLECT',
'ACTIVITY',
'REQUEST',
'ENABLE',
'DISABLE'
])
def _parse_alter_table_statement(self):
"""Parses an ALTER TABLE statement"""
# ALTER TABLE already matched
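        # Illustrative statement (invented names) of the sort parsed here:
        #   ALTER TABLE corp.orders
        #       ADD COLUMN note VARCHAR(100)
        #       ALTER COLUMN qty SET NOT NULL
        #       DROP CONSTRAINT fk_order_cust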
self._parse_table_name()
self._indent()
while True:
if self._match('ADD'):
if self._match('RESTRICT'):
self._expect_sequence(['ON', 'DROP'])
elif self._match('PARTITION'):
# Ambiguity: optional partition name
self._save_state()
try:
self._match(TT.IDENTIFIER)
self._parse_partition_boundary()
except ParseError:
self._restore_state()
self._parse_partition_boundary()
else:
self._forget_state()
if self._match('IN'):
self._expect(TT.IDENTIFIER)
if self._match('LONG'):
self._expect('IN')
self._expect(TT.IDENTIFIER)
elif self._match('MATERIALIZED'):
self._expect('QUERY')
self._expect('(')
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_refreshable_table_options(alter=True)
elif self._match('QUERY'):
self._expect('(')
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_refreshable_table_options(alter=True)
elif self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_refreshable_table_options(alter=True)
elif self._match('COLUMN'):
self._parse_column_definition()
elif self._match('SECURITY'):
self._expect('POLICY')
self._expect(TT.IDENTIFIER)
else:
self._save_state()
try:
# Try parsing a table constraint definition
self._parse_table_constraint()
except ParseError:
# If that fails, rewind and try and parse a column definition
self._restore_state()
self._parse_column_definition()
else:
self._forget_state()
elif self._match('ATTACH'):
self._expect('PARTITION')
# Ambiguity: optional partition name
self._save_state()
try:
self._match(TT.IDENTIFIER)
self._parse_partition_boundary()
except ParseError:
self._restore_state()
self._parse_partition_boundary()
else:
self._forget_state()
self._expect('FROM')
self._parse_table_name()
elif self._match('DETACH'):
self._expect_sequence(['PARTITION', TT.IDENTIFIER, 'FROM'])
self._parse_table_name()
elif self._match('ALTER'):
if self._match('FOREIGN'):
self._expect('KEY')
self._parse_constraint_alteration()
elif self._match('CHECK'):
self._parse_constraint_alteration()
else:
# Ambiguity: A column can be called COLUMN
self._save_state()
try:
self._match('COLUMN')
self._parse_column_alteration()
except ParseError:
self._restore_state()
self._parse_column_alteration()
else:
self._forget_state()
elif self._match('RENAME'):
self._match('COLUMN')
self._expect_sequence([TT.IDENTIFIER, 'TO', TT.IDENTIFIER])
elif self._match('DROP'):
if self._match('PRIMARY'):
self._expect('KEY')
elif self._match('FOREIGN'):
self._expect_sequence(['KEY', TT.IDENTIFIER])
elif self._match_one_of(['UNIQUE', 'CHECK', 'CONSTRAINT']):
self._expect(TT.IDENTIFIER)
elif self._match('COLUMN'):
self._expect(TT.IDENTIFIER)
self._match_one_of(['CASCADE', 'RESTRICT'])
elif self._match('RESTRICT'):
self._expect_sequence(['ON', 'DROP'])
elif self._match('DISTRIBUTION'):
pass
elif self._match('MATERIALIZED'):
self._expect('QUERY')
elif self._match('QUERY'):
pass
elif self._match('SECURITY'):
self._expect('POLICY')
else:
self._expect(TT.IDENTIFIER)
self._match_one_of(['CASCADE', 'RESTRICT'])
elif self._match('DATA'):
self._expect('CAPTURE')
if self._match('CHANGES'):
self._match_sequence(['INCLUDE', 'LONGVAR', 'COLUMNS'])
elif self._match('NONE'):
pass
else:
self._expected_one_of(['NONE', 'CHANGES'])
elif self._match('PCTFREE'):
self._expect(TT.NUMBER)
elif self._match('LOCKSIZE'):
self._expect_one_of(['ROW', 'BLOCKINSERT', 'TABLE'])
elif self._match('APPEND'):
self._expect_one_of(['ON', 'OFF'])
elif self._match('VOLATILE'):
self._match('CARDINALITY')
elif self._match('NOT'):
self._expect('VOLATILE')
self._match('CARDINALITY')
elif self._match('COMPRESS'):
self._expect_one_of(['YES', 'NO'])
elif self._match('ACTIVATE'):
if self._expect_one_of(['NOT', 'VALUE']).value == 'NOT':
self._expect_sequence(['LOGGED', 'INITIALLY'])
if self._match('WITH'):
self._expect_sequence(['EMPTY', 'TABLE'])
else:
self._expect('COMPRESSION')
elif self._match('DEACTIVATE'):
self._expect_sequence(['VALUE', 'COMPRESSION'])
else:
break
self._newline()
self._outdent()
def _parse_alter_tablespace_statement(self):
"""Parses an ALTER TABLESPACE statement"""
# ALTER TABLESPACE already matched
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('ADD'):
if self._match('TO'):
self._expect_sequence(['STRIPE', 'SET', TT.IDENTIFIER])
self._parse_database_container_clause()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
else:
# Ambiguity: could be a Database or a System container
# clause here
reraise = False
self._save_state()
try:
# Try a database clause first
self._parse_database_container_clause()
reraise = True
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
except ParseError:
# If that fails, rewind and try a system container
# clause
self._restore_state()
if reraise: raise
self._parse_system_container_clause()
self._parse_db_partition_list_clause(size=False)
else:
self._forget_state()
elif self._match('BEGIN'):
self._expect_sequence(['NEW', 'STRIPE', 'SET'])
self._parse_database_container_clause()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
elif self._match('DROP'):
self._parse_database_container_clause(size=False)
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
elif self._match_one_of(['EXTEND', 'REDUCE']):
# Ambiguity: could be a Database or ALL containers clause
reraise = False
self._save_state()
try:
# Try an ALL containers clause first
self._expect_sequence(['(', 'ALL'])
reraise = True
self._match('CONTAINERS')
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
self._expect(')')
except ParseError:
# If that fails, rewind and try a database container clause
self._restore_state()
if reraise: raise
self._parse_database_container_clause()
else:
self._forget_state()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
elif self._match('PREFETCHSIZE'):
if not self._match('AUTOMATIC'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
elif self._match('BUFFERPOOL'):
self._expect(TT.IDENTIFIER)
elif self._match('OVERHEAD'):
self._expect(TT.NUMBER)
elif self._match('TRANSFERRATE'):
self._expect(TT.NUMBER)
elif self._match('NO'):
self._expect_sequence(['FILE', 'SYSTEM', 'CACHING'])
elif self._match('FILE'):
self._expect_sequence(['SYSTEM', 'CACHING'])
elif self._match('DROPPED'):
self._expect_sequence(['TABLE', 'RECOVERY'])
self._expect_one_of(['ON', 'OFF'])
elif self._match('SWITCH'):
self._expect('ONLINE')
elif self._match('INCREASESIZE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G', 'PERCENT'])
elif self._match('MAXSIZE'):
                if not self._match('NONE'):
self._expect(TT.NUMBER)
self._expect_one_of(['K', 'M', 'G'])
elif self._match('CONVERT'):
self._expect_sequence(['TO', 'LARGE'])
elif first:
self._expected_one_of([
'ADD',
'BEGIN',
                    'DROP',
'EXTEND',
'REDUCE',
'PREFETCHSIZE',
'BUFFERPOOL',
'OVERHEAD',
'TRANSFERRATE',
'NO',
'FILE',
'DROPPED',
'SWITCH',
'INCREASESIZE',
'MAXSIZE',
'CONVERT',
])
else:
break
first = False
def _parse_alter_threshold_statement(self):
"""Parses an ALTER THRESHOLD statement"""
# ALTER THRESHOLD already matched
self._expect(TT.IDENTIFIER)
while True:
if self._match('WHEN'):
self._parse_threshold_predicate()
self._parse_threshold_exceeded_actions()
elif not self._match_one_of(['ENABLE', 'DISABLE']):
break
def _parse_alter_trusted_context_statement(self):
"""Parses an ALTER TRUSTED CONTEXT statement"""
# ALTER TRUSTED CONTEXT already matched
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('ADD'):
if self._match('ATTRIBUTES'):
self._expect('(')
while True:
self._expect_sequence(['ADDRESS', TT.STRING])
if self._match('WITH'):
self._expect_sequence(['ENCRYPTION', TT.STRING])
if not self._match(','):
break
self._expect(')')
elif self._match('USE'):
self._expect('FOR')
while True:
if not self._match('PUBLIC'):
self._expect(TT.IDENTIFIER)
self._match_sequence(['ROLE', TT.IDENTIFIER])
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('AUTHENTICATION')
if not self._match(','):
break
else:
self._expected_one_of(['ATTRIBUTES', 'USE'])
elif self._match('DROP'):
if self._match('ATTRIBUTES'):
self._expect('(')
while True:
self._expect_sequence(['ADDRESS', TT.STRING])
if not self._match(','):
break
self._expect(')')
elif self._match('USE'):
self._expect('FOR')
while True:
if not self._match('PUBLIC'):
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
else:
self._expected_one_of(['ATTRIBUTES', 'USE'])
elif self._match('ALTER'):
while True:
if self._match('SYSTEM'):
self._expect_sequence(['AUTHID', TT.IDENTIFIER])
elif self._match('ATTRIBUTES'):
self._expect('(')
while True:
self._expect_one_of(['ADDRESS', 'ENCRYPTION'])
self._expect(TT.STRING)
if not self._match(','):
break
self._expect(')')
elif self._match('NO'):
self._expect_sequence(['DEFAULT', 'ROLE'])
elif self._match('DEFAULT'):
self._expect_sequence(['ROLE', TT.IDENTIFIER])
elif not self._match_one_of(['ENABLE', 'DISABLE']):
break
elif self._match('REPLACE'):
self._expect_sequence(['USE', 'FOR'])
while True:
if not self._match('PUBLIC'):
self._expect(TT.IDENTIFIER)
self._match_sequence(['ROLE', TT.IDENTIFIER])
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('AUTHENTICATION')
if not self._match(','):
break
elif first:
self._expected_one_of(['ALTER', 'ADD', 'DROP', 'REPLACE'])
else:
break
first = False
def _parse_alter_user_mapping_statement(self):
"""Parses an ALTER USER MAPPING statement"""
# ALTER USER MAPPING already matched
        if not self._match('USER'):
            self._expect(TT.IDENTIFIER)
        self._expect_sequence(['SERVER', TT.IDENTIFIER, 'OPTIONS'])
self._parse_federated_options(alter=True)
def _parse_alter_view_statement(self):
"""Parses an ALTER VIEW statement"""
# ALTER VIEW already matched
self._parse_view_name()
self._expect_one_of(['ENABLE', 'DISABLE'])
self._expect_sequence(['QUERY', 'OPTIMIZATION'])
def _parse_alter_work_action_set_statement(self):
"""Parses an ALTER WORK ACTION SET statement"""
# ALTER WORK ACTION SET already matched
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('ADD'):
self._match_sequence(['WORK', 'ACTION'])
self._expect_sequence([TT.IDENTIFIER, 'ON', 'WORK', 'CLASS', TT.IDENTIFIER])
self._parse_action_types_clause()
self._parse_histogram_template_clause()
self._match_one_of(['ENABLE', 'DISABLE'])
elif self._match('ALTER'):
self._match_sequence(['WORK', 'ACTION'])
self._expect(TT.IDENTIFIER)
while True:
if self._match('SET'):
self._expect_sequence(['WORK', 'CLASS', TT.IDENTIFIER])
elif self._match('ACTIVITY'):
                        self._expect_one_of(['LIFETIME', 'QUEUETIME', 'EXECUTETIME', 'ESTIMATEDCOST', 'INTERARRIVALTIME'])
self._expect_sequence(['HISTOGRAM', 'TEMPLATE', TT.IDENTIFIER])
elif self._match_one_of(['ENABLE', 'DISABLE']):
pass
else:
# Ambiguity: could be the end of the loop, or an action
# types clause
self._save_state()
try:
self._parse_action_types_clause()
except ParseError:
self._restore_state()
break
else:
self._forget_state()
elif self._match('DROP'):
self._match_sequence(['WORK', 'ACTION'])
self._expect(TT.IDENTIFIER)
elif self._match_one_of(['ENABLE', 'DISABLE']):
pass
elif first:
self._expected_one_of(['ADD', 'ALTER', 'DROP', 'ENABLE', 'DISABLE'])
else:
break
first = False
def _parse_alter_work_class_set_statement(self):
"""Parses an ALTER WORK CLASS SET statement"""
# ALTER WORK CLASS SET already matched
self._expect(TT.IDENTIFIER)
outer = True
while True:
if self._match('ADD'):
self._match_sequence(['WORK', 'CLASS'])
self._expect(TT.IDENTIFIER)
self._parse_work_attributes()
self._expect('POSITION')
self._parse_position_clause()
elif self._match('ALTER'):
self._match_sequence(['WORK', 'CLASS'])
self._expect(TT.IDENTIFIER)
inner = True
while True:
if self._match('FOR'):
self._parse_for_from_to_clause(alter=True)
elif self._match('POSITION'):
self._parse_position_clause()
elif self._match('ROUTINES'):
self._parse_routines_in_schema_clause(alter=True)
elif inner:
self._expected_one_of(['FOR', 'POSITION', 'ROUTINES'])
else:
break
inner = False
elif self._match('DROP'):
self._match_sequence(['WORK', 'CLASS'])
self._expect(TT.IDENTIFIER)
elif outer:
self._expected_one_of(['ADD', 'ALTER', 'DROP'])
else:
break
outer = False
def _parse_alter_workload_statement(self):
"""Parses an ALTER WORKLOAD statement"""
self._expect(TT.IDENTIFIER)
first = True
while True:
if self._match('ADD'):
self._parse_connection_attributes()
elif self._match('DROP'):
self._parse_connection_attributes()
elif self._match_one_of(['ALLOW', 'DISALLOW']):
self._expect_sequence(['DB', 'ACCESS'])
elif self._match_one_of(['ENABLE', 'DISABLE']):
pass
elif self._match('SERVICE'):
self._expect_sequence(['CLASS', TT.IDENTIFIER])
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
elif self._match('POSITION'):
self._parse_position_clause()
elif self._match_sequence(['COLLECT', 'ACTIVITY', 'DATA']):
self._parse_collect_activity_data_clause(alter=True)
elif first:
self._expected_one_of([
'ADD',
'DROP',
'ALLOW',
'DISALLOW',
'ENABLE',
'DISABLE',
'SERVICE',
'POSITION',
'COLLECT'
])
else:
break
first = False
def _parse_alter_wrapper_statement(self):
"""Parses an ALTER WRAPPER statement"""
# ALTER WRAPPER already matched
self._expect(TT.IDENTIFIER)
self._expect('OPTIONS')
self._parse_federated_options(alter=True)
def _parse_associate_locators_statement(self):
"""Parses an ASSOCIATE LOCATORS statement in a procedure"""
# ASSOCIATE already matched
self._match_sequence(['RESULT', 'SET'])
self._expect_one_of(['LOCATOR', 'LOCATORS'])
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._expect_sequence(['WITH', 'PROCEDURE'])
self._parse_procedure_name()
def _parse_audit_statement(self):
"""Parses an AUDIT statement"""
# AUDIT already matched
while True:
if self._match_one_of([
'DATABASE',
'SYSADM',
'SYSCTRL',
'SYSMAINT',
'SYSMON',
'SECADM',
'DBADM',
]):
pass
elif self._match('TABLE'):
self._parse_table_name()
elif self._match_sequence(['TRUSTED', 'CONTEXT']):
self._expect(TT.IDENTIFIER)
elif self._match_one_of(['USER', 'GROUP', 'ROLE']):
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of([
'DATABASE',
'SYSADM',
'SYSCTRL',
'SYSMAINT',
'SYSMON',
'SECADM',
'DBADM',
'TABLE',
'TRUSTED',
'USER',
'GROUP',
'ROLE',
])
if not self._match(','):
break
if self._match_one_of(['USING', 'REPLACE']):
self._expect_sequence(['POLICY', TT.IDENTIFIER])
elif not self._match_sequence(['REMOVE', 'POLICY']):
self._expected_one_of(['USING', 'REPLACE', 'REMOVE'])
def _parse_call_statement(self):
"""Parses a CALL statement"""
# CALL already matched
self._parse_subschema_name()
if self._match('(', prespace=False):
if not self._match(')'):
while True:
# Try and parse an optional parameter name
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._expect('=>')
except ParseError:
self._restore_state()
# Parse the parameter value
self._parse_expression()
if not self._match(','):
break
self._expect(')')
def _parse_case_statement(self):
"""Parses a CASE-conditional in a procedure"""
# CASE already matched
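        # Illustrative statement (invented names) of the sort parsed here:
        #   CASE
        #       WHEN v_count > 0 THEN
        #           SET v_status = 'FOUND';
        #       ELSE
        #           SET v_status = 'EMPTY';
        #   END CASE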
if self._match('WHEN'):
# Parse searched-case-statement
simple = False
self._indent(-1)
else:
# Parse simple-case-statement
self._parse_expression()
self._indent()
self._expect('WHEN')
simple = True
# Parse WHEN clauses (only difference is predicate/expression after
# WHEN)
t = None
while True:
if simple:
self._parse_expression()
else:
self._parse_search_condition()
self._expect('THEN')
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
t = self._match_one_of(['WHEN', 'ELSE', 'END'])
if t:
self._outdent(-1)
t = t.value
break
else:
self._newline()
if t != 'WHEN':
break
# Handle ELSE clause (common to both variations)
if t == 'ELSE':
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
if self._match('END'):
self._outdent(-1)
break
else:
self._newline()
self._outdent(-1)
self._expect('CASE')
def _parse_close_statement(self):
"""Parses a CLOSE cursor statement"""
# CLOSE already matched
self._expect(TT.IDENTIFIER)
self._match_sequence(['WITH', 'RELEASE'])
def _parse_comment_statement(self):
"""Parses a COMMENT ON statement"""
# COMMENT ON already matched
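        # Illustrative statements (invented names) of the sort parsed here:
        #   COMMENT ON TABLE corp.orders IS 'Order header rows'
        #   COMMENT ON corp.orders (custno IS 'Customer number',
        #       ordno IS 'Order number')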
# Ambiguity: table/view can be called TABLE, VIEW, ALIAS, etc.
reraise = False
self._save_state()
try:
# Try parsing an extended TABLE/VIEW comment first
self._parse_relation_name()
self._expect('(')
self._indent()
while True:
self._expect(TT.IDENTIFIER)
self._valign()
self._expect_sequence(['IS', TT.STRING])
reraise = True
if self._match(','):
self._newline()
else:
break
self._vapply()
self._outdent()
self._expect(')')
except ParseError:
# If that fails, rewind and parse a single-object comment
self._restore_state()
if reraise: raise
if self._match_one_of(['ALIAS', 'TABLE', 'NICKNAME', 'INDEX', 'TRIGGER', 'VARIABLE']):
self._parse_subschema_name()
elif self._match('TYPE'):
if self._match('MAPPING'):
self._expect(TT.IDENTIFIER)
else:
self._parse_subschema_name()
elif self._match('PACKAGE'):
self._parse_subschema_name()
self._match('VERSION')
# XXX Ambiguity: IDENTIFIER will match "IS" below. How to solve
# this? Only double-quoted identifiers are actually permitted
# here (or strings)
self._match_one_of([TT.IDENTIFIER, TT.STRING])
elif self._match_one_of(['DISTINCT', 'DATA']):
self._expect('TYPE')
self._parse_type_name()
elif self._match_one_of(['COLUMN', 'CONSTRAINT']):
self._parse_subrelation_name()
elif self._match_one_of(['SCHEMA', 'TABLESPACE', 'WRAPPER', 'WORKLOAD', 'NODEGROUP', 'ROLE', 'THRESHOLD']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['AUDIT', 'POLICY']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['SECURITY', 'POLICY']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['SECURITY', 'LABEL']):
self._match('COMPONENT')
self._expect(TT.IDENTIFIER)
elif self._match('SERVER'):
if self._match('OPTION'):
self._expect_sequence([TT.IDENTIFIER, 'FOR'])
self._parse_remote_server()
else:
self._expect(TT.IDENTIFIER)
elif self._match('SERVICE'):
self._expect('CLASS')
self._expect(TT.IDENTIFIER)
self._match_sequence(['UNDER', TT.IDENTIFIER])
elif self._match_sequence(['TRUSTED', 'CONTEXT']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['HISTOGRAM', 'TEMPLATE']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['WORK', 'ACTION', 'SET']):
self._expect(TT.IDENTIFIER)
elif self._match_sequence(['WORK', 'CLASS', 'SET']):
self._expect(TT.IDENTIFIER)
elif self._match('FUNCTION'):
if self._match('MAPPING'):
self._expect(TT.IDENTIFIER)
else:
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
elif self._match('PROCEDURE'):
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
else:
self._expected_one_of([
'ALIAS',
'AUDIT',
'COLUMN',
'CONSTRAINT',
'DATA',
'DATABASE',
'DISTINCT',
'FUNCTION',
'HISTOGRAM',
'INDEX',
'NICKNAME',
'PROCEDURE',
'ROLE',
'SCHEMA',
'SECURITY',
'SERVER',
'SERVICE',
'SPECIFIC',
'TABLE',
'TABLESPACE',
'THRESHOLD',
'TRIGGER',
'TRUSTED',
'TYPE',
'VARIABLE',
'WORK',
'WORKLOAD',
'WRAPPER',
])
self._expect_sequence(['IS', TT.STRING])
else:
self._forget_state()
def _parse_commit_statement(self):
"""Parses a COMMIT statement"""
# COMMIT already matched
self._match('WORK')
def _parse_create_alias_statement(self):
"""Parses a CREATE ALIAS statement"""
# CREATE ALIAS already matched
self._parse_relation_name()
self._expect('FOR')
self._parse_relation_name()
def _parse_create_audit_policy_statement(self):
"""Parses a CREATE AUDIT POLICY statement"""
# CREATE AUDIT POLICY already matched
self._expect(TT.IDENTIFIER)
self._parse_audit_policy()
def _parse_create_bufferpool_statement(self):
"""Parses a CREATE BUFFERPOOL statement"""
# CREATE BUFFERPOOL already matched
self._expect(TT.IDENTIFIER)
self._match_one_of(['IMMEDIATE', 'DEFERRED'])
if self._match('ALL'):
self._expect('DBPARTITIONNUMS')
elif self._match('DATABASE'):
self._expect_sequence(['PARTITION', 'GROUP'])
self._parse_ident_list()
elif self._match('NODEGROUP'):
self._parse_ident_list()
self._expect('SIZE')
if self._match(TT.NUMBER):
self._match('AUTOMATIC')
elif self._match('AUTOMATIC'):
pass
else:
self._expected_one_of([TT.NUMBER, 'AUTOMATIC'])
        # Parse bufferpool options (which can appear in any order)
valid = set(['NUMBLOCKPAGES', 'PAGESIZE', 'EXTENDED', 'EXCEPT', 'NOT'])
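        # Each option in the valid set may appear at most once, in any order;
        # matched options are removed from the set and the loop ends as soon
        # as nothing more matches. The same pattern is used for the other
        # "options in any order" clauses throughout this parser.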
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if self._match('EXCEPT'):
self._expect('ON')
self._parse_db_partition_list_clause(size=True)
elif t == 'NUMBLOCKPAGES':
self._expect(TT.NUMBER)
if self._match('BLOCKSIZE'):
self._expect(TT.NUMBER)
elif t == 'PAGESIZE':
self._expect(TT.NUMBER)
self._match('K')
elif t == 'EXTENDED':
self._expect('STORAGE')
valid.remove('NOT')
elif t == 'NOT':
self._expect_sequence(['EXTENDED', 'STORAGE'])
valid.remove('EXTENDED')
def _parse_create_database_partition_group_statement(self):
"""Parses an CREATE DATABASE PARTITION GROUP statement"""
# CREATE [DATABASE PARTITION GROUP|NODEGROUP] already matched
self._expect(TT.IDENTIFIER)
if self._match('ON'):
if self._match('ALL'):
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
else:
self._parse_db_partition_list_clause(size=False)
def _parse_create_event_monitor_statement(self):
"""Parses a CREATE EVENT MONITOR statement"""
# CREATE EVENT MONITOR already matched
self._expect(TT.IDENTIFIER)
self._expect('FOR')
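        # Ambiguity: WLM event monitors and classic (non-WLM) event monitors
        # have different syntaxes after FOR; try the WLM form first and
        # backtrack to the non-WLM form if it fails.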
self._save_state()
try:
self._parse_wlm_event_monitor()
except ParseError:
self._restore_state()
self._parse_nonwlm_event_monitor()
else:
self._forget_state()
def _parse_create_function_statement(self):
"""Parses a CREATE FUNCTION statement"""
# CREATE FUNCTION already matched
self._parse_function_name()
# Parse parameter list
self._expect('(', prespace=False)
if not self._match(')'):
while True:
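                # Ambiguity: a parameter may be written "<name> <datatype>"
                # or as a bare "<datatype>"; try the named form first and
                # fall back to parsing just a datatype.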
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
except ParseError:
self._restore_state()
self._parse_datatype()
else:
self._forget_state()
self._match_sequence(['AS', 'LOCATOR'])
if not self._match(','):
break
self._expect(')')
self._indent()
# Parse function options (which can appear in any order)
valid = set([
'ALLOW',
'CALLED',
'CARDINALITY',
'CONTAINS',
'DBINFO',
'DETERMINISTIC',
'DISALLOW',
'EXTERNAL',
'FENCED',
'FINAL',
'INHERIT',
'LANGUAGE',
'MODIFIES',
'NO',
'NOT',
'NULL',
'PARAMETER',
'READS',
'RETURNS',
'SCRATCHPAD',
'SPECIFIC',
'STATIC',
'THREADSAFE',
'TRANSFORM',
'VARIANT',
])
while True:
# Ambiguity: INHERIT SPECIAL REGISTERS (which appears in the
# variable order options) and INHERIT ISOLATION LEVEL (which must
# appear after the variable order options). See below.
self._save_state()
try:
t = self._match_one_of(valid)
if t:
t = t.value
# Note that matches aren't removed from valid, because it's
# simply too complex to figure out what option disallows
# other options in many cases
else:
# break would skip the except and else blocks
raise ParseBacktrack()
if t == 'ALLOW':
self._expect('PARALLEL')
if self._match_sequence(['EXECUTE', 'ON', 'ALL']):
self._match_sequence(['DATABASE', 'PARTITIONS'])
self._expect_sequence(['RESULT', 'TABLE', 'DISTRIBUTED'])
elif t == 'CALLED':
self._expect_sequence(['ON', 'NULL', 'INPUT'])
elif t == 'CARDINALITY':
self._expect(TT.NUMBER)
elif t == 'CONTAINS':
self._expect('SQL')
elif t == 'DBINFO':
pass
elif t == 'DETERMINISTIC':
pass
elif t == 'DISALLOW':
self._expect('PARALLEL')
elif t == 'EXTERNAL':
if self._match('NAME'):
self._expect_one_of([TT.STRING, TT.IDENTIFIER])
else:
self._expect('ACTION')
elif t == 'FENCED':
pass
elif t == 'FINAL':
self._expect('CALL')
elif t == 'INHERIT':
# Try and parse INHERIT SPECIAL REGISTERS first
if not self._match('SPECIAL'):
raise ParseBacktrack()
self._expect('REGISTERS')
elif t == 'LANGUAGE':
self._expect_one_of(['SQL', 'C', 'JAVA', 'CLR', 'OLE'])
elif t == 'MODIFIES':
self._expect_sequence(['SQL', 'DATA'])
elif t == 'NO':
t = self._expect_one_of(['DBINFO', 'EXTERNAL', 'FINAL', 'SCRATCHPAD', 'SQL']).value
if t == 'EXTERNAL':
self._expect('ACTION')
elif t == 'FINAL':
self._expect('CALL')
elif t == 'NOT':
self._expect_one_of(['DETERMINISTIC', 'FENCED', 'THREADSAFE', 'VARIANT'])
elif t == 'NULL':
self._expect('CALL')
elif t == 'PARAMETER':
if self._match('CCSID'):
self._expect_one_of(['ASCII', 'UNICODE'])
else:
self._expect('STYLE')
self._expect_one_of(['DB2GENERAL', 'DB2GENERL', 'JAVA', 'SQL', 'DB2SQL'])
elif t == 'READS':
self._expect_sequence(['SQL', 'DATA'])
elif t == 'RETURNS':
if self._match('NULL'):
self._expect_sequence(['ON', 'NULL', 'INPUT'])
elif self._match_one_of(['ROW', 'TABLE']):
if self._match('('):
while True:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
self._match_sequence(['AS', 'LOCATOR'])
if not self._match(','):
break
self._expect(')')
else:
self._parse_datatype()
if self._match_sequence(['CAST', 'FROM']):
self._parse_datatype()
self._match_sequence(['AS', 'LOCATOR'])
elif t == 'SCRATCHPAD':
self._expect(TT.NUMBER)
elif t == 'SPECIFIC':
self._expect(TT.IDENTIFIER)
elif t == 'STATIC':
self._expect('DISPATCH')
elif t == 'THREADSAFE':
pass
elif t == 'TRANSFORM':
self._expect_sequence(['GROUP', TT.IDENTIFIER])
elif t == 'VARIANT':
pass
self._newline()
except ParseBacktrack:
# NOTE: This block only gets called for ParseBacktrack errors.
                # Other parse errors will propagate outward. If the above has
# failed, rewind, and drop out of the loop so we can try
# INHERIT ISOLATION LEVEL (and PREDICATES)
self._restore_state()
break
else:
self._forget_state()
# Parse optional PREDICATES clause
if self._match('PREDICATES'):
self._parse_function_predicates_clause()
self._newline()
if self._match('INHERIT'):
self._expect_sequence(['ISOLATION', 'LEVEL'])
self._expect_one_of(['WITH', 'WITHOUT'])
self._expect_sequence(['LOCK', 'REQUEST'])
# Parse the function body
self._outdent()
if self._match('BEGIN'):
self._parse_compiled_compound_statement()
elif self._match('RETURN'):
self._indent()
self._parse_return_statement()
self._outdent()
else:
# External function with no body
pass
def _parse_create_function_mapping_statement(self):
"""Parses a CREATE FUNCTION MAPPING statement"""
# CREATE FUNCTION MAPPING already matched
if not self._match('FOR'):
self._expect_sequence([TT.IDENTIFIER, 'FOR'])
if not self._match('SPECIFIC'):
self._parse_function_name()
self._expect('(', prespace=False)
self._parse_datatype_list()
self._expect(')')
else:
self._parse_function_name()
self._expect('SERVER')
self._parse_remote_server()
if self._match('OPTIONS'):
self._parse_federated_options()
self._match_sequence(['WITH', 'INFIX'])
def _parse_create_histogram_template_statement(self):
"""Parses a CREATE HISTOGRAM TEMPLATE statement"""
# CREATE HISTOGRAM TEMPLATE already matched
self._expect_sequence([TT.IDENTIFIER, 'HIGH', 'BIN', 'VALUE', TT.NUMBER])
def _parse_create_index_statement(self, unique):
"""Parses a CREATE INDEX statement"""
# CREATE [UNIQUE] INDEX already matched
self._parse_index_name()
self._indent()
self._expect('ON')
self._parse_table_name()
self._expect('(')
self._indent()
while True:
if self._match('BUSINESS_TIME'):
self._expect_sequence(['WITHOUT', 'OVERLAPS'])
else:
self._expect(TT.IDENTIFIER)
self._match_one_of(['ASC', 'DESC'])
if not self._match(','):
break
else:
self._newline()
self._outdent()
self._expect(')')
valid = set([
'IN',
'PARTITIONED',
'NOT',
'SPECIFICATION',
'INCLUDE',
'CLUSTER',
'PCTFREE',
'LEVEL2',
'MINPCTUSED',
'ALLOW',
'DISALLOW',
'PAGE',
'COLLECT',
'COMPRESS',
])
while valid:
t = self._match_one_of(valid)
if t:
self._newline(-1)
t = t.value
valid.remove(t)
else:
break
if t == 'IN':
self._expect(TT.IDENTIFIER)
elif t == 'NOT':
self._expect('PARTITIONED')
valid.discard('NOT')
elif t == 'PARTITIONED':
valid.discard('NOT')
elif t == 'SPECIFICATION':
self._expect('ONLY')
elif t == 'INCLUDE':
self._expect('(')
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
elif t == 'CLUSTER':
pass
elif t == 'PCTFREE' or t == 'MINPCTUSED':
self._expect(TT.NUMBER)
elif t == 'LEVEL2':
self._expect_sequence(['PCTFREE', TT.NUMBER])
elif t == 'ALLOW' or t == 'DISALLOW':
valid.discard('ALLOW')
valid.discard('DISALLOW')
self._expect_sequence(['REVERSE', 'SCANS'])
elif t == 'PAGE':
self._expect('SPLIT')
self._expect_one_of(['SYMMETRIC', 'HIGH', 'LOW'])
elif t == 'COLLECT':
self._match('SAMPLED')
self._match('DETAILED')
self._expect('STATISTICS')
elif t == 'COMPRESS':
self._expect_one_of(['NO', 'YES'])
def _parse_create_module_statement(self):
"""Parses a CREATE MODULE statement"""
# CREATE MODULE already matched
self._parse_module_name()
def _parse_create_nickname_statement(self):
"""Parses a CREATE NICKNAME statement"""
# CREATE NICKNAME already matched
self._parse_nickname_name()
if self._match('FOR'):
self._parse_remote_object_name()
else:
self._parse_table_definition(aligntypes=True, alignoptions=True, federated=True)
self._expect_sequence(['FOR', 'SERVER', TT.IDENTIFIER])
if self._match('OPTIONS'):
self._parse_federated_options()
def _parse_create_procedure_statement(self):
"""Parses a CREATE PROCEDURE statement"""
# CREATE PROCEDURE already matched
self._parse_procedure_name()
if self._match('SOURCE'):
self._parse_source_object_name()
if self._match('(', prespace=False):
self._expect(')')
elif self._match('NUMBER'):
self._expect_sequence(['OF', 'PARAMETERS', TT.NUMBER])
if self._match('UNIQUE'):
self._expect(TT.STRING)
            self._expect_sequence(['FOR', 'SERVER', TT.IDENTIFIER])
elif self._match('(', prespace=False):
if not self._match(')'):
while True:
self._match_one_of(['IN', 'OUT', 'INOUT'])
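                    # Ambiguity: as in CREATE FUNCTION, each parameter may be
                    # "<name> <datatype>" or a bare "<datatype>"; try the
                    # named form first.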
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
except ParseError:
self._restore_state()
self._parse_datatype()
else:
self._forget_state()
if self._match('DEFAULT'):
self._parse_expression()
if not self._match(','):
break
self._expect(')')
self._indent()
# Parse procedure options (which can appear in any order)
valid = set([
'AUTONOMOUS',
'CALLED',
'COMMIT',
'CONTAINS',
'DBINFO',
'DETERMINISTIC',
'DYNAMIC',
'EXTERNAL',
'FENCED',
'INHERIT',
'LANGUAGE',
'MODIFIES',
'NEW',
'NO',
            'NOT',
'NULL',
'OLD',
'PARAMETER',
'PROGRAM',
'READS',
'RESULT',
'SPECIFIC',
'THREADSAFE',
'WITH',
])
while True:
t = self._match_one_of(valid)
if t:
t = t.value
# Note that matches aren't removed from valid, because it's
# simply too complex to figure out what option disallows other
# options in many cases
else:
break
if t == 'AUTONOMOUS':
pass
elif t == 'CALLED':
self._expect_sequence(['ON', 'NULL', 'INPUT'])
elif t == 'COMMIT':
self._expect_sequence(['ON', 'RETURN'])
self._expect_one_of(['NO', 'YES'])
elif t == 'CONTAINS':
self._expect('SQL')
elif t == 'DBINFO':
pass
elif t == 'DETERMINISTIC':
pass
elif t == 'DYNAMIC':
self._expect_sequence(['RESULT', 'SETS', TT.NUMBER])
elif t == 'EXTERNAL':
if self._match('NAME'):
self._expect_one_of([TT.STRING, TT.IDENTIFIER])
else:
self._expect('ACTION')
elif t == 'FENCED':
pass
elif t == 'INHERIT':
self._expect_sequence(['SPECIAL', 'REGISTERS'])
elif t == 'LANGUAGE':
self._expect_one_of(['SQL', 'C', 'JAVA', 'COBOL', 'CLR', 'OLE'])
elif t == 'MODIFIES':
self._expect_sequence(['SQL', 'DATA'])
elif t in ['NEW', 'OLD']:
self._expect_sequence(['SAVEPOINT', 'LEVEL'])
elif t == 'NO':
if self._match('EXTERNAL'):
self._expect('ACTION')
else:
self._expect_one_of(['DBINFO', 'SQL'])
elif t == 'NOT':
self._expect_one_of(['DETERMINISTIC', 'FENCED', 'THREADSAFE'])
elif t == 'NULL':
self._expect('CALL')
elif t == 'PARAMETER':
if self._match('CCSID'):
self._expect_one_of(['ASCII', 'UNICODE'])
else:
self._expect('STYLE')
p = self._expect_one_of([
'DB2GENERAL',
'DB2GENERL',
'DB2DARI',
'DB2SQL',
'GENERAL',
'SIMPLE',
'JAVA',
'SQL'
]).value
if p == 'GENERAL':
self._match_sequence(['WITH', 'NULLS'])
elif p == 'SIMPLE':
self._expect('CALL')
self._match_sequence(['WITH', 'NULLS'])
elif t == 'PROGRAM':
self._expect('TYPE')
self._expect_one_of(['SUB', 'MAIN'])
elif t == 'READS':
self._expect_sequence(['SQL', 'DATA'])
elif t == 'RESULT':
self._expect_sequence(['SETS', TT.NUMBER])
elif t == 'SPECIFIC':
self._expect(TT.IDENTIFIER)
elif t == 'THREADSAFE':
pass
elif t == 'WITH':
self._expect_sequence(['RETURN', 'TO'])
self._expect_one_of(['CALLER', 'CLIENT'])
self._expect('ALL')
self._newline()
self._outdent()
self._expect('BEGIN')
self._parse_compiled_compound_statement()
def _parse_create_role_statement(self):
"""Parses a CREATE ROLE statement"""
# CREATE ROLE already matched
self._expect(TT.IDENTIFIER)
def _parse_create_schema_statement(self):
"""Parses a CREATE SCHEMA statement"""
# CREATE SCHEMA already matched
if self._match('AUTHORIZATION'):
self._expect(TT.IDENTIFIER)
else:
self._expect(TT.IDENTIFIER)
if self._match('AUTHORIZATION'):
self._expect(TT.IDENTIFIER)
        # Parse any schema objects (CREATE TABLE/VIEW/INDEX), COMMENT ON, or
        # GRANT statements nested within the CREATE SCHEMA statement
while True:
if self._match('CREATE'):
if self._match('TABLE'):
self._parse_create_table_statement()
elif self._match('VIEW'):
self._parse_create_view_statement()
elif self._match('INDEX'):
self._parse_create_index_statement(unique=False)
elif self._match_sequence(['UNIQUE', 'INDEX']):
self._parse_create_index_statement(unique=True)
else:
self._expected_one_of(['TABLE', 'VIEW', 'INDEX', 'UNIQUE'])
elif self._match_sequence(['COMMENT', 'ON']):
self._parse_comment_statement()
elif self._match('GRANT'):
self._parse_grant_statement()
else:
break
def _parse_create_security_label_component_statement(self):
"""Parses a CREATE SECURITY LABEL COMPONENT statement"""
# CREATE SECURITY LABEL COMPONENT already matched
self._expect(TT.IDENTIFIER)
if self._match('ARRAY'):
self._expect('[', prespace=False)
while True:
self._expect(TT.STRING)
if not self._match(','):
break
self._expect(']')
elif self._match('SET'):
self._expect('{', prespace=False)
while True:
self._expect(TT.STRING)
if not self._match(','):
break
self._expect('}')
elif self._match('TREE'):
self._expect_sequence(['(', TT.STRING, 'ROOT'], prespace=False)
while self._match(','):
self._expect_sequence([TT.STRING, 'UNDER', TT.STRING])
self._expect(')')
def _parse_create_security_label_statement(self):
"""Parses a CREATE SECURITY LABEL statement"""
# CREATE SECURITY LABEL already matched
self._parse_security_label_name()
while True:
self._expect_sequence(['COMPONENT', TT.IDENTIFIER, TT.STRING])
while self._match_sequence([',', TT.STRING]):
pass
if not self._match(','):
break
def _parse_create_security_policy_statement(self):
"""Parses a CREATE SECURITY POLICY statement"""
# CREATE SECURITY POLICY already matched
self._expect_sequence([TT.IDENTIFIER, 'COMPONENTS'])
while True:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
self._expect_sequence(['WITH', 'DB2LBACRULES'])
if self._match_one_of(['OVERRIDE', 'RESTRICT']):
self._expect_sequence(['NOT', 'AUTHORIZED', 'WRITE', 'SECURITY', 'LABEL'])
def _parse_create_sequence_statement(self):
"""Parses a CREATE SEQUENCE statement"""
# CREATE SEQUENCE already matched
self._parse_sequence_name()
if self._match('AS'):
self._parse_datatype()
self._parse_identity_options()
def _parse_create_service_class_statement(self):
"""Parses a CREATE SERVICE CLASS statement"""
# CREATE SERVICE CLASS already matched
self._expect(TT.IDENTIFIER)
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
if self._match_sequence(['AGENT', 'PRIORITY']):
self._expect_one_of(['DEFAULT', TT.NUMBER])
if self._match_sequence(['PREFETCH', 'PRIORITY']):
self._expect_one_of(['DEFAULT', 'HIGH', 'MEDIUM', 'LOW'])
if self._match_sequence(['OUTBOUND', 'CORRELATOR']):
self._expect_one_of(['NONE', TT.STRING])
if self._match_sequence(['COLLECT', 'ACTIVITY', 'DATA']):
self._parse_collect_activity_data_clause(alter=True)
if self._match_sequence(['COLLECT', 'AGGREGATE', 'ACTIVITY', 'DATA']):
self._expect_one_of(['NONE', 'BASE', 'EXTENDED'])
if self._match_sequence(['COLLECT', 'AGGREGATE', 'REQUEST', 'DATA']):
self._expect_one_of(['NONE', 'BASE'])
self._parse_histogram_template_clause()
self._match_one_of(['ENABLE', 'DISABLE'])
def _parse_create_server_statement(self):
"""Parses a CREATE SERVER statement"""
# CREATE SERVER already matched
self._expect(TT.IDENTIFIER)
if self._match('TYPE'):
self._expect(TT.IDENTIFIER)
if self._match('VERSION'):
self._parse_server_version()
if self._match('WRAPPER'):
self._expect(TT.IDENTIFIER)
if self._match('AUTHORIZATION'):
self._expect_sequence([TT.IDENTIFIER, 'PASSWORD', TT.IDENTIFIER])
if self._match('OPTIONS'):
self._parse_federated_options()
def _parse_create_table_statement(self):
"""Parses a CREATE TABLE statement"""
# CREATE TABLE already matched
self._parse_table_name()
if self._match('LIKE'):
self._parse_relation_name()
self._parse_copy_options()
else:
# Ambiguity: Open parentheses could indicate an optional field list
# preceding a materialized query or staging table definition
reraise = False
self._save_state()
try:
# Try parsing CREATE TABLE ... AS first
if self._match('('):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
if self._match('AS'):
reraise = True
self._expect('(')
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
self._parse_refreshable_table_options()
elif self._match('FOR'):
reraise = True
self._parse_relation_name()
                    self._expect_sequence(['PROPAGATE', 'IMMEDIATE'])
else:
self._expected_one_of(['AS', 'FOR'])
except ParseError:
# If that fails, rewind and parse other CREATE TABLE forms
self._restore_state()
if reraise: raise
self._parse_table_definition(aligntypes=True, alignoptions=True, federated=False)
else:
self._forget_state()
# Parse table option suffixes. Not all of these are valid with
# particular table definitions, but it's too difficult to sort out
# which are valid for what we've parsed so far
valid = set([
'ORGANIZE',
'DATA',
'IN',
'INDEX',
'LONG',
'DISTRIBUTE',
'PARTITION',
'COMPRESS',
'VALUE',
'WITH',
'NOT',
'CCSID',
'SECURITY',
'OPTIONS',
])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'ORGANIZE':
self._expect('BY')
if self._match_sequence(['KEY', 'SEQUENCE']):
self._expect('(')
while True:
self._expect(TT.IDENTIFIER)
if self._match('STARTING'):
self._match('FROM')
self._expect(TT.NUMBER)
self._expect('ENDING')
self._match('AT')
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
self._expect_one_of(['ALLOW', 'DISALLOW'])
self._expect('OVERFLOW')
if self._match('PCTFREE'):
                        self._expect(TT.NUMBER)
else:
self._match('DIMENSIONS')
self._expect('(')
while True:
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
elif t == 'DATA':
self._expect('CAPTURE')
self._expect_one_of(['CHANGES', 'NONE'])
elif t == 'IN':
self._parse_ident_list()
if self._match('NO'):
self._expect('CYCLE')
else:
self._match('CYCLE')
elif t == 'LONG':
self._expect('IN')
self._parse_ident_list()
elif t == 'INDEX':
self._expect_sequence(['IN', TT.IDENTIFIER])
elif t == 'DISTRIBUTE':
self._expect('BY')
if self._match('REPLICATION'):
pass
else:
self._match('HASH')
self._expect('(', prespace=False)
self._parse_ident_list()
self._expect(')')
elif t == 'PARTITION':
self._expect('BY')
self._match('RANGE')
self._expect('(')
while True:
self._expect(TT.IDENTIFIER)
if self._match('NULLS'):
self._expect_one_of(['FIRST', 'LAST'])
if not self._match(','):
break
self._expect_sequence([')', '('])
while True:
if self._match('PARTITION'):
self._expect(TT.IDENTIFIER)
self._parse_partition_boundary()
if self._match('IN'):
self._expect(TT.IDENTIFIER)
elif self._match('EVERY'):
if self._match('('):
self._expect(TT.NUMBER)
self._parse_duration_label()
self._expect(')')
else:
self._expect(TT.NUMBER)
self._parse_duration_label()
if not self._match(','):
break
elif t == 'COMPRESS':
self._expect_one_of(['NO', 'YES'])
elif t == 'VALUE':
self._expect('COMPRESSION')
elif t == 'WITH':
self._expect_sequence(['RESTRICT', 'ON', 'DROP'])
elif t == 'NOT':
self._expect_sequence(['LOGGED', 'INITIALLY'])
elif t == 'CCSID':
self._expect_one_of(['ASCII', 'UNICODE'])
elif t == 'SECURITY':
self._expect_sequence(['POLICY', TT.IDENTIFIER])
elif t == 'OPTIONS':
self._parse_federated_options(alter=False)
def _parse_create_tablespace_statement(self, tbspacetype='REGULAR'):
"""Parses a CREATE TABLESPACE statement"""
# CREATE TABLESPACE already matched
self._expect(TT.IDENTIFIER)
if self._match('IN'):
if self._match('DATABASE'):
self._expect_sequence(['PARTITION', 'GROUP'])
elif self._match('NODEGROUP'):
pass
self._expect(TT.IDENTIFIER)
if self._match('PAGESIZE'):
self._expect(TT.NUMBER)
self._match('K')
if self._match('MANAGED'):
self._expect('BY')
if self._match('AUTOMATIC'):
self._expect('STORAGE')
self._parse_tablespace_size_attributes()
elif self._match('DATABASE'):
self._expect('USING')
while True:
self._parse_database_container_clause()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
if not self._match('USING'):
break
self._parse_tablespace_size_attributes()
elif self._match('SYSTEM'):
self._expect('USING')
while True:
self._parse_system_container_clause()
if self._match('ON'):
self._parse_db_partition_list_clause(size=False)
if not self._match('USING'):
break
else:
self._expected_one_of(['AUTOMATIC', 'DATABASE', 'SYSTEM'])
if self._match('EXTENTSIZE'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M'])
if self._match('PREFETCHSIZE'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
if self._match('BUFFERPOOL'):
self._expect(TT.IDENTIFIER)
if self._match('OVERHEAD'):
self._expect(TT.NUMBER)
if self._match('NO'):
self._expect_sequence(['FILE', 'SYSTEM', 'CACHING'])
elif self._match('FILE'):
self._expect_sequence(['SYSTEM', 'CACHING'])
if self._match('TRANSFERRATE'):
self._expect(TT.NUMBER)
if self._match('DROPPED'):
self._expect_sequence(['TABLE', 'RECOVERY'])
self._expect_one_of(['ON', 'OFF'])
def _parse_create_threshold_statement(self):
"""Parses a CREATE THRESHOLD statement"""
# CREATE THRESHOLD already matched
self._expect_sequence([TT.IDENTIFIER, 'FOR'])
if self._match('SERVICE'):
self._expect_sequence(['CLASS', TT.IDENTIFIER])
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
elif self._match('WORKLOAD'):
self._expect(TT.IDENTIFIER)
elif not self._match('DATABASE'):
self._expected_one_of(['SERVICE', 'WORKLOAD', 'DATABASE'])
self._expect_sequence(['ACTIVITIES', 'ENFORCEMENT'])
if self._match('DATABASE'):
self._match('PARTITION')
elif self._match('WORKLOAD'):
self._expect('OCCURRENCE')
else:
self._expected_one_of(['DATABASE', 'WORKLOAD'])
self._match_one_of(['ENABLE', 'DISABLE'])
self._expect('WHEN')
self._parse_threshold_predicate()
self._parse_threshold_exceeded_actions()
def _parse_create_trigger_statement(self):
"""Parses a CREATE TRIGGER statement"""
# CREATE TRIGGER already matched
self._parse_trigger_name()
self._indent()
if self._match_sequence(['NO', 'CASCADE']):
self._expect('BEFORE')
elif self._match('BEFORE'):
pass
elif self._match_sequence(['INSTEAD', 'OF']):
pass
elif self._match('AFTER'):
pass
else:
self._expected_one_of(['AFTER', 'BEFORE', 'NO', 'INSTEAD'])
if self._match('UPDATE'):
if self._match('OF'):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
else:
self._expect_one_of(['INSERT', 'DELETE', 'UPDATE'])
self._expect('ON')
self._parse_table_name()
if self._match('REFERENCING'):
self._newline(-1)
valid = ['OLD', 'NEW', 'OLD_TABLE', 'NEW_TABLE']
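            # The first transition variable after REFERENCING is mandatory,
            # later ones are optional. Each may appear only once, and this
            # parser treats the row variables (OLD/NEW) and the table
            # variables (OLD_TABLE/NEW_TABLE) as mutually exclusive groups.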
while valid:
if len(valid) == 4:
t = self._expect_one_of(valid)
else:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t in ('OLD', 'NEW'):
if 'OLD_TABLE' in valid: valid.remove('OLD_TABLE')
if 'NEW_TABLE' in valid: valid.remove('NEW_TABLE')
elif t in ('OLD_TABLE', 'NEW_TABLE'):
if 'OLD' in valid: valid.remove('OLD')
if 'NEW' in valid: valid.remove('NEW')
self._match('AS')
self._expect(TT.IDENTIFIER)
self._newline()
self._expect_sequence(['FOR', 'EACH'])
self._expect_one_of(['ROW', 'STATEMENT'])
if self._match('MODE'):
self._newline(-1)
self._expect('DB2SQL')
if self._match('WHEN'):
self._expect('(')
self._indent()
self._parse_search_condition()
self._outdent()
self._expect(')')
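        # The trigger body may optionally begin with a label; if none is
        # present the ParseError raised by _expect is swallowed and the body
        # is parsed without one.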
try:
label = self._expect(TT.LABEL).value
self._outdent(-1)
self._newline()
except ParseError:
label = None
if self._match('BEGIN'):
if not label: self._outdent(-1)
self._parse_compiled_compound_statement(label=label)
else:
self._newline()
self._parse_compiled_statement()
if not label: self._outdent()
# XXX This shouldn't be here, but DB2 for z/OS appears to have a
# parser bug which allows this
self._match_sequence([(TT.TERMINATOR, ';'), (TT.KEYWORD, 'END')])
def _parse_create_trusted_context_statement(self):
"""Parses a CREATE TRUSTED CONTEXT statement"""
# CREATE TRUSTED CONTEXT already matched
self._expect_sequence([TT.IDENTIFIER, 'BASED', 'UPON', 'CONNECTION', 'USING'])
valid = set([
'SYSTEM',
'ATTRIBUTES',
'NO',
'DEFAULT',
'DISABLE',
'ENABLE',
'WITH',
])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'SYSTEM':
self._expect_sequence(['AUTHID', TT.IDENTIFIER])
elif t == 'ATTRIBUTES':
                self._expect('(')
                while True:
                    if self._match('ADDRESS'):
                        self._expect(TT.STRING)
                        if self._match('WITH'):
                            self._expect_sequence(['ENCRYPTION', TT.STRING])
                    elif self._match('ENCRYPTION'):
                        self._expect(TT.STRING)
                    if not self._match(','):
                        break
                self._expect(')')
elif t == 'NO':
valid.remove('DEFAULT')
self._expect_sequence(['DEFAULT', 'ROLE'])
elif t == 'DEFAULT':
valid.remove('NO')
self._expect_sequence(['ROLE', TT.IDENTIFIER])
elif t == 'DISABLE':
valid.remove('ENABLE')
elif t == 'ENABLE':
valid.remove('DISABLE')
elif t == 'WITH':
self._expect_sequence(['USE', 'FOR'])
if not self._match('PUBLIC'):
self._expect(TT.IDENTIFIER)
if self._match('ROLE'):
self._expect(TT.IDENTIFIER)
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('AUTHENTICATION')
def _parse_create_type_statement(self):
"""Parses a CREATE DISTINCT TYPE statement"""
# CREATE DISTINCT TYPE already matched
self._parse_type_name()
self._expect('AS')
self._parse_datatype()
if self._match('ARRAY'):
self._expect('[', prespace=False)
self._match(TT.NUMBER)
self._expect(']')
else:
self._match_sequence(['WITH', 'COMPARISONS'])
def _parse_create_type_mapping_statement(self):
"""Parses a CREATE TYPE MAPPING statement"""
# CREATE TYPE MAPPING already matched
self._match(TT.IDENTIFIER)
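        # The mapping may be declared in either direction: FROM the local
        # type TO the remote type, or TO the local type FROM the remote
        # type. Whichever keyword appears first, the opposite keyword is
        # expected before the remote server and type.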
valid = set(['FROM', 'TO'])
t = self._expect_one_of(valid).value
valid.remove(t)
self._match_sequence(['LOCAL', 'TYPE'])
self._parse_datatype()
self._expect_one_of(valid)
self._parse_remote_server()
self._match('REMOTE')
self._expect('TYPE')
self._parse_type_name()
if self._match('FOR'):
self._expect_sequence(['BIT', 'DATA'])
elif self._match('(', prespace=False):
if self._match('['):
self._expect_sequence([TT.NUMBER, '..', TT.NUMBER], interspace=False)
self._expect(']')
else:
self._expect(TT.NUMBER)
if self._match(','):
if self._match('['):
self._expect_sequence([TT.NUMBER, '..', TT.NUMBER], interspace=False)
self._expect(']')
else:
self._expect(TT.NUMBER)
self._expect(')')
if self._match('P'):
self._expect_one_of(['=', '>', '<', '>=', '<=', '<>'])
self._expect('S')
def _parse_create_user_mapping_statement(self):
"""Parses a CREATE USER MAPPING statement"""
# CREATE USER MAPPING already matched
self._expect('FOR')
self._expect_one_of(['USER', TT.IDENTIFIER])
self._expect_sequence(['SERVER', TT.IDENTIFIER])
self._expect('OPTIONS')
self._parse_federated_options(alter=False)
def _parse_create_variable_statement(self):
"""Parses a CREATE VARIABLE statement"""
# CREATE VARIABLE already matched
self._parse_variable_name()
self._parse_datatype()
if self._match_one_of(['DEFAULT', 'CONSTANT']):
self._parse_expression()
def _parse_create_view_statement(self):
"""Parses a CREATE VIEW statement"""
# CREATE VIEW already matched
self._parse_view_name()
if self._match('('):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
self._expect('AS')
self._newline()
self._parse_query()
valid = set(['CASCADED', 'LOCAL', 'CHECK', 'ROW', 'NO'])
while valid:
if not self._match('WITH'):
break
t = self._expect_one_of(valid).value
valid.remove(t)
if t in ('CASCADED', 'LOCAL', 'CHECK'):
valid.discard('CASCADED')
valid.discard('LOCAL')
valid.discard('CHECK')
if t != 'CHECK':
self._expect('CHECK')
self._expect('OPTION')
elif t == 'NO':
valid.remove('ROW')
self._expect_sequence(['ROW', 'MOVEMENT'])
elif t == 'ROW':
valid.remove('NO')
self._expect('MOVEMENT')
def _parse_create_work_action_set_statement(self):
"""Parses a CREATE WORK ACTION SET statement"""
# CREATE WORK ACTION SET already matched
self._expect(TT.IDENTIFIER)
self._expect('FOR')
if self._match('SERVICE'):
self._expect_sequence(['CLASS', TT.IDENTIFIER])
elif self._match('DATABASE'):
pass
else:
self._expected_one_of(['SERVICE', 'DATABASE'])
self._expect_sequence(['USING', 'WORK', 'CLASS', 'SET', TT.IDENTIFIER])
if self._match('('):
self._indent()
while True:
self._expect_sequence(['WORK', 'ACTION', TT.IDENTIFIER, 'ON', 'WORK', 'CLASS', TT.IDENTIFIER])
self._parse_action_types_clause()
self._parse_histogram_template_clause()
self._match_one_of(['ENABLE', 'DISABLE'])
if self._match(','):
self._newline()
else:
break
self._outdent()
self._expect(')')
self._match_one_of(['ENABLE', 'DISABLE'])
def _parse_create_work_class_set_statement(self):
"""Parses a CREATE WORK CLASS SET statement"""
# CREATE WORK CLASS SET already matched
self._expect(TT.IDENTIFIER)
if self._match('('):
self._indent()
while True:
self._match_sequence(['WORK', 'CLASS'])
self._expect(TT.IDENTIFIER)
self._parse_work_attributes()
if self._match('POSITION'):
self._parse_position_clause()
if self._match(','):
self._newline()
else:
break
self._outdent()
self._expect(')')
def _parse_create_workload_statement(self):
"""Parses a CREATE WORKLOAD statement"""
        # CREATE WORKLOAD already matched
self._expect(TT.IDENTIFIER)
first = True
while True:
# Repeatedly try and match connection attributes. Only raise a
# parse error if the first match fails
try:
self._parse_connection_attributes()
            except ParseError, e:
                if first:
                    raise e
                else:
                    # No further connection attributes; stop looping
                    break
            else:
                first = False
self._match_one_of(['ENABLE', 'DISABLE'])
if self._match_one_of(['ALLOW', 'DISALLOW']):
self._expect_sequence(['DB', 'ACCESS'])
if self._match_sequence(['SERVICE', 'CLASS']):
if not self._match('SYSDEFAULTUSERCLASS'):
self._expect(TT.IDENTIFIER)
self._match_sequence(['UNDER', TT.IDENTIFIER])
if self._match('POSITION'):
self._parse_position_clause()
if self._match_sequence(['COLLECT', 'ACTIVITY', 'DATA']):
self._parse_collect_activity_data_clause(alter=True)
def _parse_create_wrapper_statement(self):
"""Parses a CREATE WRAPPER statement"""
# CREATE WRAPPER already matched
self._expect(TT.IDENTIFIER)
if self._match('LIBRARY'):
self._expect(TT.STRING)
if self._match('OPTIONS'):
self._parse_federated_options(alter=False)
def _parse_declare_cursor_statement(self):
"""Parses a top-level DECLARE CURSOR statement"""
# DECLARE already matched
self._expect_sequence([TT.IDENTIFIER, 'CURSOR'])
self._match_sequence(['WITH', 'HOLD'])
self._expect('FOR')
self._newline()
self._parse_select_statement()
def _parse_declare_global_temporary_table_statement(self):
"""Parses a DECLARE GLOBAL TEMPORARY TABLE statement"""
# DECLARE GLOBAL TEMPORARY TABLE already matched
self._parse_table_name()
if self._match('LIKE'):
self._parse_table_name()
self._parse_copy_options()
elif self._match('AS'):
self._parse_full_select()
self._expect_sequence(['DEFINITION', 'ONLY'])
self._parse_copy_options()
else:
self._parse_table_definition(aligntypes=True, alignoptions=False, federated=False)
valid = set(['ON', 'NOT', 'WITH', 'IN', 'PARTITIONING'])
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'ON':
self._expect('COMMIT')
self._expect_one_of(['DELETE', 'PRESERVE'])
self._expect('ROWS')
elif t == 'NOT':
self._expect('LOGGED')
if self._match('ON'):
self._expect('ROLLBACK')
self._expect_one_of(['DELETE', 'PRESERVE'])
self._expect('ROWS')
elif t == 'WITH':
self._expect('REPLACE')
elif t == 'IN':
self._expect(TT.IDENTIFIER)
elif t == 'PARTITIONING':
self._expect('KEY')
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._match_sequence(['USING', 'HASHING'])
def _parse_delete_statement(self):
"""Parses a DELETE statement"""
# DELETE already matched
self._expect('FROM')
if self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
else:
self._parse_subschema_name()
# Ambiguity: INCLUDE is an identifier and hence can look like a table
# correlation name
reraise = False
self._save_state()
try:
# Try and parse a mandatory table correlation followed by a
# mandatory INCLUDE
self._parse_table_correlation(optional=False)
self._newline()
self._expect('INCLUDE')
reraise = True
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
# XXX Is SET required for an assignment clause? The syntax diagram
# doesn't think so...
if self._match('SET'):
self._parse_assignment_clause(allowdefault=False)
except ParseError:
# If that fails, rewind and parse an optional INCLUDE or an
# optional table correlation
self._restore_state()
if reraise: raise
if self._match('INCLUDE'):
self._newline(-1)
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
if self._match('SET'):
self._newline(-1)
self._parse_assignment_clause(allowdefault=False)
else:
self._parse_table_correlation()
else:
self._forget_state()
if self._match('WHERE'):
self._newline(-1)
self._indent()
self._parse_search_condition()
self._outdent()
if self._match('WITH'):
self._newline(-1)
self._expect_one_of(['RR', 'RS', 'CS', 'UR'])
def _parse_drop_statement(self):
"""Parses a DROP statement"""
# DROP already matched
if self._match_one_of(['ALIAS', 'SYNONYM', 'TABLE', 'VIEW', 'NICKNAME', 'VARIABLE']):
self._parse_subschema_name()
elif self._match_sequence(['FUNCTION', 'MAPPING']):
self._parse_function_name()
elif self._match_one_of(['FUNCTION', 'PROCEDURE']):
self._parse_routine_name()
if self._match('(', prespace=False):
self._parse_datatype_list()
self._expect(')')
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
elif self._match('INDEX'):
self._parse_index_name()
elif self._match('SEQUENCE'):
self._parse_sequence_name()
elif self._match_sequence(['SERVICE', 'CLASS']):
self._expect(TT.IDENTIFIER)
if self._match('UNDER'):
self._expect(TT.IDENTIFIER)
elif self._match_one_of(['TABLESPACE', 'TABLESPACES']):
self._parse_ident_list()
elif self._match_one_of(['DATA', 'DISTINCT']):
self._expect('TYPE')
self._parse_type_name()
elif self._match_sequence(['TYPE', 'MAPPING']):
self._parse_type_name()
elif self._match('TYPE'):
self._parse_type_name()
elif self._match_sequence(['USER', 'MAPPING']):
self._expect('FOR')
self._expect_one_of(['USER', TT.IDENTIFIER])
self._expect_sequence(['SERVER', TT.IDENTIFIER])
elif (self._match_sequence(['AUDIT', 'POLICY']) or
self._match('BUFFERPOOL') or
self._match_sequence(['EVENT', 'MONITOR']) or
              self._match_sequence(['HISTOGRAM', 'TEMPLATE']) or
self._match('NODEGROUP') or
self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']) or
self._match('ROLE') or
self._match('SCHEMA') or
self._match_sequence(['SECURITY', 'LABEL', 'COMPONENT']) or
self._match_sequence(['SECURITY', 'LABEL']) or
self._match_sequence(['SECURITY', 'POLICY']) or
self._match('SERVER') or
self._match('THRESHOLD') or
self._match('TRIGGER') or
self._match_sequence(['TRUSTED', 'CONTEXT']) or
self._match_sequence(['WORK', 'ACTION', 'SET']) or
self._match_sequence(['WORK', 'CLASS', 'SET']) or
self._match('WORKLOAD') or
self._match('WRAPPER')):
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of([
'ALIAS',
'AUDIT',
'BUFFERPOOL',
'DATA',
'DATABASE',
'DISTINCT',
'EVENT',
'FUNCTION',
'HISTOGRAM',
'INDEX',
'NICKNAME',
'NODEGROUP',
'PROCEDURE',
'ROLE',
'SCHEMA',
'SECURITY',
'SEQUENCE',
'SERVICE',
'SPECIFIC',
'TABLE',
'TABLESPACE',
'THRESHOLD',
'TRIGGER',
'TRUSTED',
'TYPE',
'USER',
'VARIABLE',
'VIEW',
'WORK',
'WORKLOAD',
'WRAPPER',
])
# XXX Strictly speaking, this isn't DB2 syntax - it's generic SQL. But
# if we stick to strict DB2 semantics, this routine becomes boringly
# long...
self._match_one_of(['RESTRICT', 'CASCADE'])
def _parse_execute_immediate_statement(self):
"""Parses an EXECUTE IMMEDIATE statement in a procedure"""
# EXECUTE IMMEDIATE already matched
self._parse_expression()
def _parse_execute_statement(self):
"""Parses an EXECUTE statement in a procedure"""
# EXECUTE already matched
self._expect(TT.IDENTIFIER)
if self._match('INTO'):
while True:
self._parse_subrelation_name()
if self._match('['):
self._parse_expression()
self._expect(']')
if self._match('.'):
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
if self._match('USING'):
self._parse_expression_list()
def _parse_explain_statement(self):
"""Parses an EXPLAIN statement"""
# EXPLAIN already matched
if self._match('PLAN'):
self._match('SELECTION')
else:
self._expect_one_of(['PLAN', 'ALL'])
if self._match_one_of(['FOR', 'WITH']):
self._expect('SNAPSHOT')
self._match_sequence(['WITH', 'REOPT', 'ONCE'])
self._match_sequence(['SET', 'QUERYNO', '=', TT.NUMBER])
        self._match_sequence(['SET', 'QUERYTAG', '=', TT.STRING])
self._expect('FOR')
if self._match('DELETE'):
self._parse_delete_statement()
elif self._match('INSERT'):
self._parse_insert_statement()
elif self._match('MERGE'):
self._parse_merge_statement()
elif self._match_sequence(['REFRESH', 'TABLE']):
self._parse_refresh_table_statement()
elif self._match_sequence(['SET', 'INTEGRITY']):
self._parse_set_integrity_statement()
elif self._match('UPDATE'):
self._parse_update_statement()
else:
self._parse_select_statement()
def _parse_fetch_statement(self):
"""Parses a FETCH FROM statement in a procedure"""
# FETCH already matched
self._match('FROM')
self._expect(TT.IDENTIFIER)
if self._match('INTO'):
self._parse_ident_list()
elif self._match('USING'):
self._expect('DESCRIPTOR')
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of(['INTO', 'USING'])
def _parse_flush_optimization_profile_cache_statement(self):
"""Parses a FLUSH OPTIMIZATION PROFILE CACHE statement"""
# FLUSH OPTIMIZATION PROFILE CACHE already matched
if not self._match('ALL'):
self._parse_subschema_name()
def _parse_for_statement(self, label=None):
"""Parses a FOR-loop in a dynamic compound statement"""
# FOR already matched
self._expect_sequence([TT.IDENTIFIER, 'AS'])
reraise = False
self._indent()
# Ambiguity: IDENTIFIER vs. select-statement
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._match_one_of(['ASENSITIVE', 'INSENSITIVE'])
self._expect('CURSOR')
reraise = True
if self._match_one_of(['WITH', 'WITHOUT']):
self._expect('HOLD')
self._expect('FOR')
except ParseError:
self._restore_state()
if reraise: raise
else:
self._forget_state()
self._parse_select_statement()
self._outdent()
self._expect('DO')
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
self._newline()
if self._match('END'):
break
self._outdent(-1)
self._expect('FOR')
if label:
self._match((TT.IDENTIFIER, label))
def _parse_free_locator_statement(self):
"""Parses a FREE LOCATOR statement"""
# FREE LOCATOR already matched
self._parse_ident_list()
def _parse_get_diagnostics_statement(self):
"""Parses a GET DIAGNOSTICS statement in a dynamic compound statement"""
# GET DIAGNOSTICS already matched
if self._match('EXCEPTION'):
self._expect((TT.NUMBER, 1))
while True:
self._expect_sequence([TT.IDENTIFIER, '='])
self._expect_one_of(['MESSAGE_TEXT', 'DB2_TOKEN_STRING'])
if not self._match(','):
break
else:
self._expect_sequence([TT.IDENTIFIER, '='])
            self._expect_one_of(['ROW_COUNT', 'DB2_RETURN_STATUS'])
def _parse_goto_statement(self):
"""Parses a GOTO statement in a procedure"""
# GOTO already matched
self._expect(TT.IDENTIFIER)
def _parse_grant_statement(self):
"""Parses a GRANT statement"""
# GRANT already matched
self._parse_grant_revoke(grant=True)
def _parse_if_statement(self):
"""Parses an IF-conditional in a dynamic compound statement"""
# IF already matched
t = 'IF'
while True:
if t in ('IF', 'ELSEIF'):
self._parse_search_condition(newlines=False)
self._expect('THEN')
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
t = self._match_one_of(['ELSEIF', 'ELSE', 'END'])
if t:
self._outdent(-1)
t = t.value
break
else:
self._newline()
elif t == 'ELSE':
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
if self._match('END'):
self._outdent(-1)
break
else:
self._newline()
break
else:
break
self._expect('IF')
def _parse_insert_statement(self):
"""Parses an INSERT statement"""
# INSERT already matched
self._expect('INTO')
if self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
else:
self._parse_subschema_name()
if self._match('('):
self._indent()
self._parse_ident_list(newlines=True)
self._outdent()
self._expect(')')
if self._match('INCLUDE'):
self._newline(-1)
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
# Parse a full-select with optional common-table-expression, allowing
# the DEFAULT keyword in (for example) a VALUES clause
self._newline()
self._parse_query(allowdefault=True)
if self._match('WITH'):
self._newline(-1)
self._expect_one_of(['RR', 'RS', 'CS', 'UR'])
def _parse_iterate_statement(self):
"""Parses an ITERATE statement within a loop"""
# ITERATE already matched
self._match(TT.IDENTIFIER)
def _parse_leave_statement(self):
"""Parses a LEAVE statement within a loop"""
# LEAVE already matched
self._match(TT.IDENTIFIER)
def _parse_lock_table_statement(self):
"""Parses a LOCK TABLE statement"""
# LOCK TABLE already matched
self._parse_table_name()
self._expect('IN')
self._expect_one_of(['SHARE', 'EXCLUSIVE'])
self._expect('MODE')
def _parse_loop_statement(self, label=None):
"""Parses a LOOP-loop in a procedure"""
# LOOP already matched
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
if self._match('END'):
self._outdent(-1)
break
else:
self._newline()
self._expect('LOOP')
if label:
self._match((TT.IDENTIFIER, label))
    def _parse_merge_statement(self):
        """Parses a MERGE statement"""
# MERGE already matched
self._expect('INTO')
if self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
else:
self._parse_subschema_name()
self._parse_table_correlation()
self._expect('USING')
self._parse_table_ref()
self._expect('ON')
self._parse_search_condition()
self._expect('WHEN')
while True:
self._match('NOT')
self._expect('MATCHED')
if self._match('AND'):
self._parse_search_condition()
self._expect('THEN')
self._indent()
if self._match('UPDATE'):
self._expect('SET')
self._parse_assignment_clause(allowdefault=True)
elif self._match('INSERT'):
if self._match('('):
self._parse_ident_list()
self._expect(')')
self._expect('VALUES')
if self._match('('):
self._parse_expression_list(allowdefault=True)
self._expect(')')
else:
if not self._match('DEFAULT'):
self._parse_expression()
if not self._match(','):
break
elif self._match('DELETE'):
pass
elif self._match('SIGNAL'):
                self._parse_signal_statement()
self._outdent()
if not self._match('WHEN'):
break
self._match_sequence(['ELSE', 'IGNORE'])
def _parse_open_statement(self):
"""Parses an OPEN cursor statement"""
# OPEN already matched
self._expect(TT.IDENTIFIER)
if self._match('('):
if not self._match(')'):
self._parse_expression_list()
self._expect(')')
if self._match('USING'):
self._parse_expression_list()
def _parse_prepare_statement(self):
"""Parses a PREPARE statement"""
# PREPARE already matched
self._expect(TT.IDENTIFIER)
if self._match('OUTPUT'):
self._expect('INTO')
self._expect(TT.IDENTIFIER)
elif self._match('INTO'):
self._expect(TT.IDENTIFIER)
if self._match('INPUT'):
self._expect('INTO')
self._expect(TT.IDENTIFIER)
self._expect('FROM')
self._parse_expression()
def _parse_refresh_table_statement(self):
"""Parses a REFRESH TABLE statement"""
# REFRESH TABLE already matched
while True:
self._parse_table_name()
queryopt = False
if self._match('ALLOW'):
if self._match_one_of(['NO', 'READ', 'WRITE']):
self._expect('ACCESS')
elif self._match_sequence(['QUERY', 'OPTIMIZATION']):
queryopt = True
self._expect_sequence(['USING', 'REFRESH', 'DEFERRED', 'TABLES'])
self._match_sequence(['WITH', 'REFRESH', 'AGE', 'ANY'])
else:
self._expected_one_of(['NO', 'READ', 'WRITE', 'QUERY'])
if not queryopt:
if self._match_sequence(['USING', 'REFRESH', 'DEFERRED', 'TABLES']):
self._match_sequence(['WITH', 'REFRESH', 'AGE', 'ANY'])
if not self._match(','):
break
        if self._match('NOT'):
            self._expect('INCREMENTAL')
        else:
            self._match('INCREMENTAL')
def _parse_release_savepoint_statement(self):
"""Parses a RELEASE SAVEPOINT statement"""
# RELEASE [TO] SAVEPOINT already matched
self._expect(TT.IDENTIFIER)
def _parse_rename_tablespace_statement(self):
"""Parses a RENAME TABLESPACE statement"""
# RENAME TABLESPACE already matched
self._expect_sequence([TT.IDENTIFIER, 'TO', TT.IDENTIFIER])
def _parse_rename_statement(self):
"""Parses a RENAME statement"""
# RENAME already matched
if self._match('INDEX'):
self._parse_index_name()
else:
self._match('TABLE')
self._parse_table_name()
self._expect_sequence(['TO', TT.IDENTIFIER])
def _parse_repeat_statement(self, label=None):
"""Parses a REPEAT-loop in a procedure"""
# REPEAT already matched
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
self._newline()
if self._match('UNTIL'):
break
else:
self._newline()
self._outdent(-1)
self._parse_search_condition()
self._expect_sequence(['END', 'REPEAT'])
if label:
self._match((TT.IDENTIFIER, label))
def _parse_resignal_statement(self):
"""Parses a RESIGNAL statement in a dynamic compound statement"""
        # RESIGNAL already matched
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect_one_of([TT.IDENTIFIER, TT.STRING])
else:
if not self._match(TT.IDENTIFIER):
return
if self._match('SET'):
self._expect_sequence(['MESSAGE_TEXT', '='])
self._parse_expression()
def _parse_return_statement(self):
"""Parses a RETURN statement in a compound statement"""
# RETURN already matched
self._save_state()
try:
# Try and parse a select-statement
self._parse_query()
except ParseError:
# If it fails, rewind and try an expression or tuple instead
self._restore_state()
self._save_state()
try:
self._parse_expression()
except ParseError:
self._restore_state()
# If parsing an expression fails, assume it's a parameter-less
# RETURN (as can be used in a procedure)
else:
self._forget_state()
else:
self._forget_state()
def _parse_revoke_statement(self):
"""Parses a REVOKE statement"""
# REVOKE already matched
self._parse_grant_revoke(grant=False)
def _parse_rollback_statement(self):
"""Parses a ROLLBACK statement"""
# ROLLBACK already matched
self._match('WORK')
if self._match('TO'):
self._expect('SAVEPOINT')
self._match(TT.IDENTIFIER)
def _parse_savepoint_statement(self):
"""Parses a SAVEPOINT statement"""
# SAVEPOINT already matched
self._expect(TT.IDENTIFIER)
self._match('UNIQUE')
self._expect_sequence(['ON', 'ROLLBACK', 'RETAIN', 'CURSORS'])
self._match_sequence(['ON', 'ROLLBACK', 'RETAIN', 'LOCKS'])
def _parse_select_statement(self, allowinto=False):
"""Parses a SELECT statement"""
# A top-level select-statement never permits DEFAULTS, although it
# might permit INTO in a procedure
self._parse_query(allowdefault=False, allowinto=allowinto)
# Parse optional SELECT attributes (FOR UPDATE, WITH isolation, etc.)
valid = ['WITH', 'FOR', 'OPTIMIZE']
while valid:
t = self._match_one_of(valid)
if t:
self._newline(-1)
t = t.value
valid.remove(t)
else:
break
if t == 'FOR':
if self._match_one_of(['READ', 'FETCH']):
self._expect('ONLY')
elif self._match('UPDATE'):
if self._match('OF'):
self._parse_ident_list()
else:
self._expected_one_of(['READ', 'FETCH', 'UPDATE'])
elif t == 'OPTIMIZE':
self._expect_sequence(['FOR', TT.NUMBER])
self._expect_one_of(['ROW', 'ROWS'])
elif t == 'WITH':
if self._expect_one_of(['RR', 'RS', 'CS', 'UR']).value in ('RR', 'RS'):
if self._match('USE'):
self._expect_sequence(['AND', 'KEEP'])
self._expect_one_of(['SHARE', 'EXCLUSIVE', 'UPDATE'])
self._expect('LOCKS')
def _parse_set_integrity_statement(self):
"""Parses a SET INTEGRITY statement"""
def parse_access_mode():
if self._match_one_of(['NO', 'READ']):
self._expect('ACCESS')
def parse_cascade_clause():
if self._match('CASCADE'):
if self._expect_one_of(['DEFERRED', 'IMMEDIATE']).value == 'IMMEDIATE':
if self._match('TO'):
if self._match('ALL'):
self._expect('TABLES')
else:
while True:
if self._match('MATERIALIZED'):
self._expect_sequence(['QUERY', 'TABLES'])
elif self._match('FOREIGN'):
self._expect_sequence(['KEY', 'TABLES'])
elif self._match('STAGING'):
self._expect('TABLES')
else:
self._expected_one_of(['MATERIALIZED', 'STAGING', 'FOREIGN'])
if not self._match(','):
break
def parse_check_options():
valid = [
'INCREMENTAL',
'NOT',
'FORCE',
'PRUNE',
'FULL',
'FOR',
]
while valid:
t = self._match_one_of(valid)
if t:
t = t.value
valid.remove(t)
else:
break
if t == 'INCREMENTAL':
valid.remove('NOT')
                elif t == 'NOT':
self._expect('INCREMENTAL')
valid.remove('INCREMENTAL')
elif t == 'FORCE':
self._expect('GENERATED')
elif t == 'PRUNE':
pass
elif t == 'FULL':
self._expect('ACCESS')
elif t == 'FOR':
self._expect('EXCEPTION')
while True:
self._expect('IN')
self._parse_table_name()
self._expect('USE')
self._parse_table_name()
if not self._match(','):
break
def parse_integrity_options():
if not self._match('ALL'):
while True:
if self._match('FOREIGN'):
self._expect('KEY')
elif self._match('CHECK'):
pass
elif self._match('DATALINK'):
self._expect_sequence(['RECONCILE', 'PENDING'])
elif self._match('MATERIALIZED'):
self._expect('QUERY')
elif self._match('GENERATED'):
self._expect('COLUMN')
elif self._match('STAGING'):
pass
else:
self._expected_one_of([
'FOREIGN',
'CHECK',
'DATALINK',
'MATERIALIZED',
'GENERATED',
'STAGING',
])
if not self._match(','):
break
# SET INTEGRITY already matched
self._expect('FOR')
# Ambiguity: SET INTEGRITY ... CHECKED and SET INTEGRITY ... UNCHECKED
# have very different syntaxes, but only after initial similarities.
reraise = False
self._save_state()
try:
# Try and parse SET INTEGRITY ... IMMEDIATE CHECKED
while True:
self._parse_table_name()
if self._match(','):
reraise = True
else:
break
if self._match('OFF'):
reraise = True
parse_access_mode()
parse_cascade_clause()
elif self._match('TO'):
reraise = True
self._expect_sequence(['DATALINK', 'RECONCILE', 'PENDING'])
elif self._match('IMMEDIATE'):
reraise = True
self._expect('CHECKED')
parse_check_options()
elif self._match('FULL'):
reraise = True
self._expect('ACCESS')
elif self._match('PRUNE'):
reraise = True
else:
self._expected_one_of(['OFF', 'TO', 'IMMEDIATE', 'FULL', 'PRUNE'])
except ParseError:
# If that fails, parse SET INTEGRITY ... IMMEDIATE UNCHECKED
self._restore_state()
if reraise: raise
while True:
self._parse_table_name()
parse_integrity_options()
if self._match('FULL'):
self._expect('ACCESS')
if not self._match(','):
break
else:
self._forget_state()
def _parse_set_isolation_statement(self):
"""Parses a SET ISOLATION statement"""
# SET [CURRENT] ISOLATION already matched
self._match('=')
self._expect_one_of(['UR', 'CS', 'RR', 'RS', 'RESET'])
def _parse_set_lock_timeout_statement(self):
"""Parses a SET LOCK TIMEOUT statement"""
# SET [CURRENT] LOCK TIMEOUT already matched
self._match('=')
if self._match('WAIT'):
self._match(TT.NUMBER)
elif self._match('NOT'):
self._expect('WAIT')
elif self._match('NULL'):
pass
elif self._match(TT.NUMBER):
pass
else:
self._expected_one_of(['WAIT', 'NOT', 'NULL', TT.NUMBER])
def _parse_set_path_statement(self):
"""Parses a SET PATH statement"""
# SET [CURRENT] PATH already matched
self._match('=')
while True:
if self._match_sequence([(TT.REGISTER, 'SYSTEM'), (TT.REGISTER, 'PATH')]):
pass
elif self._match((TT.REGISTER, 'USER')):
pass
elif self._match((TT.REGISTER, 'CURRENT')):
self._match((TT.REGISTER, 'PACKAGE'))
self._expect((TT.REGISTER, 'PATH'))
elif self._match((TT.REGISTER, 'CURRENT_PATH')):
pass
else:
self._expect_one_of([TT.IDENTIFIER, TT.STRING])
if not self._match(','):
break
def _parse_set_schema_statement(self):
"""Parses a SET SCHEMA statement"""
# SET [CURRENT] SCHEMA already matched
self._match('=')
t = self._expect_one_of([
(TT.REGISTER, 'USER'),
(TT.REGISTER, 'SESSION_USER'),
(TT.REGISTER, 'SYSTEM_USER'),
(TT.REGISTER, 'CURRENT_USER'),
TT.IDENTIFIER,
TT.STRING,
])
if t.type in (TT.IDENTIFIER, TT.STRING):
self.current_schema = t.value
def _parse_set_session_auth_statement(self):
"""Parses a SET SESSION AUTHORIZATION statement"""
# SET SESSION AUTHORIZATION already matched
self._match('=')
self._expect_one_of([
(TT.REGISTER, 'USER'),
(TT.REGISTER, 'SYSTEM_USER'),
(TT.REGISTER, 'CURRENT_USER'),
TT.IDENTIFIER,
TT.STRING,
])
self._match_sequence(['ALLOW', 'ADMINISTRATION'])
def _parse_set_statement(self):
"""Parses a SET statement in a dynamic compound statement"""
# SET already matched
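        # SET covers a large number of special registers as well as plain
        # variable assignment; anything that matches none of the registers
        # below falls through to the assignment clause at the end.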
if self._match('CURRENT'):
if self._match_sequence(['DECFLOAT', 'ROUNDING', 'MODE']):
self._match('=')
self._expect_one_of([
'ROUND_CEILING',
'ROUND_FLOOR',
'ROUND_DOWN',
'ROUND_HALF_EVEN',
'ROUND_HALF_UP',
TT.STRING,
])
            elif self._match('DEGREE'):
self._match('=')
self._expect(TT.STRING)
elif self._match('EXPLAIN'):
if self._match('MODE'):
self._match('=')
if self._match_one_of(['EVALUATE', 'RECOMMEND']):
self._expect_one_of(['INDEXES', 'PARTITIONINGS'])
elif self._match_one_of(['NO', 'YES', 'REOPT', 'EXPLAIN']):
pass
else:
self._expected_one_of([
'NO',
'YES',
'REOPT',
'EXPLAIN',
'EVALUATE',
'RECOMMEND',
])
elif self._match('SNAPSHOT'):
self._expect_one_of(['NO', 'YES', 'EXPLAIN', 'REOPT'])
else:
self._expected_one_of(['MODE', 'SNAPSHOT'])
elif self._match_sequence(['FEDERATED', 'ASYNCHRONY']):
self._match('=')
self._expect_one_of(['ANY', TT.NUMBER])
elif self._match_sequence(['IMPLICIT', 'XMLPARSE', 'OPTION']):
self._match('=')
self._expect(TT.STRING)
elif self._match('ISOLATION'):
self._parse_set_isolation_statement()
elif self._match_sequence(['LOCK', 'TIMEOUT']):
self._parse_set_lock_timeout_statement()
elif self._match('MAINTAINED'):
self._match('TABLE')
self._expect('TYPES')
self._match_sequence(['FOR', 'OPTIMIZATION'])
self._match('=')
while True:
if self._match_one_of(['ALL', 'NONE']):
break
elif self._match_one_of(['FEDERATED_TOOL', 'USER', 'SYSTEM']):
pass
elif self._match('CURRENT'):
self._expect('MAINTAINED')
self._match('TABLE')
self._expect('TYPES')
self._match_sequence(['FOR', 'OPTIMIZATION'])
if not self._match(','):
break
elif self._match_sequence(['MDC', 'ROLLOUT', 'MODE']):
                self._expect_one_of(['NONE', 'IMMEDIATE', 'DEFERRED'])
elif self._match_sequence(['OPTIMIZATION', 'PROFILE']):
self._match('=')
if not self._match(TT.STRING) and not self._match('NULL'):
self._parse_subschema_name()
elif self._match_sequence(['QUERY', 'OPTIMIZATION']):
self._match('=')
self._expect(TT.NUMBER)
elif self._match_sequence(['REFRESH', 'AGE']):
self._match('=')
self._expect_one_of(['ANY', TT.NUMBER])
elif self._match('PATH'):
self._parse_set_path_statement()
elif self._match('SCHEMA'):
self._parse_set_schema_statement()
else:
            self._expected_one_of([
                'DECFLOAT',
                'DEGREE',
                'EXPLAIN',
                'FEDERATED',
                'IMPLICIT',
                'ISOLATION',
                'LOCK',
                'MAINTAINED',
                'MDC',
                'OPTIMIZATION',
                'QUERY',
                'REFRESH',
                'PATH',
                'SCHEMA',
            ])
elif self._match_sequence(['COMPILATION', 'ENVIRONMENT']):
self._match('=')
self._expect(TT.IDENTIFIER)
elif self._match('ISOLATION'):
self._parse_set_isolation_statement()
elif self._match_sequence(['LOCK', 'TIMEOUT']):
self._parse_set_lock_timeout_statement()
elif self._match_sequence(['ENCRYPTION', 'PASSWORD']):
self._match('=')
self._expect(TT.STRING)
elif self._match_sequence(['EVENT', 'MONITOR']):
self._expect(TT.IDENTIFIER)
self._expect('STATE')
self._match('=')
self._expect(TT.NUMBER)
elif self._match('PASSTHRU'):
self._expect_one_of(['RESET', TT.IDENTIFIER])
elif self._match('PATH'):
self._parse_set_path_statement()
elif self._match('ROLE'):
self._match('=')
self._expect(TT.IDENTIFIER)
elif self._match('CURRENT_PATH'):
self._parse_set_path_statement()
elif self._match('SCHEMA'):
self._parse_set_schema_statement()
elif self._match_sequence(['SERVER', 'OPTION']):
self._expect_sequence([TT.IDENTIFIER, 'TO', TT.STRING, 'FOR', 'SERVER', TT.IDENTIFIER])
elif self._match_sequence(['SESSION', 'AUTHORIZATION']):
self._parse_set_session_auth_statement()
elif self._match('SESSION_USER'):
self._parse_set_session_auth_statement()
else:
self._parse_assignment_clause(allowdefault=True)
def _parse_signal_statement(self):
"""Parses a SIGNAL statement in a dynamic compound statement"""
# SIGNAL already matched
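# Illustrative examples of the forms accepted below (assumed, not taken from
# the original source): SIGNAL SQLSTATE '75001' SET MESSAGE_TEXT = 'bad input'
# or SIGNAL my_condition SET MESSAGE_TEXT = msg_var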
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect_one_of([TT.IDENTIFIER, TT.STRING])
else:
self._expect(TT.IDENTIFIER)
if self._match('SET'):
self._expect_sequence(['MESSAGE_TEXT', '='])
self._parse_expression()
elif self._match('('):
# XXX Ensure syntax only valid within a trigger
self._parse_expression()
self._expect(')')
def _parse_transfer_ownership_statement(self):
"""Parses a TRANSFER OWNERSHIP statement"""
# TRANSFER OWNERSHIP already matched
self._expect('OF')
if self._match_one_of(['ALIAS', 'TABLE', 'VIEW', 'NICKNAME', 'VARIABLE']):
self._parse_subschema_name()
elif self._match_sequence(['FUNCTION', 'MAPPING']):
self._parse_function_name()
elif self._match_one_of(['FUNCTION', 'PROCEDURE']):
self._parse_routine_name()
if self._match('('):
self._parse_datatype_list()
self._expect(')')
elif self._match('SPECIFIC'):
self._expect_one_of(['FUNCTION', 'PROCEDURE'])
self._parse_routine_name()
elif self._match('INDEX'):
self._parse_index_name()
elif self._match('SEQUENCE'):
self._parse_sequence_name()
elif self._match('DISTINCT'):
self._expect('TYPE')
self._parse_type_name()
elif self._match_sequence(['TYPE', 'MAPPING']):
self._parse_type_name()
elif self._match('TYPE'):
self._parse_type_name()
elif (self._match_sequence(['EVENT', 'MONITOR']) or
self._match('NODEGROUP') or
self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']) or
self._match('SCHEMA') or
self._match('TABLESPACE') or
self._match('TRIGGER')):
self._expect(TT.IDENTIFIER)
else:
self._expected_one_of([
'ALIAS',
'DATABASE',
'DISTINCT',
'EVENT',
'FUNCTION',
'INDEX',
'NICKNAME',
'NODEGROUP',
'PROCEDURE',
'SCHEMA',
'SEQUENCE',
'SPECIFIC',
'TABLE',
'TABLESPACE',
'TRIGGER',
'TYPE',
'VARIABLE',
'VIEW',
])
if self._match('USER'):
self._expect(TT.IDENTIFIER)
else:
self._expect_one_of([
(TT.REGISTER, 'USER'),
(TT.REGISTER, 'SESSION_USER'),
(TT.REGISTER, 'SYSTEM_USER'),
])
self._expect_sequence(['PRESERVE', 'PRIVILEGES'])
def _parse_truncate_statement(self):
"""Parses a TRUNCATE statement"""
# TRUNCATE already matched
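# Illustrative example of the syntax handled here (assumed, not from the
# source docs): TRUNCATE TABLE T1 DROP STORAGE IGNORE DELETE TRIGGERS IMMEDIATE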
self._match('TABLE')
self._parse_table_name()
if self._match_one_of(['DROP', 'REUSE']):
self._expect('STORAGE')
if self._match('IGNORE') or self._match_sequence(['RESTRICT', 'WHEN']):
self._expect_sequence(['DELETE', 'TRIGGERS'])
self._match_sequence(['CONTINUE', 'IDENTITY'])
self._expect('IMMEDIATE')
def _parse_update_statement(self):
"""Parses an UPDATE statement"""
# UPDATE already matched
if self._match('('):
self._indent()
self._parse_full_select()
self._outdent()
self._expect(')')
else:
self._parse_subschema_name()
# Ambiguity: INCLUDE is an identifier and hence can look like a table
# correlation name
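# Illustrative examples of the ambiguity (assumed): in
# "UPDATE T A INCLUDE (C INT) SET ..." the identifier A is a correlation name,
# whereas in "UPDATE T INCLUDE (C INT) SET ..." INCLUDE is the keyword
# introducing extra columns; hence the backtracking below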
reraise = False
self._save_state()
try:
# Try and parse a mandatory table correlation followed by a
# mandatory INCLUDE
self._parse_table_correlation(optional=False)
self._newline()
self._expect('INCLUDE')
reraise = True
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
except ParseError:
# If that fails, rewind and parse an optional INCLUDE or an
# optional table correlation
self._restore_state()
if reraise: raise
if self._match('INCLUDE'):
self._newline(-1)
self._expect('(')
self._indent()
self._parse_ident_type_list(newlines=True)
self._outdent()
self._expect(')')
else:
self._parse_table_correlation()
else:
self._forget_state()
# Parse mandatory assignment clause allow DEFAULT values
self._expect('SET')
self._indent()
self._parse_assignment_clause(allowdefault=True)
self._outdent()
if self._match('WHERE'):
self._indent()
self._parse_search_condition()
self._outdent()
if self._match('WITH'):
self._expect_one_of(['RR', 'RS', 'CS', 'UR'])
def _parse_while_statement(self, label=None):
"""Parses a WHILE-loop in a dynamic compound statement"""
# WHILE already matched
self._parse_search_condition(newlines=False)
self._newline()
self._expect('DO')
self._indent()
while True:
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
if self._match('END'):
self._outdent(-1)
break
else:
self._newline()
self._expect('WHILE')
if label:
self._match((TT.IDENTIFIER, label))
# COMPOUND STATEMENTS ####################################################
def _parse_compiled_statement(self):
"""Parses a procedure statement within a procedure body"""
# XXX Should PREPARE be supported here?
try:
label = self._expect(TT.LABEL).value
self._newline()
except ParseError:
label = None
# Procedure specific statements
if self._match('ALLOCATE'):
self._parse_allocate_cursor_statement()
elif self._match('ASSOCIATE'):
self._parse_associate_locators_statement()
elif self._match('BEGIN'):
self._parse_compiled_compound_statement(label=label)
elif self._match('CASE'):
self._parse_case_statement()
elif self._match('CLOSE'):
self._parse_close_statement()
elif self._match_sequence(['EXECUTE', 'IMMEDIATE']):
self._parse_execute_immediate_statement()
elif self._match('EXECUTE'):
self._parse_execute_statement()
elif self._match('FETCH'):
self._parse_fetch_statement()
elif self._match('GOTO'):
self._parse_goto_statement()
elif self._match('LOOP'):
self._parse_loop_statement(label=label)
elif self._match('PREPARE'):
self._parse_prepare_statement()
elif self._match('OPEN'):
self._parse_open_statement()
elif self._match('REPEAT'):
self._parse_repeat_statement(label=label)
# Dynamic compound specific statements
elif self._match('FOR'):
self._parse_for_statement(label=label)
elif self._match_sequence(['GET', 'DIAGNOSTICS']):
self._parse_get_diagnostics_statement()
elif self._match('IF'):
self._parse_if_statement()
elif self._match('ITERATE'):
self._parse_iterate_statement()
elif self._match('LEAVE'):
self._parse_leave_statement()
elif self._match('RETURN'):
self._parse_return_statement()
elif self._match('SET'):
self._parse_set_statement()
elif self._match('SIGNAL'):
self._parse_signal_statement()
elif self._match('WHILE'):
self._parse_while_statement(label=label)
# Generic SQL statements
elif self._match('AUDIT'):
self._parse_audit_statement()
elif self._match('CALL'):
self._parse_call_statement()
elif self._match_sequence(['COMMENT', 'ON']):
self._parse_comment_statement()
elif self._match('COMMIT'):
self._parse_commit_statement()
elif self._match('CREATE'):
self._match_sequence(['OR', 'REPLACE'])
if self._match('TABLE'):
self._parse_create_table_statement()
elif self._match('VIEW'):
self._parse_create_view_statement()
elif self._match('UNIQUE'):
self._expect('INDEX')
self._parse_create_index_statement()
elif self._match('INDEX'):
self._parse_create_index_statement()
else:
self._expected_one_of(['TABLE', 'VIEW', 'INDEX', 'UNIQUE'])
elif self._match_sequence(['DECLARE', 'GLOBAL', 'TEMPORARY', 'TABLE']):
self._parse_declare_global_temporary_table_statement()
elif self._match('DELETE'):
self._parse_delete_statement()
elif self._match('DROP'):
# XXX Limit this to tables, views and indexes somehow?
self._parse_drop_statement()
elif self._match('EXPLAIN'):
self._parse_explain_statement()
elif self._match_sequence(['FLUSH', 'OPTIMIZATION', 'PROFILE', 'CACHE']):
self._parse_flush_optimization_profile_cache_statement()
elif self._match_sequence(['FREE', 'LOCATOR']):
self._parse_free_locator_statement()
elif self._match('GRANT'):
self._parse_grant_statement()
elif self._match('INSERT'):
self._parse_insert_statement()
elif self._match_sequence(['LOCK', 'TABLE']):
self._parse_lock_table_statement()
elif self._match('MERGE'):
self._parse_merge_statement()
elif self._match('RELEASE'):
self._match('TO')
self._expect('SAVEPOINT')
self._parse_release_savepoint_statement()
elif self._match('RESIGNAL'):
self._parse_resignal_statement()
elif self._match('ROLLBACK'):
self._parse_rollback_statement()
elif self._match('SAVEPOINT'):
self._parse_savepoint_statement()
elif self._match_sequence(['TRANSFER', 'OWNERSHIP']):
self._parse_transfer_ownership_statement()
elif self._match('TRUNCATE'):
self._parse_truncate_statement()
elif self._match('UPDATE'):
self._parse_update_statement()
else:
self._parse_select_statement(allowinto=True)
def _parse_compiled_compound_statement(self, label=None):
"""Parses a procedure compound statement (body)"""
# BEGIN already matched
if self._match('NOT'):
self._expect('ATOMIC')
else:
self._match('ATOMIC')
self._indent()
# Ambiguity: there's several statements beginning with DECLARE that can
# occur mixed together or in a specific order here, so we use saved
# states to test for each consecutive block of DECLAREs
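# The blocks are attempted in this order (summary of the code below):
# variable/condition/SQLSTATE/SQLCODE declarations, then DECLARE ... STATEMENT,
# then DECLARE ... CURSOR, then DECLARE ... HANDLER, and finally the procedure
# statements up to the closing END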
# Try and parse DECLARE variable|condition|return-code
while True:
reraise = False
self._save_state()
try:
self._expect('DECLARE')
if self._match('SQLSTATE'):
reraise = True
self._expect_one_of(['CHAR', 'CHARACTER'])
self._expect_sequence(['(', (TT.NUMBER, 5), ')'], prespace=False)
self._match_sequence(['DEFAULT', TT.STRING])
elif self._match('SQLCODE'):
reraise = True
self._expect_one_of(['INT', 'INTEGER'])
self._match_sequence(['DEFAULT', TT.NUMBER])
else:
count = len(self._parse_ident_list())
if count == 1 and self._match('CONDITION'):
reraise = True
self._expect('FOR')
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect(TT.STRING)
else:
self._parse_datatype()
if self._match('DEFAULT'):
reraise = True
self._parse_expression()
self._expect((TT.TERMINATOR, ';'))
self._newline()
except ParseError:
self._restore_state()
if reraise: raise
break
else:
self._forget_state()
# Try and parse DECLARE statement
while True:
reraise = False
self._save_state()
try:
self._expect('DECLARE')
self._parse_ident_list()
self._expect('STATEMENT')
reraise = True
self._expect((TT.TERMINATOR, ';'))
self._newline()
except ParseError:
self._restore_state()
if reraise: raise
break
else:
self._forget_state()
# Try and parse DECLARE CURSOR
while True:
reraise = False
self._save_state()
try:
self._expect_sequence(['DECLARE', TT.IDENTIFIER, 'CURSOR'])
reraise = True
if self._match('WITH'):
if self._match('RETURN'):
self._expect('TO')
self._expect_one_of(['CALLER', 'CLIENT'])
else:
self._expect('HOLD')
if self._match('WITH'):
self._expect_sequence(['RETURN', 'TO'])
self._expect_one_of(['CALLER', 'CLIENT'])
self._expect('FOR')
# Ambiguity: statement name could be reserved word
self._save_state()
try:
# Try and parse a SELECT statement
# XXX Is SELECT INTO permitted in a DECLARE CURSOR?
self._parse_select_statement()
except ParseError:
# If that fails, rewind and parse a simple statement name
self._restore_state()
self._expect(TT.IDENTIFIER)
else:
self._forget_state()
self._expect((TT.TERMINATOR, ';'))
self._newline()
except ParseError:
self._restore_state()
if reraise: raise
break
else:
self._forget_state()
# Try and parse DECLARE HANDLER
while True:
reraise = False
self._save_state()
try:
self._expect('DECLARE')
self._expect_one_of(['CONTINUE', 'UNDO', 'EXIT'])
self._expect('HANDLER')
reraise = True
self._expect('FOR')
self._save_state()
try:
while True:
if self._match('NOT'):
self._expect('FOUND')
else:
self._expect_one_of(['NOT', 'SQLEXCEPTION', 'SQLWARNING'])
if not self._match(','):
break
except ParseError:
self._restore_state()
while True:
if self._match('SQLSTATE'):
self._match('VALUE')
self._expect(TT.STRING)
else:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
else:
self._forget_state()
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
self._newline()
except ParseError:
self._restore_state()
if reraise: raise
break
else:
self._forget_state()
# Parse procedure statements
while not self._match('END'):
self._parse_compiled_statement()
self._expect((TT.TERMINATOR, ';'))
self._newline()
self._outdent(-1)
if label:
self._match((TT.IDENTIFIER, label))
def _parse_statement(self):
"""Parses a top-level statement in an SQL script"""
# XXX CREATE EVENT MONITOR
# If we're reformatting WHITESPACE, add a blank WHITESPACE token to the
# output - this will suppress leading whitespace in front of the first
# word of the statement
self._output.append(Token(TT.WHITESPACE, None, '', 0, 0))
if self._match('ALTER'):
if self._match('TABLE'):
self._parse_alter_table_statement()
elif self._match('SEQUENCE'):
self._parse_alter_sequence_statement()
elif self._match('FUNCTION'):
self._parse_alter_function_statement(specific=False)
elif self._match('PROCEDURE'):
self._parse_alter_procedure_statement(specific=False)
elif self._match('SPECIFIC'):
if self._match('FUNCTION'):
self._parse_alter_function_statement(specific=True)
elif self._match('PROCEDURE'):
self._parse_alter_procedure_statement(specific=True)
else:
self._expected_one_of(['FUNCTION', 'PROCEDURE'])
elif self._match('NICKNAME'):
self._parse_alter_nickname_statement()
elif self._match('TABLESPACE'):
self._parse_alter_tablespace_statement()
elif self._match('BUFFERPOOL'):
self._parse_alter_bufferpool_statement()
elif self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']):
self._parse_alter_partition_group_statement()
elif self._match('DATABASE'):
self._parse_alter_database_statement()
elif self._match('NODEGROUP'):
self._parse_alter_partition_group_statement()
elif self._match('SERVER'):
self._parse_alter_server()
elif self._match_sequence(['HISTOGRAM', 'TEMPLATE']):
self._parse_alter_histogram_template_statement()
elif self._match_sequence(['AUDIT', 'POLICY']):
self._parse_alter_audit_policy_statement()
elif self._match_sequence(['SECURITY', 'LABEL', 'COMPONENT']):
self._parse_alter_security_label_component_statement()
elif self._match_sequence(['SECURITY', 'POLICY']):
self._parse_alter_security_policy_statement()
elif self._match_sequence(['SERVICE', 'CLASS']):
self._parse_alter_service_class_statement()
elif self._match('THRESHOLD'):
self._parse_alter_threshold_statement()
elif self._match_sequence(['TRUSTED', 'CONTEXT']):
self._parse_alter_trusted_context_statement()
elif self._match_sequence(['USER', 'MAPPING']):
self._parse_alter_user_mapping_statement()
elif self._match('VIEW'):
self._parse_alter_view_statement()
elif self._match_sequence(['WORK', 'ACTION', 'SET']):
self._parse_alter_work_action_set_statement()
elif self._match_sequence(['WORK', 'CLASS', 'SET']):
self._parse_alter_work_class_set_statement()
elif self._match('WORKLOAD'):
self._parse_alter_workload_statement()
elif self._match('WRAPPER'):
self._parse_alter_wrapper_statement()
elif self._match('MODULE'):
self._parse_alter_module_statement()
else:
self._expected_one_of([
'AUDIT',
'BUFFERPOOL',
'DATABASE',
'FUNCTION',
'HISTOGRAM',
'MODULE',
'NICKNAME',
'NODEGROUP',
'PROCEDURE',
'SECURITY',
'SEQUENCE',
'SERVER',
'SERVICE',
'SPECIFIC',
'TABLE',
'TABLESPACE',
'THRESHOLD',
'TRUSTED',
'USER',
'VIEW',
'WORK',
'WORKLOAD',
'WRAPPER',
])
elif self._match('AUDIT'):
self._parse_audit_statement()
elif self._match('BEGIN'):
self._parse_compiled_compound_statement()
elif self._match('CALL'):
self._parse_call_statement()
elif self._match_sequence(['COMMENT', 'ON']):
self._parse_comment_statement()
elif self._match('COMMIT'):
self._parse_commit_statement()
elif self._match('CREATE'):
self._match_sequence(['OR', 'REPLACE'])
if self._match('TABLE'):
self._parse_create_table_statement()
elif self._match('VIEW'):
self._parse_create_view_statement()
elif self._match('ALIAS'):
self._parse_create_alias_statement()
elif self._match_sequence(['UNIQUE', 'INDEX']):
self._parse_create_index_statement(unique=True)
elif self._match('INDEX'):
self._parse_create_index_statement(unique=False)
elif self._match('DISTINCT'):
self._expect('TYPE')
self._parse_create_type_statement()
elif self._match('SEQUENCE'):
self._parse_create_sequence_statement()
elif self._match_sequence(['FUNCTION', 'MAPPING']):
self._parse_create_function_mapping_statement()
elif self._match('FUNCTION'):
self._parse_create_function_statement()
elif self._match('PROCEDURE'):
self._parse_create_procedure_statement()
elif self._match('TABLESPACE'):
self._parse_create_tablespace_statement()
elif self._match('BUFFERPOOL'):
self._parse_create_bufferpool_statement()
elif self._match_sequence(['DATABASE', 'PARTITION', 'GROUP']):
self._parse_create_database_partition_group_statement()
elif self._match('NODEGROUP'):
self._parse_create_database_partition_group_statement()
elif self._match('TRIGGER'):
self._parse_create_trigger_statement()
elif self._match('SCHEMA'):
self._parse_create_schema_statement()
elif self._match_sequence(['AUDIT', 'POLICY']):
self._parse_create_audit_policy_statement()
elif self._match_sequence(['EVENT', 'MONITOR']):
self._parse_create_event_monitor_statement()
elif self._match_sequence(['HISTOGRAM', 'TEMPLATE']):
self._parse_create_histogram_template_statement()
elif self._match('NICKNAME'):
self._parse_create_nickname_statement()
elif self._match('ROLE'):
self._parse_create_role_statement()
elif self._match_sequence(['SECURITY', 'LABEL', 'COMPONENT']):
self._parse_create_security_label_component_statement()
elif self._match_sequence(['SECURITY', 'LABEL']):
self._parse_create_security_label_statement()
elif self._match_sequence(['SECURITY', 'POLICY']):
self._parse_create_security_policy_statement()
elif self._match_sequence(['SERVICE', 'CLASS']):
self._parse_create_service_class_statement()
elif self._match('SERVER'):
self._parse_create_server_statement()
elif self._match('THRESHOLD'):
self._parse_create_threshold_statement()
elif self._match_sequence(['TRUSTED', 'CONTEXT']):
self._parse_create_trusted_context_statement()
elif self._match_sequence(['TYPE', 'MAPPING']):
self._parse_create_type_mapping_statement()
elif self._match('TYPE'):
self._parse_create_type_statement()
elif self._match_sequence(['USER', 'MAPPING']):
self._parse_create_user_mapping_statement()
elif self._match('VARIABLE'):
self._parse_create_variable_statement()
elif self._match_sequence(['WORK', 'ACTION', 'SET']):
self._parse_create_work_action_set_statement()
elif self._match_sequence(['WORK', 'CLASS', 'SET']):
self._parse_create_work_class_set_statement()
elif self._match('WORKLOAD'):
self._parse_create_workload_statement()
elif self._match('WRAPPER'):
self._parse_create_wrapper_statement()
elif self._match('MODULE'):
self._parse_create_module_statement()
else:
tbspacetype = self._match_one_of([
'REGULAR',
'LONG',
'LARGE',
'TEMPORARY',
'USER',
'SYSTEM',
])
if tbspacetype:
tbspacetype = tbspacetype.value
if tbspacetype in ('USER', 'SYSTEM'):
self._expect('TEMPORARY')
elif tbspacetype == 'TEMPORARY':
tbspacetype = 'SYSTEM'
elif tbspacetype == 'LONG':
tbspacetype = 'LARGE'
self._expect('TABLESPACE')
self._parse_create_tablespace_statement(tbspacetype)
else:
self._expected_one_of([
'ALIAS',
'AUDIT',
'BUFFERPOOL',
'DATABASE',
'DISTINCT',
'EVENT',
'FUNCTION',
'INDEX',
'MODULE',
'NICKNAME',
'NODEGROUP',
'PROCEDURE',
'ROLE',
'SECURITY',
'SEQUENCE',
'SERVER',
'SERVICE',
'TABLE',
'TABLESPACE',
'THRESHOLD',
'TRIGGER',
'TRUSTED',
'TYPE',
'UNIQUE',
'USER',
'VARIABLE',
'VIEW',
'WORK',
'WORKLOAD',
'WRAPPER',
])
elif self._match('DELETE'):
self._parse_delete_statement()
elif self._match('DROP'):
self._parse_drop_statement()
elif self._match_sequence(['DECLARE', 'GLOBAL', 'TEMPORARY', 'TABLE']):
self._parse_declare_global_temporary_table_statement()
elif self._match('DECLARE'):
self._parse_declare_cursor_statement()
elif self._match('EXPLAIN'):
self._parse_explain_statement()
elif self._match_sequence(['FLUSH', 'OPTIMIZATION', 'PROFILE', 'CACHE']):
self._parse_flush_optimization_profile_cache_statement()
elif self._match_sequence(['FREE', 'LOCATOR']):
self._parse_free_locator_statement()
elif self._match('GRANT'):
self._parse_grant_statement()
elif self._match('INSERT'):
self._parse_insert_statement()
elif self._match_sequence(['LOCK', 'TABLE']):
self._parse_lock_table_statement()
elif self._match('MERGE'):
self._parse_merge_statement()
elif self._match_sequence(['REFRESH', 'TABLE']):
self._parse_refresh_table_statement()
elif self._match('RELEASE'):
self._match('TO')
self._expect('SAVEPOINT')
self._parse_release_savepoint_statement()
elif self._match_sequence(['RENAME', 'TABLESPACE']):
self._parse_rename_tablespace_statement()
elif self._match('RENAME'):
self._parse_rename_statement()
elif self._match('REVOKE'):
self._parse_revoke_statement()
elif self._match('ROLLBACK'):
self._parse_rollback_statement()
elif self._match('SAVEPOINT'):
self._parse_savepoint_statement()
elif self._match_sequence(['SET', 'INTEGRITY']):
self._parse_set_integrity_statement()
elif self._match('SET'):
self._parse_set_statement()
elif self._match_sequence(['TRANSFER', 'OWNERSHIP']):
self._parse_transfer_ownership_statement()
elif self._match('TRUNCATE'):
self._parse_truncate_statement()
elif self._match('UPDATE'):
self._parse_update_statement()
else:
self._parse_select_statement()
def parse_routine_prototype(self, tokens):
"""Parses a routine prototype"""
# It's a bit of a hack sticking this here. This method doesn't really
# belong here and should probably be in a sub-class (it's only used
# for syntax highlighting function prototypes in the documentation
# system)
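# Illustrative prototypes this method should accept (examples assumed, not
# from the source): FOO(IN X INTEGER, OUT Y VARCHAR(10)) RETURNS INTEGER
# and BAR(VARCHAR(8)) RETURNS TABLE (A INTEGER, B DATE)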
self._parse_init(tokens)
# Skip leading whitespace
if self._token().type in (TT.COMMENT, TT.WHITESPACE):
self._index += 1
self._parse_function_name()
# Parenthesized parameter list is mandatory
self._expect('(', prespace=False)
if not self._match(')'):
while True:
self._match_one_of(['IN', 'OUT', 'INOUT'])
self._save_state()
try:
self._expect(TT.IDENTIFIER)
self._parse_datatype()
except ParseError:
self._restore_state()
self._parse_datatype()
else:
self._forget_state()
if not self._match(','):
break
self._expect(')')
# Parse the return type
if self._match('RETURNS'):
if self._match_one_of(['ROW', 'TABLE']):
self._expect('(')
self._parse_ident_type_list()
self._expect(')')
else:
self._parse_datatype()
self._parse_finish()
return self._output
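# Connection records the instance, database and credentials established by a
# CONNECT command; EXPORT/IMPORT/LOAD track the files they produce or consume
# against the connection that was current at the time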
Connection = namedtuple('Connection', ('instance', 'database', 'username', 'password'))
class DB2ZOSScriptParser(DB2ZOSParser):
"""Parser which handles the DB2 UDB CLP dialect.
This class inherits from the DB2 SQL language parser and as such is capable
of parsing all the statements that the parent class is capable of. In
addition, it adds the ability to parse the non-SQL CLP commands (like
IMPORT, EXPORT, LOAD, CREATE DATABASE, etc).
"""
def __init__(self):
super(DB2ZOSScriptParser, self).__init__()
self.connections = []
self.produces = []
self.consumes = []
self.current_user = None
self.current_instance = None
self.current_connection = None
def _match_clp_string(self, password=False):
"""Attempts to match the current tokens as a CLP-style string.
The _match_clp_string() method is used to match a CLP-style string.
The "real" CLP has a fundamentally different style of parser to the
DB2 SQL parser, and includes several behaviours that are difficult
to replicate in this parser (which was primarily targetted at the
DB2 SQL dialect). One of these is the CLP's habit of treating an
unquoted run of non-whitespace tokens as a string, or allowing a
quoted identifier to be treated as a string.
When this method is called it will return a STRING token consisting
of the content of the aforementioned tokens (or None if a CLP-style
string is not found in the source at the current position).
"""
token = self._token()
if token.type == TT.STRING:
# STRINGs are treated verbatim
self._index += 1
elif token.type == TT.IDENTIFIER and token.source[0] == '"':
# Double quoted identifiers are converted to STRING tokens
token = Token(TT.STRING, token.value, quote_str(token.value, "'"), token.line, token.column)
self._index += 1
elif not token.type in (TT.TERMINATOR, TT.EOF):
# Otherwise, any run of non-whitespace tokens is converted to a
# single STRING token
start = self._index
self._index += 1
while True:
token = self._token()
if token.type == TT.STRING:
raise ParseError(self._tokens, token, "Quotes (') not permitted in identifier")
if token.type == TT.IDENTIFIER and token.source[0] == '"':
raise ParseError(self._tokens, token, 'Quotes (") not permitted in identifier')
if token.type in (TT.WHITESPACE, TT.COMMENT, TT.TERMINATOR, TT.EOF):
break
self._index += 1
content = ''.join([token.source for token in self._tokens[start:self._index]])
token = Token(TT.STRING, content, quote_str(content, "'"), self._tokens[start].line, self._tokens[start].column)
else:
token = None
if token:
if not (self._output and self._output[-1].type in (TT.INDENT, TT.WHITESPACE)):
self._output.append(Token(TT.WHITESPACE, None, ' ', 0, 0))
if password:
token = Token(TT.PASSWORD, token.value, token.source, token.line, token.column)
self._output.append(token)
# Skip WHITESPACE and COMMENTS
while self._token().type in (TT.COMMENT, TT.WHITESPACE):
if self._token().type == TT.COMMENT or TT.WHITESPACE not in self.reformat:
self._output.append(self._token())
self._index += 1
return token
def _expect_clp_string(self, password=False):
"""Matches the current tokens as a CLP-style string, or raises an error.
See _match_clp_string() above for details of the algorithm.
"""
result = self._match_clp_string(password)
if not result:
raise ParseExpectedOneOfError(self._tokens, self._token(), [TT.PASSWORD if password else TT.STRING])
return result
# PATTERNS ###############################################################
def _parse_clp_string_list(self):
"""Parses a comma separated list of strings.
This is a common pattern in CLP, for example within the LOBS TO clause of
the EXPORT command. The method returns the list of strings found.
"""
result = []
while True:
result.append(self._expect_clp_string().value)
if not self._match(','):
break
return result
def _parse_number_list(self):
"""Parses a comma separated list of number.
This is a common pattern in CLP, for example within the METHOD clause of
the IMPORT or LOAD commands. The method returns the list of numbers
found.
"""
result = []
while True:
result.append(self._expect(TT.NUMBER).value)
if not self._match(','):
break
return result
def _parse_login(self, optional=True, allowchange=False):
"""Parses a set of login credentials"""
username = None
password = None
if self._match('USER'):
username = self._expect_clp_string().value
if self._match('USING'):
password = self._expect_clp_string(password=True).value
if allowchange:
if self._match('NEW'):
password = self._expect_clp_string(password=True).value
self._expect('CONFIRM')
self._expect_clp_string(password=True)
else:
self._match_sequence(['CHANGE', 'PASSWORD'])
elif not optional:
self._expected('USER')
return (username, password)
# COMMANDS ###############################################################
def _parse_activate_database_command(self):
"""Parses an ACTIVATE DATABASE command"""
# ACTIVATE [DATABASE|DB] already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_add_contact_command(self):
"""Parses an ADD CONTACT command"""
# ADD CONTACT already matched
self._expect_clp_string()
self._expect('TYPE')
if self._expect_one_of(['EMAIL', 'PAGE']).value == 'PAGE':
if self._match_sequence(['MAXIMUM', 'PAGE', 'LENGTH']) or self._match_sequence(['MAX', 'LEN']):
self._expect(TT.NUMBER)
self._expect('ADDRESS')
self._expect_clp_string()
if self._match('DESCRIPTION'):
self._expect_clp_string()
def _parse_add_contactgroup_command(self):
"""Parses an ADD CONTACTGROUP command"""
# ADD CONTACTGROUP already matched
self._expect_clp_string()
while True:
self._expect_one_of(['CONTACT', 'GROUP'])
self._expect_clp_string()
if not self._match(','):
break
if self._match('DESCRIPTION'):
self._expect_clp_string()
def _parse_add_dbpartitionnum_command(self):
"""Parses an ADD DBPARTITIONNUM command"""
# ADD DBPARTITIONNUM already matched
if self._match('LIKE'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
elif self._match('WITHOUT'):
self._expect('TABLESPACES')
def _parse_add_xmlschema_document_command(self):
"""Parses an ADD XMLSCHEMA DOCUMENT command"""
# ADD XMLSCHEMA DOCUMENT already matched
self._expect('TO')
self._parse_subschema_name()
self._expect('ADD')
self._expect_clp_string()
self._expect('FROM')
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
if self._match('COMPLETE'):
if self._match('WITH'):
self._expect_clp_string()
self._match_sequence(['ENABLE', 'DECOMPOSITION'])
def _parse_archive_log_command(self):
"""Parses an ARCHIVE LOG command"""
# ARCHIVE LOG already matched
self._expect('FOR')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
if self._match('USER'):
self._expect_clp_string()
if self._match('USING'):
self._expect_clp_string()
self._parse_db_partitions_clause()
def _parse_attach_command(self):
"""Parses an ATTACH command"""
# ATTACH already matched
if self._match('TO'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=True)
def _parse_autoconfigure_command(self):
"""Parses an AUTOCONFIGURE command"""
# AUTOCONFIGURE already matched
if self._match('USING'):
while True:
self._expect(TT.IDENTIFIER)
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
if self._match('APPLY'):
break
else:
self._expect('APPLY')
if self._match('DB'):
if self._match('AND'):
self._expect('DBM')
else:
self._expect('ONLY')
elif self._match('NONE'):
pass
else:
self._expected_one_of(['DB', 'NONE'])
self._match_sequence(['ON', 'CURRENT', 'NODE'])
def _parse_backup_command(self):
"""Parses a BACKUP DB command"""
# BACKUP [DATABASE|DB] already matched
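# Illustrative example of a command this method accepts (assumed, not from
# the source): BACKUP DB SAMPLE ONLINE TO /backup1, /backup2 COMPRESS
# WITHOUT PROMPTING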
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
self._parse_db_partitions_clause()
if self._match('TABLESPACE'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._match('ONLINE')
if self._match('INCREMENTAL'):
self._match('DELTA')
if self._match('USE'):
if self._match('SNAPSHOT'):
if self._match('LIBRARY'):
self._expect_clp_string()
elif self._match_one_of(['TSM', 'XBSA']):
if self._match('OPEN'):
self._expect(TT.NUMBER)
self._expect('SESSIONS')
if self._match('OPTIONS'):
self._expect_clp_string()
# XXX Add support for @filename response file
elif self._match('TO'):
self._parse_clp_string_list()
elif self._match('LOAD'):
self._expect_clp_string()
if self._match('OPEN'):
self._expect(TT.NUMBER)
self._expect('SESSIONS')
if self._match('OPTIONS'):
self._expect_clp_string()
# XXX Add support for @filename response file
self._match('DEDUP_DEVICE')
if self._match('WITH'):
self._expect(TT.NUMBER)
self._expect('BUFFERS')
if self._match('BUFFER'):
self._expect(TT.NUMBER)
if self._match('PARALLELISM'):
self._expect(TT.NUMBER)
if self._match('COMPRESS'):
if self._match('COMPRLIB'):
self._expect_clp_string()
self._match('EXCLUDE')
if self._match('COMPROPTS'):
self._expect_clp_string()
if self._match('UTIL_IMPACT_PRIORITY'):
self._match(TT.NUMBER)
if self._match_one_of(['EXCLUDE', 'INCLUDE']):
self._expect('LOGS')
if self._match('WITHOUT'):
self._expect('PROMPTING')
# XXX Add support for BIND command
def _parse_catalog_command(self):
"""Parses a CATALOG command"""
# CATALOG already matched
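# Illustrative examples (assumed): CATALOG TCPIP NODE mynode REMOTE dbhost
# SERVER 50000 and CATALOG DB SAMPLE AS MYDB AT NODE mynode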
if self._match_one_of(['USER', 'SYSTEM']):
self._expect('ODBC')
if self._match_sequence(['DATA', 'SOURCE']):
self._expect_clp_string()
else:
self._expect_sequence(['ALL', 'DATA', 'SOURCES'])
elif self._match('ODBC'):
if self._match_sequence(['DATA', 'SOURCE']):
self._expect_clp_string()
else:
self._expect_sequence(['ALL', 'DATA', 'SOURCES'])
elif self._match_one_of(['DATABASE', 'DB']):
self._expect_clp_string()
if self._match('AS'):
self._expect_clp_string()
if self._match('ON'):
self._expect_clp_string()
elif self._match_sequence(['AT', 'NODE']):
self._expect_clp_string()
if self._match('AUTHENTICATION'):
if self._match_sequence(['KERBEROS', 'TARGET', 'PRINCIPAL']):
self._expect_clp_string()
else:
self._expect_one_of([
'SERVER',
'CLIENT',
'SERVER_ENCRYPT',
'SERVER_ENCRYPT_AES',
'KERBEROS',
'DATA_ENCRYPT',
'DATA_ENCRYPT_CMP',
'GSSPLUGIN',
'DCS',
'DCS_ENCRYPT',
])
if self._match('WITH'):
self._expect_clp_string()
elif self._match('DCS'):
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
if self._match('AS'):
self._expect_clp_string()
if self._match('AR'):
self._expect_clp_string()
if self._match('PARMS'):
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
elif self._match('LDAP'):
if self._match_one_of(['DATABASE', 'DB']):
self._expect_clp_string()
if self._match('AS'):
self._expect_clp_string()
if self._match_sequence(['AT', 'NODE']):
self._expect_clp_string()
if self._match('GWNODE'):
self._expect_clp_string()
if self._match('PARMS'):
self._expect_clp_string()
if self._match('AR'):
self._expect_clp_string()
if self._match_sequence(['KERBEROS', 'TARGET', 'PRINCIPAL']):
self._expect_clp_string()
else:
self._expect_one_of([
'SERVER',
'CLIENT',
'SERVER_ENCRYPT',
'SERVER_ENCRYPT_AES',
'KERBEROS',
'DCS',
'DCS_ENCRYPT',
'DATA_ENCRYPT',
'GSSPLUGIN',
])
if self._match('WITH'):
self._expect_clp_string()
elif self._match('NODE'):
self._expect_clp_string()
self._expect('AS')
self._expect_clp_string()
else:
self._expected_one_of(['DATABASE', 'DB', 'NODE'])
self._parse_login(optional=True, allowchange=False)
else:
self._match('ADMIN')
if self._match_sequence(['LOCAL', 'NODE']):
self._expect_clp_string()
if self._match('INSTANCE'):
self._expect_clp_string()
if self._match('SYSTEM'):
self._expect_clp_string()
if self._match('OSTYPE'):
self._expect(TT.IDENTIFIER)
if self._match('WITH'):
self._expect_clp_string()
elif self._match_sequence(['NPIPE', 'NODE']):
self._expect_clp_string()
self._expect('REMOTE')
self._expect_clp_string()
self._expect('INSTANCE')
self._expect_clp_string()
if self._match('SYSTEM'):
self._expect_clp_string()
if self._match('OSTYPE'):
self._expect(TT.IDENTIFIER)
if self._match('WITH'):
self._expect_clp_string()
elif self._match_sequence(['NETBIOS', 'NODE']):
self._expect_clp_string()
self._expect('REMOTE')
self._expect_clp_string()
self._expect('ADAPTER')
self._expect(TT.NUMBER)
if self._match('REMOTE_INSTANCE'):
self._expect_clp_string()
if self._match('SYSTEM'):
self._expect_clp_string()
if self._match('OSTYPE'):
self._expect(TT.IDENTIFIER)
if self._match('WITH'):
self._expect_clp_string()
elif self._match_one_of(['TCPIP', 'TCPIP4', 'TCPIP6']):
self._expect('NODE')
self._expect_clp_string()
self._expect('REMOTE')
self._expect_clp_string()
self._expect('SERVER')
self._expect_clp_string()
if self._match('SECURITY'):
self._match_one_of(['SOCKS', 'SSL'])
if self._match('REMOTE_INSTANCE'):
self._expect_clp_string()
if self._match('SYSTEM'):
self._expect_clp_string()
if self._match('OSTYPE'):
self._expect(TT.IDENTIFIER)
if self._match('WITH'):
self._expect_clp_string()
else:
self._expected_one_of([
'LOCAL',
'NPIPE',
'NETBIOS',
'TCPIP',
'TCPIP4',
'TCPIP6',
])
def _parse_connect_command(self):
"""Parses a CONNECT command"""
# CONNECT already matched
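# Illustrative examples (assumed): CONNECT TO SAMPLE USER db2inst1 USING secret
# and CONNECT RESET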
if self._expect_one_of(['TO', 'RESET']).value == 'RESET':
self.current_connection = None
else:
database = self._expect_clp_string().value
if self._match('IN'):
if self._expect_one_of(['SHARE', 'EXCLUSIVE']).value == 'EXCLUSIVE':
self._expect('MODE')
if self._match('ON'):
self._expect('SINGLE')
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
else:
self._expect('MODE')
(username, password) = self._parse_login(optional=True, allowchange=True)
self.current_connection = Connection(self.current_instance, database, username, password)
self.connections.append(self.current_connection)
def _parse_create_database_command(self):
"""Parses a CREATE DATABASE command"""
def parse_tablespace_definition():
self._expect('MANAGED')
self._expect('BY')
if self._match('SYSTEM'):
self._expect('USING')
self._parse_system_container_clause()
elif self._match('DATABASE'):
self._expect('USING')
self._parse_database_container_clause()
elif self._match('AUTOMATIC'):
self._expect('STORAGE')
if self._match('EXTENTSIZE'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M'])
if self._match('PREFETCHSIZE'):
self._expect(TT.NUMBER)
self._match_one_of(['K', 'M', 'G'])
if self._match('OVERHEAD'):
self._expect(TT.NUMBER)
if self._match('TRANSFERRATE'):
self._expect(TT.NUMBER)
if self._match('NO'):
self._expect_sequence(['FILE', 'SYSTEM', 'CACHING'])
elif self._match('FILE'):
self._expect_sequence(['SYSTEM', 'CACHING'])
self._parse_tablespace_size_attributes()
# CREATE [DATABASE|DB] already matched
self._expect_clp_string()
# XXX Implement AT DBPARTITIONNUM? (not for general use, etc.)
if self._match('AUTOMATIC'):
self._expect('STORAGE')
self._expect_one_of(['NO', 'YES'])
if self._match('ON'):
self._parse_clp_string_list()
if self._match('DBPATH'):
self._expect('ON')
self._expect_clp_string()
if self._match('ALIAS'):
self._expect_clp_string()
if self._match('USING'):
self._expect('CODESET')
self._expect_clp_string()
if self._match('TERRITORY'):
self._expect_clp_string()
if self._match('COLLATE'):
self._expect('USING')
self._expect(TT.IDENTIFIER)
if self._match('PAGESIZE'):
self._expect(TT.NUMBER)
self._match('K')
if self._match('NUMSEGS'):
self._expect(TT.NUMBER)
if self._match('DFT_EXTENT_SZ'):
self._expect(TT.NUMBER)
self._match('RESTRICTIVE')
if self._match('CATALOG'):
self._expect('TABLESPACE')
parse_tablespace_definition()
if self._match('USER'):
self._expect('TABLESPACE')
parse_tablespace_definition()
if self._match('TEMPORARY'):
self._expect('TABLESPACE')
parse_tablespace_definition()
if self._match('WITH'):
self._expect_clp_string()
if self._match('AUTOCONFIGURE'):
self._parse_autoconfigure_command()
def _parse_create_tools_catalog_command(self):
"""Parses a CREATE TOOLS CATALOG command"""
# CREATE TOOLS CATALOG already matched
self._expect_clp_string()
if self._match('CREATE'):
self._expect('NEW')
self._expect('DATABASE')
self._expect_clp_string()
elif self._match('USE'):
self._expect('EXISTING')
if self._match('TABLESPACE'):
self._expect(TT.IDENTIFIER)
self._expect('DATABASE')
self._expect_clp_string()
self._match('FORCE')
if self._match('KEEP'):
self._expect('INACTIVE')
def _parse_deactivate_database_command(self):
"""Parses a DEACTIVATE DATABASE command"""
# DEACTIVATE [DATABASE|DB] already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_decompose_xml_document(self):
"""Parses a DECOMPOSE XML DOCUMENT command"""
# DECOMPOSE XML DOCUMENT already matched
self._expect_clp_string()
self._expect('XMLSCHEMA')
self._parse_subschema_name()
self._match('VALIDATE')
def _parse_decompose_xml_documents(self):
"""Parses a DECOMPOSE XML DOCUMENTS command"""
# DECOMPOSE XML DOCUMENTS already matched
self._expect('IN')
self._parse_select_statement()
self._expect('XMLSCHEMA')
self._parse_subschema_name()
self._match('VALIDATE')
if self._match('ALLOW'):
self._match('NO')
self._expect('ACCESS')
if self._match('COMMITCOUNT'):
self._expect(TT.NUMBER)
self._match_sequence(['CONTINUE', 'ON', 'ERROR'])
if self._match('MESSAGES'):
self._expect_clp_string()
def _parse_deregister_command(self):
"""Parses a DEREGISTER command"""
# DEREGISTER already matched
self._match_sequence(['DB2', 'SERVER'])
self._match('IN')
self._expect_sequence(['LDAP', 'NODE', TT.IDENTIFIER])
self._parse_login(optional=True, allowchange=False)
def _parse_describe_command(self):
"""Parses a DESCRIBE command"""
# DESCRIBE already matched
table = True
if self._match('TABLE'):
pass
elif self._match_sequence(['INDEXES', 'FOR', 'TABLE']):
pass
elif self._match_sequence(['RELATIONAL', 'DATA']) or self._match_sequence(['XML', 'DATA']) or self._match_sequence(['TEXT', 'SEARCH']):
self._expect_sequence(['INDEXES', 'FOR', 'TABLE'])
elif self._match_sequence(['DATA', 'PARTITIONS', 'FOR', 'TABLE']):
pass
else:
table = False
if table:
self._parse_table_name()
self._match_sequence(['SHOW', 'DETAIL'])
else:
self._match('OUTPUT')
self._save_state()
try:
self._parse_select_statement()
except ParseError:
self._restore_state()
self._parse_call_statement()
else:
self._forget_state()
# XXX Add support for XQUERY?
def _parse_detach_command(self):
"""Parses a DETACH command"""
# DETACH already matched
pass
def _parse_disconnect_command(self):
"""Parses a DISCONNECT command"""
# DISCONNECT already matched
if self._match('ALL'):
self._match('SQL')
self.current_connection = None
elif self._match('CURRENT'):
self.current_connection = None
else:
t = self._expect_clp_string()
if isinstance(self.current_connection.database, basestring) and self.current_connection.database.lower() == t.value.lower():
self.current_connection = None
def _parse_drop_contact_command(self):
"""Parses a DROP CONTACT command"""
# DROP CONTACT already matched
self._expect_clp_string()
def _parse_drop_contactgroup_command(self):
"""Parses a DROP CONTACTGROUP command"""
# DROP CONTACTGROUP already matched
self._expect_clp_string()
def _parse_drop_database_command(self):
"""Parses a DROP DATABASE command"""
# DROP [DATABASE|DB] already matched
self._expect_clp_string()
if self._match('AT'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
def _parse_drop_dbpartitionnum_verify_command(self):
"""Parses a DROP DBPARTITIONNUM VERIFY command"""
# DROP DBPARTITIONNUM VERIFY already matched
pass
def _parse_drop_tools_catalog_command(self):
"""Parses a DROP TOOLS CATALOG command"""
# DROP TOOLS CATALOG already matched
self._expect_clp_string()
self._expect('IN')
self._expect('DATABASE')
self._expect_clp_string()
self._match('FORCE')
def _parse_echo_command(self):
"""Parses an ECHO command"""
# ECHO already matched
self._match_clp_string()
def _parse_export_command(self):
"""Parses a EXPORT command"""
# EXPORT already matched
self._expect('TO')
self.produces.append((self._expect_clp_string().value, self.current_connection))
self._expect('OF')
self._expect_one_of(['DEL', 'IXF', 'WSF'])
if self._match('LOBS'):
self._expect('TO')
self._parse_clp_string_list()
if self._match('LOBFILE'):
self._parse_clp_string_list()
if self._match_sequence(['XML', 'TO']):
self._parse_clp_string_list()
if self._match('XMLFILE'):
self._parse_clp_string_list()
if self._match('MODIFIED'):
self._expect('BY')
# The syntax of MODIFIED BY is so incongruous with the parser that
# we don't even try and parse it, just skip tokens until we find
# some "normal" syntax again. Unfortunately, this means the error
# handling becomes rather dumb
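# Illustrative example (assumed): given "MODIFIED BY NOCHARDEL COLDEL;"
# everything is skipped until a recognisable keyword such as METHOD,
# MESSAGES or SELECT is found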
i = self._index
while True:
if self._token(i).value in [
'XMLSAVESCHEMA',
'METHOD',
'MESSAGES',
'HIERARCHY',
'WITH',
'SELECT',
'VALUES',
]:
while self._index < i:
self._output.append(self._token())
self._index += 1
break
if self._token(i).type == TT.EOF:
raise ParseError("Unable to find end of file-modifiers in EXPORT statement")
i += 1
self._match('XMLSAVESCHEMA')
if self._match('METHOD'):
self._expect('N')
self._expect('(')
self._parse_ident_list()
self._expect(')')
if self._match('MESSAGES'):
self._expect_clp_string()
if self._match('HIERARCHY'):
if self._match('STARTING'):
self._expect(TT.IDENTIFIER)
else:
self._expect('(')
self._parse_ident_list()
self._expect(')')
if self._match('WHERE'):
self._parse_search_condition()
else:
self._parse_select_statement()
# XXX Add support for XQUERY?
def _parse_force_application_command(self):
"""Parses a FORCE APPLICATION command"""
# FORCE APPLICATION already matched
if self._match('('):
self._parse_number_list()
self._expect(')')
else:
self._expect('ALL')
if self._match('MODE'):
self._expect('ASYNC')
def _parse_get_admin_cfg_command(self):
"""Parses a GET ADMIN CFG command"""
# GET ADMIN [CONFIGURATION|CONFIG|CFG] already matched
if self._match('FOR'):
self._expect_sequence(['NODE', TT.IDENTIFIER])
self._parse_login(optional=True, allowchange=False)
def _parse_get_alert_cfg_command(self):
"""Parses a GET ALERT CFG command"""
# GET ALERT [CONFIGURATION|CONFIG|CFG] already matched
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match_one_of(['DBM', 'DATABASES', 'CONTAINERS', 'TABLESPACES'])
):
self._match('DEFAULT')
elif (
self._match('DATABASE')
or self._match_sequence(['TABLESPACE', TT.IDENTIFIER])
or self._match_sequence(['CONTAINER', TT.IDENTIFIER, 'FOR', TT.IDENTIFIER])
):
self._expect('ON')
self._expect_clp_string()
else:
self._expected_one_of([
'DB',
'DBM',
'DATABASE',
'DATABASES',
'TABLESPACE',
'TABLESPACES',
'CONTAINER',
'CONTAINERS',
])
if self._match('USING'):
self._parse_clp_string_list()
def _parse_get_cli_cfg_command(self):
"""Parses a GET CLI CFG command"""
# GET CLI [CONFIGURATION|CONFIG|CFG] already matched
self._match_sequence(['AT', 'GLOBAL', 'LEVEL'])
if self._match_sequence(['FOR', 'SECTION']):
self._expect_clp_string()
def _parse_get_connection_state_command(self):
"""Parses a GET CONNECTION STATE command"""
# GET CONNECTION STATE already matched
pass
def _parse_get_contactgroup_command(self):
"""Parses a GET CONTACTGROUP command"""
# GET CONTACTGROUP already matched
self._expect_clp_string()
def _parse_get_contactgroups_command(self):
"""Parses a GET CONTACTGROUPS command"""
# GET CONTACTGROUPS already matched
pass
def _parse_get_contacts_command(self):
"""Parses a GET CONTACTS command"""
# GET CONTACTS already matched
pass
def _parse_get_db_cfg_command(self):
"""Parses a GET DB CFG command"""
# GET [DATABASE|DB] [CONFIGURATION|CONFIG|CFG] already matched
if self._match('FOR'):
self._expect_clp_string()
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_get_dbm_cfg_command(self):
"""Parses a GET DBM CFG command"""
# GET [DATABASE MANAGER|DB MANAGER|DBM] [CONFIGURATION|CONFIG|CFG] already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_get_dbm_monitor_switches_command(self):
"""Parses a GET DBM MONITOR SWITCHES command"""
# GET [DATABASE MANAGER|DB MANAGER|DBM] MONITOR SWITCHES already matched
self._parse_db_partition_clause()
def _parse_get_description_for_health_indicator_command(self):
"""Parses a GET DESCRIPTION FOR HEALTH INDICATOR command"""
# GET DESCRIPTION FOR HEALTH INDICATOR already matched
self._expect_clp_string()
def _parse_get_notification_list_command(self):
"""Parses a GET NOTIFICATION LIST command"""
# GET [HEALTH] NOTIFICATION [CONTACT] LIST already matched
pass
def _parse_get_health_snapshot_command(self):
"""Parses a GET HEALTH SNAPSHOT command"""
# GET HEALTH SNAPSHOT already matched
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match('DBM')
or self._match_sequence(['ALL', 'DATABASES'])
):
pass
elif self._match_one_of(['ALL', 'DATABASE', 'DB', 'TABLESPACES']):
self._expect('ON')
self._expect_clp_string()
else:
self._expected_one_of([
'DB',
'DATABASE',
'DBM',
'ALL',
'TABLESPACES',
])
self._parse_db_partition_clause()
self._match_sequence(['SHOW', 'DETAIL'])
self._match_sequence(['WITH', 'FULL', 'COLLECTION'])
def _parse_get_instance_command(self):
"""Parses a GET INSTANCE command"""
# GET INSTANCE already matched
pass
def _parse_get_monitor_switches_command(self):
"""Parses a GET MONITOR SWITCHES command"""
# GET MONITOR SWITCHES already matched
self._parse_db_partition_clause()
def _parse_get_recommendations_for_health_indicator_command(self):
"""Parses a GET RECOMMENDATIONS FOR HEALTH INDICATOR command"""
# GET RECOMMENDATIONS FOR HEALTH INDICATOR already matched
self._expect_clp_string()
if self._match('FOR'):
if not self._match('DBM'):
if self._match('TABLESPACE'):
self._expect(TT.IDENTIFIER)
elif self._match('CONTAINER'):
self._expect_clp_string()
self._expect_sequence(['FOR', 'TABLESPACE', TT.IDENTIFIER])
elif self._match('DATABASE'):
pass
else:
self._expected_one_of(['TABLESPACE', 'CONTAINER', 'DATABASE', 'DBM'])
self._expect('ON')
self._expect_clp_string()
self._parse_db_partition_clause()
def _parse_get_routine_command(self):
"""Parses a GET ROUTINE command"""
# GET ROUTINE already matched
self._expect('INTO')
self._expect_clp_string()
self._expect('FROM')
self._match('SPECIFIC')
self._expect('PROCEDURE')
self._parse_routine_name()
self._match_sequence(['HIDE', 'BODY'])
def _parse_get_snapshot_command(self):
"""Parses a GET SNAPSHOT command"""
# GET SNAPSHOT already matched
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match('DBM')
or self._match_sequence(['ALL', 'DCS', 'DATABASES'])
or self._match_sequence(['ALL', 'DATABASES'])
or self._match_sequence(['ALL', 'DCS', 'APPLICATIONS'])
or self._match_sequence(['ALL', 'APPLICATIONS'])
or self._match_sequence(['ALL', 'BUFFERPOOLS'])
or self._match_sequence(['FCM', 'FOR', 'ALL', 'DBPARTITIONNUMS'])
or self._match_sequence(['FCM', 'FOR', 'ALL', 'NODES'])
or (self._match_sequence(['DCS', 'APPLICATION', 'APPLID']) and self._match_clp_string())
or self._match_sequence(['DCS', 'APPLICATION', 'AGENTID', TT.NUMBER])
or (self._match_sequence(['APPLICATION', 'APPLID']) and self._match_clp_string())
or self._match_sequence(['APPLICATION', 'AGENTID', TT.NUMBER])
or (self._match_sequence(['LOCKS', 'FOR', 'APPLICATION', 'APPLID']) and self._match_clp_string())
or self._match_sequence(['LOCKS', 'FOR', 'APPLICATION', 'AGENTID', TT.NUMBER])
or self._match_sequence(['ALL', 'REMOTE_DATABASES'])
or self._match_sequence(['ALL', 'REMOTE_APPLICATIONS'])
):
pass
elif self._match_sequence(['DYNAMIC', 'SQL', 'ON']):
self._expect_clp_string()
self._match_sequence(['WRITE', 'TO', 'FILE'])
elif (
self._match('ALL')
or self._match_sequence(['DCS', 'DATABASE'])
or self._match_sequence(['DCS', 'DB'])
or self._match_sequence(['DCS', 'APPLICATIONS'])
or self._match_one_of([
'DATABASE',
'APPLICATIONS',
'TABLES',
'TABLESPACES',
'LOCKS',
'BUFFERPOOLS',
'REMOTE_DATABASES',
'REMOTE_APPLICATIONS'
])
):
self._expect('ON')
self._expect_clp_string()
else:
self._expected_one_of([
'ALL',
'DCS',
'DB',
'DBM',
'DATABASE',
'FCM',
'DYNAMIC',
'APPLICATION',
'APPLICATIONS',
'TABLES',
'TABLESPACES',
'LOCKS',
'BUFFERPOOLS',
'REMOTE_DATABASES',
'REMOTE_APPLICATIONS',
])
self._parse_db_partition_clause()
def _parse_import_method(self):
"""Parses the METHOD clause of an IMPORT/LOAD command"""
# METHOD already matched
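# Illustrative forms (assumed): METHOD L (1 10, 11 20) NULL INDICATORS (21, 22),
# METHOD N (NAME, DEPT) and METHOD P (1, 3, 5)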
if self._match('L'):
self._expect('(')
while True:
self._expect(TT.NUMBER) # col start
self._expect(TT.NUMBER) # col end
if not self._match(','):
break
self._expect(')')
if self._match('NULL'):
self._expect('INDICATORS')
self._expect('(')
self._parse_number_list()
self._expect(')')
elif self._match('N'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
elif self._match('P'):
self._expect('(')
self._parse_number_list()
self._expect(')')
else:
self._expected_one_of(['L', 'N', 'P'])
def _parse_import_command(self):
"""Parses a IMPORT command"""
# IMPORT already matched
self._expect('FROM')
self.consumes.append((self._expect_clp_string().value, self.current_connection))
self._expect('OF')
self._expect_one_of(['ASC', 'DEL', 'IXF', 'WSF'])
if self._match('LOBS'):
self._expect('FROM')
self._parse_clp_string_list()
if self._match('XML'):
self._expect('FROM')
self._parse_clp_string_list()
if self._match('MODIFIED'):
self._expect('BY')
# See _parse_export_command() above for an explanation...
i = self._index
while True:
if self._token(i).value in [
'METHOD',
'COMMITCOUNT',
'RESTARTCOUNT',
'SKIPCOUNT',
'ROWCOUNT',
'WARNINGCOUNT',
'NOTIMEOUT',
'INSERT_UPDATE',
'REPLACE',
'REPLACE_CREATE',
'MESSAGES',
'INSERT',
'CREATE',
'ALLOW',
'XMLPARSE',
'XMLVALIDATE',
]:
while self._index < i:
self._output.append(self._token())
self._index += 1
break
if self._token(i).type == TT.EOF:
raise ParseError("Unable to find end of file-modifiers in IMPORT statement")
i += 1
if self._match('METHOD'):
self._parse_import_method()
if self._match('XMLPARSE'):
self._expect_one_of(['STRIP', 'PRESERVE'])
self._expect('WHITESPACE')
if self._match('XMLVALIDATE'):
self._expect('USING')
if self._match('XDS'):
if self._match('DEFAULT'):
self._parse_subschema_name()
if self._match('IGNORE'):
self._expect('(')
while True:
self._parse_subschema_name()
if not self._match(','):
break
self._expect(')')
if self._match('MAP'):
self._expect('(')
while True:
self._expect('(')
self._parse_subschema_name()
self._expect(',')
self._parse_subschema_name()
self._expect(')')
if not self._match(','):
break
self._expect(')')
elif self._match('SCHEMA'):
self._parse_subschema_name()
elif self._match('SCHEMALOCATION'):
self._expect('HINTS')
if self._match('ALLOW'):
self._expect_one_of(['NO', 'WRITE'])
self._expect('ACCESS')
if self._match('COMMITCOUNT'):
self._expect_one_of([TT.NUMBER, 'AUTOMATIC'])
if self._match_one_of(['RESTARTCOUNT', 'SKIPCOUNT']):
self._expect(TT.NUMBER)
if self._match('ROWCOUNT'):
self._expect(TT.NUMBER)
if self._match('WARNINGCOUNT'):
self._expect(TT.NUMBER)
if self._match('NOTIMEOUT'):
pass
if self._match('MESSAGES'):
self._expect_clp_string()
# Parse the action (CREATE/INSERT/etc.)
t = self._expect_one_of([
'CREATE',
'INSERT',
'INSERT_UPDATE',
'REPLACE',
'REPLACE_CREATE',
])
self._expect('INTO')
self._parse_table_name()
if self._match('('):
self._parse_ident_list()
self._expect(')')
if (t.value == 'CREATE') and self._match('IN'):
self._expect(TT.IDENTIFIER)
if self._match('INDEX'):
self._expect('IN')
self._expect(TT.IDENTIFIER)
if self._match('LONG'):
self._expect('IN')
self._expect(TT.IDENTIFIER)
def _parse_initialize_tape_command(self):
"""Parses an INTIALIZE TAPE command"""
# INITIALIZE TAPE already matched
if self._match('ON'):
self._expect_clp_string()
if self._match('USING'):
self._expect(TT.NUMBER)
def _parse_inspect_command(self):
"""Parses an INSPECT command"""
# INSPECT already matched
if self._match('ROWCOMPESTIMATE'):
self._expect('TABLE')
if self._match('NAME'):
self._expect(TT.IDENTIFIER)
if self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
elif self._match('TBSPACEID'):
self._expect_sequence([TT.NUMBER, 'OBJECTID', TT.NUMBER])
elif self._match('CHECK'):
if self._match('DATABASE'):
if self._match('BEGIN'):
self._expect_sequence(['TBSPACEID', TT.NUMBER])
self._match_sequence(['OBJECTID', TT.NUMBER])
elif self._match('TABLESPACE'):
if self._match('NAME'):
self._expect(TT.IDENTIFIER)
elif self._match('TBSPACEID'):
self._expect(TT.NUMBER)
if self._match('BEGIN'):
self._expect_sequence(['OBJECTID', TT.NUMBER])
if self._match('TABLE'):
if self._match('NAME'):
self._expect(TT.IDENTIFIER)
if self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
elif self._match('TBSPACEID'):
self._expect_sequence([TT.NUMBER, 'OBJECTID', TT.NUMBER])
else:
self._expected_one_of(['ROWCOMPESTIMATE', 'CHECK'])
self._match_sequence(['FOR', 'ERROR', 'STATE', 'ALL'])
if self._match_sequence(['LIMIT', 'ERROR', 'TO']):
self._expect_one_of(['DEFAULT', 'ALL', TT.NUMBER])
if self._match('EXTENTMAP'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('DATA'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('BLOCKMAP'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('INDEX'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('LONG'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('LOB'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
if self._match('XML'):
self._expect_one_of(['NORMAL', 'NONE', 'LOW'])
self._match('INDEXDATA')
self._expect('RESULTS')
self._match('KEEP')
self._expect_clp_string()
self._parse_db_partitions_clause()
def _parse_instance_command(self):
"""Parses the custom (non-CLP) INSTANCE command"""
# INSTANCE already matched
self.current_instance = self._expect_clp_string().value
self.current_connection = None
def _parse_list_active_databases_command(self):
"""Parses a LIST ACTIVE DATABASES command"""
# LIST ACTIVE DATABASES already matched
self._parse_db_partition_clause()
def _parse_list_applications_command(self):
"""Parses a LIST APPLICATIONS command"""
# LIST APPLICATIONS already matched
if self._match('FOR'):
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._parse_db_partition_clause()
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_command_options_command(self):
"""Parses a LIST COMMAND OPTIONS command"""
# LIST COMMAND OPTIONS already matched
pass
def _parse_list_db_directory_command(self):
"""Parses a LIST DB DIRECTORY command"""
# LIST [DATABASE|DB] DIRECTORY already matched
if self._match('ON'):
self._expect_clp_string()
def _parse_list_database_partition_groups_command(self):
"""Parses a LIST DATABASE PARTITION GROUPS command"""
# LIST DATABASE PARTITION GROUPS already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_nodes_command(self):
"""Parses a LIST NODES command"""
# LIST DBPARTITIONNUMS|NODES already matched
pass
def _parse_list_dcs_applications_command(self):
"""Parses a LIST DCS APPLICATIONS command"""
# LIST DCS APPLICATIONS already matched
if not self._match('EXTENDED'):
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_dcs_directory_command(self):
"""Parses a LIST DCS DIRECTORY command"""
# LIST DCS DIRECTORY already matched
pass
def _parse_list_drda_indoubt_transactions_command(self):
"""Parses a LIST DRDA INDOUBT TRANSACTIONS command"""
# LIST DRDA INDOUBT TRANSACTIONS already matched
self._match_sequence(['WITH', 'PROMPTING'])
def _parse_list_history_command(self):
"""Parses a LIST HISTORY command"""
# LIST HISTORY already matched
if self._match_one_of(['CREATE', 'ALTER', 'RENAME']):
self._expect('TABLESPACE')
elif self._match('ARCHIVE'):
self._expect('LOG')
elif self._match('DROPPED'):
self._expect('TABLE')
else:
self._match_one_of(['BACKUP', 'ROLLFORWARD', 'LOAD', 'REORG'])
if self._match('SINCE'):
self._expect(TT.NUMBER)
elif self._match('CONTAINING'):
self._parse_subschema_name()
elif not self._match('ALL'):
self._expected_one_of(['ALL', 'SINCE', 'CONTAINING'])
self._expect('FOR')
self._match_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
def _parse_list_indoubt_transactions_command(self):
"""Parses a LIST INDOUBT TRANSACTIONS command"""
# LIST INDOUBT TRANSACTIONS already matched
self._match_sequence(['WITH', 'PROMPTING'])
def _parse_list_node_directory_command(self):
"""Parses a LIST NODE DIRECTORY command"""
# LIST [ADMIN] NODE DIRECTORY already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_odbc_data_sources_command(self):
"""Parses a LIST ODBC DATA SOURCES command"""
# LIST [USER|SYSTEM] ODBC DATA SOURCES already matched
pass
def _parse_list_tables_command(self):
"""Parses a LIST TABLES command"""
# LIST PACKAGES|TABLES already matched
if self._match('FOR'):
if self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
elif not self._match_one_of(['USER', 'SYSTEM', 'ALL']):
self._expected_one_of(['USER', 'SYSTEM', 'ALL', 'SCHEMA'])
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_tablespace_containers_command(self):
"""Parses a LIST TABLESPACE CONTAINERS command"""
# LIST TABLESPACE CONTAINERS already matched
self._expect_sequence(['FOR', TT.NUMBER])
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_tablespaces_command(self):
"""Parses a LIST TABLESPACES command"""
# LIST TABLESPACES already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_list_utilities_command(self):
"""Parses a LIST UTILITIES command"""
# LIST UTILITIES already matched
self._match_sequence(['SHOW', 'DETAIL'])
def _parse_load_command(self):
"""Parses a LOAD command"""
# LOAD already matched
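        # Illustrative example of a command this method accepts (assumed
        # valid DB2 CLP syntax):
        #   LOAD FROM staff.del OF DEL MESSAGES load.msg INSERT INTO staff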
self._match('CLIENT')
self._expect('FROM')
filename = self._expect_clp_string().value
self._expect('OF')
if self._expect_one_of(['ASC', 'DEL', 'IXF', 'CURSOR']).value != 'CURSOR':
self.consumes.append((filename, self.current_connection))
if self._match('LOBS'):
self._expect('FROM')
self._parse_clp_string_list()
if self._match('XML'):
self._expect('FROM')
self._parse_clp_string_list()
if self._match('MODIFIED'):
self._expect('BY')
# See _parse_export_command() above for an explanation...
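            # The file-type modifiers that follow MODIFIED BY form a space
            # separated list with no terminator of its own, so scan ahead for
            # the first keyword that can only begin the next clause, then
            # re-emit the skipped tokens verbatim into the output.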
i = self._index
while True:
if self._token(i)[1] in [
'INSERT',
'MESSAGES',
'METHOD',
'REPLACE',
'RESTART',
'ROWCOUNT',
'SAVECOUNT',
'TEMPFILES',
'TERMINATE',
'WARNINGCOUNT',
'XMLPARSE',
'XMLVALIDATE',
]:
while self._index < i:
self._output.append(self._token())
self._index += 1
break
if self._token(i).type == TT.EOF:
raise ParseError("Unable to find end of file-modifiers in LOAD statement")
i += 1
if self._match('METHOD'):
self._parse_import_method()
if self._match('XMLPARSE'):
self._expect_one_of(['STRIP', 'PRESERVE'])
self._expect('WHITESPACE')
if self._match('XMLVALIDATE'):
self._expect('USING')
if self._match('XDS'):
if self._match('DEFAULT'):
self._parse_subschema_name()
if self._match('IGNORE'):
self._expect('(')
while True:
self._parse_subschema_name()
if not self._match(','):
break
self._expect(')')
if self._match('MAP'):
self._expect('(')
while True:
self._expect('(')
self._parse_subschema_name()
self._expect(',')
self._parse_subschema_name()
self._expect(')')
if not self._match(','):
break
self._expect(')')
elif self._match('SCHEMA'):
self._parse_subschema_name()
elif self._match('SCHEMALOCATION'):
self._expect('HINTS')
if self._match('SAVECOUNT'):
self._expect(TT.NUMBER)
if self._match('ROWCOUNT'):
self._expect(TT.NUMBER)
if self._match('WARNINGCOUNT'):
self._expect(TT.NUMBER)
if self._match('MESSAGES'):
self._expect_clp_string()
if self._match('TEMPFILES'):
self._expect('PATH')
self._expect_clp_string()
if self._expect_one_of(['INSERT', 'RESTART', 'REPLACE', 'TERMINATE']).value == 'REPLACE':
self._match_one_of(['KEEPDICTIONARY', 'RESETDICTIONARY'])
self._expect('INTO')
self._parse_table_name()
if self._match('('):
self._parse_ident_list()
self._expect(')')
if self._match('FOR'):
self._expect('EXCEPTION')
self._parse_table_name()
if self._match_one_of(['NORANGEEXC', 'NOUNIQUEEXC']):
if self._match(','):
self._expect_one_of(['NORANGEEXC', 'NOUNIQUEEXC'])
if self._match('STATISTICS'):
if self._expect_one_of(['NO', 'USE']).value == 'USE':
self._expect('PROFILE')
if self._match('COPY'):
if self._expect_one_of(['NO', 'YES']).value == 'YES':
if self._match('USE'):
self._expect('TSM')
if self._match('OPEN'):
self._expect_sequence([TT.NUMBER, 'SESSIONS'])
elif self._match('TO'):
self._parse_clp_string_list()
elif self._match('LOAD'):
self._expect_clp_string()
if self._match('OPEN'):
self._expect_sequence([TT.NUMBER, 'SESSIONS'])
else:
self._expected_one_of(['USE', 'TO', 'LOAD'])
elif self._match('NONRECOVERABLE'):
pass
if self._match('WITHOUT'):
self._expect('PROMPTING')
if self._match('DATA'):
self._expect('BUFFER')
self._expect(TT.NUMBER)
if self._match('SORT'):
self._expect('BUFFER')
self._expect(TT.NUMBER)
if self._match('CPU_PARALLELISM'):
self._expect(TT.NUMBER)
if self._match('DISK_PARALLELISM'):
self._expect(TT.NUMBER)
if self._match('FETCH_PARALLELISM'):
self._expect_one_of(['YES', 'NO'])
if self._match('INDEXING'):
self._expect('MODE')
self._expect_one_of(['AUTOSELECT', 'REBUILD', 'INCREMENTAL', 'DEFERRED'])
if self._match('ALLOW'):
if self._match_sequence(['READ', 'ACCESS']):
self._match_sequence(['USE', TT.IDENTIFIER])
elif self._match_sequence(['NO', 'ACCESS']):
pass
else:
self._expected_one_of(['READ', 'NO'])
if self._match_sequence(['SET', 'INTEGRITY']):
self._expect_sequence(['PENDING', 'CASCADE'])
self._expect_one_of(['DEFERRED', 'IMMEDIATE'])
if self._match('LOCK'):
self._expect_sequence(['WITH', 'FORCE'])
if self._match('SOURCEUSEREXIT'):
self._expect_clp_string()
if self._match('REDIRECT'):
if self._match('INPUT'):
self._expect('FROM')
self._expect_one_of(['BUFFER', 'FILE'])
self._expect_clp_string()
if self._match('OUTPUT'):
self._expect_sequence(['TO', 'FILE'])
self._expect_clp_string()
self._match_sequence(['PARTITIONED', 'DB', 'CONFIG'])
while True:
if self._match('MODE'):
self._expect_one_of([
'PARTITION_AND_LOAD',
'PARTITION_ONLY',
'LOAD_ONLY',
'LOAD_ONLY_VERIFY_PART',
'ANALYZE',
])
elif self._match('ISOLATE_PART_ERRS'):
self._expect_one_of([
'SETUP_ERRS_ONLY',
'LOAD_ERRS_ONLY',
'SETUP_AND_LOAD_ERRS',
'NO_ISOLATION',
])
elif self._match_one_of(['PART_FILE_LOCATION', 'MAP_FILE_INPUT', 'MAP_FILE_OUTPUT', 'DISTFILE']):
self._expect_clp_string()
elif self._match_one_of(['OUTPUT_DBPARTNUMS', 'PARTITIONING_DBPARTNUMS']):
self._expect('(')
while True:
self._expect(TT.NUMBER)
if self._match('TO'):
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
elif self._match_one_of(['MAXIMUM_PART_AGENTS', 'STATUS_INTERVAL', 'TRACE', 'RUN_STAT_DBPARTNUM']):
self._expect(TT.NUMBER)
elif self._match('PORT_RANGE'):
self._expect_sequence(['(', TT.NUMBER, ',', TT.NUMBER, ')'])
elif self._match_one_of(['CHECK_TRUNCATION', 'NEWLINE', 'OMIT_HEADER']):
pass
else:
break
def _parse_load_query_command(self):
"""Parses a LOAD QUERY command"""
# LOAD QUERY already matched
self._expect('TABLE')
self._parse_table_name()
if self._match('TO'):
self._expect_clp_string()
self._match_one_of(['NOSUMMARY', 'SUMMARYONLY'])
self._match('SHOWDELTA')
def _parse_migrate_db_command(self):
"""Parses a MIGRATE DB command"""
# MIGRATE [DATABASE|DB] already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_on_command(self):
"""Parses the custom (non-CLP) ON SQLCODE|SQLSTATE|ERROR|REGEX command"""
# ON already matched
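        # Illustrative example of this tool's custom syntax, derived from the
        # grammar implemented below (not a native DB2 CLP command):
        #   ON SQLCODE -204 WAIT 5 SECONDS AND RETRY STATEMENT 3 TIMES THEN CONTINUE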
if self._match('SQLCODE'):
if self._match((TT.OPERATOR, '-')):
self._expect(TT.NUMBER)
else:
self._expect_one_of([TT.STRING, TT.NUMBER])
elif self._match('SQLSTATE'):
self._expect(TT.STRING)
elif self._match('ERROR'):
pass
elif self._match('REGEX'):
self._expect(TT.STRING)
else:
self._expected_one_of(['SQLCODE', 'SQLSTATE', 'ERROR', 'REGEX'])
wait = False
if self._match('WAIT'):
wait = True
self._expect(TT.NUMBER)
self._expect_one_of(['SECOND', 'SECONDS', 'MINUTE', 'MINUTES', 'HOUR', 'HOURS'])
self._match('AND')
retry = False
if self._match('RETRY'):
retry = True
self._expect_one_of(['STATEMENT', 'SCRIPT'])
if self._match(TT.NUMBER):
self._expect_one_of(['TIME', 'TIMES'])
self._match('THEN')
if wait and not retry:
self._expected('RETRY')
self._expect_one_of(['FAIL', 'STOP', 'CONTINUE', 'IGNORE'])
def _parse_ping_command(self):
"""Parses a PING command"""
# PING already matched
self._expect_clp_string()
if self._match('REQUEST'):
self._expect(TT.NUMBER)
if self._match('RESPONSE'):
self._expect(TT.NUMBER)
if self._match(TT.NUMBER):
self._match_one_of(['TIME', 'TIMES'])
def _parse_precompile_command(self):
"""Parses a PRECOMPILE command"""
# [PREP|PRECOMPILE] already matched
# XXX Can these parameters be specified in any order?
self._expect_clp_string()
if self._match('ACTION'):
            if self._expect_one_of(['ADD', 'REPLACE']).value == 'ADD':
pass
else:
if self._match('RETAIN'):
self._expect_one_of(['YES', 'NO'])
                if self._match('REPLVER'):
self._expect_clp_string()
if self._match('APREUSE'):
self._expect_one_of(['YES', 'NO'])
if self._match('BINDFILE'):
if self._match('USING'):
self._expect_clp_string()
if self._match('BLOCKING'):
self._expect_one_of(['UNAMBIG', 'ALL', 'NO'])
if self._match('COLLECTION'):
self._expect(TT.IDENTIFIER)
if self._match('CALL_RESOLUTION'):
self._expect_one_of(['IMMEDIATE', 'DEFERRED'])
if self._match('CCSIDG'):
self._expect(TT.NUMBER)
if self._match('CCSIDM'):
self._expect(TT.NUMBER)
if self._match('CCSIDS'):
self._expect(TT.NUMBER)
if self._match('CHARSUB'):
self._expect_one_of(['DEFAULT', 'BIT', 'MIXED', 'SBCS'])
if self._match('CNULREQD'):
self._expect_one_of(['YES', 'NO'])
if self._match('COLLECTION'):
self._expect(TT.IDENTIFIER)
self._match_one_of(['COMPILE', 'PRECOMPILE'])
if self._match('CONCURRENTACCESSRESOLUTION'):
if self._expect_one_of(['USE', 'WAIT']).value == 'USE':
self._expect_sequence(['CURRENTLY', 'COMMITTED'])
else:
self._expect_sequence(['FOR', 'OUTCOME'])
if self._match('CONNECT'):
self._expect(TT.NUMBER)
if self._match('DATETIME'):
self._expect_one_of(['DEF', 'EUR', 'ISO', 'JIS', 'LOC', 'USA'])
if self._match('DBPROTOCOL'):
self._expect_one_of(['DRDA', 'PRIVATE'])
if self._match('DEC'):
self._expect(TT.NUMBER)
if self._match('DECDEL'):
self._expect_one_of(['PERIOD', 'COMMA'])
if self._match('DEFERRED_PREPARE'):
self._expect_one_of(['NO', 'ALL', 'YES'])
if self._match('DEGREE'):
self._expect_one_of([TT.NUMBER, 'ANY'])
if self._match('DISCONNECT'):
self._expect_one_of(['EXPLICIT', 'AUTOMATIC', 'CONDITIONAL'])
if self._match('DYNAMICRULES'):
self._expect_one_of(['RUN', 'BIND', 'INVOKERUN', 'INVOKEBIND', 'DEFINERUN', 'DEFINEBIND'])
if self._match('ENCODING'):
self._expect_one_of(['ASCII', 'EBCDIC', 'UNICODE', 'CCSID'])
if self._match('EXPLAIN'):
self._expect_one_of(['NO', 'ALL', 'ONLY', 'REOPT', 'YES'])
if self._match('EXPLSNAP'):
self._expect_one_of(['NO', 'ALL', 'REOPT', 'YES'])
if self._match('EXTENDEDINDICATOR'):
self._expect_one_of(['YES', 'NO'])
if self._match('FEDERATED'):
self._expect_one_of(['YES', 'NO'])
if self._match('FEDERATED_ASYNCHRONY'):
self._expect_one_of([TT.NUMBER, 'ANY'])
if self._match('FUNCPATH'):
self._parse_ident_list()
if self._match('GENERIC'):
self._expect_clp_string()
        if self._match('IMMEDWRITE'):
self._expect_one_of(['NO', 'YES', 'PH1'])
if self._match('INSERT'):
self._expect_one_of(['DEF', 'BUF'])
if self._match('ISOLATION'):
self._expect_one_of(['CS', 'NC', 'RR', 'RS', 'UR'])
if self._match('KEEPDYNAMIC'):
self._expect_one_of(['YES', 'NO'])
if self._match('LANGLEVEL'):
self._expect_one_of(['SAA1', 'MIA', 'SQL92E'])
if self._match('LEVEL'):
self._expect(TT.IDENTIFIER)
if self._match('LONGERROR'):
self._expect_one_of(['YES', 'NO'])
if self._match('MESSAGES'):
self._expect_clp_string()
if self._match('NOLINEMACRO'):
pass
if self._match('OPTHINT'):
self._expect_clp_string()
if self._match('OPTLEVEL'):
self._expect(TT.NUMBER)
if self._match('OPTPROFILE'):
self._expect_clp_string()
if self._match('OS400NAMING'):
self._expect_one_of(['SYSTEM', 'SQL'])
if self._match('OUTPUT'):
self._expect_clp_string()
if self._match('OWNER'):
self._expect(TT.IDENTIFIER)
if self._match('PACKAGE'):
if self._match('USING'):
self._expect(TT.IDENTIFIER)
if self._match('PREPROCESSOR'):
self._expect_clp_string()
if self._match('QUALIFIER'):
self._expect(TT.IDENTIFIER)
if self._match('QUERYOPT'):
self._expect(TT.NUMBER)
if self._match('RELEASE'):
self._expect_one_of(['COMMIT', 'DEALLOCATE'])
if self._match('REOPT'):
self._expect_one_of(['NONE', 'ONCE', 'ALWAYS', 'VARS'])
if self._match_one_of(['REOPT', 'NOREOPT']):
self._expect('VARS')
if self._match('SQLCA'):
self._expect_one_of(['NONE', 'SAA'])
if self._match('SQLERROR'):
self._expect_one_of(['NOPACKAGE', 'CHECK', 'CONTINUE'])
if self._match('SQLFLAG'):
self._expect_one_of(['SQL92E', 'MVSDB2V23', 'MVSDB2V31', 'MVSDB2V41'])
self._expect('SYNTAX')
if self._match('SORTSEQ'):
self._expect_one_of(['JOBRUN', 'HEX'])
if self._match('SQLRULES'):
self._expect_one_of(['DB2', 'STD'])
if self._match('SQLWARN'):
self._expect_one_of(['YES', 'NO'])
if self._match('STATICREADONLY'):
self._expect_one_of(['YES', 'NO', 'INSENSITIVE'])
if self._match('STRDEL'):
self._expect_one_of(['APOSTROPHE', 'QUOTE'])
if self._match('SYNCPOINT'):
self._expect_one_of(['ONEPHASE', 'NONE', 'TWOPHASE'])
if self._match('SYNTAX'):
pass
if self._match('TARGET'):
self._expect_one_of(['IBMCOB', 'MFCOB', 'ANSI_COBOL', 'C', 'CPLUSPLUS', 'FORTRAN', 'BORLAND_C', 'BORLAND_CPLUSPLUS'])
if self._match('TEXT'):
self._expect_clp_string()
if self._match('TRANSFORM'):
self._expect('GROUP')
self._expect(TT.IDENTIFIER)
if self._match('VALIDATE'):
self._expect_one_of(['BIND', 'RUN'])
if self._match('WCHARTYPE'):
self._expect_one_of(['NOCONVERT', 'CONVERT'])
if self._match('VERSION'):
self._expect_clp_string()
def _parse_prune_history_command(self):
"""Parses a PRUNE HISTORY command"""
# PRUNE HISTORY already matched
self._expect(TT.NUMBER)
self._match_sequence(['WITH', 'FORCE', 'OPTION'])
self._match_sequence(['AND', 'DELETE'])
def _parse_prune_logfile_command(self):
"""Parses a PRUNE LOGFILE command"""
        # PRUNE LOGFILE already matched
self._expect_sequence(['PRIOR', 'TO'])
self._expect_clp_string()
def _parse_put_routine_command(self):
"""Parses a PUT ROUTINE command"""
# PUT ROUTINE already matched
self._expect('FROM')
self._expect_clp_string()
if self._match('OWNER'):
self._expect(TT.IDENTIFIER)
self._match_sequence(['USE', 'REGISTERS'])
def _parse_query_client_command(self):
"""Parses a QUERY CLIENT command"""
# QUERY CLIENT already matched
pass
def _parse_quiesce_command(self):
"""Parses a QUIESCE DB / INSTANCE command"""
# QUIESCE already matched
if self._match('INSTANCE'):
self._expect_clp_string()
if self._match_one_of(['USER', 'GROUP']):
self._expect(TT.IDENTIFIER)
self._match_sequence(['RESTRICTED', 'ACCESS'])
elif self._match_one_of(['DATABASE', 'DB']):
pass
else:
self._expected_one_of(['DATABASE', 'DB', 'INSTANCE'])
if self._expect_one_of(['IMMEDIATE', 'DEFER'])[1] == 'DEFER':
if self._match('WITH'):
self._expect_sequence(['TIMEOUT', TT.NUMBER])
self._match_sequence(['FORCE', 'CONNECTIONS'])
def _parse_quiesce_tablespaces_command(self):
"""Parses a QUIESCE TABLESPACES command"""
# QUIESCE TABLESPACES already matched
self._expect_sequence(['FOR', 'TABLE'])
self._parse_table_name()
if self._expect_one_of(['SHARE', 'INTENT', 'EXCLUSIVE', 'RESET']).value == 'INTENT':
self._expect_sequence(['TO', 'UPDATE'])
def _parse_quit_command(self):
"""Parses a QUIT command"""
# QUIT already matched
pass
def _parse_rebind_command(self):
"""Parses a REBIND command"""
# REBIND already matched
self._match('PACKAGE')
self._parse_subschema_name()
if self._match('VERSION'):
self._expect_clp_string()
if self._match('APREUSE'):
self._expect_one_of(['YES', 'NO'])
if self._match('RESOLVE'):
self._expect_one_of(['ANY', 'CONSERVATIVE'])
if self._match('REOPT'):
self._expect_one_of(['NONE', 'ONCE', 'ALWAYS'])
def _parse_recover_db_command(self):
"""Parses a RECOVER DB command"""
# RECOVER [DATABASE|DB] already matched
self._expect_clp_string()
if self._match('TO'):
if self._match('END'):
self._expect_sequence(['OF', 'LOGS'])
self._parse_db_partitions_clause()
else:
self._expect_clp_string()
if self._match('USING'):
self._expect_one_of(['LOCAL', 'UTC'])
self._expect('TIME')
if self._match('ON'):
self._expect('ALL')
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
self._parse_login(optional=True, allowchange=False)
if self._match('USING'):
self._expect_sequence(['HISTORY', 'FILE'])
self._expect('(')
self._expect_clp_string()
if self._match(','):
while True:
self._expect_clp_string()
self._expect('ON')
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
if self._match('OVERFLOW'):
self._expect_sequence(['LOG', 'PATH'])
self._expect('(')
self._expect_clp_string()
if self._match(','):
while True:
self._expect_clp_string()
self._expect('ON')
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
if self._match('COMPRLIB'):
self._expect_clp_string()
if self._match('COMPROPTS'):
self._expect_clp_string()
self._match('RESTART')
def _parse_redistribute_database_partition_group_command(self):
"""Parses a REDISTRIBUTE DATABASE PARTITION GROUP command"""
# REDISTRIBUTE DATABASE PARTITION GROUP already matched
self._expect_clp_string()
self._match_sequence(['NOT', 'ROLLFORWARD', 'RECOVERABLE'])
        t = self._expect_one_of(['UNIFORM', 'USING', 'CONTINUE', 'ABORT']).value
partitions = False
if t == 'USING':
if self._expect_one_of(['DISTFILE', 'TARGETMAP']).value == 'DISTFILE':
partitions = True
self._expect_clp_string()
elif t == 'UNIFORM':
partitions = True
if partitions:
if self._match('ADD'):
self._parse_db_partition_list_clause(size=False)
if self._match('DROP'):
self._parse_db_partition_list_clause(size=False)
if self._match('TABLE'):
self._expect('(')
while True:
self._parse_table_name()
if not self._match(','):
break
self._expect(')')
self._match_one_of(['ONCE', 'FIRST'])
if self._match('INDEXING'):
self._expect('MODE')
self._expect_one_of(['REBUILD', 'DEFERRED'])
elif self._match('DATA'):
self._expect('BUFFER')
self._expect(TT.NUMBER)
elif self._match('STATISTICS'):
if self._expect_one_of(['USE', 'NONE']).value == 'USE':
self._expect('PROFILE')
elif self._match('STOP'):
self._expect('AT')
self._expect_clp_string()
def _parse_refresh_ldap_command(self):
"""Parses a REFRESH LDAP command"""
# REFRESH LDAP already matched
if self._match('CLI'):
self._expect('CFG')
elif self._match_one_of(['DB', 'NODE']):
self._expect('DIR')
elif self._match('IMMEDIATE'):
self._match('ALL')
else:
self._expected_one_of(['CLI', 'DB', 'NODE', 'IMMEDIATE'])
def _parse_register_command(self):
"""Parses a REGISTER command"""
# REGISTER already matched
self._match_sequence(['DB2', 'SERVER'])
self._match('IN')
self._match('ADMIN')
self._expect('LDAP')
self._expect_one_of(['NODE', 'AS'])
self._expect_clp_string()
self._expect('PROTOCOL')
if self._expect_one_of(['TCPIP', 'TCPIP4', 'TCPIP6', 'NPIPE']).value != 'NPIPE':
if self._match('HOSTNAME'):
self._expect_clp_string()
if self._match('SVCENAME'):
self._expect_clp_string()
self._match_sequence(['SECURITY', 'SOCKS'])
if self._match('REMOTE'):
self._expect_clp_string()
if self._match('INSTANCE'):
self._expect_clp_string()
if self._match('NODETYPE'):
self._expect_one_of(['SERVER', 'MPP', 'DCS'])
if self._match('OSTYPE'):
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_register_xmlschema_command(self):
"""Parses a REGISTER XMLSCHEMA command"""
# REGISTER XMLSCHEMA already matched
self._expect_clp_string()
self._expect('FROM')
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
if self._match('AS'):
self._parse_subschema_name()
if self._match('('):
while True:
self._expect('ADD')
self._expect_clp_string()
self._expect('FROM')
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
if self._match(')'):
break
if self._match('COMPLETE'):
if self._match('WITH'):
self._expect_clp_string()
if self._match('ENABLE'):
self._expect('DECOMPOSITION')
def _parse_register_xsrobject_command(self):
"""Parses a REGISTER XSROBJECT command"""
# REGISTER XSROBJECT already matched
self._expect_clp_string()
if self._match('PUBLIC'):
self._expect_clp_string()
self._expect('FROM')
self._expect_clp_string()
if self._match('AS'):
self._parse_subschema_name()
if self._match('EXTERNAL'):
self._expect('ENTITY')
else:
self._expect_one_of(['DTD', 'EXTERNAL'])
def _parse_reorg_command(self):
"""Parses a REORG command"""
def parse_table_clause():
if self._match('INDEX'):
self._parse_index_name()
if self._match('INPLACE'):
if not self._match_one_of(['STOP', 'PAUSE']):
if self._match('ALLOW'):
self._expect_one_of(['READ', 'WRITE'])
self._expect('ACCESS')
if self._match('NOTRUNCATE'):
self._expect('TABLE')
self._match_one_of(['START', 'RESUME'])
else:
if self._match('ALLOW'):
self._expect_one_of(['READ', 'NO'])
self._expect('ACCESS')
if self._match('USE'):
self._expect(TT.IDENTIFIER)
self._match('INDEXSCAN')
if self._match('LONGLOBDATA'):
if self._match('USE'):
self._expect(TT.IDENTIFIER)
self._match_one_of(['KEEPDICTIONARY', 'RESETDICTIONARY'])
def parse_index_clause():
if self._match('ALLOW'):
self._expect_one_of(['NO', 'WRITE', 'READ'])
self._expect('ACCESS')
            t = self._match_one_of(['CONVERT', 'CLEANUP'])
            if t is not None and t.value == 'CLEANUP':
self._expect('ONLY')
self._match_one_of(['ALL', 'PAGES'])
# REORG already matched
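        # Illustrative example of a command this method accepts (assumed
        # valid DB2 CLP syntax): REORG TABLE staff INPLACE ALLOW WRITE ACCESS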
if self._match('TABLE'):
self._parse_table_name()
if self._match('RECLAIM'):
self._expect_sequence(['EXTENTS', 'ONLY'])
if self._match('ALLOW'):
self._expect_one_of(['READ', 'WRITE', 'NO'])
self._expect('ACCESS')
else:
parse_table_clause()
elif self._match('INDEX'):
self._parse_index_name()
if self._match('FOR'):
self._expect('TABLE')
self._parse_table_name()
parse_index_clause()
elif self._match('INDEXES'):
self._expect_sequence(['ALL', 'FOR', 'TABLE'])
self._parse_table_name()
parse_index_clause()
else:
self._expected_one_of(['TABLE', 'INDEX', 'INDEXES'])
if self._match_sequence(['ON', 'DATA', 'PARTITION']):
self._expect(TT.IDENTIFIER)
self._parse_db_partitions_clause()
def _parse_reorgchk_command(self):
"""Parses a REORGCHK command"""
# REORGCHK already matched
if self._match_one_of(['UPDATE', 'CURRENT']):
self._expect('STATISTICS')
if self._match('ON'):
if self._match('SCHEMA'):
self._expect(TT.IDENTIFIER)
elif self._match('TABLE'):
if not self._match_one_of(['SYSTEM', 'USER', 'ALL']):
self._parse_table_name()
else:
self._expected_one_of(['SCHEMA', 'TABLE'])
def _parse_reset_admin_cfg_command(self):
"""Parses a RESET ADMIN CFG command"""
# RESET ADMIN [CONFIGURATION|CONFIG|CFG] already matched
if self._match('FOR'):
self._expect('NODE')
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_reset_alert_cfg_command(self):
"""Parses a RESET ALERT CFG command"""
# RESET ALERT [CONFIGURATION|CONFIG|CFG] already matched
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match_one_of(['DBM', 'CONTAINERS', 'DATABASES', 'TABLESPACES'])
):
pass
elif (
self._match_sequence(['CONTAINER', TT.IDENTIFIER, 'FOR', TT.IDENTIFIER])
            or self._match_sequence(['TABLESPACE', TT.IDENTIFIER])
or self._match('DATABASE')
):
self._expect('ON')
self._expect_clp_string()
if self._match('USING'):
self._expect_clp_string()
def _parse_reset_db_cfg_command(self):
"""Parses a RESET DB CFG command"""
# RESET [DATABASE|DB] [CONFIGURATION|CONFIG|CFG] already matched
self._expect('FOR')
self._expect_clp_string()
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
def _parse_reset_dbm_cfg_command(self):
"""Parses a RESET DBM CFG command"""
# RESET [DATABASE MANAGER|DB MANAGER|DBM] [CONFIGURATION|CONFIG|CFG] already matched
pass
def _parse_reset_monitor_command(self):
"""Parses a RESET MONITOR command"""
# RESET MONITOR already matched
if self._match('ALL'):
self._match('DCS')
elif self._match('FOR'):
self._match('DCS')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
else:
self._expected_one_of(['ALL', 'FOR'])
self._parse_db_partition_clause()
def _parse_restart_db_command(self):
"""Parses a RESTART DB command"""
# RESTART [DATABASE|DB] already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match('DROP'):
self._expect_sequence(['PENDING', 'TABLESPACES'])
self._expect('(')
while True:
self._expect(TT.IDENTIFIER)
if not self._match(','):
break
self._expect(')')
self._match_sequence(['WRITE', 'RESUME'])
def _parse_restore_db_command(self):
"""Parses a RESTORE DB command"""
# RESTORE [DATABASE|DB] already matched
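        # Illustrative example of a command this method accepts (assumed
        # valid DB2 CLP syntax):
        #   RESTORE DB SAMPLE TAKEN AT 20230101120000 WITHOUT PROMPTING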
self._expect_clp_string()
if self._match_one_of(['CONTINUE', 'ABORT']):
pass
else:
self._parse_login(optional=True, allowchange=False)
if self._match('TABLESPACE'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
if self._match('SCHEMA'):
if self._match('('):
self._parse_ident_list()
self._expect(')')
self._match('ONLINE')
elif (
self._match_sequence(['HISTORY', 'FILE'])
or self._match_sequence(['COMPRESSION', 'LIBRARY'])
or self._match('LOGS')
):
self._match('ONLINE')
elif self._match('REBUILD'):
self._expect('WITH')
if self._match('ALL'):
self._expect_sequence(['TABLESPACES', 'IN'])
self._expect_one_of(['DATABASE', 'IMAGE'])
if self._match('EXCEPT'):
self._expect('TABLESPACE')
self._expect('(')
self._parse_ident_list()
self._expect(')')
else:
self._expect('TABLESPACE')
self._expect('(')
self._parse_ident_list()
self._expect(')')
if self._match('INCREMENTAL'):
self._match_one_of(['AUTO', 'AUTOMATIC', 'ABORT'])
if self._match('USE'):
self._match_one_of(['TSM', 'XBSA'])
if self._match('OPEN'):
self._expect(TT.NUMBER)
self._expect('SESSIONS')
if self._match('OPTIONS'):
self._expect_clp_string()
# XXX Add support for @filename response file
elif self._match('FROM'):
self._parse_clp_string_list()
elif self._match('LOAD'):
self._expect_clp_string()
if self._match('OPEN'):
self._expect(TT.NUMBER)
self._expect('SESSIONS')
if self._match('OPTIONS'):
self._expect_clp_string()
# XXX Add support for @filename response file
if self._match('TAKEN'):
self._expect('AT')
self._expect(TT.NUMBER)
if self._match('TO'):
self._expect_clp_string()
elif self._match('DBPATH'):
self._expect('ON')
self._expect_clp_string()
elif self._match('ON'):
self._parse_clp_string_list()
if self._match('DBPATH'):
self._expect('ON')
self._expect_clp_string()
if self._match('INTO'):
self._expect_clp_string()
if self._match('LOGTARGET'):
if self._match_one_of(['INCLUDE', 'EXCLUDE']):
self._match('FORCE')
else:
self._expect_clp_string()
if self._match('NEWLOGPATH'):
self._expect_clp_string()
if self._match('WITH'):
self._expect(TT.NUMBER)
self._expect('BUFFERS')
if self._match('BUFFER'):
self._expect(TT.NUMBER)
self._match_sequence(['REPLACE', 'HISTORY', 'FILE'])
self._match_sequence(['REPLACE', 'EXISTING'])
if self._match('REDIRECT'):
if self._match('GENERATE'):
self._expect('SCRIPT')
self._expect_clp_string()
if self._match('PARALLELISM'):
self._expect(TT.NUMBER)
if self._match('COMPRLIB'):
self._expect_clp_string()
if self._match('COMPROPTS'):
self._expect_clp_string()
self._match_sequence(['WITHOUT', 'ROLLING', 'FORWARD'])
self._match_sequence(['WITHOUT', 'PROMPTING'])
def _parse_rewind_tape_command(self):
"""Parses a REWIND TAPE command"""
# REWIND TAPE already matched
if self._match('ON'):
self._expect_clp_string()
def _parse_rollforward_db_command(self):
"""Parses a ROLLFORWARD DB command"""
# ROLLFORWARD [DATABASE|DB] already matched
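        # Illustrative example of a command this method accepts (assumed
        # valid DB2 CLP syntax): ROLLFORWARD DB SAMPLE TO END OF LOGS AND COMPLETE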
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match('TO'):
if self._match('END'):
self._expect('OF')
if self._expect_one_of(['LOGS', 'BACKUP']).value == 'BACKUP':
if self._match('ON'):
self._expect('ALL')
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
else:
self._parse_db_partitions_clause()
else:
self._expect(TT.NUMBER)
if self._match('ON'):
self._expect('ALL')
self._expect_one_of(['DBPARTITIONNUMS', 'NODES'])
if self._match('USING'):
self._expect_one_of(['UTC', 'LOCAL'])
self._expect('TIME')
if self._match('AND'):
self._expect_one_of(['COMPLETE', 'STOP'])
elif self._match_one_of(['COMPLETE', 'STOP', 'CANCEL']):
self._parse_db_partitions_clause()
elif self._match('QUERY'):
self._expect('STATUS')
if self._match('USING'):
self._expect_one_of(['UTC', 'LOCAL'])
self._expect('TIME')
self._parse_db_partitions_clause()
if self._match('TABLESPACE'):
if not self._match('ONLINE'):
self._expect('(')
self._parse_ident_list()
self._expect(')')
self._match('ONLINE')
if self._match('OVERFLOW'):
self._expect_sequence(['LOG', 'PATH'])
self._expect('(')
self._expect_clp_string()
if self._match(','):
while True:
self._expect_clp_string()
self._expect('ON')
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
self._match('NORETRIEVE')
if self._match('RECOVER'):
self._expect_sequence(['DROPPED', 'TABLE'])
self._expect_clp_string()
self._expect('TO')
self._expect_clp_string()
def _parse_runstats_command(self):
"""Parses a RUNSTATS command"""
def parse_index_options():
"""Parses the indexing clauses of a RUNSTATS command"""
# FOR/AND already matched
if self._match('SAMPLED'):
self._expect('DETAILED')
else:
self._match('DETAILED')
self._expect_one_of(['INDEX', 'INDEXES'])
if not self._match('ALL'):
while True:
self._parse_index_name()
if not self._match(','):
break
def parse_column_options(dist):
"""Parses column options clauses of a RUNSTATS command"""
# ON already matched
if (
self._match_sequence(['ALL', 'COLUMNS', 'AND', 'COLUMNS'])
or self._match_sequence(['KEY', 'COLUMNS', 'AND', 'COLUMNS'])
or self._match('COLUMNS')
):
self._expect('(')
while True:
if self._match('('):
self._parse_ident_list()
self._expect(')')
else:
self._expect(TT.IDENTIFIER)
if self._match('LIKE'):
self._expect('STATISTICS')
if dist:
while self._match_one_of(['NUM_FREQVALUES', 'NUM_QUANTILES']):
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
else:
self._expect_one_of(['ALL', 'KEY', 'COLUMNS'])
self._expect('COLUMNS')
# RUNSTATS already matched
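        # Illustrative example of a command this method accepts (assumed
        # valid DB2 CLP syntax):
        #   RUNSTATS ON TABLE DB2INST1.STAFF WITH DISTRIBUTION AND DETAILED INDEXES ALL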
self._expect_sequence(['ON', 'TABLE'])
self._parse_table_name()
if self._match_one_of(['USE', 'UNSET']):
self._expect('PROFILE')
else:
if self._match('FOR'):
parse_index_options()
self._match_sequence(['EXCLUDING', 'XML', 'COLUMNS'])
else:
if self._match('ON'):
parse_column_options(dist=False)
if self._match('WITH'):
self._expect('DISTRIBUTION')
if self._match('ON'):
parse_column_options(dist=True)
if self._match('DEFAULT'):
while self._match_one_of(['NUM_FREQVALUES', 'NUM_QUANTILES']):
self._expect(TT.NUMBER)
self._match_sequence(['EXCLUDING', 'XML', 'COLUMNS'])
if self._match('AND'):
parse_index_options()
if self._match('ALLOW'):
self._expect_one_of(['READ', 'WRITE'])
self._expect('ACCESS')
if self._match('TABLESAMPLE'):
self._expect_one_of(['SYSTEM', 'BERNOULLI'])
self._expect('(')
self._expect(TT.NUMBER)
self._expect(')')
if self._match('REPEATABLE'):
self._expect('(')
self._expect(TT.NUMBER)
self._expect(')')
if self._match('SET'):
self._expect('PROFILE')
self._match_one_of(['NONE', 'ONLY'])
elif self._match('UPDATE'):
self._expect('PROFILE')
self._match('ONLY')
if self._match('UTIL_IMPACT_PRIORITY'):
self._match(TT.NUMBER)
def _parse_set_client_command(self):
"""Parses a SET CLIENT command"""
# SET CLIENT already matched
if self._match('CONNECT'):
self._expect(TT.NUMBER)
if self._match('DISCONNECT'):
self._expect_one_of(['EXPLICIT', 'CONDITIONAL', 'AUTOMATIC'])
if self._match('SQLRULES'):
self._expect_one_of(['DB2', 'STD'])
if self._match('SYNCPOINT'):
self._expect_one_of(['ONEPHASE', 'TWOPHASE', 'NONE'])
if self._match('CONNECT_DBPARTITIONNUM'):
self._expect_one_of(['CATALOG_DBPARTITIONNUM', TT.NUMBER])
if self._match('ATTACH_DBPARTITIONNUM'):
self._expect(TT.NUMBER)
def _parse_set_runtime_degree_command(self):
"""Parses a SET RUNTIME DEGREE command"""
# SET RUNTIME DEGREE already matched
self._expect('FOR')
if not self._match('ALL'):
self._expect('(')
while True:
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
self._expect('TO')
self._expect(TT.NUMBER)
def _parse_set_serveroutput_command(self):
"""Parses a SET SERVEROUTPUT command"""
# SET SERVEROUTPUT already matched
self._expect_one_of(['OFF', 'ON'])
def _parse_set_tablespace_containers_command(self):
"""Parses a SET TABLESPACE CONTAINERS command"""
# SET TABLESPACE CONTAINERS already matched
self._expect('FOR')
self._expect(TT.NUMBER)
if self._match_one_of(['REPLAY', 'IGNORE']):
self._expect_sequence(['ROLLFORWARD', 'CONTAINER', 'OPERATIONS'])
self._expect('USING')
if not self._match_sequence(['AUTOMATIC', 'STORAGE']):
self._expect('(')
while True:
if self._expect_one_of(['FILE', 'DEVICE', 'PATH']).value == 'PATH':
self._expect_clp_string()
else:
self._expect_clp_string()
self._expect(TT.NUMBER)
if not self._match(','):
break
self._expect(')')
def _parse_set_tape_position_command(self):
"""Parses a SET TAPE POSITION command"""
# SET TAPE POSITION already matched
if self._match('ON'):
self._expect_clp_string()
self._expect('TO')
self._expect(TT.NUMBER)
def _parse_set_util_impact_priority_command(self):
"""Parses a SET UTIL_IMPACT_PRIORITY command"""
# SET UTIL_IMPACT_PRIORITY already matched
self._expect('FOR')
self._expect(TT.NUMBER)
self._expect('TO')
self._expect(TT.NUMBER)
def _parse_set_workload_command(self):
"""Parses a SET WORKLOAD command"""
# SET WORKLOAD already matched
self._expect('TO')
self._expect_one_of(['AUTOMATIC', 'SYSDEFAULTADMWORKLOAD'])
def _parse_set_write_command(self):
"""Parses a SET WRITE command"""
# SET WRITE already matched
self._expect_one_of(['SUSPEND', 'RESUME'])
self._expect('FOR')
self._expect_one_of(['DATABASE', 'DB'])
def _parse_start_dbm_command(self):
"""Parses a START DBM command"""
# START [DATABASE MANAGER|DB MANAGER|DBM] already matched
if self._match('REMOTE'):
self._match('INSTANCE')
self._expect_clp_string()
self._expect_one_of(['ADMINNODE', 'HOSTNAME'])
self._expect_clp_string()
self._parse_login(optional=False, allowchange=False)
if self._match('ADMIN'):
self._expect('MODE')
if self._match_one_of(['USER', 'GROUP']):
self._expect(TT.IDENTIFIER)
self._match_sequence(['RESTRICTED', 'ACCESS'])
if self._match('PROFILE'):
self._expect_clp_string()
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
if self._match('ADD'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect('HOSTNAME')
self._expect_clp_string()
self._expect('PORT')
self._expect(TT.NUMBER)
if self._match('COMPUTER'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match('NETNAME'):
self._expect_clp_string()
if self._match('LIKE'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
elif self._match('WITHOUT'):
self._expect('TABLESPACES')
elif self._match('RESTART'):
if self._match('HOSTNAME'):
self._expect_clp_string()
if self._match('PORT'):
self._expect(TT.NUMBER)
if self._match('COMPUTER'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match('NETNAME'):
self._expect_clp_string()
elif self._match('STANDALONE'):
pass
def _parse_start_hadr_command(self):
"""Parses a START HADR command"""
# START HADR already matched
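        # Illustrative example of a command this method accepts (assumed
        # valid DB2 CLP syntax): START HADR ON DB SAMPLE AS PRIMARY BY FORCE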
self._expect('ON')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
self._expect('AS')
if self._expect_one_of(['PRIMARY', 'STANDBY']).value == 'PRIMARY':
self._match_sequence(['BY', 'FORCE'])
def _parse_stop_dbm_command(self):
"""Parses a STOP DBM command"""
# STOP [DATABASE MANAGER|DB MANAGER|DBM] already matched
if self._match('PROFILE'):
self._expect_clp_string()
if self._match('DROP'):
self._expect_one_of(['DBPARTITIONNUM', 'NODE'])
self._expect(TT.NUMBER)
else:
self._match('FORCE')
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
def _parse_stop_hadr_command(self):
"""Parses a STOP HADR command"""
# STOP HADR already matched
self._expect('ON')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_takeover_hadr_command(self):
"""Parses a TAKEOVER HADR command"""
# TAKEOVER HADR already matched
self._expect('ON')
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
if self._match_sequence(['BY', 'FORCE']):
self._match_sequence(['PEER', 'WINDOW', 'ONLY'])
def _parse_terminate_command(self):
"""Parses a TERMINATE command"""
# TERMINATE already matched
pass
def _parse_uncatalog_command(self):
"""Parses an UNCATALOG command"""
if self._match_one_of(['DATABASE', 'DB', 'NODE']):
self._expect_clp_string()
elif self._match('DCS'):
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
elif self._match('LDAP'):
self._expect_one_of(['DATABASE', 'DB', 'NODE'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
elif self._match_one_of(['USER', 'SYSTEM']):
self._expect_sequence(['ODBC', 'DATA', 'SOURCE'])
self._expect_clp_string()
elif self._match('ODBC'):
self._expect_sequence(['DATA', 'SOURCE'])
self._expect_clp_string()
else:
self._expected_one_of([
'DATABASE',
'DB',
'NODE',
'DCS',
'LDAP',
'USER',
'SYSTEM',
'ODBC',
])
def _parse_unquiesce_command(self):
"""Parses an UNQUIESCE command"""
# UNQUIESCE already matched
if self._match('INSTANCE'):
self._expect_clp_string()
elif self._match_one_of(['DATABASE', 'DB']):
pass
else:
self._expected_one_of(['DATABASE', 'DB', 'INSTANCE'])
def _parse_update_admin_cfg_command(self):
"""Parses an UPDATE ADMIN CFG command"""
# UPDATE ADMIN CONFIGURATION|CONFIG|CFG already matched
self._expect('USING')
while True:
self._expect(TT.IDENTIFIER)
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
if self._peek_one_of(['FOR', TT.TERMINATOR, TT.EOF]):
break
if self._match_sequence(['FOR', 'NODE']):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_update_alert_cfg_command(self):
"""Parses an UPDATE ALERT CFG command"""
# UPDATE ALERT CONFIGURATION|CONFIG|CFG already matched
self._expect('FOR')
if (
self._match_sequence(['DATABASE', 'MANAGER'])
or self._match_sequence(['DB', 'MANAGER'])
or self._match_one_of(['DBM', 'CONTAINERS', 'DATABASES', 'TABLESPACES'])
):
pass
elif (
self._match_sequence(['CONTAINER', TT.IDENTIFIER, 'FOR', TT.IDENTIFIER])
            or self._match_sequence(['TABLESPACE', TT.IDENTIFIER])
or self._match('DATABASE')
):
self._expect('ON')
self._expect_clp_string()
if self._match('USING'):
self._expect_clp_string()
if self._match('SET'):
while True:
self._expect_one_of(['ALARM', 'WARNING', 'SENSITIVITY', 'ACTIONSENABLED', 'THRESHOLDSCHECKED'])
self._expect_one_of([TT.NUMBER, 'YES', 'NO'])
if not self._match(','):
break
elif self._match('ADD'):
while True:
self._expect_one_of(['SCRIPT', 'TASK'])
self._expect_clp_string()
self._expect('TYPE')
if self._match('DB2'):
if (
self._match_sequence(['STATEMENT', 'TERMINATION', 'CHARACTER'])
or self._match_sequence(['STMT', 'TERM', 'CHAR'])
or self._match_sequence(['TERM', 'CHAR'])
):
self._expect_clp_string()
elif self._match_sequence(['OPERATING', 'SYSTEM']) or self._match('OS'):
if (
self._match_sequence(['COMMAND', 'LINE', 'PARAMETERS'])
or self._match('PARMS')
):
self._expect_clp_string()
else:
self._expected_one_of(['DB2', 'OS', 'OPERATING'])
self._expect_sequence(['WORKING', 'DIRECTORY'])
self._expect_clp_string()
self._expect('ON')
if self._expect_one_of(['WARNING', 'ALARM', 'ALLALERT', 'ATTENTION']).value == 'ATTENTION':
self._expect(TT.NUMBER)
if self._match('ON'):
self._expect_clp_string()
self._parse_login(optional=False, allowchange=False)
if not self._match(','):
break
        else:
            update = self._expect_one_of(['SET', 'ADD', 'UPDATE', 'DELETE']).value == 'UPDATE'
self._expect('ACTION')
while True:
self._expect_one_of(['SCRIPT', 'TASK'])
self._expect_clp_string()
self._expect('ON')
if self._expect_one_of(['WARNING', 'ALARM', 'ALLALERT', 'ATTENTION']).value == 'ATTENTION':
self._expect(TT.NUMBER)
if update:
while True:
self._expect('SET')
self._expect_one_of(['ALARM', 'WARNING', 'SENSITIVITY', 'ACTIONSENABLED', 'THRESHOLDSCHECKED'])
self._expect_one_of([TT.NUMBER, 'YES', 'NO'])
if not self._match(','):
break
if not self._match(','):
break
def _parse_update_alternate_server_command(self):
"""Parses an UPDATE ALTERNATE SERVER command"""
# UPDATE ALTERNATE SERVER already matched
self._expect('FOR')
if self._expect_one_of(['LDAP', 'DATABASE', 'DB']).value == 'LDAP':
self._expect_one_of(['DATABASE', 'DB'])
self._expect_clp_string()
self._expect('USING')
self._expect_one_of(['NODE', 'GWNODE'])
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
else:
self._expect_clp_string()
self._expect_sequence(['USING', 'HOSTNAME'])
self._expect_clp_string()
self._expect('PORT')
self._expect_clp_string()
def _parse_update_cli_cfg_command(self):
"""Parses an UPDATE CLI CFG command"""
# UPDATE CLI CONFIGURATION|CONFIG|CFG already matched
if self._match('AT'):
self._expect_one_of(['GLOBAL', 'USER'])
self._expect('LEVEL')
self._expect_sequence(['FOR', 'SECTION'])
self._expect_clp_string()
self._expect('USING')
while True:
self._expect(TT.IDENTIFIER)
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
if self._peek_one_of([TT.TERMINATOR, TT.EOF]):
break
def _parse_update_command_options_command(self):
"""Parses an UPDATE COMMAND OPTIONS command"""
# UPDATE COMMAND OPTIONS already matched
self._expect('USING')
while True:
option = self._expect_one_of([
'A', 'C', 'D', 'E', 'I', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'V', 'W', 'Z',
]).value
value = self._expect_one_of(['ON', 'OFF']).value
if option in ('E', 'L', 'R', 'Z') and value == 'ON':
self._expect_clp_string()
if self._peek_one_of([TT.TERMINATOR, TT.EOF]):
break
def _parse_update_contact_command(self):
"""Parses an UPDATE CONTACT command"""
# UPDATE CONTACT already matched
self._expect_clp_string()
self._expect('USING')
while True:
if self._match('ADDRESS'):
self._expect_clp_string()
elif self._match('TYPE'):
self._expect_one_of(['EMAIL', 'PAGE'])
elif self._match('MAXPAGELEN'):
self._expect(TT.NUMBER)
elif self._match('DESCRIPTION'):
self._expect_clp_string()
else:
self._expected_one_of(['ADDRESS', 'TYPE', 'MAXPAGELEN', 'DESCRIPTION'])
if not self._match(','):
break
def _parse_update_contactgroup_command(self):
"""Parses an UPDATE CONTACTGROUP command"""
# UPDATE CONTACTGROUP already matched
self._expect_clp_string()
self._expect('(')
while True:
self._expect_one_of(['ADD', 'DROP'])
self._expect_one_of(['CONTACT', 'GROUP'])
self._expect_clp_string()
if not self._match(','):
break
self._expect(')')
if self._match('DESCRIPTION'):
self._expect_clp_string()
def _parse_update_db_cfg_command(self):
"""Parses an UPDATE DB CFG command"""
# UPDATE DATABASE|DB CONFIGURATION|CONFIG|CFG already matched
if self._match('FOR'):
self._expect_clp_string()
if self._match_one_of(['DBPARTITIONNUM', 'NODE']):
self._expect(TT.NUMBER)
self._expect('USING')
while True:
self._expect(TT.IDENTIFIER)
if self._match_one_of(['AUTOMATIC', 'MANUAL']):
pass
else:
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
self._match('AUTOMATIC')
if self._peek_one_of(['IMMEDIATE', 'DEFERRED', TT.TERMINATOR, TT.EOF]):
break
self._match_one_of(['IMMEDIATE', 'DEFERRED'])
def _parse_update_dbm_cfg_command(self):
"""Parses an UPDATE DBM CFG command"""
# UPDATE DATABASE MANAGER|DB MANAGER|DBM CONFIGURATION|CONFIG|CFG already matched
self._expect('USING')
while True:
self._expect(TT.IDENTIFIER)
if self._match_one_of(['AUTOMATIC', 'MANUAL']):
pass
else:
self._expect_one_of([TT.NUMBER, TT.STRING, TT.IDENTIFIER])
self._match('AUTOMATIC')
if self._peek_one_of(['IMMEDIATE', 'DEFERRED', TT.TERMINATOR, TT.EOF]):
break
self._match_one_of(['IMMEDIATE', 'DEFERRED'])
def _parse_update_notification_list_command(self):
"""Parses an UPDATE NOTIFICATION LIST command"""
# UPDATE [HEALTH] NOTIFICATION [CONTACT] LIST already matched
first = True
while True:
if not self._match_one_of(['ADD', 'DROP']):
if not first:
break
else:
self._expected_one_of(['ADD', 'DROP'])
first = False
self._expect_one_of(['CONTACT', 'GROUP'])
self._expect_clp_string()
def _parse_update_history_command(self):
"""Parses an UPDATE HISTORY command"""
# UPDATE HISTORY already matched
self._expect_one_of(['FOR', 'EID'])
self._expect(TT.NUMBER)
self._expect('WITH')
if self._match('LOCATION'):
self._expect_clp_string()
self._expect_sequence(['DEVICE', 'TYPE'])
self._expect_one_of(['D', 'K', 'T', 'A', 'F', 'U', 'P', 'N', 'X', 'Q', 'O'])
elif self._match('COMMENT'):
self._expect_clp_string()
elif self._match('STATUS'):
self._expect_one_of(['A', 'I', 'E', 'D', 'X'])
else:
self._expected_one_of(['LOCATION', 'COMMENT', 'STATUS'])
def _parse_update_ldap_node_command(self):
"""Parses an UPDATE LDAP NODE command"""
# UPDATE LDAP NODE already matched
self._expect_clp_string()
if self._match('HOSTNAME'):
self._expect_clp_string()
if self._match('SVCENAME'):
self._expect_clp_string()
if self._match('WITH'):
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
def _parse_update_monitor_switches_command(self):
"""Parses an UPDATE MONITOR SWITCHES command"""
# UPDATE MONITOR SWITCHES already matched
self._expect('USING')
first = True
while True:
if not self._match_one_of(['BUFFERPOOL', 'LOCK', 'SORT', 'STATEMENT', 'TABLE', 'TIMESTAMP', 'UOW']):
if not first:
break
else:
self._expected_one_of(['BUFFERPOOL', 'LOCK', 'SORT', 'STATEMENT', 'TABLE', 'TIMESTAMP', 'UOW'])
first = False
self._expect_one_of(['OFF', 'ON'])
self._parse_db_partition_clause()
def _parse_update_xmlschema_command(self):
"""Parses an UPDATE XMLSCHEMA command"""
# UPDATE XMLSCHEMA already matched
self._parse_subschema_name()
self._expect('WITH')
self._parse_subschema_name()
self._match_sequence(['DROP', 'NEW', 'SCHEMA'])
def _parse_upgrade_db_command(self):
"""Parses an UPGRADE DB command"""
# UPGRADE DATABASE|DB already matched
self._expect_clp_string()
self._parse_login(optional=True, allowchange=False)
# COMPOUND COMMANDS ######################################################
def _parse_command(self):
"""Parses a top-level CLP command in a DB2 script"""
# Ambiguity: Some CLP commands start with the same keywords as SQL
# statements (e.g. CREATE DATABASE and CREATE DATABASE PARTITION
# GROUP). Attempt to parse the statement as a CLP statement, rewind
# and try to parse as an SQL command if that fails. This is one reason
# for the message "The command was processed as an SQL statement
        # because it was not a valid Command Line Processor command" in DB2:
        # DB2 itself uses two separate parsers, and its CLP parser likewise
        # tries to parse a command first, deferring to the SQL parser if that
        # fails.
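        # The rewind is implemented with _save_state()/_restore_state(): any
        # branch below that turns out not to be a CLP command raises
        # ParseBacktrack, the saved state is restored, and the input is
        # re-parsed as an SQL statement via _parse_statement().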
self._save_state()
try:
if self._match('ACTIVATE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_activate_database_command()
elif self._match('ATTACH'):
self._parse_attach_command()
elif self._match('AUTOCONFIGURE'):
self._parse_autoconfigure_command()
elif self._match('BACKUP'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_backup_command()
elif self._match('CATALOG'):
self._parse_catalog_command()
elif self._match('CONNECT'):
self._parse_connect_command()
elif self._match('CREATE'):
if self._match_one_of(['DATABASE', 'DB']):
if self._match('PARTITION'):
raise ParseBacktrack()
self._parse_create_database_command()
elif self._match('TOOLS'):
self._expect('CATALOG')
self._parse_create_tools_catalog_command()
else:
raise ParseBacktrack()
elif self._match('DEACTIVATE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_deactivate_database_command()
elif self._match('DETACH'):
self._parse_detach_command()
elif self._match('DISCONNECT'):
self._parse_disconnect_command()
elif self._match('DROP'):
if self._match_one_of(['DATABASE', 'DB']):
self._parse_drop_database_command()
elif self._match('TOOLS'):
self._expect('CATALOG')
self._parse_drop_tools_catalog_command()
else:
raise ParseBacktrack()
elif self._match('ECHO'):
self._parse_echo_command()
elif self._match('EXPORT'):
self._parse_export_command()
elif self._match('FORCE'):
self._expect('APPLICATION')
self._parse_force_application_command()
elif self._match('GET'):
if self._match('ADMIN'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_get_admin_cfg_command()
elif self._match('ALERT'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_get_alert_cfg_command()
elif self._match('CLI'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_get_cli_cfg_command()
elif self._match('CONNECTION'):
self._expect('STATE')
self._parse_get_connection_state_command()
elif self._match('CONTACTGROUP'):
self._parse_get_contactgroup_command()
elif self._match('CONTACTGROUPS'):
self._parse_get_contactgroups_command()
elif self._match('CONTACTS'):
self._parse_get_contacts_command()
elif self._match_one_of(['DATABASE', 'DB']):
if self._match_one_of(['CONFIGURATION', 'CONFIG', 'CFG']):
self._parse_get_db_cfg_command()
elif self._match('MANAGER'):
if self._match_one_of(['CONFIGURATION', 'CONFIG', 'CFG']):
self._parse_get_dbm_cfg_command()
elif self._match_sequence(['MONITOR', 'SWITCHES']):
self._parse_get_dbm_monitor_switches_command()
else:
self._expected_one_of(['CONFIGURATION', 'CONFIG', 'CFG', 'MONITOR'])
elif self._match('DBM'):
if self._match_one_of(['CONFIGURATION', 'CONFIG', 'CFG']):
self._parse_get_dbm_cfg_command()
elif self._match_sequence(['MONITOR', 'SWITCHES']):
self._parse_get_dbm_monitor_switches_command()
else:
self._expected_one_of(['CONFIGURATION', 'CONFIG', 'CFG', 'MONITOR'])
elif self._match('DESCRIPTION'):
self._expect_sequence(['FOR', 'HEALTH', 'INDICATOR'])
self._parse_get_description_for_health_indicator_command()
elif self._match('HEALTH'):
if self._match('NOTIFICATION'):
self._expect_sequence(['CONTACT', 'LIST'])
self._parse_get_notification_list_command()
elif self._match('SNAPSHOT'):
self._parse_get_health_snapshot_command()
else:
self._expected_one_of(['NOTIFICATION', 'SNAPSHOT'])
elif self._match('INSTANCE'):
self._parse_get_instance_command()
elif self._match('MONITOR'):
self._expect('SWITCHES')
self._parse_get_monitor_switches_command()
elif self._match('NOTIFICATION'):
self._expect('LIST')
self._parse_get_notification_list_command()
elif self._match('RECOMMENDATIONS'):
self._expect_sequence(['FOR', 'HEALTH', 'INDICATOR'])
self._parse_get_recommendations_for_health_indicator_command()
elif self._match('ROUTINE'):
self._parse_get_routine_command()
elif self._match('SNAPSHOT'):
self._parse_get_snapshot_command()
else:
raise ParseBacktrack()
elif self._match('IMPORT'):
self._parse_import_command()
elif self._match('INITIALIZE'):
self._expect('TAPE')
self._parse_initialize_tape_command()
elif self._match('INSPECT'):
self._parse_inspect_command()
elif self._match('INSTANCE'):
self._parse_instance_command()
elif self._match('LIST'):
if self._match('ACTIVE'):
self._expect('DATABASES')
self._parse_list_active_databases_command()
elif self._match('ADMIN'):
self._expect_sequence(['NODE', 'DIRECTORY'])
self._parse_list_node_directory_command()
elif self._match('APPLICATIONS'):
self._parse_list_applications_command()
elif self._match('COMMAND'):
self._expect('OPTIONS')
self._parse_list_command_options_command()
elif self._match_one_of(['DATABASE', 'DB']):
if self._match('DIRECTORY'):
self._parse_list_db_directory_command()
elif self._match('PARTITION'):
self._expect('GROUPS')
self._parse_list_database_partition_groups_command()
else:
self._expected_one_of(['DIRECTORY', 'PARTITION'])
elif self._match_one_of(['DBPARTITIONNUMS', 'NODES']):
self._parse_list_nodes_command()
elif self._match('DCS'):
if self._match('APPLICATIONS'):
self._parse_list_dcs_applications_command()
elif self._match('DIRECTORY'):
self._parse_list_dcs_directory_command()
else:
self._expected_one_of(['APPLICATIONS', 'DIRECTORY'])
elif self._match('DRDA'):
self._expect_sequence(['INDOUBT', 'TRANSACTIONS'])
self._parse_list_drda_indoubt_transactions_command()
elif self._match('HISTORY'):
self._parse_list_history_command()
elif self._match('INDOUBT'):
self._expect('TRANSACTIONS')
self._parse_list_indoubt_transactions_command()
elif self._match('NODE'):
self._expect('DIRECTORY')
self._parse_list_node_directory_command()
elif self._match_one_of(['USER', 'SYSTEM']):
self._expect_sequence(['ODBC', 'DATA', 'SOURCES'])
self._parse_list_odbc_data_sources_command()
elif self._match('ODBC'):
self._expect_sequence(['DATA', 'SOURCES'])
self._parse_list_odbc_data_sources_command()
elif self._match_one_of(['PACKAGES', 'TABLES']):
                    self._parse_list_tables_command()
elif self._match('TABLESPACES'):
if self._match('CONTAINERS'):
self._parse_list_tablespace_containers_command()
else:
self._parse_list_tablespaces_command()
elif self._match('UTILITIES'):
self._parse_list_utilities_command()
else:
self._expected_one_of([
'ACTIVE',
'ADMIN',
'APPLICATIONS',
'COMMAND',
'DATABASE',
'DB',
'DBPARTITIONNUMS',
'DCS',
'DRDA',
'HISTORY',
'INDOUBT',
'NODE',
'NODES',
'ODBC',
'PACKAGES',
'SYSTEM',
'TABLES',
'TABLESPACES',
'USER',
'UTILITIES',
])
elif self._match('LOAD'):
if self._match('QUERY'):
self._parse_load_query_command()
else:
self._parse_load_command()
elif self._match('MIGRATE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_migrate_db_command()
elif self._match('ON'):
self._parse_on_command()
elif self._match('PING'):
self._parse_ping_command()
elif self._match_one_of(['PRECOMPILE', 'PREP']):
self._parse_precompile_command()
elif self._match('PRUNE'):
if self._match('HISTORY'):
self._parse_prune_history_command()
elif self._match('LOGFILE'):
self._parse_prune_logfile_command()
else:
self._expected_one_of(['HISTORY', 'LOGFILE'])
elif self._match('PUT'):
self._expect('ROUTINE')
self._parse_put_routine_command()
elif self._match('QUERY'):
self._expect('CLIENT')
self._parse_query_client_command()
elif self._match('QUIESCE'):
if self._match('TABLESPACES'):
self._parse_quiesce_tablespaces_command()
else:
self._parse_quiesce_command()
elif self._match('QUIT'):
self._parse_quit_command()
elif self._match('REBIND'):
self._parse_rebind_command()
elif self._match('RECOVER'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_recover_db_command()
elif self._match('REDISTRIBUTE'):
self._expect_sequence(['DATABASE', 'PARTITION', 'GROUP'])
self._parse_redistribute_database_partition_group_command()
elif self._match('REFRESH'):
if self._match('LDAP'):
self._parse_refresh_ldap_command()
else:
raise ParseBacktrack()
elif self._match('REGISTER'):
if self._match('XMLSCHEMA'):
self._parse_register_xmlschema_command()
elif self._match('XSROBJECT'):
self._parse_register_xsrobject_command()
else:
self._parse_register_command()
elif self._match('REORG'):
self._parse_reorg_command()
elif self._match('REORGCHK'):
self._parse_reorgchk_command()
elif self._match('RESET'):
if self._match('ADMIN'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_admin_cfg_command()
elif self._match('ALERT'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_alert_cfg_command()
elif self._match_one_of(['DATABASE', 'DB']):
if self._match('MANAGER'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_dbm_cfg_command()
else:
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_db_cfg_command()
elif self._match('DBM'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_reset_dbm_cfg_command()
elif self._match('MONITOR'):
self._parse_reset_monitor_command()
else:
self._expected_one_of([
'ADMIN',
'ALERT',
'DATABASE',
'DB',
'DBM',
'MONITOR',
])
elif self._match('RESTART'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_restart_db_command()
elif self._match('RESTORE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_restore_db_command()
elif self._match('REWIND'):
self._expect('TAPE')
self._parse_rewind_tape_command()
elif self._match('ROLLFORWARD'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_rollforward_db_command()
elif self._match('RUNSTATS'):
self._parse_runstats_command()
elif self._match('SET'):
if self._match('CLIENT'):
self._parse_set_client_command()
elif self._match('RUNTIME'):
self._expect('DEGREE')
self._parse_set_runtime_degree_command()
elif self._match('SERVEROUTPUT'):
self._parse_set_serveroutput_command()
elif self._match('TABLESPACE'):
self._expect('CONTAINERS')
self._parse_set_tablespace_containers_command()
elif self._match('TAPE'):
self._expect('POSITION')
self._parse_set_tape_position_command()
elif self._match('UTIL_IMPACT_PRIORITY'):
self._parse_set_util_impact_priority_command()
elif self._match('WORKLOAD'):
self._parse_set_workload_command()
elif self._match('WRITE'):
self._parse_set_write_command()
else:
raise ParseBacktrack()
elif self._match('START'):
if self._match('HADR'):
self._parse_start_hadr_command()
elif self._match_one_of(['DATABASE', 'DB']):
self._expect('MANAGER')
self._parse_start_dbm_command()
elif self._match('DBM'):
self._parse_start_dbm_command()
else:
self._expected_one_of(['HADR', 'DATABASE', 'DB', 'DBM'])
elif self._match('STOP'):
if self._match('HADR'):
self._parse_stop_hadr_command()
elif self._match_one_of(['DATABASE', 'DB']):
self._expect('MANAGER')
self._parse_stop_dbm_command()
elif self._match('DBM'):
self._parse_stop_dbm_command()
else:
self._expected_one_of(['HADR', 'DATABASE', 'DB', 'DBM'])
elif self._match('TAKEOVER'):
self._parse_takeover_hadr_command()
elif self._match('TERMINATE'):
self._parse_terminate_command()
elif self._match('UNCATALOG'):
self._parse_uncatalog_command()
elif self._match('UNQUIESCE'):
self._parse_unquiesce_command()
elif self._match('UPDATE'):
if self._match('ADMIN'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_admin_cfg_command()
elif self._match('ALERT'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_alert_cfg_command()
elif self._match_sequence(['ALTERNATE', 'SERVER']):
self._parse_update_alternate_server_command()
elif self._match('CLI'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_cli_cfg_command()
elif self._match_sequence(['COMMAND', 'OPTIONS']):
self._parse_update_command_options_command()
elif self._match('CONTACT'):
self._parse_update_contact_command()
elif self._match('CONTACTGROUP'):
self._parse_update_contactgroup_command()
elif self._match_one_of(['DATABASE', 'DB']):
if self._match('MANAGER'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_dbm_cfg_command()
else:
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_db_cfg_command()
elif self._match('DBM'):
self._expect_one_of(['CONFIGURATION', 'CONFIG', 'CFG'])
self._parse_update_dbm_cfg_command()
elif (
self._match_sequence(['HEALTH', 'NOTIFICATION', 'CONTACT', 'LIST'])
or self._match_sequence(['NOTIFICATION', 'LIST'])
):
self._parse_update_notification_list_command()
elif self._match('HISTORY'):
self._parse_update_history_command()
elif self._match_sequence(['LDAP', 'NODE']):
self._parse_update_ldap_node_command()
elif self._match_sequence(['MONITOR', 'SWITCHES']):
self._parse_update_monitor_switches_command()
elif self._match('XMLSCHEMA'):
self._parse_update_xmlschema_command()
else:
raise ParseBacktrack()
elif self._match('UPGRADE'):
self._expect_one_of(['DATABASE', 'DB'])
self._parse_upgrade_db_command()
else:
raise ParseBacktrack()
except ParseBacktrack:
self._restore_state()
self._parse_statement()
else:
self._forget_state()
def _parse_top(self):
# Override _parse_top to make a CLP command the top of the parse tree
self._parse_command()
def _parse_init(self, tokens):
# Override _parse_init to set up the output lists (produces, consumes,
# etc.)
super(DB2ZOSScriptParser, self)._parse_init(tokens)
self.produces = []
self.consumes = []
self.connections = []
self.current_connection = None
self.current_user = None
def _save_state(self):
# Override _save_state to save the state of the output lists (produces,
# consumes, etc.)
self._states.append((
self._index,
self._level,
len(self._output),
self.current_schema,
self.current_user,
self.current_connection,
len(self.produces),
len(self.consumes),
len(self.connections),
))
def _restore_state(self):
# Override _restore_state to restore the state of the output lists
# (produces, consumes, etc.)
(
self._index,
self._level,
output_len,
self.current_schema,
self.current_user,
self.current_connection,
produces_len,
consumes_len,
logins_len,
) = self._states.pop()
del self.produces[produces_len:]
del self.consumes[consumes_len:]
del self.connections[logins_len:]
del self._output[output_len:]
| gpl-3.0 | 2,388,590,240,867,676,700 | 39.435933 | 143 | 0.47767 | false | 4.567151 | false | false | false |
jp-security/LeagueStats | app/auth/forms.py | 1 | 1957 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, ValidationError, SelectField, IntegerField, DecimalField
from wtforms.validators import Required, Email, Length, Regexp, EqualTo, NumberRange
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
password = PasswordField('Password', validators=[Required()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class RegistrationForm(FlaskForm):
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
username = StringField('Username', validators=[Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
password = PasswordField('Password', validators=[Required(), EqualTo('password2', message='Password must match.')])
password2 = PasswordField('Confirm password', validators=[Required()])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Old Password', validators=[Required()])
password = PasswordField('New Password', validators=[Required(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm new password', validators=[Required()])
submit = SubmitField('Updated Password')
| gpl-3.0 | -6,136,674,587,046,894,000 | 56.558824 | 131 | 0.662749 | false | 4.681818 | false | false | false |
hilarry/cmdb | cmdb/settings.py | 1 | 2308 | """
Django settings for cmdb project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*f)1h7v-ed7bajus^ykj0fe5n*#ld57m@4ca=a3!%v%3@o_7p#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
#'bootstrap_admin',
#'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'device_manage',
'idcroom_manage',
'operation',
#'bootstrap3',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cmdb.urls'
WSGI_APPLICATION = 'cmdb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cmdb',
'USER': 'cmdb',
'PASSWORD': 'cmdb',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
try:
from local_settings import *
except ImportError:
pass
| apache-2.0 | 5,920,079,684,958,420,000 | 22.313131 | 71 | 0.69974 | false | 3.283073 | false | false | false |
recipy/recipy | integration_test/test_packages.py | 1 | 18672 | """
recipy test case runner.
Run tests to check that recipy logs information on input and output
functions invoked by scripts which use packages that recipy has been
configured to log.
Tests are specified using a [YAML](http://yaml.org/) (YAML Ain't
Markup Language) configuration file. YAML syntax is:
* `---` indicates the start of a document.
* `:` denotes a dictionary. `:` must be followed by a space.
* `-` denotes a list.
The test configuration file has format:
---
script: SCRIPT
[standalone: True|False]
libraries: [LIBRARY, LIBRARY, ... ]
test_cases:
- libraries: [LIBRARY, LIBRARY, ... ]
arguments: [..., ..., ...]
inputs: [INPUT, INPUT, ...]
outputs: [OUTPUT, OUTPUT, ...]
- libraries: [LIBRARY, LIBRARY, ... ]
arguments: [..., ..., ...]
inputs: [INPUT, INPUT, ...]
outputs: [OUTPUT, OUTPUT, ...]
[ skip: "Known issue with recipy" ]
[ skip_py_version: [3.4, ...] ]
- ...
---
script: SCRIPT
...
where each script to be tested is defined by:
* 'SCRIPT': script, with a relative or absolute path. For recipy
sample scripts, the script is assumed in a sub-directory
"integration_test/packages".
* 'standalone': is the script a standalone script? If "False", or if
omitted, then the script is assumed to be a recipy sample script,
runnable via the command 'python -m
integration_test.packages.<script>'.
* 'libraries': A list of zero or more libraries used by the script,
which are expected to be logged by recipy when the script is
run regardless of arguments (i.e. any libraries common to all test
cases). If none, then this can be omitted.
* One or more test cases, each of which defines:
- 'libraries': A list of zero or more libraries used by the script,
which are expected to be logged by recipy when the script is
run with the given arguments. If none, then this can be
omitted.
- 'arguments': A list of arguments to be passed to the script. If
none, then this can be omitted.
- 'inputs': A list of zero or more input files which the script will
read, and which are expected to be logged by recipy when running
the script with the arguments. If none, then this can be omitted.
- 'outputs': A list of zero or more output files which the script
will write, and which are expected to be logged by recipy when
running the script with the arguments. If none, then this can be
omitted.
- 'skip': An optional value. If present this test case is marked as
skipped. The value is the reason for skipping the test case.
- 'skip_py_version': An optional value. If present this test case is marked
as skipped if the current Python version is in the list of values. Should
be used when a patched library does not support a Python version that is
supported by recipy.
For example:
---
script: run_numpy.py
libraries: [numpy]
test_cases:
- arguments: [loadtxt]
inputs: [input.csv]
- arguments: [savetxt]
outputs: [output.csv]
- arguments: [load_and_save_txt]
inputs: [input.csv]
outputs: [output.csv]
---
script: "/home/users/user/run_my_script.py"
standalone: True
test_cases:
- arguments: [ ]
libraries: [ numpy ]
outputs: [ data.csv ]
It is up to the developer to ensure the 'libraries', 'input' and
'output' lists correctly record the libraries, input and output files
that it is expected recipy will log when the script is run with the
given arguments.
The test configuration file is provided via an environment variable,
'RECIPY_TEST_CONFIG'. If undefined, then a default of
'integration_test/config/test_packages.yml' is assumed.
"""
import os
import os.path
import sys
import pytest
from integration_test.database import DatabaseError
from integration_test import environment
from integration_test.file_utils import load_yaml
from integration_test import helpers
from integration_test import recipy_environment as recipyenv
SCRIPT = "script"
""" Test case configuration key. """
STANDALONE = "standalone"
""" Test case configuration key. """
TEST_CASES = "test_cases"
""" Test case configuration key. """
LIBRARIES = "libraries"
""" Test case configuration key. """
ARGUMENTS = "arguments"
""" Test case configuration key. """
INPUTS = "inputs"
""" Test case configuration key. """
OUTPUTS = "outputs"
""" Test case configuration key. """
SKIP = "skip"
""" Test case configuration key. """
SKIP_PY_VERSION = "skip_py_version"
""" Test case configuration key. """
TEST_CONFIG_ENV = "RECIPY_TEST_CONFIG"
""" Environment variable for recipy test configuration file name """
DEFAULT_CONFIG = "integration_test/config/test_packages.yml"
""" Default recipy test configuration file name """
DEFAULT_SAMPLES = "integration_test/packages"
""" Default recipy sample scripts directory """
class ConfigError(Exception):
"""Test configuration error."""
def __init__(self, message, exception=None):
"""Create error.
:param message: Message
:type message: str or unicode
:param exception: Exception
:type value: Exception
"""
super(ConfigError, self).__init__()
self._message = message
self._exception = exception
def __str__(self):
"""Get error as a formatted string.
:return: formatted string
:rtype: str or unicode
"""
message = self._message
if self._exception is not None:
message += " : " + str(self._exception)
return repr(message)
@property
def exception(self):
"""Get exception.
:param exception: Exception
:type value: Exception
"""
return self._exception
def get_test_cases():
"""
py.test callback to associate each test script with its test
cases. This function:
* Gets the test configuration file name from the environment
variable 'RECIPY_TEST_CONFIG'. If undefined, then a default of
'integration_test/config/test_packages.yml' is assumed.
* Loads the test configuration file.
* Creates a list of standalone tuples, each representing one
test case, using get_script_test_cases.
py.test parameterized tests generates one test function per
tuple.
:return: test cases
:rtype: list of (str or unicode, str or unicode, dict)
"""
config_file = helpers.get_environment_value(TEST_CONFIG_ENV,
DEFAULT_CONFIG)
configuration = load_yaml(config_file)
return get_script_test_cases(configuration, DEFAULT_SAMPLES)
def get_test_case_function_name(script_test_case):
"""
py.test callback to generate test case function names.
Function names are of form 'script_arguments' where 'script'
and 'arguments' are the 'script_path' conjoined to the test case's
    'arguments', with all forward slashes, backslashes, colons,
semi-colons and spaces replaced by '_'.
:param script_test_case: Script path, command, test case
specification (a tuple from get_script_test_cases).
:type script_test_case: (str or unicode, str or unicode, dict)
:return: Test case function name
:rtype: str or unicode
"""
[script_path, _, test_case] = script_test_case
arguments = [str(argument) for argument in test_case[ARGUMENTS]]
function_name = "_".join(arguments)
function_name = os.path.split(script_path)[1] + "_" + function_name
for char in [" ", "\\", "/", ":", ";", "."]:
function_name = function_name.replace(char, "_")
return function_name
def get_script_test_cases(configurations, recipy_samples_directory):
"""
Creates a list of standalone tuples, each representing one test
case.
This function takes test configurations, a list of dictionaries,
each of which has a 'script', optional 'standalone' flag, optional
'libaries' list and 'test_cases', a list of one or more test cases
(each of which is a dictionary of 'libraries', 'arguments',
'inputs', 'outputs', optional 'skip').
It returns a list of tuples (script path, command, test case) where:
* script_path is the path to the script:
- If the test configuration has a 'standalone' value of "False",
or no such value, then the script is assumed to be a recipy
sample script in "integration_test/packages/".
- Otherwise, the 'script' configuration value is used as-is.
* commmand is the command-line invocation that will be used to run
the script (not including "python" or any arguments, which are
test-case specific):
- If the test configuration has a 'standalone' value of "False",
or no such value, then the command to run the script is
assumed to be "-m integration_test.packages.SCRIPT"
- Otherwise, the 'script' configuration value is used as-is.
* test_case is a single test case configuration, with any common
libraries appended to its 'libraries'.
If any test case contains a 'skip' entry then that test case is marked
up via pytest.mark.skip.
:param configurations: Test configurations
:type dict: list of dict
:param recipy_samples_directory: directory with recipy samples
:type recipy_samples_directory: str or unicode
:return: test cases
:rtype: list of (str or unicode, str or unicode, dict)
"""
test_cases = []
for configuration in configurations:
script = configuration[SCRIPT]
if STANDALONE not in configuration:
# recipy sample test
script_path = os.path.join(recipy_samples_directory, script)
# e.g. integration_test/packages/run_numpy.py
script_module = os.path.splitext(script_path)[0]
# e.g. integration_test/packages/run_numpy
script_module = script_module.replace("/", ".")
script_module = script_module.replace("\\", ".")
# e.g. integration_test.packages.run_numpy
command = ["-m", script_module]
# e.g. -m integration_test.packages.run_numpy
else:
script_path = script
command = [script]
if LIBRARIES in configuration:
common_libraries = configuration[LIBRARIES]
else:
common_libraries = []
for test_case in configuration[TEST_CASES]:
if LIBRARIES in test_case:
test_case[LIBRARIES].extend(common_libraries)
else:
test_case[LIBRARIES] = common_libraries
single_test_case = (script_path, command, test_case)
if SKIP in test_case:
reason = get_test_case_function_name(single_test_case)
reason = reason + ": " + test_case[SKIP]
single_test_case = pytest.mark.skip(
reason=reason)((single_test_case))
if SKIP_PY_VERSION in test_case:
py_version = '{}.{}'.format(sys.version_info.major,
sys.version_info.minor)
to_skip = [str(num) for num in test_case[SKIP_PY_VERSION]]
reason = get_test_case_function_name(single_test_case)
reason = reason + ": unsupported Python version " + py_version
single_test_case = pytest.mark.skipif(
py_version in to_skip,
reason=reason)((single_test_case))
test_cases.append(single_test_case)
return test_cases
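# Illustrative sketch (not part of the original module), reusing the
# run_numpy.py example from the module docstring: a configuration entry
#     {'script': 'run_numpy.py', 'libraries': ['numpy'],
#      'test_cases': [{'arguments': ['loadtxt'], 'inputs': ['input.csv']}]}
# is expanded by get_script_test_cases() into the single tuple
#     ('integration_test/packages/run_numpy.py',
#      ['-m', 'integration_test.packages.run_numpy'],
#      {'arguments': ['loadtxt'], 'inputs': ['input.csv'],
#       'libraries': ['numpy']})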
def run_test_case(script_path, command, test_case):
"""
Run a single test case. This runs a script using arguments in
test_case and validates that recipy has logged information
about the script, also using data in test_case.
test_case is assumed to have the following
entries:
* 'libraries': a list of one or more libraries e.g. ['numpy'].
* 'arguments': a list of script arguments e.g. ['loadtxt'],
['savetxt']. If none, then this can be omitted.
* 'inputs': a list of zero or more input files which running
the script with the argument will read e.g. ['data.csv']. If
none, then this can be omitted.
* 'outputs': a list of zero or more output files which running
the script with the argument will write
e.g. ['data.csv']. If none, then this can be omitted.
:param script_path: Path to the script.
:type script_path: str or unicode
:param commmand: Command-line invocation used to run the script
(not including "python" or any arguments, which are test-case
specific).
:type command: str or unicode
:param test_case: Test case configuration.
:type test_case: dict
"""
number_of_logs = 0
try:
number_of_logs =\
helpers.get_number_of_logs(recipyenv.get_recipydb())
except DatabaseError:
# Database may not exist if running tests for first time so
# give benefit of doubt at this stage and assume running script
# will bring it into life.
pass
libraries = test_case[LIBRARIES]
if ARGUMENTS in test_case:
arguments = test_case[ARGUMENTS]
else:
arguments = []
# Execute script
_, _ = helpers.execute_python(command + arguments, 0)
# Validate recipy database
log, _ = helpers.get_log(recipyenv.get_recipydb())
# Number of logs
new_number_of_logs =\
helpers.get_number_of_logs(recipyenv.get_recipydb())
assert new_number_of_logs == (number_of_logs + 1),\
("Unexpected number of logs " + new_number_of_logs)
# Script that was invoked
check_script(script_path, log["script"],
arguments, log["command_args"])
# Libraries
check_libraries(libraries, log["libraries"])
# Inputs and outputs (local filenames only)
check_input_outputs(test_case, INPUTS, log["inputs"])
check_input_outputs(test_case, OUTPUTS, log["outputs"])
# Dates
check_dates(log["date"], log["exit_date"])
# Execution environment
check_environment(log["command"], log["environment"])
# Miscellaneous
assert environment.get_user() == log["author"], "Unexpected author"
assert log["description"] == "", "Unexpected description"
def check_script(script, logged_script, arguments, logged_arguments):
"""
Check script and arguments logged by recipy.
:param script: Script specified in test configuration
:type script: str or unicode
:param logged_script: Script logged by recipy
:type logged_script: str or unicode
:param arguments: Arguments specified in test configuration
:type arguments: list
:param logged_arguments: Arguments logged by recipy
:type logged_arguments: list
"""
# Use os.path.abspath as os.path.samefile is not supported in
# Python 2 on Windows.
assert os.path.abspath(script) == os.path.abspath(logged_script),\
"Unexpected script"
assert " ".join(arguments) == logged_arguments, "Unexpected command_args"
def check_libraries(libraries, logged_libraries):
"""
Check libraries logged by recipy.
:param libraries: Libraries specified in test configuration
:type libraries: list of str or unicode
:param logged_libraries: Libraries logged by recipy
:type logged_libraries: list of str or unicode
:raises ConfigError: if any library is not installed
"""
packages = environment.get_packages()
for library in libraries:
if environment.is_package_installed(packages, library):
version = environment.get_package_version(packages, library)
library_version = library + " v" + version
assert library_version in logged_libraries,\
("Could not find library " + library_version)
else:
raise ConfigError(("Library {} is not installed".format(library)))
def check_dates(logged_start_date, logged_end_date):
"""
Check dates logged by recipy.
:param logged_start_date: Start date logged by recipy
:type logged_start_date: str or unicode
:param logged_end_date: End date logged by recipy
:type logged_end_date: str or unicode
"""
try:
start_date = environment.get_tinydatestr_as_date(logged_start_date)
except ValueError as _:
assert False, "date is not a valid date string"
try:
exit_date = environment.get_tinydatestr_as_date(logged_end_date)
except ValueError as _:
assert False, "end_date is not a valid date string"
assert start_date <= exit_date, "date is not before exit_date"
def check_environment(logged_command, logged_environment):
"""
Check environment logged by recipy.
:param logged_command: Python executable logged by recipy
:type logged_command: str or unicode
:param logged_environment: Operating system and Python
version logged by recipy
:type logged_environment: list of str or unicore
"""
assert environment.get_python_exe() == logged_command,\
"Unexpected command"
assert environment.get_os() in logged_environment,\
"Cannot find operating system in environment"
python_version = "python " + environment.get_python_version()
assert python_version in logged_environment,\
"Cannot find Python in environment"
def check_input_outputs(test_case, io_key, logged_io):
"""
Check inputs/outputs logged by recipy.
:param test_case: Test case configuration
:type test_case: dict
:param io_key: "inputs" or "outputs", key into test_case
:type io_key: str or unicode
:param logged_io: Inputs/outputs logged by recipy
:type logged_io: list
"""
if io_key in test_case:
io_files = test_case[io_key]
else:
io_files = []
assert len(io_files) == len(logged_io),\
("Unexpected number of " + io_key)
# Convert logged files to local file names.
logged_files = [os.path.basename(file_name)
for [file_name, _] in logged_io]
for io_file in io_files:
assert io_file in logged_files,\
("Could not find " + io_key + " " + io_file)
@pytest.mark.parametrize("script_test_case",
get_test_cases(),
ids=get_test_case_function_name)
def test_scripts(script_test_case):
"""
Run a test defined in the recipy test configuration.
    :param script_test_case: Script path, command, test case
specification - consistent with a tuple from
get_script_test_cases.
:type script_test_case: (str or unicode, str or unicode, dict)
"""
(script_path, command, test_case) = script_test_case
run_test_case(script_path, command, test_case)
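# Illustrative invocation sketch (not part of the test module): with the
# default configuration these tests are normally driven by py.test, e.g.
#     py.test integration_test/test_packages.py
# or, pointing at a custom configuration file (name below is a placeholder),
#     RECIPY_TEST_CONFIG=my_tests.yml py.test integration_test/test_packages.py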
| apache-2.0 | -3,213,694,212,848,131,600 | 36.121272 | 78 | 0.655848 | false | 4.031959 | true | false | false |
decimalbell/devnull | python/sidl/unpacker.py | 1 | 3416 | import struct
class Unpacker(object):
def __init__(self, buf):
self._buffer = buf
self._offset = 0
self._typemethods = {'b': self.unpack_int8, 'B': self.unpack_uint8,
'h': self.unpack_int16, 'H': self.unpack_uint16,
'i': self.unpack_int32, 'I': self.unpack_uint32,
'q': self.unpack_int64, 'Q': self.unpack_uint64,
'f': self.unpack_float, 'd': self.unpack_double,
's': self.unpack_string, 'm': self.unpack_message,
}
@property
def offset(self):
return self._offset
def unpack_integer(self, fmt):
value = struct.unpack_from(fmt, self._buffer, self._offset)
self._offset = self._offset + struct.calcsize(fmt)
return value[0]
def unpack_int8(self):
return self.unpack_integer('<b')
def unpack_int16(self):
return self.unpack_integer('<h')
def unpack_int32(self):
return self.unpack_integer('<l')
def unpack_int64(self):
return self.unpack_integer('<q')
def unpack_uint8(self):
return self.unpack_integer('<B')
def unpack_uint16(self):
return self.unpack_integer('<H')
def unpack_uint32(self):
return self.unpack_integer('<I')
def unpack_uint64(self):
return self.unpack_integer('<Q')
def unpack_float(self):
return float(self.unpack_string())
def unpack_double(self):
return float(self.unpack_string())
def unpack_string(self):
l = self.unpack_uint16()
s = struct.unpack_from('%ds' % (l,), self._buffer, self._offset)
self._offset = self._offset + l
return s[0].decode('utf-8')
def unpack_binary(self):
l = self.unpack_uint32()
s = struct.unpack_from('%ds' % (l,), self._buffer, self._offset)
self._offset = self._offset + l
return s[0].decode('utf-8')
def unpack_message(self, msg):
msg.unpack(self)
def unpack_list(self, l):
length = self.unpack_uint32()
if l.typecode in l.typecodes[:-1]:
for _ in range(0, length):
value = self._typemethods[l.typecode]()
l.append(value)
elif l.typecode == l.typecodes[-1]:
for _ in range(0, length):
msg = l.type()
self._typemethods[l.typecode](msg)
l.append(msg)
def unpack_set(self, s):
length = self.unpack_uint32()
if s.typecode in s.typecodes[:-1]:
for _ in range(0, length):
value = self._typemethods[s.typecode]()
s.add(value)
elif s.typecode == s.typecodes[-1]:
for _ in range(0, length):
msg = s.type()
self._typemethods[s.typecode](msg)
s.add(msg)
def unpack_dict(self, d):
length = self.unpack_uint32()
for _ in range(0, length):
# key
key = self._typemethods[d.key_typecode]()
# value
if d.value_typecode in d.typecodes[:-1]:
value = self._typemethods[d.value_typecode]()
elif d.value_typecode == d.typecodes[-1]:
value = d.value_type()
self._typemethods[d.value_typecode](value)
d[key] = value
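# Minimal usage sketch (not part of the original module): unpack a buffer laid
# out by hand with struct, following the little-endian conventions used above.
# The field values are arbitrary examples.
if __name__ == '__main__':
    buf = struct.pack('<H5sI', 5, b'hello', 42)
    unpacker = Unpacker(buf)
    assert unpacker.unpack_string() == 'hello'
    assert unpacker.unpack_uint32() == 42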
| mit | -6,304,016,966,791,125,000 | 31.226415 | 79 | 0.522248 | false | 3.78714 | false | false | false |
kived/kvlang | kvlang/ast_parser.py | 1 | 4502 | from functools import partial
import weakref
from kivy.compat import iteritems
from kivy.factory import Factory
from kivy.lang import ParserRuleProperty, Parser, ParserException, ParserRule as kivy_ParserRule, Builder as kivy_Builder
from kivy.logger import Logger
from kivy.weakproxy import WeakProxy
from kvlang.kvTree import DirectiveNode, WidgetNode, WidgetLikeNode, PropertyNode, CanvasNode, InstructionNode
class ParserRule(kivy_ParserRule):
__slots__ = ('ast_node', '__weakref__')
def load_ast(self, ast, **kwargs):
kwargs.setdefault('rulesonly', False)
self._current_filename = fn = kwargs.get('filename', None)
if fn in self.files:
Logger.warning(
'kvlang: The file {} is loaded multiple times, '
'you might have unwanted behaviors.'.format(fn))
try:
parser = ASTParser(ast=ast)
self.rules.extend(parser.rules)
self._clear_matchcache()
for name, cls, template in parser.templates:
self.templates[name] = (cls, template, fn)
Factory.register(name, cls=partial(self.template, name), is_template=True)
for name, baseclasses in iteritems(parser.dynamic_classes):
Factory.register(name, baseclasses=baseclasses, filename=fn)
if kwargs['rulesonly'] and parser.root:
filename = kwargs.get('rulesonly', '<string>')
raise Exception('The file <%s> contains also non-rules '
'directives' % filename)
if fn and (parser.templates or
parser.dynamic_classes or parser.rules):
self.files.append(fn)
if parser.root:
widget = Factory.get(parser.root.name)()
self._apply_rule(widget, parser.root, parser.root)
return widget
finally:
self._current_filename = None
Builder_apply_rule = kivy_Builder._apply_rule
def _apply_rule(self, widget, rule, *args, **kwargs):
Builder_apply_rule(widget, rule, *args, **kwargs)
if hasattr(rule, 'ast_node'):
widget.ast_node = rule.ast_node
widget.ast_node.ast_widget = widget.proxy_ref
kivy_Builder._apply_rule = partial(_apply_rule, kivy_Builder)
class ASTParser(Parser):
def __init__(self, **kwargs):
self.ast = kwargs.get('ast', None)
if self.ast is None:
raise ValueError('No AST passed')
kwargs['content'] = self.ast
super(ASTParser, self).__init__(**kwargs)
def execute_directives(self):
for directive in self.ast.find_all(DirectiveNode):
self.directives.append((directive.token.line,
str(directive).strip()[2:]))
super(ASTParser, self).execute_directives()
def parse(self, ast):
lines = ast.source.splitlines()
if not lines:
return
num_lines = len(lines)
lines = list(zip(list(range(num_lines)), lines))
self.sourcecode = lines[:]
self.execute_directives()
rules = self.parse_tree(ast.tree)
for rule in rules:
rule.precompile()
def parse_tree(self, root):
if not root:
return []
nodes = root.children if root.isNil() else [root]
return self.parse_nodes(nodes)
def parse_nodes(self, nodes, level=0):
objects = []
for node in [n for n in nodes if isinstance(n, WidgetLikeNode)]:
ln = node.get_sourceline()
name = str(node)
if (level != 0
and name not in self.PROP_ALLOWED
and any(ord(z) not in self.PROP_RANGE for z in name)):
raise ParserException(self, ln, 'Invalid class name')
current_object = ParserRule(self, ln, name, level)
objects.append(current_object)
node.ast_rule = weakref.proxy(current_object)
current_object.ast_node = weakref.proxy(node)
for child in node.interesting_children():
if isinstance(child, PropertyNode):
name = child.name
value = child.parsevalue
if name == 'id':
if len(value) == 0:
raise ParserException(self, ln, 'Empty id')
if value in ('self', 'root'):
raise ParserException(self, ln,
'Invalid id, cannot be "self" or "root"')
current_object.id = value
elif len(value):
rule = ParserRuleProperty(self, ln, name, value)
if name[:3] == 'on_':
current_object.handlers.append(rule)
else:
current_object.properties[name] = rule
elif isinstance(child, CanvasNode):
canvas = self.parse_nodes([child], level + 2)
setattr(current_object, child.canvas_object, canvas[0])
elif isinstance(child, (WidgetNode, InstructionNode)):
children = self.parse_nodes([child], level + 1)
children_set = getattr(current_object, 'children', [])
children_set += children
current_object.children = children_set
return objects
| mit | -200,474,541,999,551,460 | 29.835616 | 121 | 0.677477 | false | 3.372285 | false | false | false |
nco/pynco | nco/nco.py | 1 | 19238 | """
nco module. Use Nco class as interface.
"""
import distutils.spawn
import os
import re
import shlex
import six
import subprocess
import tempfile
from distutils.version import LooseVersion
class NCOException(Exception):
def __init__(self, stdout, stderr, returncode):
super(NCOException, self).__init__()
self.stdout = stdout
self.stderr = stderr
self.returncode = returncode
self.msg = "(returncode:{0}) {1}".format(returncode, stderr)
def __str__(self):
return self.msg
class Nco(object):
def __init__(
self,
returnCdf=False,
return_none_on_error=False,
force_output=True,
cdf_module="netcdf4",
debug=0,
**kwargs
):
operators = [
"ncap2",
"ncatted",
"ncbo",
"nces",
"ncecat",
"ncflint",
"ncks",
"ncpdq",
"ncra",
"ncrcat",
"ncrename",
"ncwa",
"ncea",
]
if "NCOpath" in os.environ:
self.nco_path = os.environ["NCOpath"]
else:
self.nco_path = os.path.split(distutils.spawn.find_executable("ncks"))[0]
self.operators = operators
self.return_cdf = returnCdf
self.return_none_on_error = return_none_on_error
self.force_output = force_output
self.cdf_module = cdf_module
self.debug = debug
self.outputOperatorsPattern = [
"-H",
"--data",
"--hieronymus",
"-M",
"--Mtd",
"--Metadata",
"-m",
"--mtd",
"--metadata",
"-P",
"--prn",
"--print",
"-r",
"--revision",
"--vrs",
"--version",
"--u",
"--units",
]
self.OverwriteOperatorsPattern = ["-O", "--ovr", "--overwrite"]
self.AppendOperatorsPattern = ["-A", "--apn", "--append"]
# operators that can function with a single file
        self.SingleFileOperatorsPattern = ["ncap2", "ncatted", "ncks", "ncrename"]
self.DontForcePattern = (
self.outputOperatorsPattern
+ self.OverwriteOperatorsPattern
+ self.AppendOperatorsPattern
)
# I/O from call
self.returncode = 0
self.stdout = ""
self.stderr = ""
if kwargs:
self.options = kwargs
else:
self.options = None
def __dir__(self):
res = dir(type(self)) + list(self.__dict__.keys())
res.extend(self.operators)
return res
def call(self, cmd, inputs=None, environment=None, use_shell=False):
inline_cmd = cmd
if inputs is not None:
if isinstance(inputs, str):
inline_cmd.append(inputs)
else:
# assume it's an iterable
inline_cmd.extend(inputs)
if self.debug:
print("# DEBUG ==================================================")
if environment:
for key, val in list(environment.items()):
print("# DEBUG: ENV: {0} = {1}".format(key, val))
print("# DEBUG: CALL>> {0}".format(" ".join(inline_cmd)))
print("# DEBUG ==================================================")
        # if we're using the shell then we need to pass a single string as the command rather than an iterable
if use_shell:
inline_cmd = " ".join(inline_cmd)
try:
proc = subprocess.Popen(
inline_cmd,
shell=use_shell,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environment,
)
except OSError:
# Argument list may have been too long, so don't use a shell
proc = subprocess.Popen(
inline_cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environment,
)
retvals = proc.communicate()
return {
"stdout": retvals[0],
"stderr": retvals[1],
"returncode": proc.returncode,
}
def has_error(self, method_name, inputs, cmd, retvals):
if self.debug:
print(
"# DEBUG: RETURNCODE: {return_code}".format(
return_code=retvals["returncode"]
)
)
if retvals["returncode"] != 0:
print("Error in calling operator {method} with:".format(method=method_name))
print(">>> {command} <<<".format(command=" ".join(cmd)))
print("Inputs: {0!s}".format(inputs))
print(retvals["stderr"])
return True
else:
return False
def __getattr__(self, nco_command):
# shortcut to avoid calling auto_doc decorator if command doesn't exist
if nco_command not in self.operators:
raise AttributeError("Unknown command: {cmd}".format(cmd=nco_command))
# first run the auto_doc decorator, which runs the command with --help option, in order to pull in usage info
@auto_doc(nco_command, self)
def get(self, input, **kwargs):
"""
This is the function that's called when this __getattr__ "magic" function runs.
Parses options and constructs/calls an appropriate/corresponding NCO command.
:param self:
:param input:
:param kwargs:
:return:
"""
options = kwargs.pop("options", [])
force = kwargs.pop("force", self.force_output)
output = kwargs.pop("output", None)
environment = kwargs.pop("env", None)
debug = kwargs.pop("debug", self.debug)
return_cdf = kwargs.pop("returnCdf", False)
return_array = kwargs.pop("returnArray", False)
return_ma_array = kwargs.pop("returnMaArray", False)
operator_prints_out = kwargs.pop("operator_prints_out", False)
use_shell = kwargs.pop("use_shell", True)
# build the NCO command
# 1. the NCO operator
cmd = [os.path.join(self.nco_path, nco_command)]
if options:
for option in options:
if isinstance(option, str):
cmd.extend(str.split(option))
elif hasattr(option,"prn_option"):
cmd.extend(option.prn_option().split())
else:
# assume it's an iterable
cmd.extend(option)
if debug:
if type(debug) == bool:
# assume debug level is 3
cmd.append("--nco_dbg_lvl=3")
elif type(debug) == int:
cmd.append("--nco_dbg_lvl={0}".format(debug))
else:
raise TypeError(
"Unknown type for debug: \
{0}".format(
type(debug)
)
)
if output and force and os.path.isfile(output):
# make sure overwrite is set
if debug:
print("Overwriting file: {0}".format(output))
if any([i for i in cmd if i in self.DontForcePattern]):
force = False
else:
force = False
# 2b. all other keyword args become options
if kwargs:
for key, val in list(kwargs.items()):
if val and type(val) == bool:
cmd.append("--{0}".format(key))
if cmd[-1] in self.DontForcePattern:
force = False
elif (
isinstance(val, str)
or isinstance(val, int)
or isinstance(val, float)
):
cmd.append("--{option}={value}".format(option=key, value=val))
else:
# we assume it's either a list, a tuple or any iterable
cmd.append(
"--{option}={values}".format(
option=key, values=",".join(val)
)
)
# 2c. Global options come in
if self.options:
for key, val in list(self.options.items()):
if val and type(val) == bool:
cmd.append("--" + key)
elif isinstance(val, str):
cmd.append("--{0}={1}".format(key, val))
else:
# we assume it's either a list, a tuple or any iterable
cmd.append("--{0}={1}".format(key, ",".join(val)))
# 3. Add in overwrite if necessary
if force:
cmd.append("--overwrite")
# Check if operator appends
operator_appends = False
for piece in cmd:
if piece in self.AppendOperatorsPattern:
operator_appends = True
# If operator appends and NCO version >= 4.3.7, remove -H -M -m
# and their ancillaries from outputOperatorsPattern
if operator_appends and nco_command == "ncks":
nco_version = self.version()
if LooseVersion(nco_version) >= LooseVersion("4.3.7"):
self.outputOperatorsPattern = [
"-r",
"--revision",
"--vrs",
"--version",
]
# Check if operator prints out
for piece in cmd:
if piece in self.outputOperatorsPattern:
operator_prints_out = True
if operator_prints_out:
retvals = self.call(cmd, inputs=input)
self.returncode = retvals["returncode"]
self.stdout = retvals["stdout"]
self.stderr = retvals["stderr"]
if not self.has_error(nco_command, input, cmd, retvals):
return retvals["stdout"]
# parsing can be done by 3rd party
else:
if self.return_none_on_error:
return None
else:
raise NCOException(**retvals)
else:
if output is not None:
if isinstance(output, str):
cmd.append("--output={0}".format(output))
else:
# we assume it's an iterable.
if len(output) > 1:
                            raise TypeError(
                                "Only one output allowed, must be string or 1 length iterable. "
                                "Received output: {out} with a type of {type}".format(
out=output, type=type(output)
)
)
                        # take the single element; calling cmd.extend() with the
                        # formatted string would otherwise append it one
                        # character at a time
                        output = output[0]
                        cmd.append("--output={0}".format(output))
elif not (nco_command in self.SingleFileOperatorsPattern):
# create a temporary file, use this as the output
file_name_prefix = nco_command + "_" + input.split(os.sep)[-1]
tmp_file = tempfile.NamedTemporaryFile(
mode="w+b", prefix=file_name_prefix, suffix=".tmp", delete=False
)
output = tmp_file.name
cmd.append("--output={0}".format(output))
retvals = self.call(
cmd, inputs=input, environment=environment, use_shell=use_shell
)
self.returncode = retvals["returncode"]
self.stdout = retvals["stdout"]
self.stderr = retvals["stderr"]
if self.has_error(nco_command, input, cmd, retvals):
if self.return_none_on_error:
return None
else:
print(self.stdout)
print(self.stderr)
raise NCOException(**retvals)
if return_array:
return self.read_array(output, return_array)
elif return_ma_array:
return self.read_ma_array(output, return_ma_array)
elif self.return_cdf or return_cdf:
if not self.return_cdf:
self.load_cdf_module()
return self.read_cdf(output)
else:
return output
if (nco_command in self.__dict__) or (nco_command in self.operators):
if self.debug:
print("Found method: {0}".format(nco_command))
# cache the method for later
setattr(self.__class__, nco_command, get)
return get.__get__(self)
else:
# If the method isn't in our dictionary, act normal.
print("#=====================================================")
print("Cannot find method: {0}".format(nco_command))
raise AttributeError("Unknown method {0}!".format(nco_command))
def load_cdf_module(self):
if self.cdf_module == "netcdf4":
try:
import netCDF4 as cdf
self.cdf = cdf
except Exception:
                raise ImportError(
                    "Could not load python-netcdf4 - try "
                    "setting cdf_module='scipy'"
)
elif self.cdf_module == "scipy":
try:
import scipy.io.netcdf as cdf
self.cdf = cdf
except Exception:
                raise ImportError(
                    "Could not load scipy.io.netcdf - try "
                    "setting cdf_module='netcdf4'"
)
else:
raise ValueError(
"Unknown value provided for cdf_module. Valid "
"values are 'scipy' and 'netcdf4'"
)
def set_return_array(self, value=True):
self.returnCdf = value
if value:
self.load_cdf_module()
def unset_return_array(self):
self.set_return_array(False)
def has_nco(self, path=None):
if path is None:
path = self.nco_path
if os.path.isdir(path) and os.access(path, os.X_OK):
return True
else:
return False
def check_nco(self):
if self.has_nco():
call = [os.path.join(self.nco_path, "ncra"), "--version"]
proc = subprocess.Popen(
" ".join(call), stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
retvals = proc.communicate()
print(retvals)
def set_nco_path(self, value):
self.nco_path = value
def get_nco_path(self):
return self.nco_path
# ==================================================================
# Additional operators:
# ------------------------------------------------------------------
@property
def module_version(self):
return "0.0.0"
def version(self):
# return NCO's version
proc = subprocess.Popen(
[os.path.join(self.nco_path, "ncra"), "--version"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
ret = proc.communicate()
ncra_help = ret[1]
if isinstance(ncra_help, bytes):
ncra_help = ncra_help.decode("utf-8")
match = re.search(r"NCO netCDF Operators version (\d.*) ", ncra_help)
# some versions write version information in quotation marks
if not match:
match = re.search(r'NCO netCDF Operators version "(\d.*)" ', ncra_help)
return match.group(1).split(" ")[0]
def read_cdf(self, infile):
"""Return a cdf handle created by the available cdf library.
python-netcdf4 and scipy supported (default:scipy)"""
if not self.return_cdf:
self.load_cdf_module()
if self.cdf_module == "scipy":
# making it compatible to older scipy versions
file_obj = self.cdf.netcdf_file(infile, mode="r")
elif self.cdf_module == "netcdf4":
file_obj = self.cdf.Dataset(infile)
else:
raise ImportError(
"Could not import data \
from file {0}".format(
infile
)
)
return file_obj
def open_cdf(self, infile):
"""Return a cdf handle created by the available cdf library.
        python-netcdf4 and scipy supported (default:scipy)"""
if not self.return_cdf:
self.load_cdf_module()
if self.cdf_module == "scipy":
# making it compatible to older scipy versions
print("Use scipy")
file_obj = self.cdf.netcdf_file(infile, mode="r+")
elif self.cdf_module == "netcdf4":
print("Use netcdf4")
file_obj = self.cdf.Dataset(infile, "r+")
else:
raise ImportError(
"Could not import data \
from file: {0}".format(
infile
)
)
return file_obj
def read_array(self, infile, var_name):
"""Directly return a numpy array for a given variable name"""
file_handle = self.read_cdf(infile)
try:
# return the data array
return file_handle.variables[var_name][:]
except KeyError:
print("Cannot find variable: {0}".format(var_name))
raise KeyError
def read_ma_array(self, infile, var_name):
"""Create a masked array based on cdf's FillValue"""
file_obj = self.read_cdf(infile)
# .data is not backwards compatible to old scipy versions, [:] is
data = file_obj.variables[var_name][:]
# load numpy if available
try:
import numpy as np
except Exception:
raise ImportError("numpy is required to return masked arrays.")
if hasattr(file_obj.variables[var_name], "_FillValue"):
# return masked array
fill_val = file_obj.variables[var_name]._FillValue
retval = np.ma.masked_where(data == fill_val, data)
else:
# generate dummy mask which is always valid
retval = np.ma.array(data)
return retval
def auto_doc(tool, nco_self):
"""
Generate the __doc__ string of the decorated function by calling the nco help command
:param tool:
:param nco_self:
:return:
"""
def desc(func):
func.__doc__ = nco_self.call([tool, "--help"]).get("stdout")
return func
return desc
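# Illustrative usage sketch (not part of the module): requires the NCO
# binaries on PATH; "in.nc" and the variable name "T" are placeholder
# assumptions, not files or variables shipped with this package.
#
#     nco = Nco()
#     print(nco.version())
#     nco.ncks(input="in.nc", output="subset.nc", options=["-v T"])
#     temps = nco.ncks(input="in.nc", returnArray="T")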
| mit | -416,057,038,847,101,900 | 34.429098 | 117 | 0.477025 | false | 4.599092 | false | false | false |
CristianBB/SickRage | sickbeard/dailysearcher.py | 1 | 4268 | # Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import threading
import sickbeard
from sickbeard import logger
from sickbeard import db
from sickbeard import common
from sickbeard import network_timezones
from sickrage.show.Show import Show
from sickrage.helper.exceptions import MultipleShowObjectsException
class DailySearcher():
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
def run(self, force=False):
"""
Runs the daily searcher, queuing selected episodes for search
:param force: Force search
"""
if self.amActive:
return
self.amActive = True
logger.log(u"Searching for new released episodes ...")
if not network_timezones.network_dict:
network_timezones.update_network_dict()
if network_timezones.network_dict:
curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
else:
curDate = (datetime.date.today() + datetime.timedelta(days=2)).toordinal()
curTime = datetime.datetime.now(network_timezones.sb_timezone)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND (airdate <= ? and airdate > 1)",
[common.UNAIRED, curDate])
sql_l = []
show = None
for sqlEp in sqlResults:
try:
if not show or int(sqlEp["showid"]) != show.indexerid:
show = Show.find(sickbeard.showList, int(sqlEp["showid"]))
# for when there is orphaned series in the database but not loaded into our showlist
if not show or show.paused:
continue
except MultipleShowObjectsException:
logger.log(u"ERROR: expected to find a single show matching " + str(sqlEp['showid']))
continue
if show.airs and show.network:
# This is how you assure it is always converted to local time
air_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network).astimezone(network_timezones.sb_timezone)
# filter out any episodes that haven't started airing yet,
# but set them to the default status while they are airing
# so they are snatched faster
if air_time > curTime:
continue
ep = show.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"]))
with ep.lock:
if ep.season == 0:
                    logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to SKIPPED because it is a special season")
ep.status = common.SKIPPED
else:
logger.log(u"New episode %s airs today, setting to default episode status for this show: %s" % (ep.prettyName(), common.statusStrings[ep.show.default_ep_status]))
ep.status = ep.show.default_ep_status
sql_l.append(ep.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
else:
logger.log(u"No new released episodes found ...")
# queue episode for daily search
dailysearch_queue_item = sickbeard.search_queue.DailySearchQueueItem()
sickbeard.searchQueueScheduler.action.add_item(dailysearch_queue_item)
self.amActive = False
| gpl-3.0 | -8,188,740,517,060,104,000 | 37.45045 | 182 | 0.626992 | false | 4.163902 | false | false | false |
aileron-split/aileron-web | server/blog/models.py | 1 | 1104 | from django.db import models
# Blog app models.
class Post(models.Model):
published = models.BooleanField(default=False)
published_date = models.DateTimeField(null=True, blank=True)
slug = models.SlugField(max_length=80)
title = models.CharField(max_length=80, default='Post Title')
subtitle = models.CharField(max_length=200, null=True, blank=True)
summary = models.TextField(default='Post summary.')
content = models.TextField(default='Post content.')
card_sm_image = models.ImageField(upload_to='images/cards/', null=True, blank=True)
card_mat_image = models.ImageField(upload_to='images/cards/', null=True, blank=True)
card_lg_image = models.ImageField(upload_to='images/cards/', null=True, blank=True)
video = models.URLField(null=True, blank=True)
album = models.ForeignKey('gallery.Album', blank=True, null=True)
author = models.ForeignKey('team.Member', blank=True, null=True)
created_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
| gpl-3.0 | 2,635,139,199,670,405,000 | 47 | 88 | 0.71558 | false | 3.631579 | false | false | false |
BrendanLeber/adventofcode | 2019/09-sensor_boost/intcode.py | 1 | 7073 | # -*- coding: utf-8 -*-
import pdb
import sys
import traceback
from collections import deque
from enum import IntEnum
from typing import Deque, Dict, List, NamedTuple, Optional, Tuple, Union
class ParameterMode(IntEnum):
POSITIONAL = 0
IMMEDIATE = 1
RELATIVE = 2
class ParameterType(IntEnum):
READ = 0
WRITE = 1
class InstructionInfo(NamedTuple):
name: str
params: Tuple[ParameterType, ...]
INSTRUCTIONS: Dict[int, InstructionInfo] = {
1: InstructionInfo("add", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
2: InstructionInfo("mul", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
3: InstructionInfo("in", (ParameterType.WRITE,)),
4: InstructionInfo("out", (ParameterType.READ,)),
5: InstructionInfo("jnz", (ParameterType.READ, ParameterType.READ)),
6: InstructionInfo("jz", (ParameterType.READ, ParameterType.READ)),
7: InstructionInfo("lt", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
8: InstructionInfo("eq", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
9: InstructionInfo("rbo", (ParameterType.READ,)),
99: InstructionInfo("halt", tuple()),
}
class Intcode:
def __init__(self, program: List[int]) -> None:
self.ip: int = 0
self.program: List[int] = program[:]
self.tape: List[int] = program[:]
# add extra memory space for data buffer
self.tape += [0] * max(1024, len(self.program) * 3)
self.relative_base: int = 0
self.last_output: Optional[int] = None
self.last_input: Optional[int] = None
self.chained_mode: bool = False
self.inputs: Deque = deque()
# self.execution_trace: Dict[int, str] = {}
def _disasm(self) -> str:
addr = f"{self.ip:5}"
opcode = self.tape[self.ip] % 100
opname = INSTRUCTIONS[opcode].name
params = []
mask = 10
for pnum, ptype in enumerate(INSTRUCTIONS[opcode].params, 1):
mask *= 10
pmode = ParameterMode((self.tape[self.ip] // mask) % 10)
if ptype == ParameterType.WRITE:
leader = "$"
elif pmode == ParameterMode.POSITIONAL:
leader = "$"
elif pmode == ParameterMode.RELATIVE:
leader = "@"
else:
leader = ""
params.append(f"{leader}{self.tape[self.ip + pnum]}")
return addr + ": " + f"{opname} " + ", ".join(params)
def decode_instruction(self) -> Tuple[int, List[int]]:
"""Decode the opcode and the arguments for this instruction."""
opcode: int = self.tape[self.ip] % 100
arguments: List[int] = []
mask: int = 10
# start at 1 to skip the opcode in the instruction
for param_num, param_type in enumerate(INSTRUCTIONS[opcode].params, 1):
mask *= 10
param_mode: ParameterMode = ParameterMode((self.tape[self.ip] // mask) % 10)
if param_type == ParameterType.WRITE:
position = self.tape[self.ip + param_num]
if param_mode == ParameterMode.RELATIVE:
position += self.relative_base
arguments.append(position)
elif param_mode == ParameterMode.POSITIONAL:
position = self.tape[self.ip + param_num]
arguments.append(self.tape[position])
elif param_mode == ParameterMode.IMMEDIATE:
arguments.append(self.tape[self.ip + param_num])
elif param_mode == ParameterMode.RELATIVE:
position = self.tape[self.ip + param_num] + self.relative_base
arguments.append(self.tape[position])
else:
raise TypeError(f"unknown parameter mode {param_mode}")
return (opcode, arguments)
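    # Worked example (annotation only, not in the original source): with
    # tape = [1002, 4, 3, 4, 33] and ip = 0, instruction 1002 decodes to
    # opcode 2 (mul) with parameter modes (positional, immediate, positional),
    # so decode_instruction() returns (2, [33, 3, 4]) -- the value at
    # position 4, the literal 3, and the write position 4.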
def execute(self) -> Union[Optional[int], bool]:
"""Execute the instructions contained in the VM memory."""
while self.ip < len(self.program):
# self.execution_trace[self.ip] = self._disasm()
opcode, params = self.decode_instruction()
if opcode == 1:
self.tape[params[2]] = params[0] + params[1]
self.ip += 1 + len(params)
elif opcode == 2:
self.tape[params[2]] = params[0] * params[1]
self.ip += 1 + len(params)
elif opcode == 3:
if self.chained_mode and self.inputs:
value = self.inputs.popleft()
else:
value = int(input("$ "))
self.last_input = self.tape[params[0]] = value
self.ip += 1 + len(params)
elif opcode == 4:
self.last_output = params[0]
self.ip += 1 + len(params)
if self.chained_mode:
return True
else:
print(self.last_output)
elif opcode == 5:
self.ip = params[1] if params[0] else self.ip + 1 + len(params)
elif opcode == 6:
self.ip = params[1] if not params[0] else self.ip + 1 + len(params)
elif opcode == 7:
self.tape[params[2]] = 1 if params[0] < params[1] else 0
self.ip += 1 + len(params)
elif opcode == 8:
self.tape[params[2]] = 1 if params[0] == params[1] else 0
self.ip += 1 + len(params)
elif opcode == 9:
self.relative_base += params[0]
self.ip += 1 + len(params)
elif opcode == 99:
if self.chained_mode:
return False
else:
return self.last_output
raise EOFError("reached end of tape without finding halt instruction.")
def reset(self) -> None:
"""Reset the VM state before starting a new execution."""
self.tape = self.program[:]
# add extra memory space for data buffer
self.tape += [0] * max(1024, len(self.program) * 3)
self.ip = 0
self.relative_base = 0
# self.execution_trace = {}
def set_inputs(self, inputs: List[int]) -> None:
"""Set the inputs for the VM to read."""
self.inputs = deque(inputs)
def set_noun_and_verb(self, noun: int, verb: int) -> None:
"""Set the noun and verb to initialize the program."""
self.tape[1] = noun
self.tape[2] = verb
if __name__ == "__main__":
program: List[int] = []
with open(sys.argv[1]) as inf:
for line in inf:
program += list(map(int, line.strip().split(",")))
try:
vm = Intcode(program)
vm.execute()
# addrs = list(vm.execution_trace.keys())
# addrs.sort()
# for addr in addrs:
# print(f"{vm.execution_trace[addr]}")
# for ip in range(addrs[-1] + 1, len(vm.program)):
# print(f"{ip:5d}: {vm.program[ip]}")
except Exception:
traceback.print_exc()
pdb.post_mortem()
| mit | -8,612,136,722,465,389,000 | 37.862637 | 93 | 0.548565 | false | 3.846112 | false | false | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/ubiquity/ubiquity/i18n.py | 1 | 12630 | # -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright (C) 2006, 2007, 2008 Canonical Ltd.
# Written by Colin Watson <[email protected]>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
import subprocess
import codecs
import os
import locale
import sys
from ubiquity import misc, im_switch
# if 'just_country' is True, only the country is changing
def reset_locale(frontend, just_country=False):
frontend.start_debconf()
di_locale = frontend.db.get('debian-installer/locale')
if not di_locale:
# TODO cjwatson 2006-07-17: maybe fetch
# languagechooser/language-name and set a language based on
# that?
di_locale = 'en_US.UTF-8'
if 'LANG' not in os.environ or di_locale != os.environ['LANG']:
os.environ['LANG'] = di_locale
os.environ['LANGUAGE'] = di_locale
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error, e:
print >>sys.stderr, 'locale.setlocale failed: %s (LANG=%s)' % \
(e, di_locale)
if not just_country:
misc.execute_root('fontconfig-voodoo',
'--auto', '--force', '--quiet')
im_switch.start_im()
return di_locale
_strip_context_re = None
def strip_context(unused_question, string):
# po-debconf context
global _strip_context_re
if _strip_context_re is None:
_strip_context_re = re.compile(r'\[\s[^\[\]]*\]$')
string = _strip_context_re.sub('', string)
return string
_translations = None
def get_translations(languages=None, core_names=[], extra_prefixes=[]):
"""Returns a dictionary {name: {language: description}} of translatable
strings.
If languages is set to a list, then only languages in that list will be
translated. If core_names is also set to a list, then any names in that
list will still be translated into all languages. If either is set, then
the dictionary returned will be built from scratch; otherwise, the last
cached version will be returned."""
global _translations
if _translations is None or languages is not None or core_names or extra_prefixes:
if languages is None:
use_langs = None
else:
use_langs = set('c')
for lang in languages:
ll_cc = lang.lower().split('.')[0]
ll = ll_cc.split('_')[0]
use_langs.add(ll_cc)
use_langs.add(ll)
prefixes = 'ubiquity|partman/text/undo_everything|partman/text/unusable|partman-basicfilesystems/bad_mountpoint|partman-basicfilesystems/text/specify_mountpoint|partman-basicmethods/text/format|partman-newworld/no_newworld|partman-partitioning|partman-target/no_root|partman-target/text/method|grub-installer/bootdev|popularity-contest/participate'
prefixes = reduce(lambda x, y: x+'|'+y, extra_prefixes, prefixes)
_translations = {}
devnull = open('/dev/null', 'w')
db = subprocess.Popen(
['debconf-copydb', 'templatedb', 'pipe',
'--config=Name:pipe', '--config=Driver:Pipe',
'--config=InFd:none',
'--pattern=^(%s)' % prefixes],
stdout=subprocess.PIPE, stderr=devnull,
# necessary?
preexec_fn=misc.regain_privileges)
question = None
descriptions = {}
fieldsplitter = re.compile(r':\s*')
for line in db.stdout:
line = line.rstrip('\n')
if ':' not in line:
if question is not None:
_translations[question] = descriptions
descriptions = {}
question = None
continue
(name, value) = fieldsplitter.split(line, 1)
if value == '':
continue
name = name.lower()
if name == 'name':
question = value
elif name.startswith('description'):
namebits = name.split('-', 1)
if len(namebits) == 1:
lang = 'c'
else:
lang = namebits[1].lower()
# TODO: recode from specified encoding
lang = lang.split('.')[0]
if (use_langs is None or lang in use_langs or
question in core_names):
value = strip_context(question, value)
descriptions[lang] = value.replace('\\n', '\n')
elif name.startswith('extended_description'):
namebits = name.split('-', 1)
if len(namebits) == 1:
lang = 'c'
else:
lang = namebits[1].lower()
# TODO: recode from specified encoding
lang = lang.split('.')[0]
if (use_langs is None or lang in use_langs or
question in core_names):
value = strip_context(question, value)
if lang not in descriptions:
descriptions[lang] = value.replace('\\n', '\n')
# TODO cjwatson 2006-09-04: a bit of a hack to get the
# description and extended description separately ...
if question in ('grub-installer/bootdev',
'partman-newworld/no_newworld',
'ubiquity/text/error_updating_installer'):
descriptions["extended:%s" % lang] = \
value.replace('\\n', '\n')
db.wait()
devnull.close()
return _translations
string_questions = {
'new_size_label': 'partman-partitioning/new_size',
'partition_create_heading_label': 'partman-partitioning/text/new',
'partition_create_type_label': 'partman-partitioning/new_partition_type',
'partition_create_mount_label': 'partman-basicfilesystems/text/specify_mountpoint',
'partition_create_use_label': 'partman-target/text/method',
'partition_create_place_label': 'partman-partitioning/new_partition_place',
'partition_edit_use_label': 'partman-target/text/method',
'partition_edit_format_label': 'partman-basicmethods/text/format',
'partition_edit_mount_label': 'partman-basicfilesystems/text/specify_mountpoint',
'grub_device_dialog': 'grub-installer/bootdev',
'grub_device_label': 'grub-installer/bootdev',
# TODO: it would be nice to have a neater way to handle stock buttons
'quit': 'ubiquity/imported/quit',
'back': 'ubiquity/imported/go-back',
'cancelbutton': 'ubiquity/imported/cancel',
'exitbutton': 'ubiquity/imported/quit',
'closebutton1': 'ubiquity/imported/close',
'cancelbutton1': 'ubiquity/imported/cancel',
'okbutton1': 'ubiquity/imported/ok',
}
string_extended = set()
def map_widget_name(prefix, name):
"""Map a widget name to its translatable template."""
if prefix is None:
prefix = 'ubiquity/text'
if '/' in name:
question = name
elif name in string_questions:
question = string_questions[name]
else:
question = '%s/%s' % (prefix, name)
return question
def get_string(name, lang, prefix=None):
"""Get the translation of a single string."""
question = map_widget_name(prefix, name)
translations = get_translations()
if question not in translations:
return None
if lang is None:
lang = 'c'
else:
lang = lang.lower()
if name in string_extended:
lang = 'extended:%s' % lang
if lang in translations[question]:
text = translations[question][lang]
else:
ll_cc = lang.split('.')[0]
ll = ll_cc.split('_')[0]
if ll_cc in translations[question]:
text = translations[question][ll_cc]
elif ll in translations[question]:
text = translations[question][ll]
elif lang.startswith('extended:'):
text = translations[question]['extended:c']
else:
text = translations[question]['c']
return unicode(text, 'utf-8', 'replace')
# Based on code by Walter Dörwald:
# http://mail.python.org/pipermail/python-list/2007-January/424460.html
def ascii_transliterate(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
import unicodedata
s = unicodedata.normalize('NFD', exc.object[exc.start])[:1]
if ord(s) in range(128):
return s, exc.start + 1
else:
return u'', exc.start + 1
codecs.register_error('ascii_transliterate', ascii_transliterate)
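# Example (illustrative): with this handler registered, accented characters fall
# back to their base letter and other non-ASCII characters are dropped, e.g.
#   u'Dörwald'.encode('ascii', 'ascii_transliterate') == 'Dorwald'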
# Returns a tuple of (current language, sorted choices, display map).
def get_languages(current_language_index=-1, only_installable=False):
import gzip
import PyICU
current_language = "English"
if only_installable:
from apt.cache import Cache
#workaround for an issue where euid != uid and the
#apt cache has not yet been loaded causing a SystemError
#when libapt-pkg tries to load the Cache the first time.
with misc.raised_privileges():
cache = Cache()
languagelist = gzip.open('/usr/lib/ubiquity/localechooser/languagelist.data.gz')
language_display_map = {}
i = 0
for line in languagelist:
line = unicode(line, 'utf-8')
if line == '' or line == '\n':
continue
code, name, trans = line.strip(u'\n').split(u':')[1:]
if code in ('C', 'dz', 'km'):
i += 1
continue
if only_installable:
pkg_name = 'language-pack-%s' % code
#special case these
if pkg_name.endswith('_CN'):
pkg_name = 'language-pack-zh-hans'
elif pkg_name.endswith('_TW'):
pkg_name = 'language-pack-zh-hant'
elif pkg_name.endswith('_NO'):
pkg_name = pkg_name.split('_NO')[0]
elif pkg_name.endswith('_BR'):
pkg_name = pkg_name.split('_BR')[0]
try:
pkg = cache[pkg_name]
if not (pkg.installed or pkg.candidate):
i += 1
continue
except KeyError:
i += 1
continue
language_display_map[trans] = (name, code)
if i == current_language_index:
current_language = trans
i += 1
languagelist.close()
if only_installable:
del cache
try:
# Note that we always collate with the 'C' locale. This is far
# from ideal. But proper collation always requires a specific
# language for its collation rules (languages frequently have
# custom sorting). This at least gives us common sorting rules,
# like stripping accents.
collator = PyICU.Collator.createInstance(PyICU.Locale('C'))
except:
collator = None
def compare_choice(x):
if language_display_map[x][1] == 'C':
return None # place C first
if collator:
try:
return collator.getCollationKey(x).getByteArray()
except:
pass
# Else sort by unicode code point, which isn't ideal either,
# but also has the virtue of sorting like-glyphs together
return x
sorted_choices = sorted(language_display_map, key=compare_choice)
return current_language, sorted_choices, language_display_map
def default_locales():
languagelist = open('/usr/lib/ubiquity/localechooser/languagelist')
defaults = {}
for line in languagelist:
line = unicode(line, 'utf-8')
if line == '' or line == '\n':
continue
bits = line.strip(u'\n').split(u';')
code = bits[0]
locale = bits[4]
defaults[code] = locale
languagelist.close()
return defaults
# vim:ai:et:sts=4:tw=80:sw=4:
| gpl-3.0 | -2,029,924,248,393,744,000 | 36.698507 | 356 | 0.588645 | false | 4.005392 | false | false | false |
antont/tundra | src/Application/PythonScriptModule/pymodules_old/simiangrid/auth.py | 1 | 2238 | #httplib was ok and httplib2 especially had nice api, but they don't work thru proxies and stuff
#-- curl is the most robust thing
#import httplib
import curl #a high level wrapper over pycurl bindings
import json
import hashlib #only 'cause has a hardcoded pwd here now - for real this comes from connection or launcher
try:
import naali
except ImportError:
naali = None #so that can test standalone too, without Naali
else:
import circuits
class SimiangridAuthentication(circuits.BaseComponent):
pass #put disconnecting to on_exit here to not leave old versions while reloading
url = "http://localhost/Grid/"
c = curl.Curl()
def simiangrid_auth(url, username, md5hex):
params = {'RequestMethod': 'AuthorizeIdentity',
'Identifier': username,
'Type': 'md5hash',
'Credential': md5hex}
rdata = c.post(url, params)
print rdata
r = json.loads(rdata)
#http://code.google.com/p/openmetaverse/wiki/AuthorizeIdentity
success = r.get('Success', False)
    #NOTE: docs say the reply should have Success:false upon failure.
    #However, in my test run it doesn't -- it only carries a Message about missing/invalid creds.
    #This code works for that case too.
return success
def on_connect(conn_id, userconn):
print userconn.GetLoginData()
username = userconn.GetProperty("username")
username = username.replace('_', ' ') #XXX HACK: tundra login doesn't allow spaces, whereas simiangrid frontend demands them
pwd = userconn.GetProperty("password")
md5hex = hashlib.md5(pwd).hexdigest()
success = simiangrid_auth(url, username, md5hex)
print "Authentication success:", success, "for", conn_id, userconn
if not success:
userconn.DenyConnection()
if naali is not None:
s = naali.server
if s.IsAboutToStart():
s.connect("UserAboutToConnect(int, UserConnection*)", on_connect)
print "simiangrid/auth.py running on server - hooked to authorize connections"
else:
on_connect(17, {'username': "Lady Tron",
'password': "They only want you when you're seventeen"})
"""
{ "Success":true, "UserID":"fe5f5ac3-7b28-4276-ae50-133db72040f0" }
Authentication success: True
"""
| apache-2.0 | -1,794,691,461,481,782,000 | 33.430769 | 128 | 0.689455 | false | 3.723794 | false | false | false |
MarkusHackspacher/PythonFarmGame | farmlib/expbar.py | 1 | 2110 | '''
Created on 31-05-2012
@author: orneo1212
'''
import pygame
from pygameui import Label
class ExpBar(Label):
"""ExpBar class
"""
def __init__(self, player):
self.player = player
self.oldexp = -1.0
Label.__init__(self, "", (9, 58))
def update_text(self):
"""update text
:return:
"""
# get data
exp = self.player.exp
nextlvlexp = self.player.nextlvlexp
level = self.player.level
self.oldexp = self.player.exp
# calculate progress and set text
progress = int(exp / nextlvlexp * 100)
self.settext("Level: " + str(level) + " Exp: {0!s}/{1!s} ({2!s} %)".
format(int(exp), int(nextlvlexp), progress))
def update(self):
"""update
:return:
"""
if self.oldexp != self.player.exp:
self.repaint()
def repaint(self):
"""repaint
:return:
"""
self.update_text()
self.size = self.width, self.height = ((48 + 2) * 6 - 1, 16)
self.create_widget_image()
# draw background
pygame.draw.rect(self.img, (0, 32, 0),
(1, 1, self.width - 1, self.height - 1))
# draw background (progress)
progresswidth = self.width / self.player.nextlvlexp * self.player.exp
pygame.draw.rect(self.img, (0, 100, 0),
(1, 1, int(progresswidth) - 1, self.height - 1))
# draw border
pygame.draw.rect(self.img, (0, 255, 0),
(1, 1, self.width - 1, self.height - 1), 1)
# draw text
text = self.gettext()
txtimg = self.labelfont.render(text, 0, (64, 255, 100), (255, 0, 255))
txtimg.set_colorkey((255, 0, 255))
# Draw centered
px = self.width / 2 - txtimg.get_size()[0] / 2
py = self.height / 2 - txtimg.get_size()[1] / 2
self.img.blit(txtimg, (px, py))
def redraw(self, surface):
"""redraw
:param surface:
:return:
"""
surface.blit(self.img, self.position)
| gpl-3.0 | 8,185,247,811,750,345,000 | 26.402597 | 78 | 0.507109 | false | 3.425325 | false | false | false |
simpeg/simpeg | SimPEG/EM/Static/IP/Run.py | 1 | 2114 | import numpy as np
from SimPEG import (Maps, DataMisfit, Regularization,
Optimization, Inversion, InvProblem, Directives)
def run_inversion(
m0, survey, actind, mesh,
std, eps,
maxIter=15, beta0_ratio=1e0,
coolingFactor=5, coolingRate=2,
upper=np.inf, lower=-np.inf,
use_sensitivity_weight=False,
alpha_s=1e-4,
alpha_x=1.,
alpha_y=1.,
alpha_z=1.,
):
"""
Run IP inversion
"""
dmisfit = DataMisfit.l2_DataMisfit(survey)
uncert = abs(survey.dobs) * std + eps
dmisfit.W = 1./uncert
# Map for a regularization
regmap = Maps.IdentityMap(nP=int(actind.sum()))
# Related to inversion
if use_sensitivity_weight:
reg = Regularization.Sparse(
mesh, indActive=actind, mapping=regmap
)
reg.alpha_s = alpha_s
reg.alpha_x = alpha_x
reg.alpha_y = alpha_y
reg.alpha_z = alpha_z
else:
reg = Regularization.Sparse(
mesh, indActive=actind, mapping=regmap,
cell_weights=mesh.vol[actind]
)
reg.alpha_s = alpha_s
reg.alpha_x = alpha_x
reg.alpha_y = alpha_y
reg.alpha_z = alpha_z
opt = Optimization.ProjectedGNCG(maxIter=maxIter, upper=upper, lower=lower)
invProb = InvProblem.BaseInvProblem(dmisfit, reg, opt)
beta = Directives.BetaSchedule(
coolingFactor=coolingFactor, coolingRate=coolingRate
)
betaest = Directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio)
target = Directives.TargetMisfit()
    # Need to have basic saving function
if use_sensitivity_weight:
updateSensW = Directives.UpdateSensitivityWeights()
update_Jacobi = Directives.UpdatePreconditioner()
directiveList = [
beta, betaest, target, update_Jacobi
]
else:
directiveList = [
beta, betaest, target
]
inv = Inversion.BaseInversion(
invProb, directiveList=directiveList
)
opt.LSshorten = 0.5
opt.remember('xc')
# Run inversion
mopt = inv.run(m0)
return mopt, invProb.dpred
| mit | 3,890,722,355,732,215,300 | 28.361111 | 79 | 0.621097 | false | 3.36089 | false | false | false |
howknows/Ropper | ropperapp/disasm/chain/arch/ropchainx86.py | 1 | 36189 | # coding=utf-8
#
# Copyright 2014 Sascha Schirra
#
# This file is part of Ropper.
#
# Ropper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ropper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ropperapp.disasm.gadget import Category
from ropperapp.common.error import *
from ropperapp.common.utils import *
from ropperapp.disasm.rop import Ropper
from ropperapp.disasm.arch import x86
from ropperapp.disasm.chain.ropchain import *
from ropperapp.loaders.loader import Type
from re import match
import itertools
import math
class RopChainX86(RopChain):
MAX_QUALI = 7
def _printHeader(self):
toReturn = ''
toReturn += ('#!/usr/bin/env python\n')
toReturn += ('# Generated by ropper ropchain generator #\n')
toReturn += ('from struct import pack\n')
toReturn += ('\n')
toReturn += ('p = lambda x : pack(\'I\', x)\n')
toReturn += ('\n')
return toReturn
def _printRebase(self):
toReturn = ''
for binary,section in self._usedBinaries:
imageBase = binary.manualImagebase + section.offset if binary.manualImagebase != None else section.virtualAddress
toReturn += ('IMAGE_BASE_%d = %s # %s\n' % (self._usedBinaries.index((binary, section)),toHex(imageBase , 4), binary.fileName))
toReturn += ('rebase_%d = lambda x : p(x + IMAGE_BASE_%d)\n\n'% (self._usedBinaries.index((binary, section)),self._usedBinaries.index((binary, section))))
return toReturn
@classmethod
def name(cls):
return ''
@classmethod
def availableGenerators(cls):
return [RopChainX86System, RopChainX86Mprotect, RopChainX86VirtualProtect]
@classmethod
def archs(self):
return [x86]
def _createDependenceChain(self, gadgets):
"""
gadgets - list with tuples
tuple contains:
- method to create chaingadget
- list with arguments
- dict with named arguments
        - list with registers filled by this step which must not be overwritten by later gadgets
"""
failed = []
cur_len = 0
cur_chain = ''
counter = 0
max_perm = math.factorial(len(gadgets))
for x in itertools.permutations(gadgets):
counter += 1
            self._printer.puts('\r[*] Try permutation %d / %d' % (counter, max_perm))
found = False
for y in failed:
if x[:len(y)] == y:
found = True
break
if found:
continue
try:
fail = []
chain2 = ''
dontModify = []
badRegs = []
c = 0
for idx in range(len(x)):
g = x[idx]
if idx != 0:
badRegs.extend(x[idx-1][3])
dontModify.extend(g[3])
fail.append(g)
chain2 += g[0](*g[1], badRegs=badRegs, dontModify=dontModify,**g[2])[0]
cur_chain += chain2
break
except RopChainError as e:
pass
if len(fail) > cur_len:
cur_len = len(fail)
cur_chain = '# Filled registers: '
for fa in fail[:-1]:
cur_chain += (fa[2]['reg']) + ', '
cur_chain += '\n'
cur_chain += chain2
failed.append(tuple(fail))
else:
self._printer.println('')
self._printer.printInfo('Cannot create chain which fills all registers')
# print('Impossible to create complete chain')
self._printer.println('')
return cur_chain
def _isModifiedOrDereferencedAccess(self, gadget, dontModify):
regs = []
for line in gadget.lines[1:]:
line = line[1]
if '[' in line:
return True
if dontModify:
m = match('[a-z]+ (e?[abcds][ixlh]),?.*', line)
if m and m.group(1) in dontModify:
return True
return False
def _paddingNeededFor(self, gadget):
regs = []
for idx in range(1,len(gadget.lines)):
line = gadget.lines[idx][1]
matched = match('^pop (...)$', line)
if matched:
regs.append(matched.group(1))
return regs
def _printRopInstruction(self, gadget, padding=True):
toReturn = ('rop += rebase_%d(%s) # %s\n' % (self._usedBinaries.index((gadget._binary, gadget._section)),toHex(gadget.lines[0][0],4), gadget.simpleInstructionString()))
if padding:
regs = self._paddingNeededFor(gadget)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
return toReturn
def _printAddString(self, string):
return ('rop += \'%s\'\n' % string)
def _printRebasedAddress(self, addr, comment='', idx=0):
return ('rop += rebase_%d(%s)\n' % (idx,addr))
def _printPaddingInstruction(self, addr='0xdeadbeef'):
return ('rop += p(%s)\n' % addr)
def _containsZeroByte(self, addr):
return addr & 0xff == 0 or addr & 0xff00 == 0 or addr & 0xff0000 == 0 or addr & 0xff000000 == 0
def _createZeroByteFillerForSub(self, number):
start = 0x01010101
for i in xrange(start, 0x02020202):
if not self._containsZeroByte(i) and not self._containsZeroByte(i+number):
return i
def _createZeroByteFillerForAdd(self, number):
start = 0x01010101
for i in xrange(start, 0x02020202):
if not self._containsZeroByte(i) and not self._containsZeroByte(number-i):
return i
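    # Note on the fillers above: to load a constant that contains a NUL byte
    # without placing NUL bytes on the stack, the chain pops a filler F and
    # either F + number (SUB gadget: dst - src == number) or number - F
    # (ADD gadget: dst + src == number); F is chosen so that neither popped
    # value contains a zero byte.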
def _find(self, category, reg=None, srcdst='dst', badDst=[], badSrc=None, dontModify=None, srcEqDst=False, switchRegs=False ):
quali = 1
while quali < RopChainX86System.MAX_QUALI:
for binary in self._binaries:
for section, gadgets in binary.gadgets.items():
for gadget in gadgets:
if gadget.category[0] == category and gadget.category[1] == quali:
if badSrc and gadget.category[2]['src'] in badSrc:
continue
if badDst and gadget.category[2]['dst'] in badDst:
continue
if not gadget.lines[len(gadget.lines)-1][1].strip().endswith('ret') or 'esp' in gadget.simpleString():
continue
if srcEqDst and (not (gadget.category[2]['dst'] == gadget.category[2]['src'])):
continue
elif not srcEqDst and 'src' in gadget.category[2] and (gadget.category[2]['dst'] == gadget.category[2]['src']):
continue
if self._isModifiedOrDereferencedAccess(gadget, dontModify):
continue
if reg:
if gadget.category[2][srcdst] == reg:
if (gadget._binary, gadget._section) not in self._usedBinaries:
self._usedBinaries.append((gadget._binary, gadget._section))
return gadget
elif switchRegs:
other = 'src' if srcdst == 'dst' else 'dst'
if gadget.category[2][other] == reg:
if (gadget._binary, gadget._section) not in self._usedBinaries:
self._usedBinaries.append((gadget._binary, gadget._section))
return gadget
else:
if (gadget._binary, gadget._section) not in self._usedBinaries:
self._usedBinaries.append((gadget._binary, gadget._section))
return gadget
quali += 1
def _createWriteStringWhere(self, what, where, reg=None, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build writewhatwhere gadget!')
            write4 = self._find(Category.WRITE_MEM, reg=popReg.category[2]['dst'],
                                badDst=badDst, srcdst='src')
if not write4:
badRegs.append(popReg.category[2]['dst'])
continue
else:
popReg2 = self._find(Category.LOAD_REG, reg=write4.category[2]['dst'], dontModify=[popReg.category[2]['dst']]+dontModify)
if not popReg2:
badDst.append(write4.category[2]['dst'])
continue
else:
break;
if len(what) % 4 > 0:
what += ' ' * (4 - len(what) % 4)
toReturn = ''
for index in range(0,len(what),4):
part = what[index:index+4]
toReturn += self._printRopInstruction(popReg,False)
toReturn += self._printAddString(part)
regs = self._paddingNeededFor(popReg)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(where+index,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(write4)
return (toReturn,popReg.category[2]['dst'], popReg2.category[2]['dst'])
def _createWriteRegValueWhere(self, what, where, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
write4 = self._find(Category.WRITE_MEM, reg=what, badDst=badDst, dontModify=dontModify, srcdst='src')
if not write4:
raise RopChainError('Cannot build writewhatwhere gadget!')
else:
popReg2 = self._find(Category.LOAD_REG, reg=write4.category[2]['dst'], dontModify=[what]+dontModify)
if not popReg2:
badDst.append(write4.category[2]['dst'])
continue
else:
break;
toReturn = self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(where,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(write4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createLoadRegValueFrom(self, what, from_reg, dontModify=[], idx=0):
try:
return self._createLoadRegValueFromMov(what, from_reg, dontModify, idx)
except:
return self._createLoadRegValueFromXchg(what, from_reg, dontModify, idx)
def _createLoadRegValueFromMov(self, what, from_reg, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
load4 = self._find(Category.LOAD_MEM, reg=what, badDst=badDst, dontModify=dontModify, srcdst='dst')
if not load4:
raise RopChainError('Cannot build loadwhere gadget!')
else:
popReg2 = self._find(Category.LOAD_REG, reg=load4.category[2]['src'], dontModify=[what,load4.category[2]['src']]+dontModify)
if not popReg2:
badDst.append(load4.category[2]['src'])
continue
else:
break;
toReturn = self._printRopInstruction(popReg2, False)
        toReturn += self._printRebasedAddress(toHex(from_reg,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(load4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createLoadRegValueFromXchg(self, what, from_reg, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
load4 = self._find(Category.XCHG_REG, reg=what, badDst=badDst, dontModify=dontModify, srcdst='src')
if not load4:
raise RopChainError('Cannot build loadwhere gadget!')
else:
mov = self._find(Category.LOAD_MEM, reg=load4.category[2]['dst'], badDst=badDst, dontModify=[load4.category[2]['dst']]+dontModify, srcdst='dst')
if not mov:
badDst.append(load4.category[2]['dst'])
continue
popReg2 = self._find(Category.LOAD_REG, reg=mov.category[2]['src'], dontModify=[what,load4.category[2]['src']]+dontModify)
if not popReg2:
badDst.append(load4.category[2]['src'])
continue
else:
break;
toReturn = self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(from_reg,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(mov)
toReturn += self._printRopInstruction(load4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createNumberSubtract(self, number, reg=None, badRegs=None, dontModify=None):
if not badRegs:
badRegs=[]
while True:
sub = self._find(Category.SUB_REG, reg=reg, badDst=badRegs, badSrc=badRegs, dontModify=dontModify)
if not sub:
raise RopChainError('Cannot build number with subtract gadget for reg %s!' % reg)
popSrc = self._find(Category.LOAD_REG, reg=sub.category[2]['src'], dontModify=dontModify)
if not popSrc:
                badRegs.append(sub.category[2]['src'])
continue
popDst = self._find(Category.LOAD_REG, reg=sub.category[2]['dst'], dontModify=[sub.category[2]['src']]+dontModify)
if not popDst:
                badRegs.append(sub.category[2]['dst'])
continue
else:
break;
filler = self._createZeroByteFillerForSub(number)
toReturn = self._printRopInstruction(popSrc, False)
toReturn += self._printPaddingInstruction(toHex(filler,4))
regs = self._paddingNeededFor(popSrc)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(popDst, False)
toReturn += self._printPaddingInstruction(toHex(filler+number,4))
regs = self._paddingNeededFor(popDst)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(sub)
return (toReturn, popDst.category[2]['dst'],popSrc.category[2]['dst'])
def _createNumberAddition(self, number, reg=None, badRegs=None, dontModify=None):
if not badRegs:
badRegs=[]
while True:
sub = self._find(Category.ADD_REG, reg=reg, badDst=badRegs, badSrc=badRegs, dontModify=dontModify)
if not sub:
raise RopChainError('Cannot build number with addition gadget for reg %s!' % reg)
popSrc = self._find(Category.LOAD_REG, reg=sub.category[2]['src'], dontModify=dontModify)
if not popSrc:
                badRegs.append(sub.category[2]['src'])
continue
popDst = self._find(Category.LOAD_REG, reg=sub.category[2]['dst'], dontModify=[sub.category[2]['src']]+dontModify)
if not popDst:
badRegs.append(sub.category[2]['dst'])
continue
else:
break;
filler = self._createZeroByteFillerForAdd(number)
toReturn = self._printRopInstruction(popSrc, False)
toReturn += self._printPaddingInstruction(toHex(filler,4))
regs = self._paddingNeededFor(popSrc)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(popDst, False)
toReturn += self._printPaddingInstruction(toHex(number - filler,4))
regs = self._paddingNeededFor(popDst)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(sub)
return (toReturn, popDst.category[2]['dst'],popSrc.category[2]['dst'])
def _createNumberPop(self, number, reg=None, badRegs=None, dontModify=None):
while True:
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build number with xor gadget!')
incReg = self._find(Category.INC_REG, reg=popReg.category[2]['dst'], dontModify=dontModify)
if not incReg:
if not badRegs:
badRegs = []
badRegs.append(popReg.category[2]['dst'])
else:
break
toReturn = self._printRopInstruction(popReg)
toReturn += self._printPaddingInstruction(toHex(0xffffffff,4))
for i in range(number+1):
toReturn += self._printRopInstruction(incReg)
return (toReturn ,popReg.category[2]['dst'],)
def _createNumberXOR(self, number, reg=None, badRegs=None, dontModify=None):
while True:
clearReg = self._find(Category.CLEAR_REG, reg=reg, badDst=badRegs, badSrc=badRegs,dontModify=dontModify, srcEqDst=True)
if not clearReg:
raise RopChainError('Cannot build number with xor gadget!')
if number > 0:
incReg = self._find(Category.INC_REG, reg=clearReg.category[2]['src'], dontModify=dontModify)
if not incReg:
if not badRegs:
badRegs = []
badRegs.append(clearReg.category[2]['src'])
else:
break
else:
break
toReturn = self._printRopInstruction(clearReg)
for i in range(number):
toReturn += self._printRopInstruction(incReg)
return (toReturn, clearReg.category[2]['dst'],)
def _createNumberXchg(self, number, reg=None, badRegs=None, dontModify=None):
xchg = self._find(Category.XCHG_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not xchg:
raise RopChainError('Cannot build number gadget with xchg!')
other = xchg.category[2]['src'] if xchg.category[2]['dst'] else xchg.category[2]['dst']
toReturn = self._createNumber(number, other, badRegs, dontModify)[0]
toReturn += self._printRopInstruction(xchg)
return (toReturn, reg, other)
def _createNumberNeg(self, number, reg=None, badRegs=None, dontModify=None):
if number == 0:
raise RopChainError('Cannot build number gadget with neg if number is 0!')
neg = self._find(Category.NEG_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not neg:
raise RopChainError('Cannot build number gadget with neg!')
pop = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not pop:
raise RopChainError('Cannot build number gadget with neg!')
toReturn = self._printRopInstruction(pop)
toReturn += self._printPaddingInstruction(toHex((~number)+1)) # two's complement
toReturn += self._printRopInstruction(neg)
return (toReturn, reg,)
def _createNumber(self, number, reg=None, badRegs=None, dontModify=None, xchg=True):
try:
if self._containsZeroByte(number):
try:
return self._createNumberNeg(number, reg, badRegs,dontModify)
except RopChainError as e:
if number < 50:
try:
return self._createNumberXOR(number, reg, badRegs,dontModify)
except RopChainError:
try:
return self._createNumberPop(number, reg, badRegs,dontModify)
except RopChainError:
try:
return self._createNumberSubtract(number, reg, badRegs,dontModify)
except RopChainError:
return self._createNumberAddition(number, reg, badRegs,dontModify)
else :
try:
return self._createNumberSubtract(number, reg, badRegs,dontModify)
except RopChainError:
return self._createNumberAddition(number, reg, badRegs,dontModify)
else:
popReg =self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build number gadget!')
toReturn = self._printRopInstruction(popReg)
toReturn += self._printPaddingInstruction(toHex(number,4))
return (toReturn , popReg.category[2]['dst'])
except:
return self._createNumberXchg(number, reg, badRegs, dontModify)
def _createAddress(self, address, reg=None, badRegs=None, dontModify=None):
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build address gadget!')
toReturn = ''
toReturn += self._printRopInstruction(popReg,False)
toReturn += self._printRebasedAddress(toHex(address, 4), idx=self._usedBinaries.index((popReg._binary, popReg._section)))
regs = self._paddingNeededFor(popReg)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
return (toReturn,popReg.category[2]['dst'])
def _createSyscall(self, reg=None, badRegs=None, dontModify=None):
syscall = self._find(Category.SYSCALL, reg=None, badDst=None, dontModify=dontModify)
if not syscall:
raise RopChainError('Cannot build syscall gadget!')
toReturn = ''
toReturn += self._printRopInstruction(syscall)
return (toReturn,)
def _createOpcode(self, opcode):
return self._printRopInstruction(self._searchOpcode(opcode))
def _searchOpcode(self, opcode):
r = Ropper(self._binaries[0])
gadgets = []
for section in self._binaries[0].executableSections:
vaddr = section.virtualAddress
gadgets.extend(
r.searchOpcode(section.bytes, opcode.decode('hex'), section.offset, True, section=section))
if len(gadgets) > 0:
return gadgets[0]
else:
            raise RopChainError('Cannot create gadget for opcode: %s' % opcode)
def create(self):
pass
class RopChainX86System(RopChainX86):
@classmethod
def name(cls):
return 'execve'
def _createCommand(self, what, where, reg=None, dontModify=[], idx=0):
if len(what) % 4 > 0:
what = '/' * (4 - len(what) % 4) + what
return self._createWriteStringWhere(what,where, idx=idx)
def create(self, cmd='/bin/sh'):
if len(cmd.split(' ')) > 1:
raise RopChainError('No argument support for execve commands')
self._printer.printInfo('ROPchain Generator for syscall execve:\n')
self._printer.println('\nwrite command into data section\neax 0xb\nebx address to cmd\necx address to null\nedx address to null\n')
section = self._binaries[0].getSection(b'.data')
length = math.ceil(float(len(cmd))/4) * 4
chain = self._printHeader()
chain_tmp = '\n'
chain_tmp += self._createCommand(cmd,section.struct.sh_offset+0x1000)[0]
badregs = []
while True:
ret = self._createNumber(0x0, badRegs=badregs)
chain_tmp += ret[0]
try:
chain_tmp += self._createWriteRegValueWhere(ret[1], section.struct.sh_offset+0x1000+length)[0]
break
except BaseException as e:
raise e
badregs.append(ret[1])
gadgets = []
gadgets.append((self._createAddress, [section.struct.sh_offset+0x1000],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']))
gadgets.append((self._createAddress, [section.struct.sh_offset+0x1000+length],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']))
gadgets.append((self._createAddress, [section.struct.sh_offset+0x1000+length],{'reg':'edx'},['edx', 'dx', 'dl', 'dh']))
gadgets.append((self._createNumber, [0xb],{'reg':'eax'},['eax', 'ax', 'al', 'ah']))
self._printer.printInfo('Try to create chain which fills registers without delete content of previous filled registers')
chain_tmp += self._createDependenceChain(gadgets)
try:
self._printer.printInfo('Look for syscall gadget')
chain_tmp += self._createSyscall()[0]
self._printer.printInfo('syscall gadget found')
except RopChainError:
try:
self._printer.printInfo('No syscall gadget found!')
self._printer.printInfo('Look for int 0x80 opcode')
chain_tmp += self._createOpcode('cd80')
self._printer.printInfo('int 0x80 opcode found')
except:
try:
self._printer.printInfo('No int 0x80 opcode found')
self._printer.printInfo('Look for call gs:[0x10] opcode')
chain_tmp += self._createOpcode('65ff1510000000')
self._printer.printInfo('call gs:[0x10] found')
except RopChainError:
self._printer.printInfo('No call gs:[0x10] opcode found')
chain += self._printRebase()
chain += 'rop = \'\'\n'
chain += chain_tmp
chain += 'print rop'
print(chain)
class RopChainX86Mprotect(RopChainX86):
"""
Builds a ropchain for mprotect syscall
    eax 0x7d
ebx address
ecx size
edx 0x7 -> RWE
"""
@classmethod
def name(cls):
return 'mprotect'
def _createJmp(self, reg='esp'):
r = Ropper(self._binaries[0])
gadgets = []
for section in self._binaries[0].executableSections:
vaddr = section.virtualAddress
gadgets.extend(
r.searchJmpReg(section.bytes, reg, vaddr, section=section))
if len(gadgets) > 0:
if (gadgets[0]._binary, gadgets[0]._section) not in self._usedBinaries:
self._usedBinaries.append((gadgets[0]._binary, gadgets[0]._section))
return self._printRopInstruction(gadgets[0])
else:
return None
def __extract(self, param):
        if not match('0x[0-9a-fA-F]{1,8},0x[0-9a-fA-F]+', param) and not match('0x[0-9a-fA-F]{1,8},[0-9]+', param):
            raise RopChainError('Parameter has to have the following format: <hexnumber>,<hexnumber> or <hexnumber>,<number>')
split = param.split(',')
if isHex(split[1]):
return (int(split[0], 16), int(split[1], 16))
else:
return (int(split[0], 16), int(split[1], 10))
def create(self, param=None):
if not param:
raise RopChainError('Missing parameter: address:size')
address, size = self.__extract(param)
self._printer.printInfo('ROPchain Generator for syscall mprotect:\n')
        self._printer.println('eax 0x7d\nebx address\necx size\nedx 0x7 -> RWE\n')
chain = self._printHeader()
chain += 'shellcode = \'\\xcc\'*100\n\n'
gadgets = []
gadgets.append((self._createNumber, [address],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']))
gadgets.append((self._createNumber, [size],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']))
gadgets.append((self._createNumber, [0x7],{'reg':'edx'},['edx', 'dx', 'dl', 'dh']))
gadgets.append((self._createNumber, [0x7d],{'reg':'eax'},['eax', 'ax', 'al', 'ah']))
self._printer.printInfo('Try to create chain which fills registers without delete content of previous filled registers')
chain_tmp = ''
chain_tmp += self._createDependenceChain(gadgets)
try:
self._printer.printInfo('Look for syscall gadget')
chain_tmp += self._createSyscall()[0]
self._printer.printInfo('syscall gadget found')
except RopChainError:
chain_tmp += '\n# ADD HERE SYSCALL GADGET\n\n'
self._printer.printInfo('No syscall gadget found!')
self._printer.printInfo('Look for jmp esp')
jmp_esp = self._createJmp()
if jmp_esp:
self._printer.printInfo('jmp esp found')
chain_tmp += jmp_esp
else:
            self._printer.printInfo('no jmp esp found')
chain_tmp += '\n# ADD HERE JMP ESP\n\n'
chain += self._printRebase()
chain += '\nrop = \'\'\n'
chain += chain_tmp
chain += 'rop += shellcode\n\n'
chain += 'print(rop)\n'
print(chain)
class RopChainX86VirtualProtect(RopChainX86):
"""
Builds a ropchain for a VirtualProtect call using pushad
eax 0x90909090
ecx old protection (writable addr)
edx 0x40 (RWE)
ebx size
esp address
ebp return address (jmp esp)
esi pointer to VirtualProtect
edi ret (rop nop)
"""
@classmethod
def name(cls):
return 'virtualprotect'
def _createPushad(self):
pushad = self._find(Category.PUSHAD)
if pushad:
return self._printRopInstruction(pushad)
else:
self._printer.printInfo('No pushad found!')
return '# Add here PUSHAD gadget!'
def _createJmp(self, reg='esp'):
r = Ropper(self._binaries[0])
gadgets = []
for section in self._binaries[0].executableSections:
vaddr = section.offset
gadgets.extend(
r.searchJmpReg(section.bytes, reg, vaddr, section=section))
if len(gadgets) > 0:
if (gadgets[0]._binary, gadgets[0]._section) not in self._usedBinaries:
self._usedBinaries.append((gadgets[0]._binary, gadgets[0]._section))
return gadgets[0]
else:
return ''
def __extract(self, param):
if (not match('0x[0-9a-fA-F]{1,8},0x[0-9a-fA-F]+', param)) and (not match('0x[0-9a-fA-F]+', param)):
            raise RopChainError('Parameter has to have the following format: <hexnumber>,<hexnumber> or <hexnumber>')
split = param.split(',')
if len(split) == 2:
if isHex(split[1]):
return (int(split[0], 16), int(split[1], 16))
else:
return (None, int(split[0], 16))
def __getVirtualProtectEntry(self):
for binary in self._binaries:
if binary.type == Type.PE:
s = binary.sections['.idata']
for descriptorData in s.importDescriptorTable:
for function in descriptorData.functions:
if str(function[1]) == 'VirtualProtect':
return function[2]
else:
self._printer.printError('File is not a PE file.')
return None
def create(self, param=None):
if not param:
raise RopChainError('Missing parameter: address,size or size')
self._printer.printInfo('Ropchain Generator for VirtualProtect:\n')
self._printer.println('eax 0x90909090\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (jmp esp)\nesi pointer to VirtualProtect\nedi ret (rop nop)\n')
address, size = self.__extract(param)
given = False
if not address:
address = self.__getVirtualProtectEntry()
if not address:
self._printer.printError('No IAT-Entry for VirtualProtect found!')
raise RopChainError('No IAT-Entry for VirtualProtect found and no address is given')
else:
given = True
writeable_ptr = self._binaries[0].getWriteableSection().offset + 0x4
jmp_esp = self._createJmp()
ret_addr = self._searchOpcode('c3')
chain = self._printHeader()
chain += '\n\nshellcode = \'\\xcc\'*100\n\n'
gadgets = []
to_extend = []
chain_tmp = ''
try:
self._printer.printInfo('Try to create gadget to fill esi with content of IAT address: %s' % address)
chain_tmp += self._createLoadRegValueFrom('esi', address)[0]
if given:
gadgets.append((self._createNumber, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al','esi','si']))
else:
gadgets.append((self._createAddress, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al','esi','si']))
to_extend = ['esi','si']
except:
self._printer.printInfo('Cannot create fill esi gadget!')
self._printer.printInfo('Try to create this chain:\n')
self._printer.println('eax Pointer to VirtualProtect\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (jmp esp)\nesi pointer to jmp [eax]\nedi ret (rop nop)\n')
jmp_eax = self._searchOpcode('ff20') # jmp [eax]
gadgets.append((self._createAddress, [jmp_eax.lines[0][0]],{'reg':'esi'},['esi','si']))
if given:
gadgets.append((self._createNumber, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al']))
else:
gadgets.append((self._createAddress, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al']))
gadgets.append((self._createNumber, [size],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']+to_extend))
gadgets.append((self._createAddress, [writeable_ptr],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']+to_extend))
gadgets.append((self._createAddress, [jmp_esp.lines[0][0]],{'reg':'ebp'},['ebp', 'bp']+to_extend))
gadgets.append((self._createNumber, [0x40],{'reg':'edx'},['edx', 'dx', 'dh', 'dl']+to_extend))
gadgets.append((self._createAddress, [ret_addr.lines[0][0]],{'reg':'edi'},['edi', 'di']+to_extend))
self._printer.printInfo('Try to create chain which fills registers without delete content of previous filled registers')
chain_tmp += self._createDependenceChain(gadgets)
self._printer.printInfo('Look for pushad gadget')
chain_tmp += self._createPushad()
chain += self._printRebase()
chain += 'rop = \'\'\n'
chain += chain_tmp
chain += 'rop += shellcode\n\n'
chain += 'print(rop)\n'
print(chain)
| gpl-2.0 | -8,572,569,844,129,845,000 | 39.707537 | 218 | 0.562022 | false | 3.858925 | false | false | false |
TunnelBlanket/Houdini | Houdini/Data/Stamp.py | 1 | 1225 | # coding: utf-8
from sqlalchemy import Column, Integer, SmallInteger, text, ForeignKey
from sqlalchemy.orm import relationship
from Houdini.Data import Base
metadata = Base.metadata
class Stamp(Base):
__tablename__ = 'stamp'
PenguinID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True, nullable=False)
Stamp = Column(SmallInteger, primary_key=True, nullable=False)
Recent = Column(Integer, nullable=False, server_default=text("1"))
penguin = relationship(u'Penguin')
class CoverStamp(Base):
__tablename__ = 'cover_stamps'
PenguinID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True, nullable=False)
Stamp = Column(SmallInteger, primary_key=True, nullable=False, server_default=text("0"))
X = Column(SmallInteger, nullable=False, server_default=text("0"))
Y = Column(SmallInteger, nullable=False, server_default=text("0"))
Type = Column(SmallInteger, nullable=False, server_default=text("0"))
Rotation = Column(SmallInteger, nullable=False, server_default=text("0"))
Depth = Column(SmallInteger, nullable=False, server_default=text("0"))
penguin = relationship(u'Penguin') | mit | -1,121,676,306,723,954,800 | 41.275862 | 125 | 0.726531 | false | 3.431373 | false | false | false |
Crompulence/cpl-library | examples/interactive_plot_example/python/CFD_recv_and_plot_grid_interactive.py | 1 | 3724 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from mpi4py import MPI
from cplpy import CPL
from draw_grid import draw_grid
#initialise MPI and CPL
comm = MPI.COMM_WORLD
CPL = CPL()
CFD_COMM = CPL.init(CPL.CFD_REALM)
nprocs_realm = CFD_COMM.Get_size()
# Parameters of the cpu topology (cartesian grid)
npxyz = np.array([1, 1, 1], order='F', dtype=np.int32)
NProcs = np.product(npxyz)
xyzL = np.array([10.0, 10.0, 10.0], order='F', dtype=np.float64)
xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
ncxyz = np.array([16, 6, 16], order='F', dtype=np.int32)
if (nprocs_realm != NProcs):
print("Non-coherent number of processes in CFD ", nprocs_realm,
" no equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2])
    comm.Abort(errorcode=1)
#Setup coupled simulation
cart_comm = CFD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)
#Plot output
fig, ax = plt.subplots(1,1)
plt.subplots_adjust(bottom=0.25)
axslider = plt.axes([0.25, 0.1, 0.65, 0.03])
freq = 1.
sfreq = Slider(axslider, 'Freq', 0.1, 2.0, valinit=freq)
def update(val):
    global freq
    freq = sfreq.val
print("CHANGED", freq)
sfreq.on_changed(update)
plt.ion()
plt.show()
# === Plot both grids ===
dx = CPL.get("xl_cfd")/float(CPL.get("ncx"))
dy = CPL.get("yl_cfd")/float(CPL.get("ncy"))
dz = CPL.get("zl_cfd")/float(CPL.get("ncz"))
ioverlap = (CPL.get("icmax_olap")-CPL.get("icmin_olap")+1)
joverlap = (CPL.get("jcmax_olap")-CPL.get("jcmin_olap")+1)
koverlap = (CPL.get("kcmax_olap")-CPL.get("kcmin_olap")+1)
xoverlap = ioverlap*dx
yoverlap = joverlap*dy
zoverlap = koverlap*dz
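# dx, dy, dz are the CFD cell sizes; x/y/zoverlap give the physical extent of
# the CFD-MD overlap region, reconstructed from the coupler's cell-index limits.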
for time in range(100000):
# recv data to plot
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
recv_array = np.zeros((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
recv_array, ierr = CPL.recv(recv_array, olap_limits)
#Plot CFD and coupler Grid
draw_grid(ax,
nx=CPL.get("ncx"),
ny=CPL.get("ncy"),
nz=CPL.get("ncz"),
px=CPL.get("npx_cfd"),
py=CPL.get("npy_cfd"),
pz=CPL.get("npz_cfd"),
xmin=CPL.get("x_orig_cfd"),
ymin=CPL.get("y_orig_cfd"),
zmin=CPL.get("z_orig_cfd"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=CPL.get("yl_cfd"),
zmax=(CPL.get("kcmax_olap")+1)*dz,
lc = 'r',
label='CFD')
#Plot MD domain
draw_grid(ax, nx=1, ny=1, nz=1,
px=CPL.get("npx_md"),
py=CPL.get("npy_md"),
pz=CPL.get("npz_md"),
xmin=CPL.get("x_orig_md"),
ymin=-CPL.get("yl_md")+yoverlap,
zmin=CPL.get("z_orig_md"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=yoverlap,
zmax=(CPL.get("kcmax_olap")+1)*dz,
label='MD')
#Plot x component on grid
x = np.linspace(CPL.get("x_orig_cfd")+.5*dx,xoverlap-.5*dx,ioverlap)
z = np.linspace(CPL.get("z_orig_cfd")+.5*dz,zoverlap-.5*dz,koverlap)
for j in range(joverlap):
ax.plot(x, 0.5*dy*(recv_array[0,:,j,0]+1.+2*j), 's-')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print(time, freq)
plt.pause(0.1)
ax.cla()
# send data to update
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
send_array = freq*np.ones((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
CPL.send(send_array, olap_limits)
CPL.finalize()
MPI.Finalize()
| gpl-3.0 | 4,246,740,477,422,610,000 | 30.033333 | 81 | 0.583512 | false | 2.528174 | false | false | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Tasking/Mcl_Cmd_Put_Tasking.py | 1 | 6883 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_Put_Tasking.py
UPLOADS_DIR = 'Uploads'
MAX_CHUNK_SIZE = 1047552
def TaskingMain(namespace):
import mcl.imports
import mcl.target
import mcl.tasking
import mcl.tasking.env
import mcl.tasking.resource
import mcl.tasking.technique
import mcl.tasking.virtualdir
from mcl.object.Message import MarshalMessage
mcl.imports.ImportWithNamespace(namespace, 'mca.file.cmd.put', globals())
mcl.imports.ImportWithNamespace(namespace, 'mca.file.cmd.put.tasking', globals())
lpParams = mcl.tasking.GetParameters()
if lpParams['chunksize'] == 0 or lpParams['chunksize'] >= MAX_CHUNK_SIZE:
mcl.tasking.OutputError('Invalid chunkSize given')
return False
else:
provider = mcl.tasking.technique.Lookup('PUT', mcl.tasking.technique.TECHNIQUE_MCL_NTNATIVEAPI, lpParams['method'])
if lpParams['source'] == None or len(lpParams['source']) == 0:
mcl.tasking.OutputError('No local file given')
return False
local = lpParams['source']
if lpParams['remote'] == None or len(lpParams['remote']) == 0:
if local.find('\\') != -1 or local.find('/') != -1:
mcl.tasking.OutputError('You must specify a remote file name if you specify a path for the local file')
return False
remote = local
else:
remote = lpParams['remote']
resFlags = 0
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_ARCH
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_OS
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_LIBC
if lpParams['compiled']:
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_COMPILED
f, openedName, usedProject = mcl.tasking.resource.Open(local, resFlags, UPLOADS_DIR, lpParams['project'])
if f == None:
mcl.tasking.OutputError("Failed to open local file '%s'" % local)
return False
try:
import os.path
import array
fileSize = os.path.getsize(openedName)
if fileSize == 0 or fileSize > 4294967295:
mcl.tasking.OutputError("Invalid file size (%u) for put of '%s'" % (fileSize, openedName))
return False
taskXml = mcl.tasking.Tasking()
taskXml.AddProvider(mcl.tasking.technique.TECHNIQUE_MCL_NTNATIVEAPI, provider)
mcl.tasking.OutputXml(taskXml.GetXmlObject())
from mcl.object.XmlOutput import XmlOutput
xml = XmlOutput()
xml.Start('PutFile')
xml.AddAttribute('name', openedName)
xml.AddAttribute('size', '%u' % fileSize)
mcl.tasking.OutputXml(xml)
fileBytes = array.array('B', f.read())
if len(fileBytes) != fileSize:
mcl.tasking.OutputError('Failed to read file (read=%u | expected=%u)' % (len(fileBytes), fileSize))
return False
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_PUT_COMPLETE, 'false')
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_BYTES_LEFT, '%u' % fileSize)
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_FILE_SIZE, '%u' % fileSize)
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_FILE_OPENED, 'false')
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED, 'false')
finally:
f.close()
f = None
createParams = mca.file.cmd.put.CreateParams()
createParams.writeOffset = 0
createParams.provider = provider
if lpParams['permanent']:
createParams.flags |= mca.file.cmd.put.PARAMS_CREATE_FLAG_PERMANENT
try:
createParams.filePath = mcl.tasking.virtualdir.GetFullPath(remote)
except:
mcl.tasking.OutputError('Failed to apply virtual directory to remote name')
return False
rpc = mca.file.cmd.put.tasking.RPC_INFO_CREATE
msg = MarshalMessage()
createParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.file.cmd.put.errorStrings)
return False
import time
while not mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_FILE_OPENED):
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
time.sleep(1)
chunkIndex = 0
bytesLeft = fileSize
while bytesLeft > 0:
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
numBytesToSend = bytesLeft
if numBytesToSend > lpParams['chunksize']:
numBytesToSend = lpParams['chunksize']
startIndex = fileSize - bytesLeft
endIndex = startIndex + numBytesToSend
writeParams = mca.file.cmd.put.WriteParams()
writeParams.data = fileBytes[startIndex:endIndex]
writeParams.chunkIndex = chunkIndex
if numBytesToSend >= bytesLeft:
writeParams.lastData = True
chunkIndex = chunkIndex + 1
rpc = mca.file.cmd.put.tasking.RPC_INFO_WRITE
msg = MarshalMessage()
writeParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.file.cmd.put.errorStrings)
return False
newBytesLeft = bytesLeft
while newBytesLeft == bytesLeft:
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
time.sleep(1)
newBytesLeft = int(mcl.tasking.env.GetValue(mca.file.cmd.put.LP_ENV_BYTES_LEFT))
bytesLeft = newBytesLeft
while not mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_PUT_COMPLETE):
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
time.sleep(1)
if not lpParams['permanent']:
mcl.tasking.TaskGoToBackground()
while not mcl.CheckForStop():
time.sleep(1)
return mcl.tasking.TaskSetStatus(mcl.target.CALL_SUCCEEDED)
if __name__ == '__main__':
import sys
if TaskingMain(sys.argv[1]) != True:
sys.exit(-1) | unlicense | -8,070,547,693,328,371,000 | 44.289474 | 123 | 0.611361 | false | 3.531555 | false | false | false |
helewonder/knightgame | wargame/game.py | 1 | 3956 | from hut import Hut, create_unit
from functions import print_bold, print_dotted_line, show_health, \
print_wave_line
from knight import Knight
from uniterror import HutNotNumberError, HutOutRangeError
class OrGame():
"""
    The Game class - the main driver of the game.
"""
def __init__(self, hut_numbers=5):
"""get the game ready with scenario ready, default have 5huts.
:param hut_numbers: in the game, how many huts
:type hut_numbers: int
"""
self.acquired_all_huts = False
self.huts = []
self.player = None
self.hut_numbers = hut_numbers
@property
def get_occupants(self):
"""Show all huts with it's occupant
:return: the message each hut with occupant
:rtype: basestring
"""
msg = "["
for hut in self.huts:
msg += str(hut.number) + ":" + hut.get_occupant_type + ", "
msg += '\b\b]'
return msg
def _process_user_choice(self):
verifying_choice = True
idx = 0
print_dotted_line()
print("Current Occupants:\n\t%s" % self.get_occupants)
print_dotted_line()
while verifying_choice:
user_choice = input(
"Choose a hut number to enter(1~" + str(
self.hut_numbers) + "):")
try:
if not user_choice.isdigit():
raise HutNotNumberError(
"Your input '{}' is not number.".format(user_choice))
idx = int(user_choice)
if idx > self.hut_numbers or idx < 0:
raise HutOutRangeError(
"input not in range(1~" + str(self.hut_numbers) + ")")
except HutNotNumberError as e:
print_wave_line()
print(e)
print(e.error_message)
print_wave_line()
continue
except HutOutRangeError as e:
print_wave_line()
print(e)
print(e.error_message)
print_wave_line()
continue
if self.huts[idx - 1].is_acquired:
print(
"You have already acquired this hut. Try again",
"<Info:You can NOT get healed in already acquired hut.>"
)
else:
verifying_choice = False
return idx
def play(self):
"""
Workhorse method to play the game....
Create a Knight instance, create huts and preoccupy them with a game
Character instance (or leave empty)
"""
self.setup_game_scenario()
while not self.acquired_all_huts:
idx = self._process_user_choice()
self.player.acquire_hut(self.huts[idx - 1])
if self.player.health_meter <= 0:
print("You Lose :( Better luck next time")
break
for hut in self.huts:
if not hut.is_acquired:
break
else:
self.acquired_all_huts = True
if self.acquired_all_huts:
print_bold("You Win!!! Congratulations!!!!!!")
def setup_game_scenario(self):
"""
Create player and huts and then randomly pre-occupy huts...
"""
self.player = Knight("Sir Foo")
for number in range(self.hut_numbers):
self.huts.append(Hut(number + 1, create_unit()))
self._show_mission()
# print_bold("Current Occupants:", self.get_occupants)
show_health(self.player, bold=True, end='\n')
@staticmethod
def _show_mission():
print_dotted_line()
print_bold("Welcome to Play the Knight Game!", end='\n')
print_dotted_line()
print_bold("Mission:")
print("\t1. Defeat the enemy in any hut")
print("\t2. Bring all huts in the village under your contral")
| mit | -2,352,176,094,657,368,000 | 30.903226 | 78 | 0.523509 | false | 4.016244 | false | false | false |
Mach33Labs/labautomation | github.py | 1 | 5717 | #!/usr/bin/python
import datetime
import fcntl
import github3
import gzip
import json
import os
import re
import select
import socket
import subprocess
import sys
import time
import mysql.connector
TARGET_VM = 'devosa'
TARGET_IP_BLOCK = '192.168.53.0/24'
with open(os.path.expanduser('~/.github_automation'), 'r') as f:
config = json.loads(f.read())
ISSUE_URL_RE = re.compile('https://api.github.com/repos/(.*)/(.*)/issues/(.*)')
def github_comment(issue_url, comment):
if not issue_url:
return
g = github3.login(config['github_user'], config['github_password'])
m = ISSUE_URL_RE.match(issue_url)
if not m:
print 'Could not parse issue URL!'
return
issue = g.issue(m.group(1), m.group(2), int(m.group(3)))
issue.create_comment(comment)
def process(job):
ostrich_sha = job['sha']
if job['project'] != 'ostrich':
ostrich_sha = 'master'
state = {}
with open('state.json.%s' % job['flavour'], 'r') as f:
state = json.loads(f.read())
state['complete']['osa-branch'] = job['branch']
state['complete']['ironic-ip-block'] = TARGET_IP_BLOCK
with open('state.json', 'w') as f:
f.write(json.dumps(state, indent=4, sort_keys=True))
short_branch = job['branch'].replace('stable/', '')
now = datetime.datetime.now()
job_id = ('%04d%02d%02d-%02d%02d-%s-%s-%s'
%(now.year, now.month, now.day, now.hour, now.minute,
job['project'], short_branch, job['sha']))
job['short_branch'] = short_branch
job['job_id'] = job_id
job['timestamp'] = job['timestamp'].isoformat()
with open('job.json', 'w') as f:
f.write(json.dumps(job, indent=4, sort_keys=True))
with gzip.open('%s.log.gz' % job_id, 'w') as f:
rc = execute('sudo ./reset_osa.sh %s %s %s %s %s'
%(TARGET_VM, job['distro'], ostrich_sha, job_id,
job['project']), f)
return (rc, job_id)
def execute(command, logfile):
print('Running %s' % command)
obj = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
flags = fcntl.fcntl(obj.stdout, fcntl.F_GETFL)
fcntl.fcntl(obj.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
flags = fcntl.fcntl(obj.stderr, fcntl.F_GETFL)
fcntl.fcntl(obj.stderr, fcntl.F_SETFL, flags | os.O_NONBLOCK)
obj.stdin.close()
while obj.poll() is None:
readable, _, _ = select.select([obj.stderr, obj.stdout], [], [], 10)
for f in readable:
d = os.read(f.fileno(), 10000)
sys.stdout.write(d)
logfile.write(d)
logfile.flush()
print('... process complete')
returncode = obj.returncode
print('... exit code %d' % returncode)
return returncode
def main():
while True:
conn = mysql.connector.connect(**config['db'])
cursor = conn.cursor(dictionary=True, buffered=True)
cursor.execute('select * from jobs where processed=0 and '
'machine is null order by timestamp;')
if cursor.rowcount == 0:
print '%s No work, sleeping' % datetime.datetime.now()
time.sleep(60)
continue
job = cursor.fetchone()
cursor.execute('update jobs set machine=%(machine)s where '
'uuid=%(uuid)s and machine is null;',
{
'machine': socket.gethostname(),
'uuid': job['uuid']
})
if cursor.rowcount == 0:
print 'My job got stolen (id %s)!' % job['uuid']
continue
cursor.execute('commit;')
start_time = time.time()
rc, job_id = process(job)
end_time = time.time()
conn = mysql.connector.connect(**config['db'])
cursor = conn.cursor(dictionary=True, buffered=True)
cursor.execute('update jobs set processed=1, outcome=%(outcome)s, '
'log_url=%(log_url)s '
'where uuid=%(uuid)s;',
{
'outcome': rc,
'log_url': ('http://molokai.stillhq.com/lab/logs/%s/'
% job_id),
'uuid': job['uuid']
})
cursor.execute('commit;')
cursor.execute('select * from jobs where event_uuid=%(event_uuid)s;',
{'event_uuid': job['event_uuid']})
report = []
unrun = 0
for job in cursor:
outcome = ''
if str(job['outcome']) == '0':
outcome = 'passed'
elif job['outcome']:
outcome = 'failed'
else:
unrun += 1
outcome = 'not yet run'
logs = ''
if job['log_url']:
logs = ', logs at %s' % job['log_url']
report.append('%s on %s %s%s' %(job['branch'], job['distro'],
outcome, logs))
comment = 'Tests run on %s:' % job['sha']
for r in report:
comment += ('\n %s' % r)
print 'Unrun: %d' % unrun
print comment
if unrun == 0:
github_comment(job['issue_url'], comment)
#if rc != 0:
# print 'Failed test run, stopping to debug'
# sys.exit(1)
if job['type'] == 'manual':
print 'Manual job, stopping for user to operate'
sys.exit(1)
if __name__ == '__main__':
main()
| apache-2.0 | 634,645,749,023,385,500 | 29.902703 | 80 | 0.509183 | false | 3.783587 | false | false | false |
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_aaa_locald_cfg.py | 1 | 1133 | """ Cisco_IOS_XR_aaa_locald_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR aaa\-locald package configuration.
This YANG module augments the
Cisco\-IOS\-XR\-aaa\-lib\-cfg
module with configuration data.
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class AaaLocaldTaskClassEnum(Enum):
"""
AaaLocaldTaskClassEnum
Aaa locald task class
.. data:: READ = 0
Permits read operation for a Task ID
.. data:: WRITE = 1
Permits write operation for a Task ID
.. data:: EXECUTE = 2
Permits execute operation for a Task ID
.. data:: DEBUG = 3
Permits debug operation for a Task ID
"""
READ = 0
WRITE = 1
EXECUTE = 2
DEBUG = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_locald_cfg as meta
return meta._meta_table['AaaLocaldTaskClassEnum']
| apache-2.0 | -94,935,661,741,093,040 | 16.166667 | 86 | 0.677846 | false | 3.529595 | false | false | false |
acsone/alfodoo | cmis_web_proxy/controllers/cmis.py | 1 | 18684 | # Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import json
import logging
import urlparse
import werkzeug
from odoo import _, http
from odoo.http import request
from odoo.exceptions import AccessError
from odoo.addons.web.controllers import main
_logger = logging.getLogger(__name__)
try:
import requests
except ImportError:
_logger.debug('Cannot `import requests`.')
CMIS_PROXY_PATH = '/cmis/1.1/browser'
READ_ACCESS_CMIS_ACTIONS = set([
"query",
])
WRITE_ACCESS_CMIS_ACTIONS = set([
"createRelationship",
# "createPolicy", method at repository level: not supported
# "createItem", method at repository level: not supported
"bulkUpdate",
# "createType", method at repository level: not supported
# "updateType", method at repository level: not supported
"createDocument",
"createFolder",
"createDocumentFromSource",
# "createPolicy", method at repository level: not supported
"update",
"setContent",
"checkOut",
"cancelCheckOut",
"checkIn",
# "applyPolicy", method at repository level: not supported
# "applyACL", method at repository level: not supported
])
UNLINK_ACCESS_CMIS_ACTIONS = set([
"delete",
"deleteContent",
"removeObjectFromFolder",
# "removePolicy", method at repository level: not supported
# "deleteType", method at repository level: not supported
])
READ_ACCESS_ALLOWABLE_ACTIONS = set([
"canGetDescendants",
"canGetChildren",
"canGetFolderParent",
"canGetObjectParents",
"canGetProperties",
"canGetContentStream",
"canGetAllVersions",
"canGetObjectRelationships",
"canGetAppliedPolicies",
"canGetACL",
])
WRITE_ACCESS_ALLOWABLE_ACTIONS = set([
"canCreateDocument",
"canCreateFolder",
# "canCreatePolicy",
"canCreateRelationship",
"canUpdateProperties",
"canMoveObject",
"canSetContentStream",
"canAddObjectToFolder",
"canCheckOut",
"canCancelCheckOut",
"canCheckIn",
# "canApplyPolicy",
# "canApplyACL",
])
UNLINK_ACCESS_ALLOWABLE_ACTIONS = set([
"canRemoveObjectFromFolder",
"canDeleteObject",
"canDeleteContentStream",
"canDeleteTree",
# "canRemovePolicy",
])
CMSI_ACTIONS_OPERATION_MAP = {}
for a in READ_ACCESS_CMIS_ACTIONS:
CMSI_ACTIONS_OPERATION_MAP[a] = 'read'
for a in WRITE_ACCESS_CMIS_ACTIONS:
CMSI_ACTIONS_OPERATION_MAP[a] = 'write'
for a in UNLINK_ACCESS_CMIS_ACTIONS:
CMSI_ACTIONS_OPERATION_MAP[a] = 'unlink'
def gen_dict_extract(key, var):
""" This method is used to recusrively find into a json structure (dict)
all values of a given key
credits: http://stackoverflow.com/questions/9807634/
find-all-occurences-of-a-key-in-nested-python-dictionaries-and-lists
"""
if hasattr(var, 'items'):
for k, v in var.items():
if k == key:
yield v
if isinstance(v, dict):
for result in gen_dict_extract(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in gen_dict_extract(key, d):
yield result
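# Quick illustration (a sketch, not part of the original module) of what
# gen_dict_extract yields for a nested structure; the sample data below is
# made up:
#
#     doc = {'a': {'allowableActions': {'canGetChildren': True}},
#            'b': [{'allowableActions': {'canDeleteObject': False}}]}
#     list(gen_dict_extract('allowableActions', doc))
#     # yields {'canGetChildren': True} and {'canDeleteObject': False}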
class CmisProxy(http.Controller):
@property
def _cmis_proxy_base_url(self):
return urlparse.urljoin(request.httprequest.host_url, CMIS_PROXY_PATH)
@classmethod
def _clean_url_in_dict(cls, values, original, new):
"""Replace all occurences of the CMIS container url in the json
returned by a call to the CMIS container by the one of the proxy"""
if original.endswith('/'):
original = original[:-1]
for k, v in values.items():
if isinstance(v, dict):
cls._clean_url_in_dict(v, original, new)
elif hasattr(v, 'replace'):
values[k] = v.replace(original, new)
def _check_access_operation(self, model_inst, operation):
"""
Check if the user has the appropriate rights to perform the operation.
The default is to check the access rights and access rules on the
model instance. This behaviour can be adapted by defining the method
''_check_cmis_access_operation'' on the model.
::
@api.multi
def _check_cmis_access_operation(self, operation, field_name=None):
if my_true_condition:
return 'allow'
if my_false_condition:
return 'deny'
return 'default'
The expected result must be in ('allow', 'deny', 'default').
* allow: Access granted
* deny: Access Denied
* default: The current method will check the access rights and access
rules
"""
try:
if hasattr(model_inst, '_check_cmis_access_operation'):
res = model_inst._check_cmis_access_operation(operation, None)
if res not in ('allow', 'deny', 'default'):
raise ValueError("_check_cmis_access_operation result "
"must be in ('allow', 'deny', 'default')")
if res != 'default':
return res == 'allow'
model_inst.check_access_rights(operation)
model_inst.check_access_rule(operation)
except AccessError:
return False
return True
def _apply_permissions_mapping(self, value, headers, proxy_info,
model_inst=None):
"""This method modify the defined allowableActions returned by the
CMIS container to apply the Odoo operation policy defined of the
model instance
"""
if not model_inst:
return
all_allowable_actions = [aa for aa in gen_dict_extract(
'allowableActions', value)]
if not all_allowable_actions:
return
can_read = self._check_access_operation(model_inst, 'read')
can_write = self._check_access_operation(model_inst, 'write')
can_unlink = self._check_access_operation(model_inst, 'unlink')
for allowable_actions in all_allowable_actions:
for action, val in allowable_actions.items():
allowed = False
if action in READ_ACCESS_ALLOWABLE_ACTIONS:
allowed = can_read and val
elif action in WRITE_ACCESS_ALLOWABLE_ACTIONS:
allowed = can_write and val
elif action in UNLINK_ACCESS_ALLOWABLE_ACTIONS:
allowed = can_unlink and val
allowable_actions[action] = allowed
def _sanitize_headers(self, headers):
for key in headers:
if key.lower() == 'transfer-encoding':
headers[key] = None
def _prepare_json_response(self, value, headers, proxy_info,
model_inst=None):
cmis_location = proxy_info['location']
self._clean_url_in_dict(value,
urlparse.urlparse(cmis_location).geturl(),
proxy_info['proxy_location'])
if proxy_info['apply_odoo_security']:
self._apply_permissions_mapping(
value, headers, proxy_info, model_inst)
self._sanitize_headers(headers)
response = werkzeug.Response(
json.dumps(value), mimetype='application/json',
headers=headers)
return response
@classmethod
def _get_redirect_url(cls, proxy_info, url_path):
cmis_location = proxy_info['location']
return urlparse.urljoin(cmis_location, url_path)
def _forward_get_file(self, url, proxy_info, params):
"""Method called to retrieved the content associated to a CMIS object.
The content is streamed between the CMIS container and the caller to
avoid to suck the server memory
:return: :class:`Response <Response>` object
:rtype: werkzeug.Response
"""
r = requests.get(
url, params=params,
stream=True,
auth=(proxy_info['username'], proxy_info['password']))
r.raise_for_status()
headers = dict(r.headers.items())
self._sanitize_headers(headers)
return werkzeug.Response(
r, headers=headers,
direct_passthrough=True)
def _forward_get(self, url_path, proxy_info, model_inst, params):
"""
:return: :class:`Response <Response>` object
:rtype: werkzeug.Response
"""
url = self._get_redirect_url(proxy_info, url_path)
if params.get('cmisselector') == 'content':
return self._forward_get_file(url, proxy_info, params)
r = requests.get(
url, params=params,
auth=(proxy_info['username'], proxy_info['password']))
r.raise_for_status()
if r.text:
return self._prepare_json_response(
r.json(), dict(r.headers.items()), proxy_info, model_inst)
else:
response = werkzeug.Response()
return response
def _forward_post(self, url_path, proxy_info, model_inst, params):
"""The CMIS Browser binding is designed to be queried from the browser
Therefore, the parameters in a POST are expected to be submitted as
HTTP multipart forms. Therefore each parameter in the request is
forwarded as a part of a multipart/form-data.
:return: :class:`Response <Response>` object
:rtype: werkzeug.Response
"""
files = {}
if 'content' in params:
# we are in a mulitpart form data'
content = params.pop('content')
files['content'] = (
content.filename,
content.stream,
content.mimetype
)
for k, v in params.items():
# no filename for parts dedicated to HTTP Form data
files[k] = (None, v, 'text/plain;charset=utf-8')
url = self._get_redirect_url(proxy_info, url_path)
r = requests.post(url, files=files,
auth=(
proxy_info['username'], proxy_info['password']))
r.raise_for_status()
if r.text:
return self._prepare_json_response(
r.json(), dict(r.headers.items()), proxy_info, model_inst)
else:
response = werkzeug.Response()
return response
def _check_provided_token(self, cmis_path, proxy_info, params):
""" Check that a token is present in the request or in the http
headers and both are equal.
:return: the token value if checks are OK, False otherwise.
"""
token = request.httprequest.headers.get('Authorization')
if token:
token = token.replace('Bearer', '').strip()
else:
token = (params.get('token') or '').strip()
if 'token' in params:
params.pop('token')
if not token:
_logger.info("Tokens not provided in headers or request params")
return False
return token
def _decode_token(self, cmis_path, proxy_info, params,
token):
"""Return the Odoo object referenced by the token and the field name
for which the query is done
:return: a tuple (Odoo model instance if exists and user has at least
read access or False, field_name)
"""
token = json.loads(token)
model_name = token.get('model')
false_result = False, False
res_id = token.get('res_id')
if model_name not in request.env:
_logger.info("Invalid model name in token (%s)", model_name)
return false_result
model = request.env[model_name]
if not model.check_access_rights('read', raise_exception=False):
_logger.info("User has no read access on model %s", model_name)
return false_result
model_inst = model.browse(res_id)
if not model_inst.exists():
_logger.info("The referenced model doesn't exist or the user has "
"no read access (%s, %s)", model, res_id)
return false_result
return model_inst, token.get('field_name')
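    # Illustrative shape of the JSON token that _check_provided_token and
    # _decode_token expect; the model/field names below are hypothetical:
    #
    #     {"model": "res.partner", "res_id": 42, "field_name": "cmis_folder"}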
def _check_cmis_content_access(self, cmis_path, proxy_info, params,
model_inst, field_name):
"""Check that the CMIS content referenced into the request is the
same as or a child of the one linked to the odoo model instance.
:return: True if check is Ok False otherwise
"""
token_cmis_objectid = getattr(model_inst, field_name)
if not token_cmis_objectid:
_logger.info("The referenced model doesn't reference a CMIS "
"content (%s, %s)", model_inst._name, model_inst.id)
return False
request_cmis_objectid = params.get('renderedObjectId')
if request_cmis_objectid:
# If the proxy is called to render a cmis content, we need to check
# the original objectId since the one provided by the rendition
# service has no paths
params.pop('renderedObjectId')
else:
request_cmis_objectid = params.get('objectId')
repo = proxy_info['cmis_repository']
if not request_cmis_objectid:
# get the CMIS object id from cmis_path
cmis_content = repo.getObjectByPath(cmis_path)
request_cmis_objectid = cmis_content.getObjectId()
if request_cmis_objectid == token_cmis_objectid:
# the operation is on the CMIS content linked to the Odoo model
# instance
return True
cmis_object = repo.getObject(request_cmis_objectid)
# We can't use a CMIS query to check if a node is in the expected
# tree since the indexation is asynchronous. In place of a simple
# query we check if one of the paths of the node linked to the Odoo
# content instance is in one of the node paths of the requested
# cmis_object
child_paths = cmis_object.getPaths()
parent_paths = repo.getObject(token_cmis_objectid).getPaths()
for p in parent_paths:
for cp in child_paths:
if p in cp:
return True
_logger.info("%s is not a child of %s", request_cmis_objectid,
token_cmis_objectid)
return False
def _check_content_action_access(self, cmis_path, proxy_info, params,
model_inst):
"""Check that the User has de required Permissioon on the Odoo model
instance to di the expected CMIS action
"""
cmisaction = params.get('cmisaction')
if not cmisaction:
return True
operation = CMSI_ACTIONS_OPERATION_MAP.get(cmisaction)
if not operation:
_logger.info("CMIS action %s not supported", cmisaction)
return False
if not self._check_access_operation(model_inst, operation):
_logger.info("User don't have the access right for operation %s "
"on %s to execute the CMIS action %s", operation,
model_inst.name_get()[0][1], cmisaction)
return False
return True
def _check_access(self, cmis_path, proxy_info, params):
"""This method check that the user can access to the requested CMIS
content.
        Security checks applied when the proxy mode is activated:
1. Requests from the client must provide a token (in the header or
        as a param of the request).
If no security token is provided in this case, the access is denied.
        2. The Odoo object referenced by the token (a JSON payload carrying
        the model name, record id and field name) must exist.
3. The user must have read access to the object referenced by the token
4. If a cmis_path or object_id is provided by the request, the
        referenced CMIS content must be a child of (or equal to) the node
        referenced by the Odoo object from the token
5. If a cmisaction is provided by the request, a check is done to
ensure that the user has the required privileges in Odoo
"""
# check token conformity
token = self._check_provided_token(cmis_path, proxy_info, params)
if not token:
raise AccessError(_("Bad request"))
# check access to object from token
model_inst, field_name = self._decode_token(
cmis_path, proxy_info, params, token)
if not model_inst:
raise AccessError(_("Bad request"))
        # check if the CMIS object in the request is the one referenced on
# model_inst or a child of this one
if not cmis_path and 'objectId' not in params:
# The request is not for an identified content
return model_inst
if not self._check_cmis_content_access(
cmis_path, proxy_info, params, model_inst, field_name):
raise AccessError(_("Bad request"))
if not self._check_content_action_access(
cmis_path, proxy_info, params, model_inst):
raise AccessError(_("Bad request"))
return model_inst
@http.route([
CMIS_PROXY_PATH + '/<int:backend_id>',
CMIS_PROXY_PATH + '/<int:backend_id>/<path:cmis_path>'
], type='http', auth="user", csrf=False, methods=['GET', 'POST'])
@main.serialize_exception
def call_cmis_services(self, backend_id, cmis_path="", **kwargs):
"""Call at the root of the CMIS repository. These calls are for
requesting the global services provided by the CMIS Container
"""
        # proxy_info is information available in the cache without loading
# the cmis.backend from the database
proxy_info = request.env['cmis.backend'].get_proxy_info_by_id(
backend_id)
method = request.httprequest.method
model_inst = False
if proxy_info.get('apply_odoo_security'):
model_inst = self._check_access(cmis_path, proxy_info, kwargs)
if method not in ['GET', 'POST']:
raise AccessError(
_("The HTTP METHOD %s is not supported by CMIS") % method)
if method == 'GET':
method = self._forward_get
elif method == 'POST':
method = self._forward_post
return method(cmis_path, proxy_info, model_inst, kwargs)
| agpl-3.0 | 6,017,278,269,581,853,000 | 39.267241 | 79 | 0.596286 | false | 4.189238 | false | false | false |
lablup/backend.ai-manager | src/ai/backend/manager/api/session_template.py | 1 | 14992 | import json
import logging
from typing import (
Any,
List,
Mapping,
TYPE_CHECKING,
Tuple,
)
import uuid
from aiohttp import web
import aiohttp_cors
import sqlalchemy as sa
import trafaret as t
import yaml
from ai.backend.common import validators as tx
from ai.backend.common.logging import BraceStyleAdapter
from ..models import (
association_groups_users as agus, domains,
groups, session_templates, keypairs, users, UserRole,
query_accessible_session_templates, TemplateType,
)
from ..models.session_template import check_task_template
from .auth import auth_required
from .exceptions import InvalidAPIParameters, TaskTemplateNotFound
from .manager import READ_ALLOWED, server_status_required
from .types import CORSOptions, Iterable, WebMiddleware
from .utils import check_api_params, get_access_key_scopes
if TYPE_CHECKING:
from .context import RootContext
log = BraceStyleAdapter(logging.getLogger(__name__))
@server_status_required(READ_ALLOWED)
@auth_required
@check_api_params(t.Dict(
{
tx.AliasedKey(['group', 'groupName', 'group_name'], default='default'): t.String,
tx.AliasedKey(['domain', 'domainName', 'domain_name'], default='default'): t.String,
t.Key('owner_access_key', default=None): t.Null | t.String,
t.Key('payload'): t.String
}
))
async def create(request: web.Request, params: Any) -> web.Response:
if params['domain'] is None:
params['domain'] = request['user']['domain_name']
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
requester_uuid = request['user']['uuid']
log.info(
'SESSION_TEMPLATE.CREATE (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*',
)
user_uuid = request['user']['uuid']
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
if requester_access_key != owner_access_key:
# Admin or superadmin is creating sessions for another user.
# The check for admin privileges is already done in get_access_key_scope().
query = (
sa.select([keypairs.c.user, users.c.role, users.c.domain_name])
.select_from(sa.join(keypairs, users, keypairs.c.user == users.c.uuid))
.where(keypairs.c.access_key == owner_access_key)
)
result = await conn.execute(query)
row = result.first()
owner_domain = row['domain_name']
owner_uuid = row['user']
owner_role = row['role']
else:
# Normal case when the user is creating her/his own session.
owner_domain = request['user']['domain_name']
owner_uuid = requester_uuid
owner_role = UserRole.USER
query = (
sa.select([domains.c.name])
.select_from(domains)
.where(
(domains.c.name == owner_domain) &
(domains.c.is_active)
)
)
qresult = await conn.execute(query)
domain_name = qresult.scalar()
if domain_name is None:
raise InvalidAPIParameters('Invalid domain')
if owner_role == UserRole.SUPERADMIN:
# superadmin can spawn container in any designated domain/group.
query = (
sa.select([groups.c.id])
.select_from(groups)
.where(
(groups.c.domain_name == params['domain']) &
(groups.c.name == params['group']) &
(groups.c.is_active)
)
)
qresult = await conn.execute(query)
group_id = qresult.scalar()
elif owner_role == UserRole.ADMIN:
# domain-admin can spawn container in any group in the same domain.
if params['domain'] != owner_domain:
raise InvalidAPIParameters("You can only set the domain to the owner's domain.")
query = (
sa.select([groups.c.id])
.select_from(groups)
.where(
(groups.c.domain_name == owner_domain) &
(groups.c.name == params['group']) &
(groups.c.is_active)
)
)
qresult = await conn.execute(query)
group_id = qresult.scalar()
else:
# normal users can spawn containers in their group and domain.
if params['domain'] != owner_domain:
raise InvalidAPIParameters("You can only set the domain to your domain.")
query = (
sa.select([agus.c.group_id])
.select_from(agus.join(groups, agus.c.group_id == groups.c.id))
.where(
(agus.c.user_id == owner_uuid) &
(groups.c.domain_name == owner_domain) &
(groups.c.name == params['group']) &
(groups.c.is_active)
)
)
qresult = await conn.execute(query)
group_id = qresult.scalar()
if group_id is None:
raise InvalidAPIParameters('Invalid group')
log.debug('Params: {0}', params)
try:
body = json.loads(params['payload'])
except json.JSONDecodeError:
try:
body = yaml.safe_load(params['payload'])
except (yaml.YAMLError, yaml.MarkedYAMLError):
raise InvalidAPIParameters('Malformed payload')
template_data = check_task_template(body)
template_id = uuid.uuid4().hex
resp = {
'id': template_id,
'user': user_uuid.hex,
}
query = session_templates.insert().values({
'id': template_id,
'domain_name': params['domain'],
'group_id': group_id,
'user_uuid': user_uuid,
'name': template_data['metadata']['name'],
'template': template_data,
'type': TemplateType.TASK,
})
result = await conn.execute(query)
assert result.rowcount == 1
return web.json_response(resp)
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('all', default=False): t.ToBool,
tx.AliasedKey(['group_id', 'groupId'], default=None): tx.UUID | t.String | t.Null,
}),
)
async def list_template(request: web.Request, params: Any) -> web.Response:
resp = []
access_key = request['keypair']['access_key']
domain_name = request['user']['domain_name']
user_role = request['user']['role']
user_uuid = request['user']['uuid']
log.info('SESSION_TEMPLATE.LIST (ak:{})', access_key)
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
entries: List[Mapping[str, Any]]
if request['is_superadmin'] and params['all']:
j = (
session_templates
.join(users, session_templates.c.user_uuid == users.c.uuid, isouter=True)
.join(groups, session_templates.c.group_id == groups.c.id, isouter=True)
)
query = (
sa.select([session_templates, users.c.email, groups.c.name], use_labels=True)
.select_from(j)
.where(
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
result = await conn.execute(query)
entries = []
for row in result:
is_owner = True if row.session_templates_user == user_uuid else False
entries.append({
'name': row.session_templates_name,
'id': row.session_templates_id,
'created_at': row.session_templates_created_at,
'is_owner': is_owner,
'user': (str(row.session_templates_user_uuid)
if row.session_templates_user_uuid else None),
'group': (str(row.session_templates_group_id)
if row.session_templates_group_id else None),
'user_email': row.users_email,
'group_name': row.groups_name,
})
else:
extra_conds = None
if params['group_id'] is not None:
extra_conds = ((session_templates.c.group_id == params['group_id']))
entries = await query_accessible_session_templates(
conn,
user_uuid,
TemplateType.TASK,
user_role=user_role,
domain_name=domain_name,
allowed_types=['user', 'group'],
extra_conds=extra_conds,
)
for entry in entries:
resp.append({
'name': entry['name'],
'id': entry['id'].hex,
'created_at': str(entry['created_at']),
'is_owner': entry['is_owner'],
'user': str(entry['user']),
'group': str(entry['group']),
'user_email': entry['user_email'],
'group_name': entry['group_name'],
'type': 'user' if entry['user'] is not None else 'group',
})
return web.json_response(resp)
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('format', default='yaml'): t.Null | t.Enum('yaml', 'json'),
t.Key('owner_access_key', default=None): t.Null | t.String,
})
)
async def get(request: web.Request, params: Any) -> web.Response:
if params['format'] not in ['yaml', 'json']:
raise InvalidAPIParameters('format should be "yaml" or "json"')
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
log.info(
'SESSION_TEMPLATE.GET (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*',
)
template_id = request.match_info['template_id']
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
query = (
sa.select([session_templates.c.template])
.select_from(session_templates)
.where(
(session_templates.c.id == template_id) &
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
template = await conn.scalar(query)
if not template:
raise TaskTemplateNotFound
template = json.loads(template)
if params['format'] == 'yaml':
body = yaml.dump(template)
return web.Response(text=body, content_type='text/yaml')
else:
return web.json_response(template)
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('payload'): t.String,
t.Key('owner_access_key', default=None): t.Null | t.String,
})
)
async def put(request: web.Request, params: Any) -> web.Response:
template_id = request.match_info['template_id']
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
log.info(
'SESSION_TEMPLATE.PUT (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*',
)
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
query = (
sa.select([session_templates.c.id])
.select_from(session_templates)
.where(
(session_templates.c.id == template_id) &
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
result = await conn.scalar(query)
if not result:
raise TaskTemplateNotFound
        try:
            body = json.loads(params['payload'])
        except json.JSONDecodeError:
            try:
                body = yaml.safe_load(params['payload'])
            except (yaml.YAMLError, yaml.MarkedYAMLError):
                raise InvalidAPIParameters('Malformed payload')
template_data = check_task_template(body)
query = (
sa.update(session_templates)
.values(template=template_data, name=template_data['metadata']['name'])
.where((session_templates.c.id == template_id))
)
result = await conn.execute(query)
assert result.rowcount == 1
return web.json_response({'success': True})
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('owner_access_key', default=None): t.Null | t.String,
})
)
async def delete(request: web.Request, params: Any) -> web.Response:
template_id = request.match_info['template_id']
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
log.info(
'SESSION_TEMPLATE.DELETE (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*'
)
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
query = (
sa.select([session_templates.c.id])
.select_from(session_templates)
.where(
(session_templates.c.id == template_id) &
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
result = await conn.scalar(query)
if not result:
raise TaskTemplateNotFound
query = (
sa.update(session_templates)
.values(is_active=False)
.where((session_templates.c.id == template_id))
)
result = await conn.execute(query)
assert result.rowcount == 1
return web.json_response({'success': True})
async def init(app: web.Application) -> None:
pass
async def shutdown(app: web.Application) -> None:
pass
def create_app(default_cors_options: CORSOptions) -> Tuple[web.Application, Iterable[WebMiddleware]]:
app = web.Application()
app.on_startup.append(init)
app.on_shutdown.append(shutdown)
app['api_versions'] = (4, 5)
app['prefix'] = 'template/session'
cors = aiohttp_cors.setup(app, defaults=default_cors_options)
cors.add(app.router.add_route('POST', '', create))
cors.add(app.router.add_route('GET', '', list_template))
template_resource = cors.add(app.router.add_resource(r'/{template_id}'))
cors.add(template_resource.add_route('GET', get))
cors.add(template_resource.add_route('PUT', put))
cors.add(template_resource.add_route('DELETE', delete))
return app, []
| lgpl-3.0 | -2,623,418,009,516,440,000 | 36.668342 | 101 | 0.566169 | false | 3.926663 | false | false | false |
domain51/d51.django.apps.blogs | d51/django/apps/blogs/models.py | 1 | 1309 | from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
class RichTextField(models.TextField):
pass
class Post(models.Model):
internal_title = models.CharField(max_length=255)
display_title = models.CharField(null=True, blank=True, max_length=255)
summary = RichTextField()
content = RichTextField()
meta_keywords = models.CharField(null=True, blank=True, max_length=255)
slug = models.SlugField(unique=True)
author = models.ForeignKey(User, blank=True, null=True)
published = models.DateTimeField()
add_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
link = models.CharField(blank=True, null=True, max_length=200)
@property
def title(self):
return self.display_title or self.internal_title
def get_absolute_url(self):
return reverse('post-detail', kwargs={
'year':self.published.year,
'month':self.published.strftime('%b'),
'day':self.published.day,
'slug':self.slug,
})
def __unicode__(self):
return self.title
class Meta:
ordering = ['-published',]
verbose_name = 'blog post'
verbose_name_plural = 'blog posts'
| gpl-3.0 | -3,552,080,607,034,685,400 | 29.44186 | 75 | 0.663866 | false | 3.895833 | false | false | false |
fls-bioinformatics-core/RnaChipIntegrator | rnachipintegrator/Features.py | 1 | 19026 | #!/bin/env python
#
# Features.py: classes for handling feature data
# Copyright (C) University of Manchester 2011-2019 Peter Briggs, Leo Zeef
# & Ian Donaldson
#
"""
Features.py
Classes for handling feature data.
"""
import logging
import io
from .distances import closestDistanceToRegion
from .utils import make_errline
class FeatureSet(object):
"""Class for storing a set of features
    RNA-seq features consist of genes/transcripts/isomers, which
are stored individually in Feature objects. This class is a
container for a collection of Feature objects and provides
methods to operate on the collection, by creating subsets by
filtering, and sorting the features based on various criteria.
"""
def __init__(self,features_file=None,features_list=None):
"""Create a new FeatureSet instance
Raises an exception if there are errors in the input file data
(non-numeric fields for start/end positions, end positions
occurring before start positions, or illegal strand values).
Arguments:
features_file (str): (optional) the name of an input
file to read the feature data from
features_list (list): (optional) list of Feature objects
to populate the FeatureSet with
"""
self.features = []
self.source_file = None
if features_file:
self.loadFeaturesFromFile(features_file)
elif features_list:
for feature in features_list:
self.addFeature(feature)
def loadFeaturesFromFile(self,features_file):
"""Read features from a file and populate the object
Arguments:
features_file: the name of the input file to read features from.
"""
# Local flags etc
line_index = 0
critical_error = False
# Read in data from file
with io.open(features_file,'rt') as fp:
for line in fp:
# Increment index
line_index += 1
# Skip lines starting with #
if line.startswith('#'):
logging.debug("Feature file: skipped line: %s" %
line.strip())
continue
# Lines are tab-delimited and have at least 5 columns:
# ID chr start end strand
items = line.strip().split('\t')
if len(items) < 5:
logging.warning("Feature file: skipped line: %s" %
line.strip())
logging.warning("Insufficient number of fields (%d)" %
len(items))
continue
# Check line is valid i.e. start and stop should be
# numbers, strand should be + or -
problem_fields = []
if not items[2].isdigit():
problem_fields.append(2)
if not items[3].isdigit():
problem_fields.append(3)
if not (items[4] == '+' or items[4] == '-'):
problem_fields.append(4)
if problem_fields:
# If this is the first line then assume it's a header
# and ignore
if line_index == 1:
logging.warning("%s: first line ignored as header: "
"%s" % (features_file,line.strip()))
else:
# Indicate problem field(s)
logging.error("%s: critical error line %d: bad "
"values:" % (features_file,line_index))
logging.error("%s" % line.strip())
logging.error("%s" % make_errline(line.strip(),
problem_fields))
# This is a critical error: update flag
critical_error = True
# Continue to next line
continue
elif int(items[2]) >= int(items[3]):
# Start position is same or higher than end
logging.error("%s: critical error line %d: 'end' comes "
"before 'start':" % (features_file,
line_index))
logging.error("%s" % line.strip())
logging.error("%s" % make_errline(line.strip(),(2,3)))
# This is a critical error: update flag but continue
# reading
critical_error = True
continue
# Store in a new Feature object
feature = Feature(items[0],
items[1],
items[2],
items[3],
items[4],
source_file=features_file)
# Additional flag
if len(items) >= 6:
# Is column 6 a flag?
try:
flag_value = int(items[5])
if flag_value != 0 and flag_value != 1:
flag_value = None
except ValueError:
flag_value = None
# Store value
feature.flag = flag_value
# Store data
self.features.append(feature)
# Deal with postponed critical errors
if critical_error:
raise Exception("Critical error(s) in '%s'" % features_file)
# Store the source file
self.source_file = features_file
# Return a reference to this object
return self
def addFeature(self,feature):
"""Append a feature to the FeatureSet object
Arguments:
feature: a Feature instance.
"""
self.features.append(feature)
def filterByChr(self,matchChr):
"""Return a subset of features filtered by specified chromosome name
Returns a new FeatureSet object containing only the data from
the current object which matches the specified criteria.
"""
# Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
if feature.chrom == matchChr:
feature_subset.addFeature(feature)
return feature_subset
def filterByStrand(self,matchStrand):
"""Return a subset of features filtered by specified strand
Returns a new FeatureSet object containing only the data from
the current object which matches the specified criteria.
"""
# Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
if feature.strand == matchStrand:
feature_subset.addFeature(feature)
return feature_subset
def filterByFlag(self,matchFlag):
"""Return a subset of features filtered by flag value
Returns a new FeatureSet object containing only the features from
the current object which matches the specified criteria.
Note that if there is no flag (the "isFlagged()" function returns
False) then an empty set will be returned.
"""
# Make a new (empty) RNASeqData object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
if feature.flag == matchFlag:
feature_subset.addFeature(feature)
return feature_subset
def filterByTSS(self,limit1,limit2,exclude_limits=False):
"""Return a subset of features filtered by TSS position
Returns a new FeatureSet object containing only the features
from the current object where the TSS positions fall within a
region defined by upper and lower limits.
limits can be supplied in either order (i.e. highest/lowest
or lowest/highest).
If exclude_limits is False (the default) then TSS positions
that fall exactly on one of the boundaries are counted as
being within the region; if it is True then these TSS
positions will not be considered to lie inside the region.
"""
# Sort out upper and lower limits
if limit1 > limit2:
upper,lower = limit1,limit2
else:
upper,lower = limit2,limit1
# Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
TSS = feature.getTSS()
if exclude_limits:
if lower < TSS and TSS < upper:
feature_subset.addFeature(feature)
else:
if lower <= TSS and TSS <= upper:
feature_subset.addFeature(feature)
return feature_subset
def sortByDistanceFrom(self,position):
"""Sort the features into order based on distance from a position
Sorts the features into order of absolute distance of
their TSS to the specified position (closest first).
Note that this operates on the current object.
"""
self.features = sorted(self.features,
key=lambda record:
abs(record.getTSS()-position))
return self
def sortByClosestEdgeTo(self,position1,position2=None):
"""Sort the features into order based on closest edge (TSS or TES)
Sorts the features into order of smallest absolute distance
to the specified position (closest first), considering both TSS
and TES, using the getClosestEdgeDistanceTo method of the
Feature class.
Note that this operates on the current object.
"""
self.features = sorted(self.features,
key=lambda record:
record.getClosestEdgeDistanceTo(position1,
position2))
return self
def sortByClosestTSSTo(self,position1,position2=None):
"""Sort the features into order based on closest edge to TSS
Sorts the features into order of smallest absolute distance
to the specified position (closest first) to the TSS position,
using the getClosestTSSDistanceTo method of the Feature class.
Note that this operates on the current object.
"""
self.features = sorted(self.features,
key=lambda record:
record.getClosestTSSDistanceTo(position1,
position2))
return self
def isFlagged(self):
"""Check whether feature data includes flags
Checks whether all the Feature records also have a valid flag
associated with them - if yes then returns True (indicating the
dataset as a whole is flagged), otherwise returns False.
"""
# Check all data and look for any None flags
for feature in self.features:
if feature.flag is None:
return False
# All flags valid
return True
def __iter__(self):
return iter(self.features)
def __getitem__(self,key):
try:
start = key.start
stop = key.stop
step = key.step
slice_ = FeatureSet()
for feature in self.features[start:stop:step]:
slice_.addFeature(feature)
return slice_
except AttributeError:
return self.features[key]
def __len__(self):
return len(self.features)
def __eq__(self,other):
if len(self) != len(other):
return False
for f1,f2 in zip(self,other):
if f1 != f2:
return False
return True
def __ne__(self,other):
if len(self) != len(other):
return True
for f1,f2 in zip(self,other):
if f1 != f2:
return True
return False
class Feature(object):
"""Class for storing an 'feature' (gene/transcript/isomer)
Access the data for the feature using the object's properties:
id
chrom
start
end
strand
tss
tes
A feature can also have the following optional data
associated with it:
- A source file name, which is set via the 'source_file'
keyword and accessed via the 'source_file' property.
It will be None if no filename has been specified.
There are also convenience methods (getTSS, getTES, getPromoterRegion)
and methods for calculating various distances.
"""
def __init__(self,feature_id,chrom,start,end,strand,source_file=None):
self.id = feature_id
self.chrom = chrom
self.start = int(start)
self.end = int(end)
self.strand = strand
self.flag = None
self.source_file = source_file
# Set the TSS and TES
if self.strand == '+':
self.tss = self.start
self.tes = self.end
elif self.strand == '-':
self.tss = self.end
self.tes = self.start
else:
raise Exception("Bad strand: '%s'" % self.strand)
def __repr__(self):
items = [self.id,
self.chrom,
str(self.start),
str(self.end),
self.strand]
if self.flag != None:
items.append(str(self.flag))
return '\t'.join(items)
def __eq__(self,other):
return \
(self.id == other.id) and \
(self.strand == other.strand) and \
(self.start == other.start) and \
(self.end == other.end)
def __ne__(self,other):
return \
(self.id != other.id) or \
(self.strand != other.strand) or \
(self.start != other.start) or \
(self.end != other.end)
def getTSS(self):
"""Return the TSS coordinate
        TSS (transcription start site) is the start position for a +ve
strand, or end for a -ve strand.
This is a wrapper for accessing the 'tss' property.
"""
return self.tss
def getTES(self):
"""Return the TES coordinate
        TES (transcription end site) is the end position for a +ve
        strand, or the start for a -ve strand.
This is a wrapper for accessing the 'tes' property.
"""
return self.tes
def containsPosition(self,coordinate):
"""Check whether a coordinate is within the gene coordinates
Returns True if coordinate lies within start and end, False
otherwise.
"""
return (self.start <= coordinate and coordinate <= self.end)
def getClosestTSSDistanceTo(self,position1,position2=None,
zero_inside_region=False):
"""Return distance from TSS to a coordinate or region
For a single specified position, return the absolute distance
between the position and the TSS.
If a second position is given (specifying a region) then return
smallest absolute distance of (TSS,position1) and (TSS,position2).
By default there is no special treatment when the TSS lies inside
the region specified by two positions; to return zero distance in
these cases, set the 'zero_inside_region' argument to True.
"""
return closestDistanceToRegion(self.getTSS(),
position1,position2,
zero_inside_region)
def getClosestTESDistanceTo(self,position1,position2=None,
zero_inside_region=False):
"""Return distance from TES to a coordinate or region
For a single specified position, return the absolute distance
between the position and the TES.
If a second position is given (specifying a region) then return
smallest absolute distance of (TES,position1) and (TES,position2).
By default there is no special treatment when the TES lies inside
the region specified by two positions; to return zero distance in
these cases, set the 'zero_inside_region' argument to True.
"""
return closestDistanceToRegion(self.getTES(),
position1,position2,
zero_inside_region)
def getClosestEdgeDistanceTo(self,position1,position2=None,
zero_inside_region=False):
"""Return closest edge distance to a coordinate or region
For a single specified position, the closest edge is whichever
of the TSS or TES is nearest (smallest absolute distance) from
that position i.e. the smallest distance of (TSS,position) and
(TES,position).
If a second position is given (specifying a region) then
the closest edge is whichever of the TSS/TES is closest to
either position1 or position2 i.e. the smallest distance of
(TSS,position1), (TES,position1), (TSS,position2) and
(TES,position2).
By default there is no special treatment when either the TSS
or TES lie inside the region specified by two positions; to
set this to zero, set the 'zero_inside_region' argument to
True.
"""
return min(self.getClosestTSSDistanceTo(position1,
position2,
zero_inside_region),
self.getClosestTESDistanceTo(position1,
position2,
zero_inside_region))
def getPromoterRegion(self,to_TSS,from_TSS):
"""Return the coordinates of the promoter region
The promoter region is a region of coordinates around the
TSS of a gene, defined by the supplied distances 'to_TSS'
(the distance downstream from the TSS) and 'from_TSS' (the
distance upstream from the TSS).
Returns a tuple containing the start and end coordinates
defining the promoter region.
"""
if self.strand == '+':
return (self.getTSS() - to_TSS,
self.getTSS() + from_TSS)
else:
return (self.getTSS() + to_TSS,
self.getTSS() - from_TSS)
| artistic-2.0 | -4,439,065,279,214,473,700 | 36.087719 | 77 | 0.553716 | false | 4.833841 | false | false | false |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/stat/cmp/cmp_stats.py | 1 | 23212 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
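# Example usage (a sketch): stat resources in this SDK are normally fetched
# through an authenticated nitro_service session. The classmethod-style
# retrieval and the credentials/address below are assumptions for
# illustration only:
#
#     client = nitro_service("10.10.10.10", "http")
#     client.login("nsroot", "nsroot")
#     stats = cmp_stats.get(client)
#     print(stats[0].comptcpratio)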
class cmp_stats(base_resource) :
def __init__(self) :
self._clearstats = ""
self._delbwsaving = 0
self._delcmpratio = 0
self._decomptcpratio = 0
self._decomptcpbandwidthsaving = 0
self._comptcpratio = 0
self._comptcpbandwidthsaving = 0
self._comptotaldatacompressionratio = 0
self._comphttpbandwidthsaving = 0
self._compratio = 0
self._comptotalrequests = 0
self._comprequestsrate = 0
self._comptotalrxbytes = 0
self._comprxbytesrate = 0
self._comptotaltxbytes = 0
self._comptxbytesrate = 0
self._comptotalrxpackets = 0
self._comprxpacketsrate = 0
self._comptotaltxpackets = 0
self._comptxpacketsrate = 0
self._comptcptotalrxbytes = 0
self._comptcprxbytesrate = 0
self._comptcptotalrxpackets = 0
self._comptcprxpacketsrate = 0
self._comptcptotaltxbytes = 0
self._comptcptxbytesrate = 0
self._comptcptotaltxpackets = 0
self._comptcptxpacketsrate = 0
self._comptcptotalquantum = 0
self._comptcpquantumrate = 0
self._comptcptotalpush = 0
self._comptcppushrate = 0
self._comptcptotaleoi = 0
self._comptcpeoirate = 0
self._comptcptotaltimer = 0
self._comptcptimerrate = 0
self._decomptcprxbytes = 0
self._decomptcprxbytesrate = 0
self._decomptcprxpackets = 0
self._decomptcprxpacketsrate = 0
self._decomptcptxbytes = 0
self._decomptcptxbytesrate = 0
self._decomptcptxpackets = 0
self._decomptcptxpacketsrate = 0
self._decomptcperrdata = 0
self._decomptcperrdatarate = 0
self._decomptcperrlessdata = 0
self._decomptcperrlessdatarate = 0
self._decomptcperrmoredata = 0
self._decomptcperrmoredatarate = 0
self._decomptcperrmemory = 0
self._decomptcperrmemoryrate = 0
self._decomptcperrunknown = 0
self._decomptcperrunknownrate = 0
self._delcomptotalrequests = 0
self._delcomprequestsrate = 0
self._delcompdone = 0
self._delcompdonerate = 0
self._delcomptcprxbytes = 0
self._delcomptcprxbytesrate = 0
self._delcomptcptxbytes = 0
self._delcomptcptxbytesrate = 0
self._delcompfirstaccess = 0
self._delcompfirstaccessrate = 0
self._delcomptcprxpackets = 0
self._delcomptcprxpacketsrate = 0
self._delcomptcptxpackets = 0
self._delcomptcptxpacketsrate = 0
self._delcompbaseserved = 0
self._delcompbaseservedrate = 0
self._delcompbasetcptxbytes = 0
self._delcompbasetcptxbytesrate = 0
self._delcomperrbypassed = 0
self._delcomperrbypassedrate = 0
self._delcomperrbfilewhdrfailed = 0
self._delcomperrbfilewhdrfailedrate = 0
self._delcomperrnostoremiss = 0
self._delcomperrnostoremissrate = 0
self._delcomperrreqinfotoobig = 0
self._delcomperrreqinfotoobigrate = 0
self._delcomperrreqinfoallocfail = 0
self._delcomperrreqinfoallocfailrate = 0
self._delcomperrsessallocfail = 0
self._delcomperrsessallocfailrate = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def delcompbasetcptxbytes(self) :
"""Number of basefile bytes transmitted by NetScaler.
"""
try :
return self._delcompbasetcptxbytes
except Exception as e:
raise e
@property
def comphttpbandwidthsaving(self) :
"""Bandwidth saving from TCP compression expressed as percentage.
"""
try :
return self._comphttpbandwidthsaving
except Exception as e:
raise e
@property
def comptcptotalpush(self) :
"""Number of times the NetScaler compresses data on receiving a TCP PUSH flag from the server. The PUSH flag ensures that data is compressed immediately without waiting for the buffered data size to reach the quantum size.
"""
try :
return self._comptcptotalpush
except Exception as e:
raise e
@property
def delcompfirstaccess(self) :
"""Total number of delta compression first accesses.
"""
try :
return self._delcompfirstaccess
except Exception as e:
raise e
@property
def delcompdone(self) :
"""Total number of delta compressions done by NetScaler.
"""
try :
return self._delcompdone
except Exception as e:
raise e
@property
def comptcptotalrxpackets(self) :
"""Total number of compressible packets received by NetScaler.
"""
try :
return self._comptcptotalrxpackets
except Exception as e:
raise e
@property
def delcomperrbypassed(self) :
"""Number of times delta-compression bypassed by NetScaler.
"""
try :
return self._delcomperrbypassed
except Exception as e:
raise e
@property
def decomptcptxpacketsrate(self) :
"""Rate (/s) counter for decomptcptxpackets.
"""
try :
return self._decomptcptxpacketsrate
except Exception as e:
raise e
@property
def delcompbasetcptxbytesrate(self) :
"""Rate (/s) counter for delcompbasetcptxbytes.
"""
try :
return self._delcompbasetcptxbytesrate
except Exception as e:
raise e
@property
def delbwsaving(self) :
"""Bandwidth saving from delta compression expressed as percentage.
"""
try :
return self._delbwsaving
except Exception as e:
raise e
@property
def comprequestsrate(self) :
"""Rate (/s) counter for comptotalrequests.
"""
try :
return self._comprequestsrate
except Exception as e:
raise e
@property
def comptotaltxbytes(self) :
"""Number of bytes the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptotaltxbytes
except Exception as e:
raise e
@property
def comptcpeoirate(self) :
"""Rate (/s) counter for comptcptotaleoi.
"""
try :
return self._comptcpeoirate
except Exception as e:
raise e
@property
def delcomptcptxbytes(self) :
"""Total number of delta-compressed bytes transmitted by NetScaler.
"""
try :
return self._delcomptcptxbytes
except Exception as e:
raise e
@property
def delcomperrreqinfoallocfail(self) :
"""Number of times requested basefile could not be allocated.
"""
try :
return self._delcomperrreqinfoallocfail
except Exception as e:
raise e
@property
def delcomperrbypassedrate(self) :
"""Rate (/s) counter for delcomperrbypassed.
"""
try :
return self._delcomperrbypassedrate
except Exception as e:
raise e
@property
def delcmpratio(self) :
"""Ratio of compressible data received to compressed data transmitted.If this ratio is one (uncmp:1.0) that means compression is disabled or we are not able to compress even a single compressible packet.
"""
try :
return self._delcmpratio
except Exception as e:
raise e
@property
def delcomprequestsrate(self) :
"""Rate (/s) counter for delcomptotalrequests.
"""
try :
return self._delcomprequestsrate
except Exception as e:
raise e
@property
def delcomperrreqinfotoobig(self) :
"""Number of times basefile request URL was too large.
"""
try :
return self._delcomperrreqinfotoobig
except Exception as e:
raise e
@property
def delcomptcprxpacketsrate(self) :
"""Rate (/s) counter for delcomptcprxpackets.
"""
try :
return self._delcomptcprxpacketsrate
except Exception as e:
raise e
@property
def decomptcperrmemory(self) :
"""Number of times memory failures occurred while decompressing.
"""
try :
return self._decomptcperrmemory
except Exception as e:
raise e
@property
def decomptcprxbytes(self) :
"""Total number of compressed bytes received by NetScaler.
"""
try :
return self._decomptcprxbytes
except Exception as e:
raise e
@property
def comptcptxpacketsrate(self) :
"""Rate (/s) counter for comptcptotaltxpackets.
"""
try :
return self._comptcptxpacketsrate
except Exception as e:
raise e
@property
def comptotaldatacompressionratio(self) :
"""Ratio of total HTTP data received to total HTTP data transmitted.
"""
try :
return self._comptotaldatacompressionratio
except Exception as e:
raise e
@property
def comprxbytesrate(self) :
"""Rate (/s) counter for comptotalrxbytes.
"""
try :
return self._comprxbytesrate
except Exception as e:
raise e
@property
def delcomperrsessallocfailrate(self) :
"""Rate (/s) counter for delcomperrsessallocfail.
"""
try :
return self._delcomperrsessallocfailrate
except Exception as e:
raise e
@property
def delcomptcptxpacketsrate(self) :
"""Rate (/s) counter for delcomptcptxpackets.
"""
try :
return self._delcomptcptxpacketsrate
except Exception as e:
raise e
@property
def comptcptotaleoi(self) :
"""Number of times the NetScaler compresses data on receiving End Of Input (FIN packet). When the NetScaler receives End Of Input (FIN packet), it compresses the buffered data immediately without waiting for the buffered data size to reach the quantum size.
"""
try :
return self._comptcptotaleoi
except Exception as e:
raise e
@property
def comptcppushrate(self) :
"""Rate (/s) counter for comptcptotalpush.
"""
try :
return self._comptcppushrate
except Exception as e:
raise e
@property
def decomptcperrmemoryrate(self) :
"""Rate (/s) counter for decomptcperrmemory.
"""
try :
return self._decomptcperrmemoryrate
except Exception as e:
raise e
@property
def decomptcperrunknownrate(self) :
"""Rate (/s) counter for decomptcperrunknown.
"""
try :
return self._decomptcperrunknownrate
except Exception as e:
raise e
@property
def comptcpbandwidthsaving(self) :
"""Bandwidth saving from TCP compression expressed as percentage.
"""
try :
return self._comptcpbandwidthsaving
except Exception as e:
raise e
@property
def decomptcperrmoredata(self) :
"""Number of times NetScaler received more data than declared by protocol.
"""
try :
return self._decomptcperrmoredata
except Exception as e:
raise e
@property
def delcompfirstaccessrate(self) :
"""Rate (/s) counter for delcompfirstaccess.
"""
try :
return self._delcompfirstaccessrate
except Exception as e:
raise e
@property
def comprxpacketsrate(self) :
"""Rate (/s) counter for comptotalrxpackets.
"""
try :
return self._comprxpacketsrate
except Exception as e:
raise e
@property
def comptotalrxbytes(self) :
"""Number of bytes that can be compressed, which the NetScaler receives from the server. This gives the content length of the response that the NetScaler receives from server.
"""
try :
return self._comptotalrxbytes
except Exception as e:
raise e
@property
def decomptcprxpacketsrate(self) :
"""Rate (/s) counter for decomptcprxpackets.
"""
try :
return self._decomptcprxpacketsrate
except Exception as e:
raise e
@property
def comptcpquantumrate(self) :
"""Rate (/s) counter for comptcptotalquantum.
"""
try :
return self._comptcpquantumrate
except Exception as e:
raise e
@property
def comptxbytesrate(self) :
"""Rate (/s) counter for comptotaltxbytes.
"""
try :
return self._comptxbytesrate
except Exception as e:
raise e
@property
def delcompbaseservedrate(self) :
"""Rate (/s) counter for delcompbaseserved.
"""
try :
return self._delcompbaseservedrate
except Exception as e:
raise e
@property
def decomptcptxbytes(self) :
"""Total number of decompressed bytes transmitted by NetScaler.
"""
try :
return self._decomptcptxbytes
except Exception as e:
raise e
@property
def comptcptxbytesrate(self) :
"""Rate (/s) counter for comptcptotaltxbytes.
"""
try :
return self._comptcptxbytesrate
except Exception as e:
raise e
@property
def delcomptcprxpackets(self) :
"""Number of delta-compressible packets received.
"""
try :
return self._delcomptcprxpackets
except Exception as e:
raise e
@property
def decomptcprxpackets(self) :
"""Total number of compressed packets received by NetScaler.
"""
try :
return self._decomptcprxpackets
except Exception as e:
raise e
@property
def comptcptotaltimer(self) :
"""Number of times the NetScaler compresses data on expiration of data accumulation timer. The timer expires if the server response is very slow and consequently, the NetScaler does not receive response for a certain amount of time. Under such a condition, the NetScaler compresses the buffered data immediately without waiting for the buffered data size to reach the quantum size.
"""
try :
return self._comptcptotaltimer
except Exception as e:
raise e
@property
def delcomperrnostoremissrate(self) :
"""Rate (/s) counter for delcomperrnostoremiss.
"""
try :
return self._delcomperrnostoremissrate
except Exception as e:
raise e
@property
def delcomperrbfilewhdrfailed(self) :
"""Number of times basefile could not be updated in NetScaler cache.
"""
try :
return self._delcomperrbfilewhdrfailed
except Exception as e:
raise e
@property
def decomptcperrmoredatarate(self) :
"""Rate (/s) counter for decomptcperrmoredata.
"""
try :
return self._decomptcperrmoredatarate
except Exception as e:
raise e
@property
def decomptcpbandwidthsaving(self) :
"""Bandwidth saving from TCP compression expressed as percentage.
"""
try :
return self._decomptcpbandwidthsaving
except Exception as e:
raise e
@property
def delcomperrsessallocfail(self) :
"""Number of times delta compression session could not be allocated.
"""
try :
return self._delcomperrsessallocfail
except Exception as e:
raise e
@property
def delcompbaseserved(self) :
"""Total number of basefile requests served by NetScaler.
"""
try :
return self._delcompbaseserved
except Exception as e:
raise e
@property
def compratio(self) :
"""Ratio of the compressible data received from the server to the compressed data sent to the client.
"""
try :
return self._compratio
except Exception as e:
raise e
@property
def decomptcptxbytesrate(self) :
"""Rate (/s) counter for decomptcptxbytes.
"""
try :
return self._decomptcptxbytesrate
except Exception as e:
raise e
@property
def decomptcperrlessdata(self) :
"""Number of times NetScaler received less data than declared by protocol.
"""
try :
return self._decomptcperrlessdata
except Exception as e:
raise e
@property
def comptcprxbytesrate(self) :
"""Rate (/s) counter for comptcptotalrxbytes.
"""
try :
return self._comptcprxbytesrate
except Exception as e:
raise e
@property
def comptxpacketsrate(self) :
"""Rate (/s) counter for comptotaltxpackets.
"""
try :
return self._comptxpacketsrate
except Exception as e:
raise e
@property
def comptcprxpacketsrate(self) :
"""Rate (/s) counter for comptcptotalrxpackets.
"""
try :
return self._comptcprxpacketsrate
except Exception as e:
raise e
@property
def comptotaltxpackets(self) :
"""Number of HTTP packets that the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptotaltxpackets
except Exception as e:
raise e
@property
def delcomptcptxbytesrate(self) :
"""Rate (/s) counter for delcomptcptxbytes.
"""
try :
return self._delcomptcptxbytesrate
except Exception as e:
raise e
@property
def delcomperrreqinfotoobigrate(self) :
"""Rate (/s) counter for delcomperrreqinfotoobig.
"""
try :
return self._delcomperrreqinfotoobigrate
except Exception as e:
raise e
@property
def decomptcprxbytesrate(self) :
"""Rate (/s) counter for decomptcprxbytes.
"""
try :
return self._decomptcprxbytesrate
except Exception as e:
raise e
@property
def decomptcperrdatarate(self) :
"""Rate (/s) counter for decomptcperrdata.
"""
try :
return self._decomptcperrdatarate
except Exception as e:
raise e
@property
def comptotalrequests(self) :
"""Number of HTTP compression requests the NetScaler receives for which the response is successfully compressed. For example, after you enable compression and configure services, if you send requests to the NetScaler with the following header information: "Accept-Encoding: gzip, deflate", and NetScaler compresses the corresponding response, this counter is incremented.
"""
try :
return self._comptotalrequests
except Exception as e:
raise e
@property
def decomptcperrunknown(self) :
"""Number of times unknown errors occurred while decompressing.
"""
try :
return self._decomptcperrunknown
except Exception as e:
raise e
@property
def comptotalrxpackets(self) :
"""Number of HTTP packets that can be compressed, which the NetScaler receives from the server.
"""
try :
return self._comptotalrxpackets
except Exception as e:
raise e
@property
def delcomptcprxbytes(self) :
"""Total number of delta-compressible bytes received by NetScaler.
"""
try :
return self._delcomptcprxbytes
except Exception as e:
raise e
@property
def comptcptimerrate(self) :
"""Rate (/s) counter for comptcptotaltimer.
"""
try :
return self._comptcptimerrate
except Exception as e:
raise e
@property
def comptcptotalquantum(self) :
"""Number of times the NetScaler compresses a quantum of data. NetScaler buffers the data received from the server till it reaches the quantum size and then compresses the buffered data and transmits to the client.
"""
try :
return self._comptcptotalquantum
except Exception as e:
raise e
@property
def comptcptotaltxpackets(self) :
"""Number of TCP packets that the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptcptotaltxpackets
except Exception as e:
raise e
@property
def delcompdonerate(self) :
"""Rate (/s) counter for delcompdone.
"""
try :
return self._delcompdonerate
except Exception as e:
raise e
@property
def delcomptcptxpackets(self) :
"""Total number of delta-compressed packets transmitted by NetScaler.
"""
try :
return self._delcomptcptxpackets
except Exception as e:
raise e
@property
def decomptcpratio(self) :
"""Ratio of decompressed data transmitted to compressed data received.
"""
try :
return self._decomptcpratio
except Exception as e:
raise e
@property
def decomptcperrlessdatarate(self) :
"""Rate (/s) counter for decomptcperrlessdata.
"""
try :
return self._decomptcperrlessdatarate
except Exception as e:
raise e
@property
def comptcptotalrxbytes(self) :
"""Number of bytes that can be compressed, which the NetScaler receives from the server. This gives the content length of the response that the NetScaler receives from server.
"""
try :
return self._comptcptotalrxbytes
except Exception as e:
raise e
@property
def delcomptcprxbytesrate(self) :
"""Rate (/s) counter for delcomptcprxbytes.
"""
try :
return self._delcomptcprxbytesrate
except Exception as e:
raise e
@property
def comptcptotaltxbytes(self) :
"""Number of bytes that the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptcptotaltxbytes
except Exception as e:
raise e
@property
def decomptcptxpackets(self) :
"""Total number of decompressed packets transmitted by NetScaler.
"""
try :
return self._decomptcptxpackets
except Exception as e:
raise e
@property
def delcomptotalrequests(self) :
"""Total number of delta compression requests received by NetScaler.
"""
try :
return self._delcomptotalrequests
except Exception as e:
raise e
@property
def delcomperrreqinfoallocfailrate(self) :
"""Rate (/s) counter for delcomperrreqinfoallocfail.
"""
try :
return self._delcomperrreqinfoallocfailrate
except Exception as e:
raise e
@property
def delcomperrbfilewhdrfailedrate(self) :
"""Rate (/s) counter for delcomperrbfilewhdrfailed.
"""
try :
return self._delcomperrbfilewhdrfailedrate
except Exception as e:
raise e
@property
def delcomperrnostoremiss(self) :
"""Number of times basefile was not found in NetScaler cache.
"""
try :
return self._delcomperrnostoremiss
except Exception as e:
raise e
@property
def comptcpratio(self) :
"""Ratio of compressible data received to compressed data transmitted.If this ratio is one (uncmp:1.0) that means compression is disabled or we are not able to compress even a single compressible packet.
"""
try :
return self._comptcpratio
except Exception as e:
raise e
@property
def decomptcperrdata(self) :
"""Number of data errors encountered while decompressing.
"""
try :
return self._decomptcperrdata
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cmp_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cmp
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
""" Use this API to fetch the statistics of all cmp_stats resources that are configured on netscaler.
"""
try :
obj = cmp_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class cmp_response(base_response) :
def __init__(self, length=1) :
self.cmp = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.cmp = [cmp_stats() for _ in range(length)]
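# Usage sketch (added for illustration): given an already authenticated
# nitro_service session object -- obtaining and logging in that session is
# outside this module and is assumed here -- the aggregate compression
# counters can be fetched through the classmethod defined above:
#   stats = cmp_stats.get(ns_session)
# after which properties such as comptotalrequests or compratio can be read
# from the returned result.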
| apache-2.0 | 1,323,930,941,714,670,600 | 24.039914 | 384 | 0.721006 | false | 3.227475 | false | false | false |
pbasov/fuel-extension-cpu-pinning | fuel_extension_cpu_pinning/validators.py | 1 | 1623 | from fuel_extension_cpu_pinning.models import CpuPinOverride
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.errors import errors
from nailgun.logger import logger
class CpuPinningValidator(BasicValidator):
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "CPU pinning for Nova and Contrail vrouter",
"description": "CPU cores masks",
"type": "object",
"properties": {
"nova_cores": {"type": "array"},
"vrouter_cores": {"type": "array"},
},
}
@classmethod
def validate(cls, data, node=None, pins_data=None):
"""Check input data for intersections
to ensure correct core bindings
"""
dict_data = cls.validate_json(data)
cls.validate_schema(dict_data, cls.schema)
api_nova_cores = dict_data.get('nova_cores', [])
api_vrouter_cores = dict_data.get('vrouter_cores', [])
db_nova_cores = pins_data.get('nova_cores') or []
db_vrouter_cores = pins_data.get('vrouter_cores') or []
if set(api_nova_cores) & set(api_vrouter_cores) != set():
raise errors.InvalidData('Input values conflict with each other')
if all(cores != [] for cores in (api_nova_cores, api_vrouter_cores)):
return dict_data
if any(condition != set() for condition in [
set(api_nova_cores) & set(db_vrouter_cores),
set(api_vrouter_cores) & set(db_nova_cores)]
):
raise errors.InvalidData('Input values conflict with existing one')
return dict_data
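# Illustrative payloads (core numbers are hypothetical): with stored pins_data
# of {'nova_cores': [], 'vrouter_cores': []}, the request body
# '{"nova_cores": [0, 1], "vrouter_cores": [2, 3]}' validates cleanly, while
# '{"nova_cores": [0, 1], "vrouter_cores": [1, 2]}' raises InvalidData because
# core 1 appears in both lists.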
| apache-2.0 | 5,613,233,836,513,574,000 | 36.744186 | 79 | 0.610598 | false | 3.731034 | false | false | false |
akvo/butler | setup.py | 1 | 2525 | # -*- coding: UTF-8 -*-
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import setup
from setuptools import find_packages
import os
import re
import time
_version = "0.1.%sdev0" % int(time.time())
_packages = find_packages('butler', exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
# make sure that data files go into the right place
# see http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# find any static content such as HTML files or CSS
_INCLUDE = re.compile("^.*\.(html|less|css|js|png|gif|jpg|mo|eot|svg|ttf|woff|otf|json|conf|txt|ico)$")
_root_directory='butler'
def get_package_data():
package_data = {}
for pkg in os.listdir(_root_directory):
pkg_path = os.path.join(_root_directory, pkg)
if os.path.isdir(pkg_path):
package_data[pkg] = create_paths(pkg_path)
return package_data
def create_paths(root_dir):
paths = []
is_package = os.path.exists(os.path.join(root_dir, '__init__.py'))
children = os.listdir(root_dir)
for child in children:
childpath = os.path.join(root_dir, child)
if os.path.isfile(childpath) and not is_package and _INCLUDE.match(child):
paths.append(child)
if os.path.isdir(childpath):
paths += [os.path.join( child, path ) for path in create_paths( os.path.join(root_dir, child) ) ]
return paths
_reqs_dir = os.path.join(os.path.dirname(__file__), 'requirements')
def _strip_comments(line):
return line.split('#', 1)[0].strip()
def _get_reqs(req):
with open(os.path.join( _reqs_dir, req ) ) as f:
requires = f.readlines()
requires = map(_strip_comments, requires)
requires = filter( lambda x:x.strip()!='', requires )
return requires
_install_requires = _get_reqs('common.txt')
_extras_require = {
'psql': _get_reqs('psql.txt'),
'mysql': _get_reqs('mysql.txt'),
}
_data_files = [('', ['requirements/%s' % reqs_file for reqs_file in os.listdir(_reqs_dir)])]
setup(
name='butler',
version=_version,
packages=_packages,
package_dir={'': 'butler'},
package_data=get_package_data(),
install_requires=_install_requires,
extras_require=_extras_require,
data_files=_data_files,
author='Akvo.org',
author_email='[email protected]',
url='https://github.com/akvo/akvo-butler',
license='Affero GPL',
)
| agpl-3.0 | -8,536,758,986,645,435,000 | 27.693182 | 109 | 0.643564 | false | 3.184111 | false | false | false |
n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/NFR4XBoot/ubi_reader/ui/common.py | 5 | 3812 | import os
from ubi_io import leb_virtual_file
from ubifs import ubifs, walk, output
from ubifs.defines import PRINT_UBIFS_KEY_HASH, PRINT_UBIFS_COMPR
from ubi.defines import PRINT_VOL_TYPE_LIST, UBI_VTBL_AUTORESIZE_FLG
output_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'output')
def extract_files(ubifs, out_path, perms = False):
try:
inodes = {}
walk.index(ubifs, ubifs.master_node.root_lnum, ubifs.master_node.root_offs, inodes)
for dent in inodes[1]['dent']:
output.dents(ubifs, inodes, dent, out_path, perms)
except Exception as e:
import traceback
ubifs.log.write('%s' % e)
traceback.print_exc()
def get_ubi_params(ubi):
ubi_flags = {'min_io_size': '-m',
'max_bud_bytes': '-j',
'leb_size': '-e',
'default_compr': '-x',
'sub_page_size': '-s',
'fanout': '-f',
'key_hash': '-k',
'orph_lebs': '-p',
'log_lebs': '-l',
'max_leb_cnt': '-c',
'peb_size': '-p',
'sub_page_size': '-s',
'vid_hdr_offset': '-O',
'version': '-x',
'image_seq': '-Q',
'alignment': '-a',
'vol_id': '-n',
'name': '-N'}
ubi_params = {}
ubi_args = {}
ini_params = {}
for image in ubi.images:
img_seq = image.image_seq
ubi_params[img_seq] = {}
ubi_args[img_seq] = {}
ini_params[img_seq] = {}
for volume in image.volumes:
ubi_args[img_seq][volume] = {}
ini_params[img_seq][volume] = {}
ini_params[img_seq][volume]['vol_type'] = PRINT_VOL_TYPE_LIST[image.volumes[volume].vol_rec.vol_type]
if image.volumes[volume].vol_rec.flags == UBI_VTBL_AUTORESIZE_FLG:
ini_params[img_seq][volume]['vol_flags'] = 'autoresize'
else:
ini_params[img_seq][volume]['vol_flags'] = image.volumes[volume].vol_rec.flags
ini_params[img_seq][volume]['vol_id'] = image.volumes[volume].vol_id
ini_params[img_seq][volume]['vol_name'] = image.volumes[volume].name.rstrip('\x00')
ini_params[img_seq][volume]['vol_alignment'] = image.volumes[volume].vol_rec.alignment
ini_params[img_seq][volume]['vol_size'] = image.volumes[volume].vol_rec.reserved_pebs * ubi.leb_size
ufsfile = leb_virtual_file(ubi, image.volumes[volume])
uubifs = ubifs(ufsfile)
for key, value in uubifs.superblock_node:
if key == 'key_hash':
value = PRINT_UBIFS_KEY_HASH[value]
elif key == 'default_compr':
value = PRINT_UBIFS_COMPR[value]
if key in ubi_flags:
ubi_args[img_seq][volume][key] = value
for key, value in image.volumes[volume].vol_rec:
if key == 'name':
value = value.rstrip('\x00')
if key in ubi_flags:
ubi_args[img_seq][volume][key] = value
ubi_args[img_seq][volume]['version'] = image.version
ubi_args[img_seq][volume]['vid_hdr_offset'] = image.vid_hdr_offset
ubi_args[img_seq][volume]['sub_page_size'] = ubi_args[img_seq][volume]['vid_hdr_offset']
ubi_args[img_seq][volume]['image_seq'] = image.image_seq
ubi_args[img_seq][volume]['peb_size'] = ubi.peb_size
ubi_args[img_seq][volume]['vol_id'] = image.volumes[volume].vol_id
ubi_params[img_seq][volume] = {'flags': ubi_flags,
'args': ubi_args[img_seq][volume],
'ini': ini_params[img_seq][volume]}
return ubi_params | gpl-2.0 | 6,523,534,966,752,915,000 | 41.83908 | 113 | 0.543284 | false | 3.163485 | false | false | false |
DS-CM/live-slides | src/GetImage.py | 1 | 1282 | import http.client, urllib.request, urllib.parse, urllib.error, base64, json
from pprint import pprint
class GetImage:
def __init__(self, key):
self.key = key
def getImage(self, keywords):
search_string = ""
for x in keywords:
search_string = search_string + " " + x
headers = {
# Request headers
'Ocp-Apim-Subscription-Key': self.key,
}
params = urllib.parse.urlencode({
# Request parameters
'q': search_string,
'count': '1',
'offset': '0',
'mkt': 'en-us',
'safeSearch': 'Strict',
})
try:
conn = http.client.HTTPSConnection('api.cognitive.microsoft.com')
conn.request("GET", "/bing/v5.0/images/search?%s" % params, "{body}", headers)
response = conn.getresponse()
data = json.loads(response.read().decode('utf-8'))
conn.close()
try:
return data['value'][0]['contentUrl']
except IndexError as e:
print("David wants to output this error: {}".format(e))
return None
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
| apache-2.0 | 6,372,495,451,247,394,000 | 28.136364 | 90 | 0.513261 | false | 4.162338 | false | false | false |
paypal/keystone | keystone/common/sql/migrate_repo/versions/016_normalize_domain_ids.py | 1 | 19579 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Normalize for domain_id, i.e. ensure User and Project entities have the
domain_id as a first class attribute.
Both User and Project (as well as Group) entities are owned by a
domain, which is implemented as each having a domain_id foreign key
in their sql representation that points back to the respective
domain in the domain table. This domain_id attribute should also
be required (i.e. not nullable)
Adding a non_nullable foreign key attribute to a table with existing
data causes a few problems since not all DB engines support the
ability to either control the triggering of integrity constraints
or the ability to modify columns after they are created.
To get round the above inconsistencies, two versions of the
upgrade/downgrade functions are supplied, one for those engines
that support dropping columns, and one for those that don't. For
the latter we are forced to do table copy AND control the triggering
of integrity constraints.
"""
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
from keystone import config
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
def _disable_foreign_constraints(session, migrate_engine):
if migrate_engine.name == 'mysql':
session.execute('SET foreign_key_checks = 0;')
def _enable_foreign_constraints(session, migrate_engine):
if migrate_engine.name == 'mysql':
session.execute('SET foreign_key_checks = 1;')
def upgrade_user_table_with_copy(meta, migrate_engine, session):
# We want to add the domain_id attribute to the user table. Since
# it is non nullable and the table may have data, easiest way is
# a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able to do a table
# DROP then CREATE, rather than ALTERing the name of the table.
# First make a copy of the user table
temp_user_table = sql.Table(
'temp_user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('password', sql.String(128)),
sql.Column('enabled', sql.Boolean, default=True))
temp_user_table.create(migrate_engine, checkfirst=True)
user_table = sql.Table('user', meta, autoload=True)
for user in session.query(user_table):
session.execute('insert into temp_user (id, name, extra, '
'password, enabled) '
'values ( :id, :name, :extra, '
':password, :enabled);',
{'id': user.id,
'name': user.name,
'extra': user.extra,
'password': user.password,
'enabled': user.enabled})
# Now switch off constraints while we drop and then re-create the
# user table, with the additional domain_id column
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table user;')
# Need to create a new metadata stream since we are going to load a
# different version of the user table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
domain_table = sql.Table('domain', meta2, autoload=True)
user_table = sql.Table(
'user',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.Column("password", sql.String(128)),
sql.Column("enabled", sql.Boolean, default=True),
sql.Column('domain_id', sql.String(64), sql.ForeignKey('domain.id'),
nullable=False),
sql.UniqueConstraint('domain_id', 'name'))
user_table.create(migrate_engine, checkfirst=True)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for user in session.query(temp_user_table):
session.execute('insert into user (id, name, extra, '
'password, enabled, domain_id) '
'values ( :id, :name, :extra, '
':password, :enabled, :domain_id);',
{'id': user.id,
'name': user.name,
'extra': user.extra,
'password': user.password,
'enabled': user.enabled,
'domain_id': DEFAULT_DOMAIN_ID})
_enable_foreign_constraints(session, migrate_engine)
session.execute('drop table temp_user;')
def upgrade_project_table_with_copy(meta, migrate_engine, session):
# We want to add the domain_id attribute to the project table. Since
# it is non nullable and the table may have data, easiest way is
# a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able to do a table
    # DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the project table
temp_project_table = sql.Table(
'temp_project',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True))
temp_project_table.create(migrate_engine, checkfirst=True)
project_table = sql.Table('project', meta, autoload=True)
for project in session.query(project_table):
session.execute('insert into temp_project (id, name, extra, '
'description, enabled) '
'values ( :id, :name, :extra, '
':description, :enabled);',
{'id': project.id,
'name': project.name,
'extra': project.extra,
'description': project.description,
'enabled': project.enabled})
# Now switch off constraints while we drop and then re-create the
# project table, with the additional domain_id column
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table project;')
# Need to create a new metadata stream since we are going to load a
# different version of the project table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
domain_table = sql.Table('domain', meta2, autoload=True)
project_table = sql.Table(
'project',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True),
sql.Column('domain_id', sql.String(64), sql.ForeignKey('domain.id'),
nullable=False),
sql.UniqueConstraint('domain_id', 'name'))
project_table.create(migrate_engine, checkfirst=True)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for project in session.query(temp_project_table):
session.execute('insert into project (id, name, extra, '
'description, enabled, domain_id) '
'values ( :id, :name, :extra, '
':description, :enabled, :domain_id);',
{'id': project.id,
'name': project.name,
'extra': project.extra,
'description': project.description,
'enabled': project.enabled,
'domain_id': DEFAULT_DOMAIN_ID})
_enable_foreign_constraints(session, migrate_engine)
session.execute('drop table temp_project;')
def downgrade_user_table_with_copy(meta, migrate_engine, session):
# For engines that don't support dropping columns, we need to do this
# as a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able to do a table
    # DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the user table
temp_user_table = sql.Table(
'temp_user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('password', sql.String(128)),
sql.Column('enabled', sql.Boolean, default=True),
sql.Column('extra', sql.Text()))
temp_user_table.create(migrate_engine, checkfirst=True)
user_table = sql.Table('user', meta, autoload=True)
for user in session.query(user_table):
session.execute('insert into temp_user (id, name, '
'password, enabled, extra) '
'values ( :id, :name, '
':password, :enabled, :extra);',
{'id': user.id,
'name': user.name,
'password': user.password,
'enabled': user.enabled,
'extra': user.extra})
# Now switch off constraints while we drop and then re-create the
# user table, less the columns we wanted to drop
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table user;')
# Need to create a new metadata stream since we are going to load a
# different version of the user table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
user_table = sql.Table(
'user',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('password', sql.String(128)),
sql.Column('enabled', sql.Boolean, default=True))
user_table.create(migrate_engine, checkfirst=True)
_enable_foreign_constraints(session, migrate_engine)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for user in session.query(temp_user_table):
session.execute('insert into user (id, name, extra, '
'password, enabled) '
'values ( :id, :name, :extra, '
':password, :enabled);',
{'id': user.id,
'name': user.name,
'extra': user.extra,
'password': user.password,
'enabled': user.enabled})
session.execute('drop table temp_user;')
def downgrade_project_table_with_copy(meta, migrate_engine, session):
# For engines that don't support dropping columns, we need to do this
# as a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able to do a table
    # DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the project table
temp_project_table = sql.Table(
'temp_project',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True),
sql.Column('extra', sql.Text()))
temp_project_table.create(migrate_engine, checkfirst=True)
project_table = sql.Table('project', meta, autoload=True)
for project in session.query(project_table):
session.execute('insert into temp_project (id, name, '
'description, enabled, extra) '
'values ( :id, :name, '
':description, :enabled, :extra);',
{'id': project.id,
'name': project.name,
'description': project.description,
'enabled': project.enabled,
'extra': project.extra})
# Now switch off constraints while we drop and then re-create the
# project table, less the columns we wanted to drop
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table project;')
# Need to create a new metadata stream since we are going to load a
# different version of the project table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
project_table = sql.Table(
'project',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True))
project_table.create(migrate_engine, checkfirst=True)
_enable_foreign_constraints(session, migrate_engine)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for project in session.query(temp_project_table):
session.execute('insert into project (id, name, extra, '
'description, enabled) '
'values ( :id, :name, :extra, '
':description, :enabled);',
{'id': project.id,
'name': project.name,
'extra': project.extra,
'description': project.description,
'enabled': project.enabled})
session.execute("drop table temp_project;")
def upgrade_user_table_with_col_create(meta, migrate_engine, session):
# Create the domain_id column. We want this to be not nullable
# but also a foreign key. We can't create this right off the
# bat since any existing rows would cause an Integrity Error.
# We therefore create it nullable, fill the column with the
# default data and then set it to non nullable.
domain_table = sql.Table('domain', meta, autoload=True)
user_table = sql.Table('user', meta, autoload=True)
user_table.create_column(
sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=True))
for user in session.query(user_table).all():
values = {'domain_id': DEFAULT_DOMAIN_ID}
update = user_table.update().\
where(user_table.c.id == user.id).\
values(values)
migrate_engine.execute(update)
# Need to commit this or setting nullable to False will fail
session.commit()
user_table.columns.domain_id.alter(nullable=False)
# Finally, change the uniqueness settings for the name attribute
session.execute('ALTER TABLE "user" DROP CONSTRAINT user_name_key;')
session.execute('ALTER TABLE "user" ADD CONSTRAINT user_dom_name_unique '
'UNIQUE (domain_id, name);')
def upgrade_project_table_with_col_create(meta, migrate_engine, session):
# Create the domain_id column. We want this to be not nullable
# but also a foreign key. We can't create this right off the
# bat since any existing rows would cause an Integrity Error.
# We therefore create it nullable, fill the column with the
# default data and then set it to non nullable.
domain_table = sql.Table('domain', meta, autoload=True)
project_table = sql.Table('project', meta, autoload=True)
project_table.create_column(
sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=True))
for project in session.query(project_table).all():
values = {'domain_id': DEFAULT_DOMAIN_ID}
update = project_table.update().\
where(project_table.c.id == project.id).\
values(values)
migrate_engine.execute(update)
# Need to commit this or setting nullable to False will fail
session.commit()
project_table.columns.domain_id.alter(nullable=False)
# Finally, change the uniqueness settings for the name attribute
session.execute('ALTER TABLE project DROP CONSTRAINT tenant_name_key;')
session.execute('ALTER TABLE project ADD CONSTRAINT proj_dom_name_unique '
'UNIQUE (domain_id, name);')
def downgrade_user_table_with_col_drop(meta, migrate_engine, session):
# Revert uniqueness settings for the name attribute
session.execute('ALTER TABLE "user" DROP CONSTRAINT '
'user_dom_name_unique;')
session.execute('ALTER TABLE "user" ADD UNIQUE (name);')
session.commit()
# And now go ahead an drop the domain_id column
domain_table = sql.Table('domain', meta, autoload=True)
user_table = sql.Table('user', meta, autoload=True)
column = sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=False)
column.drop(user_table)
def downgrade_project_table_with_col_drop(meta, migrate_engine, session):
# Revert uniqueness settings for the name attribute
session.execute('ALTER TABLE project DROP CONSTRAINT '
'proj_dom_name_unique;')
session.execute('ALTER TABLE project ADD CONSTRAINT tenant_name_key '
'UNIQUE (name);')
session.commit()
# And now go ahead an drop the domain_id column
domain_table = sql.Table('domain', meta, autoload=True)
project_table = sql.Table('project', meta, autoload=True)
column = sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=False)
column.drop(project_table)
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
session = sessionmaker(bind=migrate_engine)()
if migrate_engine.name in ['sqlite', 'mysql']:
upgrade_user_table_with_copy(meta, migrate_engine, session)
upgrade_project_table_with_copy(meta, migrate_engine, session)
else:
upgrade_user_table_with_col_create(meta, migrate_engine, session)
upgrade_project_table_with_col_create(meta, migrate_engine, session)
session.commit()
session.close()
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
session = sessionmaker(bind=migrate_engine)()
if migrate_engine.name in ['sqlite', 'mysql']:
downgrade_user_table_with_copy(meta, migrate_engine, session)
downgrade_project_table_with_copy(meta, migrate_engine, session)
else:
# MySQL should in theory be able to use this path, but seems to
# have problems dropping columns which are foreign keys
downgrade_user_table_with_col_drop(meta, migrate_engine, session)
downgrade_project_table_with_col_drop(meta, migrate_engine, session)
session.commit()
session.close()
| apache-2.0 | 4,975,266,049,065,287,000 | 44.21709 | 78 | 0.623117 | false | 4.145458 | false | false | false |
hpc/hypnotoad | hypnotoad/plugins/datamodels/ldap/ldap_plugin.py | 1 | 3400 | #
# An ldap data model plugin for hypnotoad.
#
import ldap
import logging
from hypnotoad.core import plugin
LOG = logging.getLogger('root')
class ldap_plugin(plugin.data_model_plugin):
def setup(self, config, model_version):
"""Called before the plugin is asked to do anything."""
if config.getboolean('Data Model Options', 'ldap_plugin_enabled'):
self.plugin_enabled = True
LOG.debug("LDAP plugin enabled")
ldap_url = config.get('Data Model Options', 'ldap_server')
ldap_dc = config.get('Data Model Options', 'ldap_dc')
ldap_ou_group = config.get('Data Model Options', 'ldap_ou_group')
ldap_ou_user = config.get('Data Model Options', 'ldap_ou_user')
ldap_timeout = config.getfloat(
'Data Model Options', 'ldap_timeout')
self.ldap_dn_user = "ou=" + ldap_ou_user + "," + ldap_dc
self.ldap_dn_group = "ou=" + ldap_ou_group + "," + ldap_dc
LOG.debug("URL: " + ldap_url)
LOG.debug("Base DC: " + ldap_dc)
LOG.debug("DN for groups: " + self.ldap_dn_group)
LOG.debug("DN for users: " + self.ldap_dn_user)
self.ldap_ctx = ldap.initialize(ldap_url)
self.ldap_ctx.set_option(ldap.OPT_NETWORK_TIMEOUT, ldap_timeout)
self.config = config
self.model_version = model_version
else:
self.plugin_enabled = False
def teardown(self):
"""Called to allow the plugin to free anything."""
if self.plugin_enabled:
LOG.debug("Got to ldap plugin teardown")
self.ldap_ctx.unbind_s()
def get_model(self):
"""Look up information in this data model."""
model = []
if self.plugin_enabled:
LOG.debug("Got to ldap plugin get_model")
model.append(
{'little_lang_entry': {'version': self.model_version}})
def ldap_search(dn, attrs):
return self.ldap_ctx.search_s(dn, ldap.SCOPE_SUBTREE, '(cn=*)', attrs)
users = ldap_search(self.ldap_dn_user, [
'cn', 'gidNumber', 'homeDirectory', 'uid',
'uidNumber', 'gecos', 'hpcDRMadef', 'loginShell'
])
LOG.debug("Found " + str(len(users)) + " users.")
for u in users:
dn, attrs = u
model.append({'user_entry': {
'short_name_string': attrs['uid'][0],
'full_name_string': attrs['cn'][0],
'group_id_integer': attrs['gidNumber'][0],
'user_id_integer': attrs['uidNumber'][0],
'home_directory_string': attrs['homeDirectory'][0],
'login_shell_string': attrs['loginShell'][0],
'priority_fairshare_float': '',
'priority_qos_name_array': ''
}})
groups = ldap_search(
self.ldap_dn_group, ['cn', 'hpcDRMshare', 'memberUid'])
for g in groups:
dn, attrs = g
LOG.debug("Found group with DN: " + dn)
model.append({'group_entry': {
'short_name_string': attrs['cn'][0],
'priority_fairshare_float': attrs['hpcDRMshare'][0],
}})
return model
# EOF
| bsd-3-clause | 450,297,542,380,948,160 | 32.333333 | 86 | 0.519412 | false | 3.944316 | true | false | false |
hanelsofterp/green-hanel | purchase_landed_cost_assigning_before_receiving/wizard/wizard_import.py | 1 | 1127 | __author__ = 'trananhdung'
from openerp import models, fields, api
class extendPickingImportWizard(models.TransientModel):
_inherit = 'picking.import.wizard'
pickings = fields.Many2many(
comodel_name='stock.picking',
relation='distribution_import_picking_rel', column1='wizard_id',
column2='picking_id', string='Incoming shipments',
domain="[('partner_id', 'child_of', supplier),"
"('location_id.usage', '=', 'supplier'),"
"('id', 'not in', prev_pickings[0][2]),"
"('state', 'in', ('landed_cost','done'))]", required=True)
@api.multi
def action_import_picking(self):
self.ensure_one()
# for picking in self.pickings:
# for move in picking.move_lines:
# self.env['purchase.cost.distribution.line'].create(
# self._prepare_distribution_line(move))
self.pickings.write({
'distribution_id': self.env.context.get('active_id', False)
})
return super(extendPickingImportWizard, self).action_import_picking()
| gpl-3.0 | 947,077,968,450,730,200 | 38.25 | 77 | 0.582076 | false | 3.899654 | false | false | false |
rambler-digital-solutions/aioriak | aioriak/datatypes/datatype.py | 1 | 4884 | from . import TYPES
from aioriak.error import ContextRequired
class Datatype:
'''
Base class for all convergent datatype wrappers. You will not use
this class directly, but it does define some methods are common to
all datatype wrappers.
'''
#: The string "name" of this datatype. Each datatype should set this.
type_name = None
def __init__(self, bucket=None, key=None, value=None, context=None):
self.bucket = bucket
self.key = key
self._context = context
if value is not None:
self._set_value(value)
else:
self._set_value(self._default_value())
self._post_init()
@property
def value(self):
'''
The pure, immutable value of this datatype, as a Python value,
which is unique for each datatype.
**NB**: Do not use this property to mutate data, as it will not
have any effect. Use the methods of the individual type to effect
changes. This value is guaranteed to be independent of any internal
data representation.
'''
return self._value
@property
def context(self):
'''
The opaque context for this type, if it was previously fetched.
:rtype: str
'''
if self._context:
return self._context[:]
@property
def modified(self):
'''
Whether this datatype has staged local modifications.
:rtype: bool
'''
raise NotImplementedError
# Lifecycle methods
async def reload(self, **params):
'''
Reloads the datatype from Riak.
.. warning: This clears any local modifications you might have
made.
:rtype: :class:`Datatype`
'''
if not self.bucket:
raise ValueError('bucket property not assigned')
if not self.key:
raise ValueError('key property not assigned')
dtype, value, context = await self.bucket._client._fetch_datatype(
self.bucket, self.key, **params)
if not dtype == self.type_name:
raise TypeError("Expected datatype {} but "
"got datatype {}".format(self.__class__,
TYPES[dtype]))
self.clear()
self._context = context
self._set_value(value)
return self
async def delete(self, **params):
'''
Deletes the datatype from Riak. See :meth:`RiakClient.delete()
<aioriak.client.RiakClient.delete>` for options.
'''
self.clear()
self._context = None
self._set_value(self._default_value())
await self.bucket._client.delete(self, **params)
return self
async def update(self, **params):
'''
Sends locally staged mutations to Riak.
:rtype: a subclass of :class:`~aioriak.datatypes.Datatype`
'''
if not self.modified:
raise ValueError("No operation to perform")
params.setdefault('return_body', True)
await self.bucket._client.update_datatype(self, **params)
self.clear()
return self
store = update
def clear(self):
'''
Removes all locally staged mutations.
'''
self._post_init()
def to_op(self):
'''
Extracts the mutation operation from this datatype, if any.
Each type must implement this method, returning the
appropriate operation, or `None` if there is no queued
mutation.
'''
raise NotImplementedError
# Private stuff
def _set_value(self, value):
self._raise_if_badtype(value)
self._value = self._coerce_value(value)
def _raise_if_badtype(self, new_value):
if not self._check_type(new_value):
raise TypeError(self._type_error_msg)
def _check_type(self, new_value):
'''
Checks that initial values of the type are appropriate. Each
type must implement this method.
:rtype: bool
'''
raise NotImplementedError
def _default_value(self):
'''
Returns what the initial value of an empty datatype should be.
'''
raise NotImplementedError
def _coerce_value(self, new_value):
'''
Coerces the input value into the internal representation for
the type. Datatypes may override this method.
'''
return new_value
def _post_init(self):
'''
Called at the end of :meth:`__init__` so that subclasses can tweak
their own setup without overriding the constructor.
'''
pass
def _require_context(self):
'''
Raises an exception if the context is not present
'''
if not self._context:
raise ContextRequired()
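# Minimal subclass sketch (illustration only, not part of the original
# module): it shows the contract concrete datatypes fulfil -- a type_name,
# a default value, a type check, and a to_op()/modified pair describing any
# locally staged mutations.
class _ExampleFlag(Datatype):
    type_name = 'flag'
    _type_error_msg = 'Example flags can only be booleans'
    def _default_value(self):
        return False
    def _check_type(self, new_value):
        return isinstance(new_value, bool)
    def to_op(self):
        # This sketch never stages a mutation, so there is nothing to send.
        return None
    @property
    def modified(self):
        return False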
| mit | -50,095,760,985,934,680 | 27.231214 | 75 | 0.577805 | false | 4.620624 | false | false | false |
nschaetti/pyTweetBot | pyTweetBot/tweet/RSSHunter.py | 1 | 2080 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import feedparser
from .Hunter import Hunter
from .Tweet import Tweet
import logging
from textblob import TextBlob
# Find new tweets from RSS streams
class RSSHunter(Hunter):
"""
Find new tweets from RSS streams
"""
# Constructor
def __init__(self, stream):
self._stream = stream
self._stream_url = stream['url']
logging.debug(u"Retreiving RSS stream {}".format(self._stream_url))
self._entries = feedparser.parse(self._stream_url)['entries']
self._hashtags = stream['hashtags'] if 'hashtags' in stream else list()
self._lang = stream['lang']
self._current = 0
# end __init__
# Get stream
def get_stream(self):
"""
Get stream
"""
return self._stream
# end get_stream
# To unicode
def __unicode__(self):
"""
To unicode
:return:
"""
return u"RSSHunter(stream={})".format(self._stream)
# end __unicode__
# Iterator
def __iter__(self):
"""
Iterator
:return:
"""
return self
# end __iter__
# Next
def next(self):
"""
Next
:return:
"""
if self._current >= len(self._entries):
raise StopIteration
# end if
# Found
found = False
while not found and self._current < len(self._entries):
# Get current entry
current_entry = self._entries[self._current]
# Analyze text
tweet_blob = TextBlob(current_entry['title'])
# Right language
if tweet_blob.detect_language() in self._lang:
found = True
# end if
# Next
self._current += 1
# end while
# Tweet generator
if found:
return Tweet(current_entry['title'], current_entry['links'][0]['href'], self._hashtags)
else:
raise StopIteration
# end if
# end next
# end RSSHunter
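# Illustrative use (feed URL is hypothetical; note that feedparser downloads
# and parses the stream already inside the constructor):
#   hunter = RSSHunter({'url': 'https://example.com/feed.xml',
#                       'hashtags': ['#news'], 'lang': ['en']})
#   for tweet in hunter:
#       print(tweet)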
| gpl-3.0 | -6,919,431,503,497,942,000 | 21.608696 | 99 | 0.522115 | false | 4.262295 | false | false | false |
Dymaxion00/KittenGroomer | fs/opt/groomer/functions_pier9.py | 1 | 2525 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from helpers import FileBase, KittenGroomerBase, main
printers = ['.STL', '.obj']
cnc = ['.nc', '.tap', '.gcode', '.dxf', '.stl', '.obj', '.iges', '.igs',
'.vrml', '.vrl', '.thing', '.step', '.stp', '.x3d']
shopbot = ['.ai', '.svg', '.dxf', '.dwg', '.eps']
omax = ['.ai', '.svg', '.dxf', '.dwg', '.eps', '.omx', '.obj']
epilog_laser = ['.ai', '.svg', '.dxf', '.dwg', '.eps']
metabeam = ['.dxf']
up = ['.upp', '.up3', '.stl', '.obj']
class FilePier9(FileBase):
def __init__(self, src_path, dst_path):
''' Init file object, set the extension '''
super(FilePier9, self).__init__(src_path, dst_path)
a, self.extension = os.path.splitext(self.src_path)
class KittenGroomerPier9(KittenGroomerBase):
def __init__(self, root_src=None, root_dst=None):
'''
Initialize the basics of the copy
'''
if root_src is None:
root_src = os.path.join(os.sep, 'media', 'src')
if root_dst is None:
root_dst = os.path.join(os.sep, 'media', 'dst')
super(KittenGroomerPier9, self).__init__(root_src, root_dst)
# The initial version will accept all the file extension for all the machines.
self.authorized_extensions = printers + cnc + shopbot + omax + epilog_laser + metabeam + up
def _print_log(self):
'''
Print the logs related to the current file being processed
'''
tmp_log = self.log_name.fields(**self.cur_file.log_details)
if not self.cur_file.log_details.get('valid'):
tmp_log.warning(self.cur_file.log_string)
else:
tmp_log.debug(self.cur_file.log_string)
def processdir(self):
'''
Main function doing the processing
'''
for srcpath in self._list_all_files(self.src_root_dir):
self.log_name.info('Processing {}', srcpath.replace(self.src_root_dir + '/', ''))
self.cur_file = FilePier9(srcpath, srcpath.replace(self.src_root_dir, self.dst_root_dir))
if self.cur_file.extension in self.authorized_extensions:
self.cur_file.add_log_details('valid', True)
self.cur_file.log_string = 'Expected extension: ' + self.cur_file.extension
self._safe_copy()
else:
self.cur_file.log_string = 'Bad extension: ' + self.cur_file.extension
self._print_log()
if __name__ == '__main__':
main(KittenGroomerPier9)
| bsd-3-clause | 8,387,122,660,357,544,000 | 36.132353 | 101 | 0.565149 | false | 3.216561 | false | false | false |
zqfan/leetcode | algorithms/227. Basic Calculator II/solution.py | 1 | 1095 | class Solution(object):
def calculate(self, s):
"""
:type s: str
:rtype: int
"""
queue = collections.deque()
method = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.div
}
pri = {
operator.add: 0,
operator.sub: 0,
operator.mul: 1,
operator.div: 1
}
i = 0; n = 0
while i < len(s):
if s[i].isdigit():
n = n * 10 + int(s[i])
elif s[i] in method:
while queue and pri[method[s[i]]] <= pri[queue[-1]]:
op = queue.pop()
n1 = queue.pop()
n = op(n1, n)
queue.append(n)
queue.append(method[s[i]])
n = 0
i += 1
queue.append(n)
while len(queue) >= 3:
n1 = queue.pop()
op = queue.pop()
n2 = queue.pop()
queue.append(op(n2, n1))
return queue.pop()
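# Usage sketch (added for illustration): operands and pending operators share
# one deque; a new operator first flushes queued operators of equal or higher
# precedence, so '*' and '/' bind before '+' and '-'.
if __name__ == '__main__':
    solver = Solution()
    assert solver.calculate("3+2*2") == 7
    assert solver.calculate(" 3+5 / 2 ") == 5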
| gpl-3.0 | 8,992,052,370,713,019,000 | 27.076923 | 68 | 0.368037 | false | 3.99635 | false | false | false |
RaghavPro/Runescape-Hiscores | hiscores/forms.py | 1 | 2377 | from django import forms
from django.core.exceptions import FieldError
from .models import Skills
class SearchForm(forms.Form):
search = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username', 'required': ''}),
max_length=12, label=False)
def clean_search(self):
search = self.cleaned_data['search']
try:
Skills.objects.get(user_name__iexact=search)
except Skills.DoesNotExist:
raise forms.ValidationError("Player does not exist.")
class CompareForm(forms.Form):
player1 = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username', 'required': ''}),
max_length=12, label=False)
player2 = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username', 'required': ''}),
max_length=12, label=False)
def clean_player1(self):
player1 = self.cleaned_data['player1']
try:
Skills.objects.get(user_name__iexact=player1)
except Skills.DoesNotExist:
raise forms.ValidationError("Player does not exist.")
return player1
def clean_player2(self):
player2 = self.cleaned_data['player2']
try:
Skills.objects.get(user_name__iexact=player2)
except Skills.DoesNotExist:
raise forms.ValidationError("Player does not exist.")
return player2
class SearchRankForm(forms.Form):
search_rank = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Rank', 'required': ''}),
max_length=30, label=False)
skill_exp = forms.CharField(widget=forms.HiddenInput())
def clean_search_rank(self):
rank = self.cleaned_data['search_rank']
skill_exp = self.data['skill_exp']
try:
rank = max(int(rank), 1) # Take to first rank if negative
user_name = Skills.objects.order_by("-%s" % skill_exp).values("user_name")[rank - 1]['user_name']
except IndexError:
raise forms.ValidationError("That rank does not exist.")
except FieldError:
raise forms.ValidationError("Oops, please try again.")
except ValueError:
raise forms.ValidationError("Enter a valid number.")
return user_name
| gpl-2.0 | 856,744,986,639,875,300 | 36.730159 | 109 | 0.63231 | false | 4.063248 | false | false | false |
kevinr/750book-web | 750book-web-env/lib/python2.7/site-packages/kombu/transport/beanstalk.py | 1 | 3295 | """
kombu.transport.beanstalk
=========================
Beanstalk transport.
:copyright: (c) 2010 - 2012 by David Ziegler.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
from Queue import Empty
from anyjson import loads, dumps
from beanstalkc import Connection, BeanstalkcException, SocketError
from . import virtual
from ..exceptions import StdChannelError
DEFAULT_PORT = 11300
__author__ = "David Ziegler <[email protected]>"
class Channel(virtual.Channel):
_client = None
def _parse_job(self, job):
item, dest = None, None
if job:
try:
item = loads(job.body)
dest = job.stats()["tube"]
except Exception:
job.bury()
else:
job.delete()
else:
raise Empty()
return item, dest
def _put(self, queue, message, **kwargs):
priority = message["properties"]["delivery_info"]["priority"]
self.client.use(queue)
self.client.put(dumps(message), priority=priority)
def _get(self, queue):
if queue not in self.client.watching():
self.client.watch(queue)
[self.client.ignore(active)
for active in self.client.watching()
if active != queue]
job = self.client.reserve(timeout=1)
item, dest = self._parse_job(job)
return item
def _get_many(self, queues, timeout=1):
# timeout of None will cause beanstalk to timeout waiting
# for a new request
if timeout is None:
timeout = 1
watching = self.client.watching()
[self.client.watch(active)
for active in queues
if active not in watching]
job = self.client.reserve(timeout=timeout)
return self._parse_job(job)
def _purge(self, queue):
if queue not in self.client.watching():
self.client.watch(queue)
[self.client.ignore(active)
for active in self.client.watching()
if active != queue]
count = 0
while 1:
job = self.client.reserve(timeout=1)
if job:
job.delete()
count += 1
else:
break
return count
def _size(self, queue):
return 0
def _open(self):
conninfo = self.connection.client
port = conninfo.port or DEFAULT_PORT
conn = Connection(host=conninfo.hostname, port=port)
conn.connect()
return conn
def close(self):
if self._client is not None:
return self._client.close()
super(Channel, self).close()
@property
def client(self):
if self._client is None:
self._client = self._open()
return self._client
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
default_port = DEFAULT_PORT
connection_errors = (socket.error,
SocketError,
IOError)
channel_errors = (StdChannelError,
socket.error,
IOError,
SocketError,
BeanstalkcException)
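# Example (sketch): this transport is normally selected through the broker
# URL rather than instantiated directly; host and port are placeholders for
# a running beanstalkd instance, e.g.:
#   from kombu import Connection
#   conn = Connection('beanstalk://localhost:11300')
#   conn.SimpleQueue('tasks').put({'hello': 'world'})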
| mit | 6,936,786,577,293,115,000 | 24.944882 | 69 | 0.553869 | false | 4.240669 | false | false | false |
atlefren/beerdatabase | breweryname_compare.py | 1 | 1755 | # -*- coding: utf-8 -*-
import json
from beertools import BreweryNameMatcher
def read_json(filename):
with open(filename, 'r') as infile:
return json.loads(infile.read())
def get_breweries_polet():
with open('data/polet.json', 'r') as infile:
data = json.loads(infile.read())
breweries = list(set([product['Produsent'] for product in data]))
return sorted(breweries), data
def get_breweries(beer_list, property_name):
return sorted(list(set([beer[property_name] for beer in beer_list])))
def get_breweries_ratebeer():
with open('data/ratebeer.json', 'r') as infile:
data = json.loads(infile.read())
breweries = list(set([product['brewery'] for product in data]))
return sorted(breweries)
def wrap_breweries(breweries):
return [{'id': index, 'name': brewery}
for index, brewery in enumerate(breweries)]
def compare_breweries(pol_data, breweries_rb):
breweries_pol = get_breweries(pol_data, 'Produsent')
# breweries_rb = wrap_breweries(get_breweries(rb_data, 'brewery'))
matcher = BreweryNameMatcher(breweries_rb)
with open('data/nomatch.txt', 'w') as nomatch:
with open('data/match.txt', 'w') as match_file:
for brewery in breweries_pol:
match = matcher.match_name(brewery)
if match is None:
nomatch.write(brewery.encode('utf8') + '\n')
else:
string = '%s: %s' % (brewery, match['name'])
match_file.write(string.encode('utf8') + '\n')
if __name__ == '__main__':
pol_data = read_json('data/polet.json')
rb_breweries = read_json('data/rb_breweries.json')
compare_breweries(pol_data, rb_breweries)
| mit | 8,518,859,701,575,844,000 | 30.909091 | 73 | 0.614815 | false | 3.179348 | false | false | false |
ddico/odoo | addons/mrp/models/mrp_workorder.py | 1 | 39609 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from collections import defaultdict
import json
from odoo import api, fields, models, _, SUPERUSER_ID
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_round, format_datetime
class MrpWorkorder(models.Model):
_name = 'mrp.workorder'
_description = 'Work Order'
_inherit = ['mail.thread', 'mail.activity.mixin']
def _read_group_workcenter_id(self, workcenters, domain, order):
workcenter_ids = self.env.context.get('default_workcenter_id')
if not workcenter_ids:
workcenter_ids = workcenters._search([], order=order, access_rights_uid=SUPERUSER_ID)
return workcenters.browse(workcenter_ids)
name = fields.Char(
'Work Order', required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
workcenter_id = fields.Many2one(
'mrp.workcenter', 'Work Center', required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)], 'progress': [('readonly', True)]},
group_expand='_read_group_workcenter_id', check_company=True)
working_state = fields.Selection(
string='Workcenter Status', related='workcenter_id.working_state', readonly=False,
help='Technical: used in views only')
product_id = fields.Many2one(related='production_id.product_id', readonly=True, store=True, check_company=True)
product_tracking = fields.Selection(related="product_id.tracking")
product_uom_id = fields.Many2one('uom.uom', 'Unit of Measure', required=True, readonly=True)
use_create_components_lots = fields.Boolean(related="production_id.picking_type_id.use_create_components_lots")
production_id = fields.Many2one('mrp.production', 'Manufacturing Order', required=True, check_company=True)
production_availability = fields.Selection(
string='Stock Availability', readonly=True,
related='production_id.reservation_state', store=True,
help='Technical: used in views and domains only.')
production_state = fields.Selection(
string='Production State', readonly=True,
related='production_id.state',
help='Technical: used in views only.')
production_bom_id = fields.Many2one('mrp.bom', related='production_id.bom_id')
qty_production = fields.Float('Original Production Quantity', readonly=True, related='production_id.product_qty')
company_id = fields.Many2one(related='production_id.company_id')
qty_producing = fields.Float(
compute='_compute_qty_producing', inverse='_set_qty_producing',
string='Currently Produced Quantity', digits='Product Unit of Measure')
qty_remaining = fields.Float('Quantity To Be Produced', compute='_compute_qty_remaining', digits='Product Unit of Measure')
qty_produced = fields.Float(
'Quantity', default=0.0,
readonly=True,
digits='Product Unit of Measure',
copy=False,
help="The number of products already handled by this work order")
is_produced = fields.Boolean(string="Has Been Produced",
compute='_compute_is_produced')
state = fields.Selection([
('pending', 'Waiting for another WO'),
('ready', 'Ready'),
('progress', 'In Progress'),
('done', 'Finished'),
('cancel', 'Cancelled')], string='Status',
default='pending', copy=False, readonly=True)
leave_id = fields.Many2one(
'resource.calendar.leaves',
help='Slot into workcenter calendar once planned',
check_company=True, copy=False)
date_planned_start = fields.Datetime(
'Scheduled Start Date',
compute='_compute_dates_planned',
inverse='_set_dates_planned',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
store=True, tracking=True, copy=False)
date_planned_finished = fields.Datetime(
'Scheduled End Date',
compute='_compute_dates_planned',
inverse='_set_dates_planned',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
store=True, tracking=True, copy=False)
date_start = fields.Datetime(
'Start Date', copy=False,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
date_finished = fields.Datetime(
'End Date', copy=False,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
duration_expected = fields.Float(
'Expected Duration', digits=(16, 2), default=60.0,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Expected duration (in minutes)")
duration = fields.Float(
'Real Duration', compute='_compute_duration',
readonly=True, store=True)
duration_unit = fields.Float(
'Duration Per Unit', compute='_compute_duration',
readonly=True, store=True)
duration_percent = fields.Integer(
'Duration Deviation (%)', compute='_compute_duration',
group_operator="avg", readonly=True, store=True)
progress = fields.Float('Progress Done (%)', digits=(16, 2), compute='_compute_progress')
operation_id = fields.Many2one(
'mrp.routing.workcenter', 'Operation', check_company=True)
# Should be used differently as BoM can change in the meantime
worksheet = fields.Binary(
'Worksheet', related='operation_id.worksheet', readonly=True)
worksheet_type = fields.Selection(
string='Worksheet Type', related='operation_id.worksheet_type', readonly=True)
worksheet_google_slide = fields.Char(
'Worksheet URL', related='operation_id.worksheet_google_slide', readonly=True)
operation_note = fields.Text("Description", related='operation_id.note', readonly=True)
move_raw_ids = fields.One2many(
'stock.move', 'workorder_id', 'Raw Moves',
domain=[('raw_material_production_id', '!=', False), ('production_id', '=', False)])
move_finished_ids = fields.One2many(
'stock.move', 'workorder_id', 'Finished Moves',
domain=[('raw_material_production_id', '=', False), ('production_id', '!=', False)])
move_line_ids = fields.One2many(
'stock.move.line', 'workorder_id', 'Moves to Track',
help="Inventory moves for which you must scan a lot number at this work order")
finished_lot_id = fields.Many2one(
'stock.production.lot', string='Lot/Serial Number', compute='_compute_finished_lot_id',
inverse='_set_finished_lot_id', domain="[('product_id', '=', product_id), ('company_id', '=', company_id)]",
check_company=True)
time_ids = fields.One2many(
'mrp.workcenter.productivity', 'workorder_id', copy=False)
is_user_working = fields.Boolean(
'Is the Current User Working', compute='_compute_working_users',
help="Technical field indicating whether the current user is working. ")
working_user_ids = fields.One2many('res.users', string='Working user on this work order.', compute='_compute_working_users')
last_working_user_id = fields.One2many('res.users', string='Last user that worked on this work order.', compute='_compute_working_users')
next_work_order_id = fields.Many2one('mrp.workorder', "Next Work Order", check_company=True)
scrap_ids = fields.One2many('stock.scrap', 'workorder_id')
scrap_count = fields.Integer(compute='_compute_scrap_move_count', string='Scrap Move')
production_date = fields.Datetime('Production Date', related='production_id.date_planned_start', store=True, readonly=False)
json_popover = fields.Char('Popover Data JSON', compute='_compute_json_popover')
show_json_popover = fields.Boolean('Show Popover?', compute='_compute_json_popover')
consumption = fields.Selection([
('strict', 'Strict'),
('warning', 'Warning'),
('flexible', 'Flexible')],
required=True,
)
@api.depends('production_state', 'date_planned_start', 'date_planned_finished')
def _compute_json_popover(self):
previous_wo_data = self.env['mrp.workorder'].read_group(
[('next_work_order_id', 'in', self.ids)],
['ids:array_agg(id)', 'date_planned_start:max', 'date_planned_finished:max'],
['next_work_order_id'])
previous_wo_dict = dict([(x['next_work_order_id'][0], {
'id': x['ids'][0],
'date_planned_start': x['date_planned_start'],
'date_planned_finished': x['date_planned_finished']})
for x in previous_wo_data])
if self.ids:
conflicted_dict = self._get_conflicted_workorder_ids()
for wo in self:
infos = []
if not wo.date_planned_start or not wo.date_planned_finished or not wo.ids:
wo.show_json_popover = False
wo.json_popover = False
continue
if wo.state in ['pending', 'ready']:
previous_wo = previous_wo_dict.get(wo.id)
prev_start = previous_wo and previous_wo['date_planned_start'] or False
prev_finished = previous_wo and previous_wo['date_planned_finished'] or False
if wo.state == 'pending' and prev_start and not (prev_start > wo.date_planned_start):
infos.append({
'color': 'text-primary',
'msg': _("Waiting the previous work order, planned from %(start)s to %(end)s",
start=format_datetime(self.env, prev_start, dt_format=False),
end=format_datetime(self.env, prev_finished, dt_format=False))
})
if wo.date_planned_finished < fields.Datetime.now():
infos.append({
'color': 'text-warning',
'msg': _("The work order should have already been processed.")
})
if prev_start and prev_start > wo.date_planned_start:
infos.append({
'color': 'text-danger',
'msg': _("Scheduled before the previous work order, planned from %(start)s to %(end)s",
start=format_datetime(self.env, prev_start, dt_format=False),
end=format_datetime(self.env, prev_finished, dt_format=False))
})
if conflicted_dict.get(wo.id):
infos.append({
'color': 'text-danger',
'msg': _("Planned at the same time than other workorder(s) at %s", wo.workcenter_id.display_name)
})
color_icon = infos and infos[-1]['color'] or False
wo.show_json_popover = bool(color_icon)
wo.json_popover = json.dumps({
'infos': infos,
'color': color_icon,
'icon': 'fa-exclamation-triangle' if color_icon in ['text-warning', 'text-danger'] else 'fa-info-circle',
'replan': color_icon not in [False, 'text-primary']
})
@api.depends('production_id.lot_producing_id')
def _compute_finished_lot_id(self):
for workorder in self:
workorder.finished_lot_id = workorder.production_id.lot_producing_id
def _set_finished_lot_id(self):
for workorder in self:
workorder.production_id.lot_producing_id = workorder.finished_lot_id
@api.depends('production_id.qty_producing')
def _compute_qty_producing(self):
for workorder in self:
workorder.qty_producing = workorder.production_id.qty_producing
def _set_qty_producing(self):
for workorder in self:
workorder.production_id.qty_producing = workorder.qty_producing
workorder.production_id._set_qty_producing()
    # Both `date_planned_start` and `date_planned_finished` are related fields on `leave_id`. Let's say
    # we slide a workorder on a gantt view: a single call to write is made with both
    # fields changed. As the ORM doesn't batch the write on related fields and instead
    # makes multiple calls, the constraint check_dates() is raised.
    # That's why the compute and set methods are needed: to ensure the dates are updated
    # at the same time.
@api.depends('leave_id')
def _compute_dates_planned(self):
for workorder in self:
workorder.date_planned_start = workorder.leave_id.date_from
workorder.date_planned_finished = workorder.leave_id.date_to
def _set_dates_planned(self):
date_from = self[0].date_planned_start
date_to = self[0].date_planned_finished
self.mapped('leave_id').write({
'date_from': date_from,
'date_to': date_to,
})
def name_get(self):
res = []
for wo in self:
if len(wo.production_id.workorder_ids) == 1:
res.append((wo.id, "%s - %s - %s" % (wo.production_id.name, wo.product_id.name, wo.name)))
else:
res.append((wo.id, "%s - %s - %s - %s" % (wo.production_id.workorder_ids.ids.index(wo.id) + 1, wo.production_id.name, wo.product_id.name, wo.name)))
return res
def unlink(self):
# Removes references to workorder to avoid Validation Error
(self.mapped('move_raw_ids') | self.mapped('move_finished_ids')).write({'workorder_id': False})
self.mapped('leave_id').unlink()
mo_dirty = self.production_id.filtered(lambda mo: mo.state in ("confirmed", "progress", "to_close"))
res = super().unlink()
# We need to go through `_action_confirm` for all workorders of the current productions to
# make sure the links between them are correct (`next_work_order_id` could be obsolete now).
mo_dirty.workorder_ids._action_confirm()
return res
@api.depends('production_id.product_qty', 'qty_produced', 'production_id.product_uom_id')
def _compute_is_produced(self):
self.is_produced = False
for order in self.filtered(lambda p: p.production_id and p.production_id.product_uom_id):
rounding = order.production_id.product_uom_id.rounding
order.is_produced = float_compare(order.qty_produced, order.production_id.product_qty, precision_rounding=rounding) >= 0
@api.depends('time_ids.duration', 'qty_produced')
def _compute_duration(self):
for order in self:
order.duration = sum(order.time_ids.mapped('duration'))
order.duration_unit = round(order.duration / max(order.qty_produced, 1), 2) # rounding 2 because it is a time
if order.duration_expected:
order.duration_percent = 100 * (order.duration_expected - order.duration) / order.duration_expected
else:
order.duration_percent = 0
@api.depends('duration', 'duration_expected', 'state')
def _compute_progress(self):
for order in self:
if order.state == 'done':
order.progress = 100
elif order.duration_expected:
order.progress = order.duration * 100 / order.duration_expected
else:
order.progress = 0
def _compute_working_users(self):
""" Checks whether the current user is working, all the users currently working and the last user that worked. """
for order in self:
order.working_user_ids = [(4, order.id) for order in order.time_ids.filtered(lambda time: not time.date_end).sorted('date_start').mapped('user_id')]
if order.working_user_ids:
order.last_working_user_id = order.working_user_ids[-1]
elif order.time_ids:
order.last_working_user_id = order.time_ids.sorted('date_end')[-1].user_id
else:
order.last_working_user_id = False
if order.time_ids.filtered(lambda x: (x.user_id.id == self.env.user.id) and (not x.date_end) and (x.loss_type in ('productive', 'performance'))):
order.is_user_working = True
else:
order.is_user_working = False
def _compute_scrap_move_count(self):
data = self.env['stock.scrap'].read_group([('workorder_id', 'in', self.ids)], ['workorder_id'], ['workorder_id'])
count_data = dict((item['workorder_id'][0], item['workorder_id_count']) for item in data)
for workorder in self:
workorder.scrap_count = count_data.get(workorder.id, 0)
@api.onchange('date_planned_finished')
def _onchange_date_planned_finished(self):
if self.date_planned_start and self.date_planned_finished:
diff = self.date_planned_finished - self.date_planned_start
self.duration_expected = diff.total_seconds() / 60
@api.onchange('operation_id')
def _onchange_operation_id(self):
if self.operation_id:
self.name = self.operation_id.name
self.workcenter_id = self.operation_id.workcenter_id.id
@api.onchange('date_planned_start', 'duration_expected')
def _onchange_date_planned_start(self):
if self.date_planned_start and self.duration_expected:
self.date_planned_finished = self.date_planned_start + relativedelta(minutes=self.duration_expected)
@api.onchange('operation_id', 'workcenter_id', 'qty_production')
def _onchange_expected_duration(self):
self.duration_expected = self._get_duration_expected()
def write(self, values):
if 'production_id' in values:
raise UserError(_('You cannot link this work order to another manufacturing order.'))
if 'workcenter_id' in values:
for workorder in self:
if workorder.workcenter_id.id != values['workcenter_id']:
if workorder.state in ('progress', 'done', 'cancel'):
raise UserError(_('You cannot change the workcenter of a work order that is in progress or done.'))
workorder.leave_id.resource_id = self.env['mrp.workcenter'].browse(values['workcenter_id']).resource_id
if any(k not in ['time_ids', 'duration_expected', 'next_work_order_id'] for k in values.keys()) and any(workorder.state == 'done' for workorder in self):
raise UserError(_('You can not change the finished work order.'))
if 'date_planned_start' in values or 'date_planned_finished' in values:
for workorder in self:
start_date = fields.Datetime.to_datetime(values.get('date_planned_start')) or workorder.date_planned_start
end_date = fields.Datetime.to_datetime(values.get('date_planned_finished')) or workorder.date_planned_finished
if start_date and end_date and start_date > end_date:
raise UserError(_('The planned end date of the work order cannot be prior to the planned start date, please correct this to save the work order.'))
# Update MO dates if the start date of the first WO or the
                # finished date of the last WO is updated.
if workorder == workorder.production_id.workorder_ids[0] and 'date_planned_start' in values:
if values['date_planned_start']:
workorder.production_id.with_context(force_date=True).write({
'date_planned_start': fields.Datetime.to_datetime(values['date_planned_start'])
})
if workorder == workorder.production_id.workorder_ids[-1] and 'date_planned_finished' in values:
if values['date_planned_finished']:
workorder.production_id.with_context(force_date=True).write({
'date_planned_finished': fields.Datetime.to_datetime(values['date_planned_finished'])
})
return super(MrpWorkorder, self).write(values)
@api.model_create_multi
def create(self, values):
res = super().create(values)
# Auto-confirm manually added workorders.
# We need to go through `_action_confirm` for all workorders of the current productions to
# make sure the links between them are correct.
to_confirm = res.filtered(lambda wo: wo.production_id.state in ("confirmed", "progress", "to_close"))
to_confirm = to_confirm.production_id.workorder_ids
to_confirm._action_confirm()
return res
def _action_confirm(self):
workorders_by_production = defaultdict(lambda: self.env['mrp.workorder'])
for workorder in self:
workorders_by_production[workorder.production_id] |= workorder
for production, workorders in workorders_by_production.items():
workorders_by_bom = defaultdict(lambda: self.env['mrp.workorder'])
bom = self.env['mrp.bom']
moves = production.move_raw_ids | production.move_finished_ids
for workorder in self:
if workorder.operation_id.bom_id:
bom = workorder.operation_id.bom_id
if not bom:
bom = workorder.production_id.bom_id
previous_workorder = workorders_by_bom[bom][-1:]
previous_workorder.next_work_order_id = workorder.id
workorders_by_bom[bom] |= workorder
moves.filtered(lambda m: m.operation_id == workorder.operation_id).write({
'workorder_id': workorder.id
})
exploded_boms, dummy = production.bom_id.explode(production.product_id, 1, picking_type=production.bom_id.picking_type_id)
exploded_boms = {b[0]: b[1] for b in exploded_boms}
for move in moves:
if move.workorder_id:
continue
bom = move.bom_line_id.bom_id
while bom and bom not in workorders_by_bom:
bom_data = exploded_boms.get(bom, {})
bom = bom_data.get('parent_line') and bom_data['parent_line'].bom_id or False
if bom in workorders_by_bom:
move.write({
'workorder_id': workorders_by_bom[bom][-1:].id
})
else:
move.write({
'workorder_id': workorders_by_bom[production.bom_id][-1:].id
})
for workorders in workorders_by_bom.values():
if workorders[0].state == 'pending':
workorders[0].state = 'ready'
for workorder in workorders:
workorder._start_nextworkorder()
def _get_byproduct_move_to_update(self):
return self.production_id.move_finished_ids.filtered(lambda x: (x.product_id.id != self.production_id.product_id.id) and (x.state not in ('done', 'cancel')))
def _start_nextworkorder(self):
rounding = self.product_id.uom_id.rounding
if self.next_work_order_id.state == 'pending' and (
(self.operation_id.batch == 'no' and
float_compare(self.qty_production, self.qty_produced, precision_rounding=rounding) <= 0) or
(self.operation_id.batch == 'yes' and
float_compare(self.operation_id.batch_size, self.qty_produced, precision_rounding=rounding) <= 0)):
self.next_work_order_id.state = 'ready'
if self.state == 'done' and self.next_work_order_id.state == 'pending':
self.next_work_order_id.state = 'ready'
@api.model
def gantt_unavailability(self, start_date, end_date, scale, group_bys=None, rows=None):
"""Get unavailabilities data to display in the Gantt view."""
workcenter_ids = set()
def traverse_inplace(func, row, **kargs):
res = func(row, **kargs)
if res:
kargs.update(res)
for row in row.get('rows'):
traverse_inplace(func, row, **kargs)
def search_workcenter_ids(row):
if row.get('groupedBy') and row.get('groupedBy')[0] == 'workcenter_id' and row.get('resId'):
workcenter_ids.add(row.get('resId'))
for row in rows:
traverse_inplace(search_workcenter_ids, row)
start_datetime = fields.Datetime.to_datetime(start_date)
end_datetime = fields.Datetime.to_datetime(end_date)
workcenters = self.env['mrp.workcenter'].browse(workcenter_ids)
unavailability_mapping = workcenters._get_unavailability_intervals(start_datetime, end_datetime)
        # Only notable intervals (spanning more than one cell) are sent to the front-end (avoid sending useless information)
cell_dt = (scale in ['day', 'week'] and timedelta(hours=1)) or (scale == 'month' and timedelta(days=1)) or timedelta(days=28)
def add_unavailability(row, workcenter_id=None):
if row.get('groupedBy') and row.get('groupedBy')[0] == 'workcenter_id' and row.get('resId'):
workcenter_id = row.get('resId')
if workcenter_id:
notable_intervals = filter(lambda interval: interval[1] - interval[0] >= cell_dt, unavailability_mapping[workcenter_id])
row['unavailabilities'] = [{'start': interval[0], 'stop': interval[1]} for interval in notable_intervals]
return {'workcenter_id': workcenter_id}
for row in rows:
traverse_inplace(add_unavailability, row)
return rows
def button_start(self):
self.ensure_one()
# As button_start is automatically called in the new view
if self.state in ('done', 'cancel'):
return True
if self.product_tracking == 'serial':
self.qty_producing = 1.0
# Need a loss in case of the real time exceeding the expected
timeline = self.env['mrp.workcenter.productivity']
if not self.duration_expected or self.duration < self.duration_expected:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type','=','productive')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one productivity loss in the category 'Productivity'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
else:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type','=','performance')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
if self.production_id.state != 'progress':
self.production_id.write({
'date_start': datetime.now(),
})
timeline.create({
'workorder_id': self.id,
'workcenter_id': self.workcenter_id.id,
'description': _('Time Tracking: ') + self.env.user.name,
'loss_id': loss_id[0].id,
'date_start': datetime.now(),
'user_id': self.env.user.id, # FIXME sle: can be inconsistent with company_id
'company_id': self.company_id.id,
})
if self.state == 'progress':
return True
start_date = datetime.now()
vals = {
'state': 'progress',
'date_start': start_date,
}
if not self.leave_id:
leave = self.env['resource.calendar.leaves'].create({
'name': self.display_name,
'calendar_id': self.workcenter_id.resource_calendar_id.id,
'date_from': start_date,
'date_to': start_date + relativedelta(minutes=self.duration_expected),
'resource_id': self.workcenter_id.resource_id.id,
'time_type': 'other'
})
vals['leave_id'] = leave.id
return self.write(vals)
else:
vals['date_planned_start'] = start_date
if self.date_planned_finished < start_date:
vals['date_planned_finished'] = start_date
return self.write(vals)
def button_finish(self):
end_date = datetime.now()
for workorder in self:
if workorder.state in ('done', 'cancel'):
continue
workorder.end_all()
vals = {
'state': 'done',
'date_finished': end_date,
'date_planned_finished': end_date
}
if not workorder.date_start:
vals['date_start'] = end_date
if not workorder.date_planned_start or end_date < workorder.date_planned_start:
vals['date_planned_start'] = end_date
workorder.write(vals)
workorder._start_nextworkorder()
return True
def end_previous(self, doall=False):
"""
@param: doall: This will close all open time lines on the open work orders when doall = True, otherwise
        only the ones of the current user
"""
# TDE CLEANME
timeline_obj = self.env['mrp.workcenter.productivity']
domain = [('workorder_id', 'in', self.ids), ('date_end', '=', False)]
if not doall:
domain.append(('user_id', '=', self.env.user.id))
not_productive_timelines = timeline_obj.browse()
for timeline in timeline_obj.search(domain, limit=None if doall else 1):
wo = timeline.workorder_id
if wo.duration_expected <= wo.duration:
if timeline.loss_type == 'productive':
not_productive_timelines += timeline
timeline.write({'date_end': fields.Datetime.now()})
else:
maxdate = fields.Datetime.from_string(timeline.date_start) + relativedelta(minutes=wo.duration_expected - wo.duration)
enddate = datetime.now()
if maxdate > enddate:
timeline.write({'date_end': enddate})
else:
timeline.write({'date_end': maxdate})
not_productive_timelines += timeline.copy({'date_start': maxdate, 'date_end': enddate})
if not_productive_timelines:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type', '=', 'performance')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one unactive productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
not_productive_timelines.write({'loss_id': loss_id.id})
return True
def end_all(self):
return self.end_previous(doall=True)
def button_pending(self):
self.end_previous()
return True
def button_unblock(self):
for order in self:
order.workcenter_id.unblock()
return True
def action_cancel(self):
self.leave_id.unlink()
return self.write({'state': 'cancel'})
def action_replan(self):
"""Replan a work order.
It actually replans every "ready" or "pending"
work orders of the linked manufacturing orders.
"""
for production in self.production_id:
production._plan_workorders(replan=True)
return True
def button_done(self):
if any([x.state in ('done', 'cancel') for x in self]):
raise UserError(_('A Manufacturing Order is already done or cancelled.'))
self.end_all()
end_date = datetime.now()
return self.write({
'state': 'done',
'date_finished': end_date,
'date_planned_finished': end_date,
})
def button_scrap(self):
self.ensure_one()
return {
'name': _('Scrap'),
'view_mode': 'form',
'res_model': 'stock.scrap',
'view_id': self.env.ref('stock.stock_scrap_form_view2').id,
'type': 'ir.actions.act_window',
'context': {'default_company_id': self.production_id.company_id.id,
'default_workorder_id': self.id,
'default_production_id': self.production_id.id,
'product_ids': (self.production_id.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) | self.production_id.move_finished_ids.filtered(lambda x: x.state == 'done')).mapped('product_id').ids},
'target': 'new',
}
def action_see_move_scrap(self):
self.ensure_one()
action = self.env.ref('stock.action_stock_scrap').read()[0]
action['domain'] = [('workorder_id', '=', self.id)]
return action
def action_open_wizard(self):
self.ensure_one()
action = self.env.ref('mrp.mrp_workorder_mrp_production_form').read()[0]
action['res_id'] = self.id
return action
@api.depends('qty_production', 'qty_produced')
def _compute_qty_remaining(self):
for wo in self:
wo.qty_remaining = float_round(wo.qty_production - wo.qty_produced, precision_rounding=wo.production_id.product_uom_id.rounding)
def _get_duration_expected(self, alternative_workcenter=False):
self.ensure_one()
if not self.workcenter_id:
return False
qty_production = self.production_id.product_uom_id._compute_quantity(self.qty_production, self.production_id.product_id.uom_id)
cycle_number = float_round(qty_production / self.workcenter_id.capacity, precision_digits=0, rounding_method='UP')
if alternative_workcenter:
# TODO : find a better alternative : the settings of workcenter can change
duration_expected_working = (self.duration_expected - self.workcenter_id.time_start - self.workcenter_id.time_stop) * self.workcenter_id.time_efficiency / (100.0 * cycle_number)
if duration_expected_working < 0:
duration_expected_working = 0
return alternative_workcenter.time_start + alternative_workcenter.time_stop + cycle_number * duration_expected_working * 100.0 / alternative_workcenter.time_efficiency
time_cycle = self.operation_id and self.operation_id.time_cycle or 60.0
return self.workcenter_id.time_start + self.workcenter_id.time_stop + cycle_number * time_cycle * 100.0 / self.workcenter_id.time_efficiency
def _get_conflicted_workorder_ids(self):
"""Get conlicted workorder(s) with self.
Conflict means having two workorders in the same time in the same workcenter.
:return: defaultdict with key as workorder id of self and value as related conflicted workorder
"""
self.flush(['state', 'date_planned_start', 'date_planned_finished', 'workcenter_id'])
sql = """
SELECT wo1.id, wo2.id
FROM mrp_workorder wo1, mrp_workorder wo2
WHERE
wo1.id IN %s
AND wo1.state IN ('pending','ready')
AND wo2.state IN ('pending','ready')
AND wo1.id != wo2.id
AND wo1.workcenter_id = wo2.workcenter_id
AND (DATE_TRUNC('second', wo2.date_planned_start), DATE_TRUNC('second', wo2.date_planned_finished))
OVERLAPS (DATE_TRUNC('second', wo1.date_planned_start), DATE_TRUNC('second', wo1.date_planned_finished))
"""
self.env.cr.execute(sql, [tuple(self.ids)])
res = defaultdict(list)
for wo1, wo2 in self.env.cr.fetchall():
res[wo1].append(wo2)
return res
@api.model
def _prepare_component_quantity(self, move, qty_producing):
""" helper that computes quantity to consume (or to create in case of byproduct)
depending on the quantity producing and the move's unit factor"""
if move.product_id.tracking == 'serial':
uom = move.product_id.uom_id
else:
uom = move.product_uom
return move.product_uom._compute_quantity(
qty_producing * move.unit_factor,
uom,
round=False
)
def _update_finished_move(self):
""" Update the finished move & move lines in order to set the finished
        product lot on it as well as the produced quantity. This method gets the
information either from the last workorder or from the Produce wizard."""
production_move = self.production_id.move_finished_ids.filtered(
lambda move: move.product_id == self.product_id and
move.state not in ('done', 'cancel')
)
if production_move and production_move.product_id.tracking != 'none':
if not self.finished_lot_id:
raise UserError(_('You need to provide a lot for the finished product.'))
move_line = production_move.move_line_ids.filtered(
lambda line: line.lot_id.id == self.finished_lot_id.id
)
if move_line:
if self.product_id.tracking == 'serial':
raise UserError(_('You cannot produce the same serial number twice.'))
move_line.product_uom_qty += self.qty_producing
move_line.qty_done += self.qty_producing
else:
location_dest_id = production_move.location_dest_id._get_putaway_strategy(self.product_id).id or production_move.location_dest_id.id
move_line.create({
'move_id': production_move.id,
'product_id': production_move.product_id.id,
'lot_id': self.finished_lot_id.id,
'product_uom_qty': self.qty_producing,
'product_uom_id': self.product_uom_id.id,
'qty_done': self.qty_producing,
'location_id': production_move.location_id.id,
'location_dest_id': location_dest_id,
})
else:
rounding = production_move.product_uom.rounding
production_move._set_quantity_done(
float_round(self.qty_producing, precision_rounding=rounding)
)
def _strict_consumption_check(self):
if self.consumption == 'strict':
for move in self.move_raw_ids:
qty_done = 0.0
for line in move.move_line_ids:
qty_done += line.product_uom_id._compute_quantity(line.qty_done, move.product_uom)
rounding = move.product_uom_id.rounding
if float_compare(qty_done, move.product_uom_qty, precision_rounding=rounding) != 0:
raise UserError(_('You should consume the quantity of %s defined in the BoM. If you want to consume more or less components, change the consumption setting on the BoM.', move.product_id.name))
def _check_sn_uniqueness(self):
""" Alert the user if the serial number as already been produced """
if self.product_tracking == 'serial' and self.finished_lot_id:
sml = self.env['stock.move.line'].search_count([
('lot_id', '=', self.finished_lot_id.id),
('location_id.usage', '=', 'production'),
('qty_done', '=', 1),
('state', '=', 'done')
])
if sml:
raise UserError(_('This serial number for product %s has already been produced', self.product_id.name))
| agpl-3.0 | 3,872,464,057,137,297,000 | 50.708877 | 230 | 0.601883 | false | 3.934148 | false | false | false |
blakev/suds | suds/__init__.py | 1 | 4404 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Suds is a lightweight SOAP python client that provides a
service proxy for Web Services.
"""
import os
import sys
#
# Project properties
#
__version__ = '0.4.unomena.2'
__build__="GA R699-20100913"
#
# Exceptions
#
class MethodNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Method not found: '%s'" % name)
class PortNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Port not found: '%s'" % name)
class ServiceNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Service not found: '%s'" % name)
class TypeNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Type not found: '%s'" % tostr(name))
class BuildError(Exception):
msg = \
"""
    An error occurred while building an instance of (%s). As a result
the object you requested could not be constructed. It is recommended
that you construct the type manually using a Suds object.
Please open a ticket with a description of this error.
Reason: %s
"""
def __init__(self, name, exception):
Exception.__init__(self, BuildError.msg % (name, exception))
class SoapHeadersNotPermitted(Exception):
msg = \
"""
Method (%s) was invoked with SOAP headers. The WSDL does not
define SOAP headers for this method. Retry without the soapheaders
keyword argument.
"""
def __init__(self, name):
Exception.__init__(self, self.msg % name)
class WebFault(Exception):
def __init__(self, fault, document):
if hasattr(fault, 'faultstring'):
Exception.__init__(self, "Server raised fault: '%s'" % fault.faultstring)
self.fault = fault
self.document = document
#
# Logging
#
class Repr:
def __init__(self, x):
self.x = x
def __str__(self):
return repr(self.x)
#
# Utility
#
def tostr(object, encoding=None):
""" get a unicode safe string representation of an object """
if isinstance(object, basestring):
if encoding is None:
return object
else:
return object.encode(encoding)
if isinstance(object, tuple):
s = ['(']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(')')
return ''.join(s)
if isinstance(object, list):
s = ['[']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(']')
return ''.join(s)
if isinstance(object, dict):
s = ['{']
for item in object.items():
if isinstance(item[0], basestring):
s.append(item[0])
else:
s.append(tostr(item[0]))
s.append(' = ')
if isinstance(item[1], basestring):
s.append(item[1])
else:
s.append(tostr(item[1]))
s.append(', ')
s.append('}')
return ''.join(s)
try:
return unicode(object)
except:
return str(object)
class null:
"""
The I{null} object.
Used to pass NULL for optional XML nodes.
"""
pass
def objid(obj):
return obj.__class__.__name__\
+':'+hex(id(obj))
import client
| lgpl-3.0 | -8,721,852,589,140,907,000 | 27.597403 | 85 | 0.581971 | false | 3.97832 | false | false | false |
meprogrammerguy/pyMadness | scrape_stats.py | 1 | 2098 | #!/usr/bin/env python3
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import html5lib
import pdb
from collections import OrderedDict
import json
import csv
import contextlib
url = "https://kenpom.com/index.php"
#url = "https://kenpom.com/index.php?y=2017" #past year testing override
print ("Scrape Statistics Tool")
print ("**************************")
print ("data is from {0}".format(url))
print ("**************************")
with contextlib.closing(urlopen(url)) as page:
soup = BeautifulSoup(page, "html5lib")
ratings_table=soup.find('table', id='ratings-table')
IDX=[]
A=[]
B=[]
C=[]
D=[]
E=[]
F=[]
G=[]
H=[]
I=[]
J=[]
K=[]
L=[]
M=[]
index=0
for row in ratings_table.findAll("tr"):
col=row.findAll('td')
if len(col)>0:
index+=1
IDX.append(index)
A.append(col[0].find(text=True))
B.append(col[1].find(text=True))
C.append(col[2].find(text=True))
D.append(col[3].find(text=True))
E.append(col[4].find(text=True))
F.append(col[5].find(text=True))
G.append(col[7].find(text=True))
H.append(col[9].find(text=True))
I.append(col[11].find(text=True))
J.append(col[13].find(text=True))
K.append(col[15].find(text=True))
L.append(col[17].find(text=True))
M.append(col[19].find(text=True))
df=pd.DataFrame(IDX,columns=['Index'])
df['Rank']=A
df['Team']=B
df['Conf']=C
df['W-L']=D
df['AdjEM']=E
df['AdjO']=F
df['AdjD']=G
df['AdjT']=H
df['Luck']=I
df['AdjEMSOS']=J
df['OppOSOS']=K
df['OppDSOS']=L
df['AdjEMNCSOS']=M
with open('stats.json', 'w') as f:
f.write(df.to_json(orient='index'))
with open("stats.json") as stats_json:
dict_stats = json.load(stats_json, object_pairs_hook=OrderedDict)
stats_sheet = open('stats.csv', 'w', newline='')
csvwriter = csv.writer(stats_sheet)
count = 0
for row in dict_stats.values():
#pdb.set_trace()
if (count == 0):
header = row.keys()
csvwriter.writerow(header)
count += 1
csvwriter.writerow(row.values())
stats_sheet.close()
print ("done.")
| mit | -3,005,067,790,104,389,600 | 22.054945 | 72 | 0.609152 | false | 2.838972 | false | false | false |
berkeley-stat159/project-alpha | code/utils/scripts/glm_script.py | 1 | 3957 | """ Script for GLM functions.
Run with:
python glm_script.py
"""
# Loading modules.
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import os
import sys
# Relative paths to subject 1 data.
project_path = "../../../"
pathtodata = project_path + "data/ds009/sub001/"
condition_location = pathtodata+"model/model001/onsets/task001_run001/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
sys.path.append(location_of_functions)
# Load events2neural from the stimuli module.
from stimuli import events2neural
from event_related_fMRI_functions import hrf_single, convolution_specialized
# Load our GLM functions.
from glm import glm, glm_diagnostics, glm_multiple
# Load the image data for subject 1.
img = nib.load(pathtodata+"BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data[...,6:] # Knock off the first 6 observations.
cond1=np.loadtxt(condition_location+"cond001.txt")
cond2=np.loadtxt(condition_location+"cond002.txt")
cond3=np.loadtxt(condition_location+"cond003.txt")
#######################
# a. (my) convolution #
#######################
all_stimuli=np.array(sorted(list(cond2[:,0])+list(cond3[:,0])+list(cond1[:,0]))) # could also just x_s_array
my_hrf = convolution_specialized(all_stimuli,np.ones(len(all_stimuli)),hrf_single,np.linspace(0,239*2-2,239))
##################
# b. np.convolve #
##################
# initial needed values
TR = 2
tr_times = np.arange(0, 30, TR)
hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
n_vols=data.shape[-1]
# creating the .txt file for the events2neural function
cond_all=np.row_stack((cond1,cond2,cond3))
cond_all=sorted(cond_all,key= lambda x:x[0])
np.savetxt(condition_location+"cond_all.txt",cond_all)
neural_prediction = events2neural(condition_location+"cond_all.txt",TR,n_vols)
convolved = np.convolve(neural_prediction, hrf_at_trs) # hrf_at_trs sample data
N = len(neural_prediction) # N == n_vols == 173
M = len(hrf_at_trs) # M == 12
np_hrf=convolved[:N]
#############################
#############################
# Analysis and diagonistics #
#############################
#############################
#######################
# a. (my) convolution #
#######################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_my, X_my = glm(data, my_hrf)
# Some diagnostics.
MRSS_my, fitted_my, residuals_my = glm_diagnostics(B_my, X_my, data)
# Print out the mean MRSS.
print("MRSS using 'my' convolution function: "+str(np.mean(MRSS_my)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2]) #change from cherry-picking
plt.plot(fitted_my[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my.png")
plt.close()
##################
# b. np.convolve #
##################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_np, X_np = glm(data, np_hrf)
# Some diagnostics.
MRSS_np, fitted_np, residuals_np = glm_diagnostics(B_np, X_np, data)
# Print out the mean MRSS.
print("MRSS using np convolution function: "+str(np.mean(MRSS_np)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2])
plt.plot(fitted_np[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_np.png")
plt.close()
X_my3=np.ones((data.shape[-1],4))
for i in range(2):
X_my3[:,i+1]=my_hrf**(i+1)
B_my3, X_my3 = glm_multiple(data, X_my3)
MRSS_my3, fitted_my3, residuals_my3 = glm_diagnostics(B_my3, X_my3, data)
print("MRSS using 'my' convolution function, 3rd degree polynomial: "+str(np.mean(MRSS_my3))+ ", but the chart looks better")
plt.plot(data[41, 47, 2])
plt.plot(fitted_my3[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my3.png")
plt.close()
| bsd-3-clause | -381,920,970,145,549,500 | 28.75188 | 125 | 0.664645 | false | 2.854978 | false | false | false |
dbatalov/ri-optimizer | example_main.py | 1 | 9032 | """
This is the main example script to execute. It is meant as an example
of how the riptimize.py module is to be used, and effectively acts as
the driver of the module with rudimentary console UI + CSV report
generation and S3 upload. Its job is to demonstrate the functionality
of riptimize; it is not meant to execute in production as is.
The step-by-step instructions for executing this script are
embedded in comments below labeled with STEP X OF X.
"""
import riptimize
import datetime
import csv
import boto
def main():
print "Example Riptimize Driver"
print
# 1. setup
# STEP 1 of 7: specify region
region = 'us-east-1'
# STEP 2 of 7: set the RI holding account id and credentials
ri_account_id = 'RIRI-RIRI-RIRI' # replace with actual AWS Account ID
ri_account_credentials = ('<access-key-id-ri>', '<secret_access-key-ri>')
all_accounts = {ri_account_id: ri_account_credentials}
# STEP 3 of 7: add ids and credentials for all other linked accounts, at first just add a couple other accounts
# all_accounts['AAAA-AAAA-AAAA'] = ('<access-key-id-a>', '<secret-access-key-a>')
# all_accounts['BBBB-BBBB-BBBB'] = ('<access-key-id-b>', '<secret-access-key-b>')
# ...
# all_accounts['ZZZZ-ZZZZ-ZZZZ'] = ('<access-key-id-z>', '<secret-access-key-z>')
# STEP 4 of 7: For the first few tests this should be set to False
# once you see that the script is running, change to True to actually execute RI modifications
optimize = False # if False, means a DRY-RUN
# STEP 5 of 7: Leaving as True will publish RI surplus metrics to CloudWatch
publish_metrics = True # custom metrics are created in AWS CloudWatch
# STEP 6 of 7: Leaving as True will upload the CSV report to S3 for safekeeping
upload_report = True # CSV reports will be saved in S3 in s3_report_bucket
s3_report_bucket = "riptimize-reports-%s" % ri_account_id
# 2. do it
# STEP 7 of 7: Ok, you are ready to go, just execute on the command line % python example_main.py
riptimize_result_tuple = riptimize.riptimize(all_accounts, ri_account_credentials, region, optimize, publish_metrics)
# 3. show results
i_inventory, i_inventory_by_account, ri_inventory, supported_ri_zones, processing_modifications, clean_mismatch, recommendations, plan, modification_ids = riptimize_result_tuple
time_now = datetime.datetime.utcnow()
print "Report for region %s as of %s" % (region, time_now)
print
# 3.1 print on-demand instance inventory
print "Instance Inventory by account:"
print i_inventory_by_account
print
print "Aggregate instance inventory:"
print i_inventory
print
# 3.2 print RI inventory
print "RI Inventory:"
print ri_inventory
print
# 3.3 show all supported AZs in the RI holding account
print "Supported RI zones: " + str(supported_ri_zones)
# 3.4 show if previous modifications are still being executed
modifications_inflight = len(processing_modifications) != 0
if modifications_inflight:
print
print "======--- WARNING ---======"
print "Previous modifications are still processing:"
for mod in processing_modifications:
print "modification_id: %s, status: %s" % (mod.modification_id, mod.status)
print "!!! RI optimizations cannot be performed until previous modifications are completed"
print "!!! RI inventory and recommendations will also be potentially incorrect"
print
# 3.5 print detected mismatches between numbers of on-demand running instances and RIs by availability zone and instance type
if len(clean_mismatch) > 0:
print "On-demand/RI inventory mismatches per availability zone:"
print clean_mismatch
else:
print "No On-demand/RI inventory mimatches detected in any availability zones:"
print
# 3.6 print recommendations for migrating running instances into AZs covered by RI holding account, purchasing additional RIs or launching additional instances to get better RI utilization
eliminated_i_inventory, ri_imbalance = recommendations
if len(eliminated_i_inventory) == 0 and len(ri_imbalance) == 0:
print "No recomendations available"
else:
print "Recommendations:"
if len(eliminated_i_inventory) > 0:
print "\tOn-demand instances running in zones not supported by RIs. Migrate them to supported zones:"
print "\t" + str(eliminated_i_inventory)
print
if len(ri_imbalance) > 0:
print "\tOn-demand/RI imbalance detected!"
print "\tNegative numbers indicate additional RIs needed, positive ones indicate that RIs are underutilized and more instances can be launched:"
print "\t" + str(ri_imbalance)
print
# 3.7 print high-level optimization plan if one is possible, showing how many RIs need to be moved to which AZs
if len(plan) == 0:
print "No RI redistribution is possible."
else:
print "RI Optimization possible! Plan: " + str(plan)
if optimize:
if modifications_inflight:
print "Previous optimizations are still processing, new optimizations kicked off in DRY-RUN mode only!"
else:
print "Optimize option selected, optimizations kicked-off..."
else:
print "Optimize flag not set, so optimizations kicked off in DRY-RUN mode only!"
print
# 3.8 finally, if optimizations were actually kicked off, list all modification ids, or fake ones in case of a dry run
print "Initiated optimizations:"
print modification_ids
filename_safe_timestamp = str(time_now).replace(' ','_').replace(':', '-')
report_file_name = "riptimize_report_%s_%s.csv" % (region, filename_safe_timestamp)
csv_report(report_file_name, time_now, region, i_inventory_by_account, ri_inventory, clean_mismatch, plan, modification_ids)
print
print "CSV report written to %s" % report_file_name
if upload_report:
upload_report_to_s3(ri_account_credentials, report_file_name, s3_report_bucket)
print
print "Report uploaded to S3 as %s/%s of RI holding account %s" % (s3_report_bucket, report_file_name, ri_account_id)
print
print "Done"
# example of generating a CSV report
def csv_report(csv_file_name, time_now, region, i_inventory_by_account, ri_inventory, clean_mismatch, plan, modification_ids):
with open(csv_file_name, 'wb') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["Report for region %s at %s" % (region, str(time_now))])
# write instance inventory report
writer.writerow([])
writer.writerow(['Instance Inventory'])
writer.writerow(['Account ID', 'Instance Type', 'Availability Zone', 'Count'])
for account_id, inventory_for_account in i_inventory_by_account.items():
for (itype, az), count in inventory_for_account.items():
writer.writerow([account_id, itype, az, count])
# write RI inventory report
writer.writerow([])
writer.writerow(['RI Inventory'])
writer.writerow(['Instance Type', 'Availability Zone', 'Count'])
for (itype, az), count in ri_inventory.items():
writer.writerow([itype, az, count])
# write report on On-demand/RI inventory mismatches
writer.writerow([])
writer.writerow(['On-demand/RI inventory mismatches per each availability zone'])
writer.writerow(['Instance Type', 'Availability Zone', 'Diff'])
for (itype, az), count in clean_mismatch.items():
writer.writerow([itype, az, count])
# write optimization plan
writer.writerow([])
writer.writerow(['RI modification plan'])
writer.writerow(['Instance Type', 'Source AZ', 'Destination AZ', 'Count'])
for itype, source_az, dest_az, count in plan:
writer.writerow([itype, source_az, dest_az, count])
# write modification_ids
writer.writerow([])
writer.writerow(['Kicked off RI modifications'])
writer.writerow(['Modification ID'])
for modification_id in modification_ids:
writer.writerow([modification_id])
def upload_report_to_s3(ri_account_credentials, report_file_name, s3_report_bucket):
access_key_id, secret_access_key = ri_account_credentials
s3 = boto.connect_s3(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
# create bucket if does not exist
bucket = s3.lookup(s3_report_bucket)
if not bucket:
bucket = s3.create_bucket(s3_report_bucket)
# upload the report
key = bucket.new_key(report_file_name)
key.set_contents_from_filename(report_file_name)
s3.close()
if __name__ == '__main__':
main()
| bsd-2-clause | -7,441,872,143,540,658,000 | 46.042553 | 192 | 0.660762 | false | 3.884731 | false | false | false |
EKiefer/edge-starter | py34env/Scripts/enhancer.py | 1 | 1558 | #!c:\users\ekiefer\projects\django\my_edge\py34env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
try:
from tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
except ImportError:
from Tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
from PIL import Image, ImageTk, ImageEnhance
import sys
#
# enhancer widget
class Enhance(Frame):
def __init__(self, master, image, name, enhancer, lo, hi):
Frame.__init__(self, master)
# set up the image
self.tkim = ImageTk.PhotoImage(image.mode, image.size)
self.enhancer = enhancer(image)
self.update("1.0") # normalize
# image window
Label(self, image=self.tkim).pack()
# scale
s = Scale(self, label=name, orient=HORIZONTAL,
from_=lo, to=hi, resolution=0.01,
command=self.update)
s.set(self.value)
s.pack()
def update(self, value):
self.value = eval(value)
self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
root = Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
| mit | 8,260,503,957,191,499,000 | 25.40678 | 80 | 0.646341 | false | 3.103586 | false | false | false |
prasannav7/ggrc-core | test/integration/ggrc/models/factories.py | 1 | 4323 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
"""Factories for models"""
import random
import factory
from ggrc import db
from ggrc import models
def random_string(prefix=''):
return '{prefix}{suffix}'.format(
prefix=prefix,
suffix=random.randint(0, 9999999999),
)
class ModelFactory(factory.Factory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
# modified_by_id = 1
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = target_class(*args, **kwargs)
db.session.add(instance)
db.session.commit()
return instance
class TitledFactory(factory.Factory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
title = factory.LazyAttribute(lambda m: random_string('title'))
class DirectiveFactory(ModelFactory, TitledFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Directive
class ControlFactory(ModelFactory, TitledFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Control
directive = factory.SubFactory(DirectiveFactory)
kind_id = None
version = None
documentation_description = None
verify_frequency_id = None
fraud_related = None
key_control = None
active = None
notes = None
class AssessmentFactory(ModelFactory, TitledFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Assessment
class ControlCategoryFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.ControlCategory
name = factory.LazyAttribute(lambda m: random_string('name'))
lft = None
rgt = None
scope_id = None
depth = None
required = None
class CategorizationFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Categorization
category = None
categorizable = None
category_id = None
categorizable_id = None
categorizable_type = None
class ProgramFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Program
title = factory.LazyAttribute(lambda _: random_string("program_title"))
slug = factory.LazyAttribute(lambda _: random_string(""))
class AuditFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Audit
title = factory.LazyAttribute(lambda _: random_string("title"))
slug = factory.LazyAttribute(lambda _: random_string(""))
status = "Planned"
program_id = factory.LazyAttribute(lambda _: ProgramFactory().id)
class ContractFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Contract
class EventFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Event
revisions = []
class RelationshipFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Relationship
source = None
destination = None
class RelationshipAttrFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.RelationshipAttr
relationship_id = None
attr_name = None
attr_value = None
class PersonFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Person
| apache-2.0 | -2,486,109,641,783,697,400 | 23.844828 | 78 | 0.734906 | false | 3.575682 | false | false | false |
k0001/meaningtoolws | meaningtoolws/ct.py | 1 | 4944 | # -*- coding: utf-8 -*-
# Copyright (c) 2009, Popego Corporation <contact [at] popego [dot] com>
# All rights reserved.
#
# This file is part of the Meaningtool Web Services Python Client project
#
# See the COPYING file distributed with this project for its licensing terms.
"""
Meaningtool Category Tree REST API v0.1 client
Official documentation for the REST API v0.1 can be found at
http://meaningtool.com/docs/ws/ct/restv0.1
"""
import re
import urllib
import urllib2
try:
import json
except ImportError:
import simplejson as json
MT_BASE_URL = u"http://ws.meaningtool.com/ct/restv0.1"
_re_url = re.compile(ur"^https?://.+$")
class Result(object):
def __init__(self, status_errcode, status_message, data):
super(Result, self).__init__()
        self.status_errcode = status_errcode
self.status_message = status_message
self.data = data
def __repr__(self):
return u"<%s - %s>" % (self.__class__.__name__, self.status_message)
class ResultError(Result, Exception):
def __init__(self, status_errcode, status_message, data):
Result.__init__(self, status_errcode, status_message, data)
Exception.__init__(self, u"%s: %s" % (status_errcode, status_message))
def __repr__(self):
return u"<%s - %s: %s>" % (self.__class__.__name__, self.status_errcode, self.status_message)
class Client(object):
def __init__(self, ct_key):
self.ct_key = ct_key
self._base_url = u"%s/%s" % (MT_BASE_URL, ct_key)
def __repr__(self):
return u"<%s - ct_key: %s>" % (self.__class__.__name__, self.ct_key)
def _req_base(self, method, url, data, headers):
if method == "GET":
req = urllib2.Request(u"%s?%s" % (url, urllib.urlencode(data)))
elif method == "POST":
req = urllib2.Request(url, urllib.urlencode(data))
else:
raise ValueError(u"HTTP Method '%s' not supported" % method)
req.add_header("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
req.add_header("Accept-Charset", "UTF-8")
for k,v in headers:
req.add_header(k, v)
try:
resp = urllib2.urlopen(req)
except urllib2.HTTPError, e:
if e.code >= 500:
raise
resp = e
s = resp.read()
return s
def _req_json(self, method, url, data, headers):
url += u'.json'
headers.append(("Accept", "application/json"))
return self._req_base(method, url, data, headers)
def _parse_result_base(self, result_dict):
status = result_dict["status"]
status_errcode = result_dict["errno"]
status_message = result_dict["message"]
data = result_dict["data"]
if status == "ok":
return Result(status_errcode, status_message, data)
else:
raise ResultError(status_errcode, status_message, data)
def _parse_result_json(self, raw):
return self._parse_result_base(json.loads(raw, encoding="utf8"))
# default request/parse methods
_req = _req_json
_parse_result = _parse_result_json
def get_categories(self, source, input, url_hint=None, additionals=None, content_language=None):
url = u"%s/categories" % self._base_url
data = {}
headers = []
data["source"] = source.encode("utf8")
data["input"] = input.encode("utf8")
if url_hint:
if not _re_url.match(url_hint):
raise ValueError(u"url_hint")
data["url_hint"] = url_hint.encode("utf8")
if additionals:
additionals = u",".join(set(additionals))
data["additionals"] = additionals.encode("utf8")
if content_language:
content_language = content_language[:2].lower()
if not len(content_language) == 2:
raise ValueError(u"content_language")
headers.append(("Content-Language", content_language.encode("ascii")))
# Even if POST, it's idempotent as GET.
return self._parse_result(self._req("POST", url, data, headers))
def get_tags(self, source, input, url_hint=None, content_language=None):
url = u"%s/tags" % self._base_url
data = {}
headers = []
data["source"] = source.encode("utf8")
data["input"] = input.encode("utf8")
if url_hint:
if not _re_url.match(url_hint):
raise ValueError(u"url_hint")
data["url_hint"] = url_hint.encode("utf8")
if content_language:
content_language = content_language[:2].lower()
if not len(content_language) == 2:
raise ValueError(u"content_language")
headers.append(("Content-Language", content_language.encode("ascii")))
# Even if POST, it's idempotent as GET.
return self._parse_result(self._req("POST", url, data, headers))
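if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original client: the key and the
    # sample text below are placeholders (assumptions), and a live connection to
    # the Meaningtool web service is required for the calls to succeed.
    demo_client = Client(u"YOUR_CT_KEY")
    try:
        result = demo_client.get_categories(source=u"text", input=u"Python is a programming language")
        print result.data
    except ResultError, err:
        print u"Request failed: %s" % err.status_message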
| bsd-3-clause | 6,838,378,754,736,598,000 | 32.405405 | 101 | 0.587379 | false | 3.624633 | false | false | false |
proversity-org/edx-platform | lms/djangoapps/student_account/talenetic.py | 1 | 7807 | from six.moves.urllib_parse import urlencode, unquote
import jwt
import json
from django.conf import settings
from student.models import Registration, UserProfile
from social_core.backends.oauth import BaseOAuth2
from django.contrib.auth.models import User
import uuid
import logging
import social_django
log = logging.getLogger(__name__)
class TaleneticOAuth2(BaseOAuth2):
"""
Talenetic OAuth2 authentication backend
"""
settings_dict = settings.CUSTOM_BACKENDS.get('talenetic')
name = 'talenetic-oauth2'
REDIRECT_STATE = False
ID_KEY = 'emailaddress'
STATE_PARAMETER = False
AUTHORIZATION_URL = settings_dict.get('AUTH_URL')
ACCESS_TOKEN_URL = settings_dict.get('ACCESS_TOKEN_URL')
ACCESS_TOKEN_METHOD = 'GET'
REFRESH_TOKEN_URL = settings_dict.get('REFRESH_TOKEN_URL')
REFRESH_TOKEN_METHOD = 'POST'
RESPONSE_TYPE = 'code jwt_token'
REDIRECT_IS_HTTPS = False
REVOKE_TOKEN_URL = settings_dict.get('LOGOUT_URL')
REVOKE_TOKEN_METHOD = 'POST'
def get_scope_argument(self):
return {}
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
self.process_error(self.data)
state = self.validate_state()
access_url = "{}?uid={}".format(self.access_token_url(), self._get_uid())
response = self.request_access_token(
access_url,
data=self._get_creds(),
headers=self._get_creds(),
auth=self.auth_complete_credentials(),
method=self.ACCESS_TOKEN_METHOD
)
self.process_error(response)
return self.do_auth(response['jwt_token'], response=response,
*args, **kwargs)
def do_auth(self, jwt_token, *args, **kwargs):
data = self.user_data(jwt_token, *args, **kwargs)
response = kwargs.get('response') or {}
response.update(data or {})
if 'access_token' not in response:
response['access_token'] = jwt_token
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def _get_uid(self):
if 'uid' in self.data:
return self.data['uid']
else:
return None
def auth_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
uri = self.get_redirect_uri(state)
if self.REDIRECT_IS_HTTPS:
uri = uri.replace('http://', 'https://')
params = {
'urlredirect': uri,
'clientId': client_id,
'secretkey': client_secret
}
return params
def get_user_id(self, details, response):
return details.get('email')
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_user_details(kwargs.get('response'))
def get_user_details(self, response):
response = self._fill_fields(response)
self._set_uid_to_profile(self._get_uid(), response.get('emailaddress'))
return {'username': response.get('username'),
'email': response.get('emailaddress'),
'fullname': response.get('firstname'),
'first_name': response.get('firstname')}
def _fill_fields(self, data):
# a little util to fill in missing data for later consumption
if data.get('firstname') is None:
data['firstname'] = data.get('emailaddress').split('@')[0]
if data.get('username') is None:
data['username'] = data.get('emailaddress').split('@')[0]
return data
def _get_creds(self):
client_id, client_secret = self.get_key_and_secret()
return {
'secretkey': client_secret,
'clientId': client_id
}
def auth_headers(self):
return {'Accept': 'application/json'}
def pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
"""
This is a special override of the pipeline method.
        This will grab the user from the pipeline that actually ran and
        add the incoming uid as a uid field to the meta field on the user profile.
"""
# due to some of the usernames that will come in from the SSO containing a .fullstop
# the user can not be found and then the oauth tries
# to make a new one and breaks as the email exists,
# this is to set the user if it exists forcefully for the rest of oauth to work properly.
if kwargs.get('user') is None:
try:
user = User.objects.get(email=kwargs.get('response').get('emailaddress'))
kwargs['user'] = user
except User.DoesNotExist:
pass
out = self.run_pipeline(pipeline, pipeline_index, *args, **kwargs)
if not isinstance(out, dict):
return out
user = out.get('user')
if user:
user.social_user = out.get('social')
user.is_new = out.get('is_new')
return user
def _set_uid_to_profile(self, uid, emailaddress):
"""
This function calls for the existing user by emailaddress,
if the user is found we save the requested uid to the user profile
because we need it to logout.
"""
try:
user = User.objects.get(email=emailaddress)
user_profile = user.profile
new_meta = {'talenetic-uid': uid}
if len(user_profile.meta) > 0:
previous_meta = json.loads(user_profile.meta)
mixed_dicts =\
(previous_meta.items() + new_meta.items())
new_meta =\
{key: value for (key, value) in mixed_dicts}
user_profile.meta = json.dumps(new_meta)
user_profile.save()
except Exception as e:
log.error("Could not save uid to user profile or something else: {}".format(e.message))
def auth_url(self):
"""Return redirect url"""
params = self.auth_params()
params = urlencode(params)
if not self.REDIRECT_STATE:
# redirect_uri matching is strictly enforced, so match the
# providers value exactly.
params = unquote(params)
return '{0}?{1}'.format(self.authorization_url(), params)
def revoke_token_url(self, token, uid):
social_user = social_django.models.DjangoStorage.user.get_social_auth(provider=self.name, uid=uid)
profile = social_user.user.profile
meta_data = json.loads(profile.meta)
url = "{}?uid={}".format(self.REVOKE_TOKEN_URL, meta_data.get('talenetic-uid'))
return url
def revoke_token_params(self, token, uid):
return {}
def revoke_token_headers(self, token, uid):
return self._get_creds()
def process_revoke_token_response(self, response):
return response.status_code == 200
def revoke_token(self, token, uid):
if self.REVOKE_TOKEN_URL:
url = self.revoke_token_url(token, uid)
params = self.revoke_token_params(token, uid)
headers = self.revoke_token_headers(token, uid)
data = urlencode(params) if self.REVOKE_TOKEN_METHOD != 'GET' \
else None
response = self.request(url, params=params, headers=headers,
data=data, method=self.REVOKE_TOKEN_METHOD)
return self.process_revoke_token_response(response)
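# Illustrative note, not part of the original module: this backend reads its
# endpoints from settings.CUSTOM_BACKENDS['talenetic']. The dictionary below is
# a sketch of the expected shape; the URLs are placeholders, not real endpoints.
#
# CUSTOM_BACKENDS = {
#     'talenetic': {
#         'AUTH_URL': 'https://sso.example.com/authorize',
#         'ACCESS_TOKEN_URL': 'https://sso.example.com/token',
#         'REFRESH_TOKEN_URL': 'https://sso.example.com/refresh',
#         'LOGOUT_URL': 'https://sso.example.com/logout',
#     }
# }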
| agpl-3.0 | 211,726,550,094,709,500 | 33.166667 | 106 | 0.575893 | false | 4.096013 | false | false | false |
denis-guillemenot/pmi_collect | simpleHTTPServer.py | 1 | 1587 | # ----------------------------------------------------------------
# name : simpleHTTPServer.py
# object: Simple MultiThreaded Web Server
# usage: python SimpleHTTPServer [port] / default port: 8080
# author: [email protected] / [email protected]
# date : 19/09/2013
# ----------------------------------------------------------------
import sys
# Use default or provided port
print
if ( len( sys.argv) > 1):
    msg = "provided"
    try:
        cause = "must be an integer"
        port = int( sys.argv[1])
        if ( port < 1024):
            cause = "must be >= 1024"
            raise
    except:
        print "ERROR: %s port:%s %s... exiting" % (msg, sys.argv[1], cause)
        sys.exit( 1)
else:
    msg = "default"
    port = 8080
print "Using %s port:%d" % ( msg, port)
import SocketServer, BaseHTTPServer, sys, os, CGIHTTPServer, os, os.path
# port = 8080
class ThreadingCGIServer( SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
pass
# set os separator
try:
os_sep = os.path.os.sep
False = 0
True = 1
except:
try:
os_sep = os.path.sep
except:
print("ERROR: can not set os.separator, exiting...")
sys.exit(-1)
# set rootdir
currdir = os.getcwd()
# rootdir = currdir + os_sep + 'data'
# if ( os.path.exists( rootdir)): os.chdir( rootdir)
# start HTTP Server
server = ThreadingCGIServer( ('', port), CGIHTTPServer.CGIHTTPRequestHandler)
print "Server started on port %s." % port
try:
while 1:
sys.stdout.flush()
server.handle_request()
except KeyboardInterrupt:
if ( os.path.exists( currdir)): os.chdir( currdir)
print "Server stopped."
| mit | 3,394,831,740,184,228,400 | 24.190476 | 82 | 0.608066 | false | 3.348101 | false | false | false |
sylvainnizac/Djangoctopus | blog/admin.py | 1 | 2774 | # -*- coding: utf8 -*-
from django.contrib import admin
from blog.models import Categorie, Article, Comment
class ArticleAdmin(admin.ModelAdmin):
list_display = ('titre', 'auteur', 'date', 'categorie', 'apercu_contenu')
list_filter = ('auteur','categorie',)
date_hierarchy = 'date'
ordering = ('-date', )
search_fields = ('titre', 'contenu')
prepopulated_fields = {"slug": ("titre",)}
    # Edit form configuration
fieldsets = (
        # Fieldset 1: meta info (title, author...)
('Général',
{'fields': ('titre', 'slug', 'auteur', 'categorie')
}),
        # Fieldset 2: article content
('Contenu de l\'article',
{ 'description': 'Le formulaire accepte les balises HTML. Utilisez-les à bon escient !',
'fields': ('contenu', )
}),
)
def apercu_contenu(self, article):
"""
        Return the first 40 characters of the article content. If the content
        is longer than 40 characters, an ellipsis is appended.
"""
text = article.contenu[0:40]
if len(article.contenu) > 40:
return '%s...' % text
else:
return text
    # Header for this column
apercu_contenu.short_description = 'Aperçu du contenu'
class CommentsAdmin(admin.ModelAdmin):
list_display = ('pseudo', 'email', 'article', 'apercu_description', 'date', 'commentaire_visible')
list_filter = ('pseudo', 'article', 'email', )
date_hierarchy = 'date'
ordering = ('-date', )
search_fields = ('pseudo', 'email', 'article', )
    # Edit form configuration
fieldsets = (
        # Fieldset 1: meta info (title, author...)
('Général',
{'fields': ('pseudo', 'email'), }),
        # Fieldset 2: article content
('Commentaire',
{ 'description': 'Le formulaire n\'accepte pas les balises HTML.',
'fields': ('description', )}),
        # Fieldset 3: moderation
('Modération',
{ 'fields': ('commentaire_visible', )}),
)
def apercu_description(self, commentaire):
"""
        Return the first 40 characters of the comment content. If the content
        is longer than 40 characters, an ellipsis is appended.
"""
text = commentaire.description[0:40]
if len(commentaire.description) > 40:
return '%s...' % text
else:
return text
    # Header for this column
apercu_description.short_description = 'Aperçu du commentaire'
# Register your models here.
admin.site.register(Categorie)
admin.site.register(Article, ArticleAdmin)
admin.site.register(Comment, CommentsAdmin)
| gpl-2.0 | -2,033,523,279,121,713,400 | 33.848101 | 104 | 0.587722 | false | 3.407178 | false | false | false |
derv82/wifite2 | wifite/tools/ifconfig.py | 1 | 1784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from .dependency import Dependency
class Ifconfig(Dependency):
dependency_required = True
dependency_name = 'ifconfig'
dependency_url = 'apt-get install net-tools'
@classmethod
def up(cls, interface, args=[]):
'''Put interface up'''
from ..util.process import Process
command = ['ifconfig', interface]
if type(args) is list:
command.extend(args)
        elif isinstance(args, str):
command.append(args)
command.append('up')
pid = Process(command)
pid.wait()
if pid.poll() != 0:
raise Exception('Error putting interface %s up:\n%s\n%s' % (interface, pid.stdout(), pid.stderr()))
@classmethod
def down(cls, interface):
'''Put interface down'''
from ..util.process import Process
pid = Process(['ifconfig', interface, 'down'])
pid.wait()
if pid.poll() != 0:
raise Exception('Error putting interface %s down:\n%s\n%s' % (interface, pid.stdout(), pid.stderr()))
@classmethod
def get_mac(cls, interface):
from ..util.process import Process
output = Process(['ifconfig', interface]).stdout()
# Mac address separated by dashes
mac_dash_regex = ('[a-zA-Z0-9]{2}-' * 6)[:-1]
match = re.search(' ({})'.format(mac_dash_regex), output)
if match:
return match.group(1).replace('-', ':')
# Mac address separated by colons
mac_colon_regex = ('[a-zA-Z0-9]{2}:' * 6)[:-1]
match = re.search(' ({})'.format(mac_colon_regex), output)
if match:
return match.group(1)
raise Exception('Could not find the mac address for %s' % interface)
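# Illustrative usage sketch, not part of the original module. It assumes a Linux
# host with the `ifconfig` binary installed and a wireless interface actually
# named "wlan0":
#
#     Ifconfig.up("wlan0")
#     mac = Ifconfig.get_mac("wlan0")
#     Ifconfig.down("wlan0")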
| gpl-2.0 | 4,402,937,286,454,165,000 | 28.245902 | 113 | 0.568946 | false | 3.90372 | false | false | false |
HalcyonChimera/osf.io | website/project/metadata/schemas.py | 1 | 2265 | import os
import json
LATEST_SCHEMA_VERSION = 2
def _id_to_name(id):
return ' '.join(id.split('_'))
def _name_to_id(name):
return '_'.join(name.split(' '))
def ensure_schema_structure(schema):
schema['pages'] = schema.get('pages', [])
schema['title'] = schema['name']
schema['version'] = schema.get('version', 1)
schema['active'] = schema.get('active', True)
return schema
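# Illustrative example (hypothetical input): passing {"name": "Open-Ended Registration"}
# to ensure_schema_structure() returns the same dict with "pages" defaulted to [],
# "title" copied from "name", "version" set to 1 and "active" set to True.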
here = os.path.split(os.path.abspath(__file__))[0]
def from_json(fname):
with open(os.path.join(here, fname)) as f:
return json.load(f)
OSF_META_SCHEMAS = [
ensure_schema_structure(from_json('osf-open-ended-1.json')),
ensure_schema_structure(from_json('osf-open-ended-2.json')),
ensure_schema_structure(from_json('osf-standard-1.json')),
ensure_schema_structure(from_json('osf-standard-2.json')),
ensure_schema_structure(from_json('brandt-prereg-1.json')),
ensure_schema_structure(from_json('brandt-prereg-2.json')),
ensure_schema_structure(from_json('brandt-postcomp-1.json')),
ensure_schema_structure(from_json('brandt-postcomp-2.json')),
ensure_schema_structure(from_json('prereg-prize.json')),
ensure_schema_structure(from_json('erpc-prize.json')),
ensure_schema_structure(from_json('confirmatory-general-2.json')),
ensure_schema_structure(from_json('egap-project-2.json')),
ensure_schema_structure(from_json('veer-1.json')),
ensure_schema_structure(from_json('aspredicted.json')),
ensure_schema_structure(from_json('registered-report.json')),
ensure_schema_structure(from_json('ridie-initiation.json')),
ensure_schema_structure(from_json('ridie-complete.json')),
]
METASCHEMA_ORDERING = (
'Prereg Challenge',
'Open-Ended Registration',
'Preregistration Template from AsPredicted.org',
'Registered Report Protocol Preregistration',
'OSF-Standard Pre-Data Collection Registration',
'Replication Recipe (Brandt et al., 2013): Pre-Registration',
'Replication Recipe (Brandt et al., 2013): Post-Completion',
"Pre-Registration in Social Psychology (van 't Veer & Giner-Sorolla, 2016): Pre-Registration",
'Election Research Preacceptance Competition',
'RIDIE Registration - Study Initiation',
'RIDIE Registration - Study Complete',
)
| apache-2.0 | 8,412,698,295,121,021,000 | 38.736842 | 98 | 0.696247 | false | 3.296943 | false | false | false |
ashbc/tgrsite | tgrsite/settings.py | 1 | 6161 | """
Django settings for tgrsite project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import django.contrib.messages.constants as message_constants
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from django.urls import reverse_lazy
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = [('Webadmin', '[email protected]')]
MANAGERS = [('Webadmin', '[email protected]')]
LOGIN_URL = '/login/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'localhost')
EMAIL_PORT = os.environ.get('EMAIL_PORT', 587)
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = os.environ.get('FROM_EMAIL', 'webmaster@localhost')
s = ''
try:
from .keys import secret
s = secret()
except:
# this will throw a KeyError and crash if neither are specified
# which is a decent enough way of enforcing it
s = os.environ['SECRET_KEY']
SECRET_KEY = s
# Defaults off unless explicitly stated in environment variable
try:
if os.environ['DEBUG'].lower() == 'true':
DEBUG = True
else:
DEBUG = False
except KeyError:
DEBUG = False
# needs 127 to work on my machine...
ALLOWED_HOSTS = [os.environ.get('HOST', 'localhost'), '127.0.0.1']
PRIMARY_HOST = '127.0.0.1:8000'
if DEBUG:
from .ipnetworks import IpNetworks
INTERNAL_IPS = IpNetworks(['127.0.0.1', '192.168.0.0/255.255.0.0'])
else:
INTERNAL_IPS = ['127.0.0.1']
INSTALLED_APPS = [
'website_settings',
'navbar',
'assets',
'minutes',
'inventory',
'forum',
'users',
'rpgs',
'exec',
'templatetags',
'timetable',
'messaging',
'gallery',
'pages',
'newsletters',
'notifications',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'redirect'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tgrsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'tgrsite/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'tgrsite.context_processors.latestposts',
'tgrsite.context_processors.mergednavbar'
],
},
},
]
WSGI_APPLICATION = 'tgrsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = [
'users.backends.CaseInsensitiveModelBackend',
# 'django.contrib.auth.backends.ModelBackend',
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-gb'
USE_I18N = True
USE_L10N = True
# Europe/London means GMT+0 with a DST offset of +1:00 i.e. England time
TIME_ZONE = 'Europe/London'
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# site URL that static files are served from
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = reverse_lazy("homepage")
# directories to collect static files from
STATICFILES_DIRS = [
# where the static files are stored in the repo and collected from
os.path.join(BASE_DIR, 'static_resources'),
]
# directory the static files are served from
STATIC_ROOT = os.path.join(BASE_DIR, 'STATIC')
# directories for the uploaded files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'MEDIA')
# Monday
FIRST_DAY_OF_WEEK = 1
# Setup Cripsy to render forms bootstrap4ish
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# as advised by python manage.py check --deploy
# prevent browsers from MIME type sniffing. doesn't play nice
# SECURE_CONTENT_TYPE_NOSNIFF=True
# enable browsers' XSS filters
SECURE_BROWSER_XSS_FILTER = True
# ensure all traffic is SSL (https)
SECURE_SSL_REDIRECT = not DEBUG
# session cookies secure-only
SESSION_COOKIE_SECURE = not DEBUG
# same for CSRF cookie
CSRF_COOKIE_SECURE = not DEBUG
# CSRF_COOKIE_HTTPONLY=True
X_FRAME_OPTIONS = 'DENY'
MESSAGE_TAGS = {
message_constants.DEBUG: 'alert-dark',
message_constants.INFO: 'alert-primary',
message_constants.SUCCESS: 'alert-success',
message_constants.WARNING: 'alert-warning',
message_constants.ERROR: 'alert-danger',
}
# Allow local configuration (change deploy options etc.)
try:
from .local_config import *
except ImportError:
pass
| isc | -7,394,605,813,598,171,000 | 26.382222 | 91 | 0.687226 | false | 3.42468 | false | false | false |
Guts/isogeo-api-py-minsdk | isogeo_pysdk/models/metadata.py | 1 | 38320 | # -*- coding: UTF-8 -*-
#! python3
"""
Isogeo API v1 - Model of Metadata (= Resource) entity
See: http://help.isogeo.com/api/complete/index.html#definition-resource
"""
# #############################################################################
# ########## Libraries #############
# ##################################
# standard library
import logging
import pprint
import re
import unicodedata
# package
from isogeo_pysdk.enums import MetadataSubresources, MetadataTypes
# others models
from isogeo_pysdk.models import Workgroup
# #############################################################################
# ########## Globals ###############
# ##################################
logger = logging.getLogger(__name__)
# for slugified title
_regex_slugify_strip = re.compile(r"[^\w\s-]")
_regex_slugify_hyphenate = re.compile(r"[-\s]+")
# #############################################################################
# ########## Classes ###############
# ##################################
class Metadata(object):
"""Metadata are the main entities in Isogeo.
:Example:
.. code-block:: json
{
"_abilities": [
"string"
],
"_created": "string (date-time)",
"_creator": {
"_abilities": [
"string"
],
"_created": "string (date-time)",
"_id": "string (uuid)",
"_modified": "string (date-time)",
"areKeywordsRestricted": "boolean",
"canCreateMetadata": "boolean",
"code": "string",
"contact": {
"_created": "string (date-time)",
"_id": "string (uuid)",
"_modified": "string (date-time)",
"addressLine1": "string",
"addressLine2": "string",
"addressLine3": "string",
"available": "string",
"city": "string",
"count": "integer (int32)",
"countryCode": "string",
"email": "string",
"fax": "string",
"hash": "string",
"name": "string",
"organization": "string",
"phone": "string",
"type": "string",
"zipCode": "string"
},
"keywordsCasing": "string",
"metadataLanguage": "string",
"themeColor": "string"
},
"_id": "string (uuid)",
"_modified": "string (date-time)",
"abstract": "string",
"bbox": [
"number (double)"
],
"collectionContext": "string",
"collectionMethod": "string",
"conditions": [
{
"_id": "string (uuid)",
"description": "string",
"license": {
"_id": "string (uuid)",
"content": "string",
"count": "integer (int32)",
"link": "string",
"name": "string"
}
}
],
"contacts": [
{
"_id": "string (uuid)",
"contact": {
"_created": "string (date-time)",
"_id": "string (uuid)",
"_modified": "string (date-time)",
"addressLine1": "string",
"addressLine2": "string",
"addressLine3": "string",
"available": "string",
"city": "string",
"count": "integer (int32)",
"countryCode": "string",
"email": "string",
"fax": "string",
"hash": "string",
"name": "string",
"organization": "string",
"phone": "string",
"type": "string",
"zipCode": "string"
},
"role": "string"
}
],
"context": "object",
"coordinate-system": "object",
"created": "string (date-time)",
"distance": "number (double)",
"editionProfile": "string",
"encoding": "string",
"envelope": "object",
"features": "integer (int32)",
"format": "string",
"formatVersion": "string",
"geometry": "string",
"height": "integer (int32)",
"keywords": [
{}
]
}
"""
# -- ATTRIBUTES --------------------------------------------------------------------
ATTR_TYPES = {
"_abilities": list,
"_created": str,
"_creator": dict,
"_id": str,
"_modified": str,
"abstract": str,
"collectionContext": str,
"collectionMethod": str,
"conditions": list,
"contacts": list,
"coordinateSystem": dict,
"created": str,
"distance": float,
"editionProfile": str,
"encoding": str,
"envelope": dict,
"events": list,
"featureAttributes": list,
"features": int,
"format": str,
"formatVersion": str,
"geometry": str,
"keywords": list,
"language": str,
"layers": list,
"limitations": list,
"links": list,
"modified": str,
"name": str,
"operations": list,
"path": str,
"precision": str,
"published": str,
"scale": int,
"series": bool,
"serviceLayers": list,
"specifications": list,
"tags": list,
"title": str,
"topologicalConsistency": str,
"type": str,
"updateFrequency": str,
"validFrom": str,
"validTo": str,
"validityComment": str,
}
ATTR_CREA = {
"abstract": str,
"collectionContext": str,
"collectionMethod": str,
"distance": float,
"editionProfile": str,
"encoding": str,
"envelope": dict,
"features": int,
"format": str,
"formatVersion": str,
"geometry": str,
"language": str,
"name": str,
"path": str,
"precision": str,
"scale": int,
"series": bool,
"title": str,
"topologicalConsistency": str,
"type": str,
"updateFrequency": str,
"validFrom": str,
"validTo": str,
"validityComment": str,
}
ATTR_MAP = {
"coordinateSystem": "coordinate-system",
"featureAttributes": "feature-attributes",
}
# -- CLASS METHODS -----------------------------------------------------------------
@classmethod
def clean_attributes(cls, raw_object: dict):
"""Renames attributes which are incompatible with Python (hyphens...).
See related issue: https://github.com/isogeo/isogeo-api-py-minsdk/issues/82
:param dict raw_object: metadata dictionary returned by a request.json()
:returns: the metadata with correct attributes
:rtype: Metadata
"""
for k, v in cls.ATTR_MAP.items():
raw_object[k] = raw_object.pop(v, [])
return cls(**raw_object)
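    # Illustrative sketch (hypothetical payload): given an API dict such as
    # {"_id": "...", "coordinate-system": {}, "feature-attributes": []},
    # clean_attributes() returns a Metadata whose attributes use the Python-safe
    # names `coordinateSystem` and `featureAttributes` listed in ATTR_MAP.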
# -- CLASS INSTANCIATION -----------------------------------------------------------
def __init__(
self,
_abilities: list = None,
_created: str = None,
_creator: dict = None,
_id: str = None,
_modified: str = None,
abstract: str = None,
collectionContext: str = None,
collectionMethod: str = None,
conditions: list = None,
contacts: list = None,
coordinateSystem: dict = None,
created: str = None,
distance: float = None,
editionProfile: str = None,
encoding: str = None,
envelope: dict = None,
events: list = None,
featureAttributes: list = None,
features: int = None,
format: str = None,
formatVersion: str = None,
geometry: str = None,
keywords: list = None,
language: str = None,
layers: list = None,
limitations: list = None,
links: list = None,
modified: str = None,
name: str = None,
operations: list = None,
path: str = None,
precision: str = None,
published: str = None,
scale: int = None,
series: bool = None,
serviceLayers: list = None,
specifications: list = None,
tags: list = None,
title: str = None,
topologicalConsistency: str = None,
type: str = None,
updateFrequency: str = None,
validFrom: str = None,
validTo: str = None,
validityComment: str = None,
):
"""Metadata model"""
# default values for the object attributes/properties
self.__abilities = None
self.__created = None
self.__creator = None
self.__id = None
self.__modified = None
self._abstract = None
self._collectionContext = None
self._collectionMethod = None
self._conditions = None
self._contacts = None
self._coordinateSystem = None
self._creation = None # = created
self._distance = None
self._editionProfile = None
self._encoding = None
self._envelope = None
self._events = None
self._featureAttributes = None
self._features = None
self._format = None
self._formatVersion = None
self._geometry = None
self._keywords = None
self._language = None
self._layers = None
self._limitations = None
self._links = None
self._modification = None # = modified
self._name = None
self._operations = None
self._path = None
self._precision = None
self._published = None
self._scale = None
self._series = None
self._serviceLayers = None
self._specifications = None
self._tags = None
self._title = None
self._topologicalConsistency = None
self._type = None
self._updateFrequency = None
self._validFrom = None
self._validTo = None
self._validityComment = None
# if values have been passed, so use them as objects attributes.
# attributes are prefixed by an underscore '_'
if _abilities is not None:
self.__abilities = _abilities
if _created is not None:
self.__created = _created
if _creator is not None:
self.__creator = _creator
if _id is not None:
self.__id = _id
if _modified is not None:
self.__modified = _modified
if abstract is not None:
self._abstract = abstract
if collectionContext is not None:
self._collectionContext = collectionContext
if collectionMethod is not None:
self._collectionMethod = collectionMethod
if conditions is not None:
self._conditions = conditions
if contacts is not None:
self._contacts = contacts
if coordinateSystem is not None:
self._coordinateSystem = coordinateSystem
if created is not None:
self._creation = created
if distance is not None:
self._distance = distance
if editionProfile is not None:
self._editionProfile = editionProfile
if encoding is not None:
self._encoding = encoding
if envelope is not None:
self._envelope = envelope
if events is not None:
self._events = events
if featureAttributes is not None:
self._featureAttributes = featureAttributes
if features is not None:
self._features = features
if format is not None:
self._format = format
if formatVersion is not None:
self._formatVersion = formatVersion
if geometry is not None:
self._geometry = geometry
if keywords is not None:
self._keywords = keywords
if language is not None:
self._language = language
if layers is not None:
self._layers = layers
if limitations is not None:
self._limitations = limitations
if links is not None:
self._links = links
if modified is not None:
self._modification = modified
if name is not None:
self._name = name
if operations is not None:
self._operations = operations
if path is not None:
self._path = path
if precision is not None:
self._precision = precision
if published is not None:
self._published = published
if scale is not None:
self._scale = scale
if serviceLayers is not None:
self._serviceLayers = serviceLayers
if specifications is not None:
self._specifications = specifications
if tags is not None:
self._tags = tags
if title is not None:
self._title = title
if topologicalConsistency is not None:
self._topologicalConsistency = topologicalConsistency
if type is not None:
self._type = type
if updateFrequency is not None:
self._updateFrequency = updateFrequency
if validFrom is not None:
self._validFrom = validFrom
if validTo is not None:
self._validTo = validTo
if validityComment is not None:
self._validityComment = validityComment
# -- PROPERTIES --------------------------------------------------------------------
# abilities of the user related to the metadata
@property
def _abilities(self) -> list:
"""Gets the abilities of this Metadata.
:return: The abilities of this Metadata.
:rtype: list
"""
return self.__abilities
# _created
@property
def _created(self) -> str:
"""Gets the creation datetime of the Metadata.
Datetime format is: `%Y-%m-%dT%H:%M:%S+00:00`.
:return: The created of this Metadata.
:rtype: str
"""
return self.__created
# _modified
@property
def _modified(self) -> str:
"""Gets the last modification datetime of this Metadata.
Datetime format is: `%Y-%m-%dT%H:%M:%S+00:00`.
:return: The modified of this Metadata.
:rtype: str
"""
return self.__modified
# metadata owner
@property
def _creator(self) -> dict:
"""Gets the creator of this Metadata.
:return: The creator of this Metadata.
:rtype: dict
"""
return self.__creator
# metadata UUID
@property
def _id(self) -> str:
"""Gets the id of this Metadata.
:return: The id of this Metadata.
:rtype: str
"""
return self.__id
@_id.setter
def _id(self, _id: str):
"""Sets the id of this Metadata.
:param str id: The id of this Metadata.
"""
self.__id = _id
# metadata description
@property
def abstract(self) -> str:
"""Gets the abstract.
:return: The abstract of this Metadata.
:rtype: str
"""
return self._abstract
@abstract.setter
def abstract(self, abstract: str):
"""Sets the abstract used into Isogeo filters of this Metadata.
:param str abstract: the abstract of this Metadata.
"""
self._abstract = abstract
# collection context
@property
def collectionContext(self) -> str:
"""Gets the collectionContext of this Metadata.
:return: The collectionContext of this Metadata.
:rtype: str
"""
return self._collectionContext
@collectionContext.setter
def collectionContext(self, collectionContext: str):
"""Sets the collection context of this Metadata.
:param str collectionContext: The collection context of this Metadata.
"""
self._collectionContext = collectionContext
# collection method
@property
def collectionMethod(self) -> str:
"""Gets the collection method of this Metadata.
:return: The collection method of this Metadata.
:rtype: str
"""
return self._collectionMethod
@collectionMethod.setter
def collectionMethod(self, collectionMethod: str):
"""Sets the collection method of this Metadata.
:param str collectionMethod: the collection method to set. Accepts markdown.
"""
self._collectionMethod = collectionMethod
# CGUs
@property
def conditions(self) -> list:
"""Gets the conditions of this Metadata.
:return: The conditions of this Metadata.
:rtype: list
"""
return self._conditions
@conditions.setter
def conditions(self, conditions: list):
"""Sets conditions of this Metadata.
:param list conditions: conditions to be set
"""
self._conditions = conditions
# contacts
@property
def contacts(self) -> list:
"""Gets the contacts of this Metadata.
:return: The contacts of this Metadata.
:rtype: list
"""
return self._contacts
@contacts.setter
def contacts(self, contacts: list):
"""Sets the of this Metadata.
:param list contacts: to be set
"""
self._contacts = contacts
# coordinateSystem
@property
def coordinateSystem(self) -> dict:
"""Gets the coordinateSystem of this Metadata.
:return: The coordinateSystem of this Metadata.
:rtype: dict
"""
return self._coordinateSystem
@coordinateSystem.setter
def coordinateSystem(self, coordinateSystem: dict):
"""Sets the coordinate systems of this Metadata.
:param dict coordinateSystem: to be set
"""
self._coordinateSystem = coordinateSystem
# created
@property
def created(self) -> str:
"""Gets the creation date of the data described by the Metadata.
        It's the equivalent of the `created` original attribute (renamed to avoid conflicts with the `_created` one).
Date format is: `%Y-%m-%dT%H:%M:%S+00:00`.
:return: The creation of this Metadata.
:rtype: str
"""
return self._creation
# distance
@property
def distance(self) -> str:
"""Gets the distance of this Metadata.
:return: The distance of this Metadata.
:rtype: str
"""
return self._distance
@distance.setter
def distance(self, distance: str):
"""Sets the of this Metadata.
:param str distance: to be set
"""
self._distance = distance
# editionProfile
@property
def editionProfile(self) -> str:
"""Gets the editionProfile of this Metadata.
:return: The editionProfile of this Metadata.
:rtype: str
"""
return self._editionProfile
@editionProfile.setter
def editionProfile(self, editionProfile: str):
"""Sets the of this Metadata.
:param str editionProfile: to be set
"""
self._editionProfile = editionProfile
# encoding
@property
def encoding(self) -> str:
"""Gets the encoding of this Metadata.
:return: The encoding of this Metadata.
:rtype: str
"""
return self._encoding
@encoding.setter
def encoding(self, encoding: str):
"""Sets the of this Metadata.
:param str encoding: to be set
"""
self._encoding = encoding
# envelope
@property
def envelope(self) -> str:
"""Gets the envelope of this Metadata.
:return: The envelope of this Metadata.
:rtype: str
"""
return self._envelope
@envelope.setter
def envelope(self, envelope: str):
"""Sets the of this Metadata.
:param str envelope: to be set
"""
self._envelope = envelope
# events
@property
def events(self) -> list:
"""Gets the events of this Metadata.
:return: The events of this Metadata.
:rtype: list
"""
return self._events
@events.setter
def events(self, events: list):
"""Sets the of this Metadata.
:param list events: to be set
"""
self._events = events
# featureAttributes
@property
def featureAttributes(self) -> list:
"""Gets the featureAttributes of this Metadata.
:return: The featureAttributes of this Metadata.
:rtype: list
"""
return self._featureAttributes
@featureAttributes.setter
def featureAttributes(self, featureAttributes: list):
"""Sets the of this Metadata.
:param list featureAttributes: to be set
"""
self._featureAttributes = featureAttributes
# features
@property
def features(self) -> int:
"""Gets the features of this Metadata.
:return: The features of this Metadata.
:rtype: int
"""
return self._features
@features.setter
def features(self, features: int):
"""Sets the of this Metadata.
:param int features: to be set
"""
self._features = features
# format
@property
def format(self) -> str:
"""Gets the format of this Metadata.
:return: The format of this Metadata.
:rtype: str
"""
return self._format
@format.setter
def format(self, format: str):
"""Sets the of this Metadata.
:param str format: to be set
"""
self._format = format
# formatVersion
@property
def formatVersion(self) -> str:
"""Gets the formatVersion of this Metadata.
:return: The formatVersion of this Metadata.
:rtype: str
"""
return self._formatVersion
@formatVersion.setter
def formatVersion(self, formatVersion: str):
"""Sets the of this Metadata.
:param str formatVersion: to be set
"""
self._formatVersion = formatVersion
# geometry
@property
def geometry(self) -> str:
"""Gets the geometry of this Metadata.
:return: The geometry of this Metadata.
:rtype: str
"""
return self._geometry
@geometry.setter
def geometry(self, geometry: str):
"""Sets the of this Metadata.
:param str geometry: to be set
"""
self._geometry = geometry
# keywords
@property
def keywords(self) -> str:
"""Gets the keywords of this Metadata.
:return: The keywords of this Metadata.
:rtype: str
"""
return self._keywords
@keywords.setter
def keywords(self, keywords: str):
"""Sets the of this Metadata.
:param str keywords: to be set
"""
self._keywords = keywords
# language
@property
def language(self) -> str:
"""Gets the language of this Metadata.
:return: The language of this Metadata.
:rtype: str
"""
return self._language
@language.setter
def language(self, language: str):
"""Sets the of this Metadata.
:param str language: to be set
"""
self._language = language
# layers
@property
def layers(self) -> list:
"""Gets the layers of this Metadata.
:return: The layers of this Metadata.
:rtype: list
"""
return self._layers
@layers.setter
def layers(self, layers: list):
"""Sets the of this Metadata.
:param list layers: to be set
"""
self._layers = layers
# limitations
@property
def limitations(self) -> str:
"""Gets the limitations of this Metadata.
:return: The limitations of this Metadata.
:rtype: str
"""
return self._limitations
@limitations.setter
def limitations(self, limitations: str):
"""Sets the of this Metadata.
:param str limitations: to be set
"""
self._limitations = limitations
# links
@property
def links(self) -> str:
"""Gets the links of this Metadata.
:return: The links of this Metadata.
:rtype: str
"""
return self._links
@links.setter
def links(self, links: str):
"""Sets the of this Metadata.
:param str links: to be set
"""
self._links = links
# modification
@property
def modified(self) -> str:
"""Gets the last modification date of the data described by this Metadata.
        It's the equivalent of the `modified` original attribute (renamed to avoid conflicts with the `_modified` one).
:return: The modification of this Metadata.
:rtype: str
"""
return self._modification
# name
@property
def name(self) -> str:
"""Gets the name of this Metadata.
:return: The name of this Metadata.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets technical name of the Metadata.
:param str name: technical name this Metadata.
"""
self._name = name
# operations
@property
def operations(self) -> list:
"""Gets the operations of this Metadata.
:return: The operations of this Metadata.
:rtype: list
"""
return self._operations
@operations.setter
def operations(self, operations: list):
"""Sets the of this Metadata.
:param list operations: to be set
"""
self._operations = operations
# path
@property
def path(self) -> str:
"""Gets the path of this Metadata.
:return: The path of this Metadata.
:rtype: str
"""
return self._path
@path.setter
def path(self, path: str):
"""Sets the of this Metadata.
:param str path: to be set
"""
self._path = path
# precision
@property
def precision(self) -> str:
"""Gets the precision of this Metadata.
:return: The precision of this Metadata.
:rtype: str
"""
return self._precision
@precision.setter
def precision(self, precision: str):
"""Sets the of this Metadata.
:param str precision: to be set
"""
self._precision = precision
# published
@property
def published(self) -> str:
"""Gets the published of this Metadata.
:return: The published of this Metadata.
:rtype: str
"""
return self._published
@published.setter
def published(self, published: str):
"""Sets the of this Metadata.
:param str published: to be set
"""
self._published = published
# scale
@property
def scale(self) -> str:
"""Gets the scale of this Metadata.
:return: The scale of this Metadata.
:rtype: str
"""
return self._scale
@scale.setter
def scale(self, scale: str):
"""Sets the of this Metadata.
:param str scale: to be set
"""
self._scale = scale
# series
@property
def series(self) -> str:
"""Gets the series of this Metadata.
:return: The series of this Metadata.
:rtype: str
"""
return self._series
@series.setter
def series(self, series: str):
"""Sets the of this Metadata.
:param str series: to be set
"""
self._series = series
# serviceLayers
@property
def serviceLayers(self) -> list:
"""Gets the serviceLayers of this Metadata.
:return: The serviceLayers of this Metadata.
:rtype: list
"""
return self._serviceLayers
@serviceLayers.setter
def serviceLayers(self, serviceLayers: list):
"""Sets the of this Metadata.
:param list serviceLayers: to be set
"""
self._serviceLayers = serviceLayers
# specifications
@property
def specifications(self) -> str:
"""Gets the specifications of this Metadata.
:return: The specifications of this Metadata.
:rtype: str
"""
return self._specifications
@specifications.setter
def specifications(self, specifications: str):
"""Sets the of this Metadata.
:param str specifications: to be set
"""
self._specifications = specifications
# tags
@property
def tags(self) -> str:
"""Gets the tags of this Metadata.
:return: The tags of this Metadata.
:rtype: str
"""
return self._tags
@tags.setter
def tags(self, tags: str):
"""Sets the of this Metadata.
:param str tags: to be set
"""
self._tags = tags
# title
@property
def title(self) -> str:
"""Gets the title of this Metadata.
:return: The title of this Metadata.
:rtype: str
"""
return self._title
@title.setter
def title(self, title: str):
"""Sets the of this Metadata.
:param str title: to be set
"""
self._title = title
# topologicalConsistency
@property
def topologicalConsistency(self) -> str:
"""Gets the topologicalConsistency of this Metadata.
:return: The topologicalConsistency of this Metadata.
:rtype: str
"""
return self._topologicalConsistency
@topologicalConsistency.setter
def topologicalConsistency(self, topologicalConsistency: str):
"""Sets the of this Metadata.
:param str topologicalConsistency: to be set
"""
self._topologicalConsistency = topologicalConsistency
# type
@property
def type(self) -> str:
"""Gets the type of this Metadata.
:return: The type of this Metadata.
:rtype: str
"""
return self._type
@type.setter
def type(self, type: str):
"""Sets the type of this Metadata.
:param str type: The type of this Metadata.
"""
# check type value
if type not in MetadataTypes.__members__:
raise ValueError(
"Metadata type '{}' is not an accepted value. Must be one of: {}.".format(
type, " | ".join([e.name for e in MetadataTypes])
)
)
self._type = type
# updateFrequency
@property
def updateFrequency(self) -> str:
"""Gets the updateFrequency of this Metadata.
:return: The updateFrequency of this Metadata.
:rtype: str
"""
return self._updateFrequency
@updateFrequency.setter
def updateFrequency(self, updateFrequency: str):
"""Sets the of this Metadata.
:param str updateFrequency: to be set
"""
self._updateFrequency = updateFrequency
# validFrom
@property
def validFrom(self) -> str:
"""Gets the validFrom of this Metadata.
:return: The validFrom of this Metadata.
:rtype: str
"""
return self._validFrom
@validFrom.setter
def validFrom(self, validFrom: str):
"""Sets the of this Metadata.
:param str validFrom: to be set
"""
self._validFrom = validFrom
# validTo
@property
def validTo(self) -> str:
"""Gets the validTo of this Metadata.
:return: The validTo of this Metadata.
:rtype: str
"""
return self._validTo
@validTo.setter
def validTo(self, validTo: str):
"""Sets the of this Metadata.
:param str validTo: to be set
"""
self._validTo = validTo
# validityComment
@property
def validityComment(self) -> str:
"""Gets the validityComment of this Metadata.
:return: The validityComment of this Metadata.
:rtype: str
"""
return self._validityComment
@validityComment.setter
def validityComment(self, validityComment: str):
"""Sets the of this Metadata.
:param str validityComment: to be set
"""
self._validityComment = validityComment
# -- SPECIFIC TO IMPLEMENTATION ----------------------------------------------------
@property
def groupName(self) -> str:
"""Shortcut to get the name of the workgroup which owns the Metadata."""
if isinstance(self._creator, dict):
return self._creator.get("contact").get("name")
elif isinstance(self._creator, Workgroup):
return self._creator.contact.get("name")
else:
return None
@property
def groupId(self) -> str:
"""Shortcut to get the UUID of the workgroup which owns the Metadata."""
if isinstance(self._creator, dict):
return self._creator.get("_id")
elif isinstance(self._creator, Workgroup):
return self._creator._id
else:
return None
# -- METHODS -----------------------------------------------------------------------
def admin_url(self, url_base: str = "https://app.isogeo.com") -> str:
"""Returns the administration URL (https://app.isogeo.com) for this metadata.
:param str url_base: base URL of admin site. Defaults to: https://app.isogeo.com
:rtype: str
"""
if self._creator is None:
logger.warning("Creator is required to build admin URL")
return False
creator_id = self._creator.get("_id")
return "{}/groups/{}/resources/{}/".format(url_base, creator_id, self._id)
def title_or_name(self, slugged: bool = False) -> str:
"""Gets the title of this Metadata or the name if there is no title.
It can return a slugified value.
:param bool slugged: slugify title. Defaults to `False`.
:returns: the title or the name of this Metadata.
:rtype: str
"""
if self._title:
title_or_name = self._title
else:
title_or_name = self._name
# slugify
if slugged:
title_or_name = (
unicodedata.normalize("NFKD", title_or_name)
.encode("ascii", "ignore")
.decode("ascii")
)
title_or_name = _regex_slugify_strip.sub("", title_or_name).strip().lower()
title_or_name = _regex_slugify_hyphenate.sub("-", title_or_name)
return title_or_name
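    # Illustrative example: a metadata titled "Réseau routier 2019" would be
    # returned as-is by default, and as "reseau-routier-2019" with slugged=True
    # (accents stripped, punctuation removed, spaces turned into hyphens).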
def to_dict(self) -> dict:
"""Returns the model properties as a dict"""
result = {}
for attr, _ in self.ATTR_TYPES.items():
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(Metadata, dict):
for key, value in self.items():
result[key] = value
return result
def to_dict_creation(self) -> dict:
"""Returns the model properties as a dict structured for creation purpose (POST)"""
result = {}
for attr, _ in self.ATTR_CREA.items():
# get attribute value
value = getattr(self, attr)
# switch attribute name for creation purpose
if attr in self.ATTR_MAP:
attr = self.ATTR_MAP.get(attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(Metadata, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self) -> str:
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self) -> str:
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other) -> bool:
"""Returns true if both objects are equal"""
if not isinstance(other, Metadata):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other) -> bool:
"""Returns true if both objects are not equal"""
return not self == other
# ##############################################################################
# ##### Stand alone program ########
# ##################################
if __name__ == "__main__":
""" standalone execution """
md = Metadata()
print(md)
| gpl-3.0 | 639,171,598,854,724,600 | 26.235252 | 116 | 0.522886 | false | 4.686888 | false | false | false |
lochiiconnectivity/exabgp | lib/exabgp/configuration/engine/tokeniser.py | 1 | 3330 | # encoding: utf-8
"""
tokeniser.py
Created by Thomas Mangin on 2014-06-22.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
from exabgp.util import coroutine
from exabgp.configuration.engine.location import Location
from exabgp.configuration.engine.raised import Raised
# convert special characters
@coroutine.join
def unescape (s):
start = 0
while start < len(s):
pos = s.find('\\', start)
if pos == -1:
yield s[start:]
break
yield s[start:pos]
pos += 1
esc = s[pos]
if esc == 'b':
yield '\b'
elif esc == 'f':
yield '\f'
elif esc == 'n':
yield '\n'
elif esc == 'r':
yield '\r'
elif esc == 't':
yield '\t'
elif esc == 'u':
yield chr(int(s[pos + 1:pos + 5], 16))
pos += 4
else:
yield esc
start = pos + 1
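# Illustrative example, not part of the original module: unescape(r"a\tb\u0020c")
# turns the escape sequences into real characters, yielding "a", a tab, "b", a
# space and "c", which @coroutine.join then assembles into a single string.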
# A coroutine which returns the next token from the stream, or the string content if it is quoted
@coroutine.each
def tokens (stream):
spaces = [' ','\t','\r','\n']
strings = ['"', "'"]
syntax = [',','[',']','{','}']
comment = ['#',]
nb_lines = 0
for line in stream:
nb_lines += 1
nb_chars = 0
quoted = ''
word = ''
for char in line:
if char in comment:
if quoted:
word += char
nb_chars += 1
else:
if word:
						yield nb_lines,nb_chars-len(word),line,word
word = ''
break
elif char in syntax:
if quoted:
word += char
else:
if word:
yield nb_lines,nb_chars-len(word),line,word
word = ''
yield nb_lines,nb_chars,line,char
nb_chars += 1
elif char in spaces:
if quoted:
word += char
elif word:
yield nb_lines,nb_chars-len(word),line,word
word = ''
nb_chars += 1
elif char in strings:
word += char
if quoted == char:
quoted = ''
yield nb_lines,nb_chars-len(word),line,word
word = ''
else:
quoted = char
nb_chars += 1
else:
word += char
nb_chars += 1
# ==================================================================== Tokeniser
# Return the producer token from the configuration
class Tokeniser (Location):
def __init__ (self,name,stream):
super(Tokeniser,self).__init__()
		self.name = name                 # A unique name for this tokeniser, so we can have multiple
		self.tokeniser = tokens(stream)  # A coroutine giving us the next token
		self._rewind = []                # Should we want to rewind, the list of tokens to pop first
def __call__ (self):
if self._rewind:
return self._rewind.pop()
token = self.content(self.tokeniser)
return token
# XXX: FIXME: line and position only work if we only rewind one element
def rewind (self,token):
self._rewind.append(token)
def content (self,producer):
try:
while True:
self.idx_line,self.idx_column,self.line,token = producer()
if token == '[':
returned = []
for token in self.iterate_list(producer):
returned.append((self.idx_line,self.idx_column,self.line,token))
return returned
elif token[0] in ('"',"'"):
return unescape(token[1:-1])
else:
return token
except ValueError:
raise Raised(Location(self.idx_line,self.idx_column,self.line),'Could not parse %s' % str(token))
except StopIteration:
return None
def iterate_list (self,producer):
token = self.content(producer)
while token and token != ']':
yield token
token = self.content(producer)
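# Illustrative usage sketch, not part of the original module (the configuration
# lines below are made up): wrapping an iterable of lines in a Tokeniser and
# calling the instance repeatedly returns one token at a time.
#
#     t = Tokeniser('example', ['neighbor 192.0.2.1 {', '}'])
#     t()  # 'neighbor'
#     t()  # '192.0.2.1'
#     t()  # '{'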
| bsd-3-clause | -4,720,082,603,888,199,000 | 22.125 | 100 | 0.597297 | false | 3.094796 | false | false | false |
tmilicic/networkx | networkx/classes/function.py | 1 | 16409 | """Functional interface to graph methods and assorted utilities.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
#
import networkx as nx
from networkx.utils import not_implemented_for
import itertools
__author__ = """\n""".join(['Aric Hagberg ([email protected])',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors',
'number_of_nodes', 'number_of_edges', 'density',
'is_directed', 'info', 'freeze', 'is_frozen', 'subgraph',
'create_empty_copy', 'set_node_attributes',
'get_node_attributes', 'set_edge_attributes',
'get_edge_attributes', 'all_neighbors', 'non_neighbors',
'non_edges', 'common_neighbors', 'is_weighted',
'is_negatively_weighted', 'is_empty']
def nodes(G):
"""Return an iterator over the graph nodes."""
return G.nodes()
def edges(G,nbunch=None):
"""Return iterator over edges incident to nodes in nbunch.
Return all edges if nbunch is unspecified or nbunch=None.
For digraphs, edges=out_edges
"""
return G.edges(nbunch)
def degree(G,nbunch=None,weight=None):
"""Return degree of single node or of nbunch of nodes.
    If nbunch is omitted, then return degrees of *all* nodes.
"""
return G.degree(nbunch,weight)
def neighbors(G,n):
"""Return a list of nodes connected to node n. """
return G.neighbors(n)
def number_of_nodes(G):
"""Return the number of nodes in the graph."""
return G.number_of_nodes()
def number_of_edges(G):
"""Return the number of edges in the graph. """
return G.number_of_edges()
def density(G):
r"""Return the density of a graph.
The density for undirected graphs is
.. math::
d = \frac{2m}{n(n-1)},
and for directed graphs is
.. math::
d = \frac{m}{n(n-1)},
where `n` is the number of nodes and `m` is the number of edges in `G`.
Notes
-----
The density is 0 for a graph without edges and 1 for a complete graph.
The density of multigraphs can be higher than 1.
Self loops are counted in the total number of edges so graphs with self
loops can have density higher than 1.
"""
n=number_of_nodes(G)
m=number_of_edges(G)
if m==0 or n <= 1:
d=0.0
else:
if G.is_directed():
d=m/float(n*(n-1))
else:
d= m*2.0/float(n*(n-1))
return d
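# Illustrative example, not part of the original module: an undirected path graph
# on 3 nodes has n=3 and m=2, so density(G) = 2*2 / (3*2) = 2/3.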
def degree_histogram(G):
"""Return a list of the frequency of each degree value.
Parameters
----------
G : Networkx graph
A graph
Returns
-------
hist : list
A list of frequencies of degrees.
The degree values are the index in the list.
Notes
-----
Note: the bins are width one, hence len(list) can be large
(Order(number_of_edges))
"""
# We need to make degseq list because we call it twice.
degseq = list(d for n, d in G.degree())
dmax = max(degseq) + 1
freq = [ 0 for d in range(dmax) ]
for d in degseq:
freq[d] += 1
return freq
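# Illustrative usage sketch (assumes networkx imported as nx, as above): the
# star graph nx.star_graph(3) has degree sequence [3, 1, 1, 1], so
# degree_histogram returns [0, 3, 0, 1] -- no nodes of degree 0 or 2, three
# nodes of degree 1 and one node of degree 3.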
def is_directed(G):
""" Return True if graph is directed."""
return G.is_directed()
def frozen(*args):
"""Dummy method for raising errors when trying to modify frozen graphs"""
raise nx.NetworkXError("Frozen graph can't be modified")
def freeze(G):
"""Modify graph to prevent further change by adding or removing
nodes or edges.
Node and edge data can still be modified.
Parameters
----------
G : graph
A NetworkX graph
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([0,1,2,3])
>>> G=nx.freeze(G)
>>> try:
... G.add_edge(4,5)
... except nx.NetworkXError as e:
... print(str(e))
Frozen graph can't be modified
Notes
-----
To "unfreeze" a graph you must make a copy by creating a new graph object:
>>> graph = nx.path_graph(4)
>>> frozen_graph = nx.freeze(graph)
>>> unfrozen_graph = nx.Graph(frozen_graph)
>>> nx.is_frozen(unfrozen_graph)
False
See Also
--------
is_frozen
"""
G.add_node=frozen
G.add_nodes_from=frozen
G.remove_node=frozen
G.remove_nodes_from=frozen
G.add_edge=frozen
G.add_edges_from=frozen
G.remove_edge=frozen
G.remove_edges_from=frozen
G.clear=frozen
G.frozen=True
return G
def is_frozen(G):
"""Return True if graph is frozen.
Parameters
----------
G : graph
A NetworkX graph
See Also
--------
freeze
"""
try:
return G.frozen
except AttributeError:
return False
def subgraph(G, nbunch):
"""Return the subgraph induced on nodes in nbunch.
Parameters
----------
G : graph
A NetworkX graph
nbunch : list, iterable
A container of nodes that will be iterated through once (thus
it should be an iterator or be iterable). Each element of the
container should be a valid node type: any hashable type except
None. If nbunch is None, return all edges data in the graph.
Nodes in nbunch that are not in the graph will be (quietly)
ignored.
Notes
-----
subgraph(G) calls G.subgraph()
"""
return G.subgraph(nbunch)
def create_empty_copy(G,with_nodes=True):
"""Return a copy of the graph G with all of the edges removed.
Parameters
----------
G : graph
A NetworkX graph
with_nodes : bool (default=True)
Include nodes.
Notes
-----
Graph, node, and edge data is not propagated to the new graph.
"""
H=G.__class__()
if with_nodes:
H.add_nodes_from(G)
return H
def info(G, n=None):
"""Print short summary of information for the graph G or the node n.
Parameters
----------
G : Networkx graph
A graph
n : node (any hashable)
A node in the graph G
"""
info='' # append this all to a string
if n is None:
info+="Name: %s\n"%G.name
type_name = [type(G).__name__]
info+="Type: %s\n"%",".join(type_name)
info+="Number of nodes: %d\n"%G.number_of_nodes()
info+="Number of edges: %d\n"%G.number_of_edges()
nnodes=G.number_of_nodes()
if len(G) > 0:
if G.is_directed():
info+="Average in degree: %8.4f\n"%\
(sum(d for n, d in G.in_degree())/float(nnodes))
info+="Average out degree: %8.4f"%\
(sum(d for n, d in G.out_degree())/float(nnodes))
else:
s=sum(dict(G.degree()).values())
info+="Average degree: %8.4f"%\
(float(s)/float(nnodes))
else:
if n not in G:
raise nx.NetworkXError("node %s not in graph"%(n,))
info+="Node % s has the following properties:\n"%n
info+="Degree: %d\n"%G.degree(n)
info+="Neighbors: "
info+=' '.join(str(nbr) for nbr in G.neighbors(n))
return info
def set_node_attributes(G, name, values):
"""Set node attributes from dictionary of nodes and values
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
values: dict
Dictionary of attribute values keyed by node. If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every node in `G`.
Examples
--------
>>> G = nx.path_graph(3)
>>> bb = nx.betweenness_centrality(G)
>>> nx.set_node_attributes(G, 'betweenness', bb)
>>> G.node[1]['betweenness']
1.0
"""
try:
values.items
except AttributeError:
# Treat `value` as the attribute value for each node.
values = dict(zip(G.nodes(), [values] * len(G)))
for node, value in values.items():
G.node[node][name] = value
def get_node_attributes(G, name):
"""Get node attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.Graph()
>>> G.add_nodes_from([1,2,3],color='red')
>>> color=nx.get_node_attributes(G,'color')
>>> color[1]
'red'
"""
return dict( (n,d[name]) for n,d in G.node.items() if name in d)
def set_edge_attributes(G, name, values):
"""Set edge attributes from dictionary of edge tuples and values.
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
values : dict
Dictionary of attribute values keyed by edge (tuple). For multigraphs,
the keys tuples must be of the form (u, v, key). For non-multigraphs,
the keys must be tuples of the form (u, v). If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every edge in `G`.
Examples
--------
>>> G = nx.path_graph(3)
>>> bb = nx.edge_betweenness_centrality(G, normalized=False)
>>> nx.set_edge_attributes(G, 'betweenness', bb)
>>> G[1][2]['betweenness']
2.0
"""
try:
values.items
except AttributeError:
# Treat `value` as the attribute value for each edge.
if G.is_multigraph():
edges = list(G.edges(keys=True))
else:
edges = list(G.edges())
        values = dict(zip(edges, [values] * len(edges)))
if G.is_multigraph():
for (u, v, key), value in values.items():
G[u][v][key][name] = value
else:
for (u, v), value in values.items():
G[u][v][name] = value
def get_edge_attributes(G, name):
"""Get edge attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by edge. For (di)graphs, the keys are
2-tuples of the form: (u,v). For multi(di)graphs, the keys are 3-tuples of
the form: (u, v, key).
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([1,2,3],color='red')
>>> color=nx.get_edge_attributes(G,'color')
>>> color[(1,2)]
'red'
"""
if G.is_multigraph():
edges = G.edges(keys=True, data=True)
else:
edges = G.edges(data=True)
return dict( (x[:-1], x[-1][name]) for x in edges if name in x[-1] )
def all_neighbors(graph, node):
""" Returns all of the neighbors of a node in the graph.
If the graph is directed returns predecessors as well as successors.
Parameters
----------
graph : NetworkX graph
Graph to find neighbors.
node : node
The node whose neighbors will be returned.
Returns
-------
neighbors : iterator
Iterator of neighbors
"""
if graph.is_directed():
values = itertools.chain.from_iterable([graph.predecessors(node),
graph.successors(node)])
else:
values = graph.neighbors(node)
return values
def non_neighbors(graph, node):
"""Returns the non-neighbors of the node in the graph.
Parameters
----------
graph : NetworkX graph
Graph to find neighbors.
node : node
The node whose neighbors will be returned.
Returns
-------
non_neighbors : iterator
Iterator of nodes in the graph that are not neighbors of the node.
"""
nbors = set(neighbors(graph, node)) | set([node])
return (nnode for nnode in graph if nnode not in nbors)
def non_edges(graph):
"""Returns the non-existent edges in the graph.
Parameters
----------
graph : NetworkX graph.
Graph to find non-existent edges.
Returns
-------
non_edges : iterator
Iterator of edges that are not in the graph.
"""
if graph.is_directed():
for u in graph.nodes():
for v in non_neighbors(graph, u):
yield (u, v)
else:
nodes = set(graph)
while nodes:
u = nodes.pop()
for v in nodes - set(graph[u]):
yield (u, v)
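# Illustrative usage sketch (assumes networkx imported as nx, as above): in the
# path graph 0-1-2 the only missing edge is between 0 and 2, so
# list(non_edges(nx.path_graph(3))) yields a single pair, (0, 2) or (2, 0)
# depending on iteration order.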
@not_implemented_for('directed')
def common_neighbors(G, u, v):
"""Return the common neighbors of two nodes in a graph.
Parameters
----------
G : graph
A NetworkX undirected graph.
u, v : nodes
Nodes in the graph.
Returns
-------
cnbors : iterator
Iterator of common neighbors of u and v in the graph.
Raises
------
NetworkXError
If u or v is not a node in the graph.
Examples
--------
>>> G = nx.complete_graph(5)
>>> sorted(nx.common_neighbors(G, 0, 1))
[2, 3, 4]
"""
if u not in G:
raise nx.NetworkXError('u is not in the graph.')
if v not in G:
raise nx.NetworkXError('v is not in the graph.')
# Return a generator explicitly instead of yielding so that the above
# checks are executed eagerly.
return (w for w in G[u] if w in G[v] and w not in (u, v))
def is_weighted(G, edge=None, weight='weight'):
"""Returns ``True`` if ``G`` has weighted edges.
Parameters
----------
G : graph
A NetworkX graph.
edge : tuple, optional
A 2-tuple specifying the only edge in ``G`` that will be tested. If
``None``, then every edge in ``G`` is tested.
weight: string, optional
The attribute name used to query for edge weights.
Returns
-------
bool
A boolean signifying if ``G``, or the specified edge, is weighted.
Raises
------
NetworkXError
If the specified edge does not exist.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.is_weighted(G)
False
>>> nx.is_weighted(G, (2, 3))
False
>>> G = nx.DiGraph()
>>> G.add_edge(1, 2, weight=1)
>>> nx.is_weighted(G)
True
"""
if edge is not None:
data = G.get_edge_data(*edge)
if data is None:
msg = 'Edge {!r} does not exist.'.format(edge)
raise nx.NetworkXError(msg)
return weight in data
if is_empty(G):
# Special handling required since: all([]) == True
return False
return all(weight in data for u, v, data in G.edges(data=True))
def is_negatively_weighted(G, edge=None, weight='weight'):
"""Returns ``True`` if ``G`` has negatively weighted edges.
Parameters
----------
G : graph
A NetworkX graph.
edge : tuple, optional
A 2-tuple specifying the only edge in ``G`` that will be tested. If
``None``, then every edge in ``G`` is tested.
weight: string, optional
The attribute name used to query for edge weights.
Returns
-------
bool
A boolean signifying if ``G``, or the specified edge, is negatively
weighted.
Raises
------
NetworkXError
If the specified edge does not exist.
Examples
--------
>>> G=nx.Graph()
>>> G.add_edges_from([(1, 3), (2, 4), (2, 6)])
>>> G.add_edge(1, 2, weight=4)
>>> nx.is_negatively_weighted(G, (1, 2))
False
>>> G[2][4]['weight'] = -2
>>> nx.is_negatively_weighted(G)
True
>>> G = nx.DiGraph()
>>> G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5), ('1', '0', -2)])
>>> nx.is_negatively_weighted(G)
True
"""
if edge is not None:
data = G.get_edge_data(*edge)
if data is None:
msg = 'Edge {!r} does not exist.'.format(edge)
raise nx.NetworkXError(msg)
return weight in data and data[weight] < 0
return any(weight in data and data[weight] < 0
for u, v, data in G.edges(data=True))
def is_empty(G):
"""Returns ``True`` if ``G`` has no edges.
Parameters
----------
G : graph
A NetworkX graph.
Returns
-------
bool
``True`` if ``G`` has no edges, and ``False`` otherwise.
Notes
-----
An empty graph can have nodes but not edges. The empty graph with zero
nodes is known as the null graph. This is an O(n) operation where n is the
number of nodes in the graph.
"""
return not any(G.adj.values())
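# Illustrative usage sketch (assumes networkx imported as nx, as above): both
# is_empty(nx.Graph()) and is_empty(nx.empty_graph(5)) return True because
# neither graph has edges, while any graph with at least one edge returns False.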
| bsd-3-clause | -4,094,525,715,898,061,000 | 23.899848 | 82 | 0.566945 | false | 3.7049 | false | false | false |
ESS-LLP/erpnext-healthcare | erpnext/hr/doctype/salary_slip/salary_slip.py | 1 | 34819 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import datetime
from frappe.utils import add_days, cint, cstr, flt, getdate, rounded, date_diff, money_in_words, getdate
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.hr.doctype.payroll_entry.payroll_entry import get_start_end_dates
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
from frappe.utils.background_jobs import enqueue
from erpnext.hr.doctype.additional_salary.additional_salary import get_additional_salary_component
from erpnext.hr.utils import get_payroll_period
from erpnext.hr.doctype.employee_benefit_application.employee_benefit_application import get_benefit_component_amount
from erpnext.hr.doctype.employee_benefit_claim.employee_benefit_claim import get_benefit_claim_amount, get_last_payroll_period_benefits
class SalarySlip(TransactionBase):
def __init__(self, *args, **kwargs):
super(SalarySlip, self).__init__(*args, **kwargs)
self.series = 'Sal Slip/{0}/.#####'.format(self.employee)
self.whitelisted_globals = {
"int": int,
"float": float,
"long": int,
"round": round,
"date": datetime.date,
"getdate": getdate
}
def autoname(self):
self.name = make_autoname(self.series)
def validate(self):
self.status = self.get_status()
self.validate_dates()
self.check_existing()
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
if not (len(self.get("earnings")) or len(self.get("deductions"))):
# get details from salary structure
self.get_emp_and_leave_details()
else:
self.get_leave_details(lwp = self.leave_without_pay)
# if self.salary_slip_based_on_timesheet or not self.net_pay:
self.calculate_net_pay()
company_currency = erpnext.get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
if frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet"):
max_working_hours = frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet")
if self.salary_slip_based_on_timesheet and (self.total_working_hours > int(max_working_hours)):
frappe.msgprint(_("Total working hours should not be greater than max working hours {0}").
format(max_working_hours), alert=True)
def validate_dates(self):
if date_diff(self.end_date, self.start_date) < 0:
frappe.throw(_("To date cannot be before From date"))
def calculate_component_amounts(self):
if not getattr(self, '_salary_structure_doc', None):
self._salary_structure_doc = frappe.get_doc('Salary Structure', self.salary_structure)
data = self.get_data_for_eval()
for key in ('earnings', 'deductions'):
for struct_row in self._salary_structure_doc.get(key):
amount = self.eval_condition_and_formula(struct_row, data)
if amount and struct_row.statistical_component == 0:
self.update_component_row(struct_row, amount, key)
if key=="earnings" and struct_row.is_flexible_benefit == 1:
self.add_employee_flexi_benefits(struct_row)
additional_components = get_additional_salary_component(self.employee, self.start_date, self.end_date)
if additional_components:
for additional_component in additional_components:
additional_component = frappe._dict(additional_component)
amount = additional_component.amount
overwrite = additional_component.overwrite
key = "earnings"
if additional_component.type == "Deduction":
key = "deductions"
self.update_component_row(frappe._dict(additional_component.struct_row), amount, key, overwrite=overwrite)
self.get_last_payroll_period_benefit()
# Calculate variable_based_on_taxable_salary after all components updated in salary slip
for struct_row in self._salary_structure_doc.get("deductions"):
if struct_row.variable_based_on_taxable_salary == 1 and not struct_row.formula and not struct_row.amount:
tax_detail = self.calculate_variable_based_on_taxable_salary(struct_row.salary_component)
if tax_detail and tax_detail[1]:
self.update_component_row(frappe._dict(tax_detail[0]), tax_detail[1], "deductions", tax_detail[2], tax_detail[3])
def get_last_payroll_period_benefit(self):
payroll_period = get_payroll_period(self.start_date, self.end_date, self.company)
if payroll_period:
# Check for last payroll period
if (getdate(payroll_period.end_date) <= getdate(self.end_date)):
current_flexi_amount = 0
for d in self.get("earnings"):
if d.is_flexible_benefit == 1:
current_flexi_amount += d.amount
last_benefits = get_last_payroll_period_benefits(self.employee, self.start_date, self.end_date,\
current_flexi_amount, payroll_period, self._salary_structure_doc)
if last_benefits:
for last_benefit in last_benefits:
last_benefit = frappe._dict(last_benefit)
amount = last_benefit.amount
self.update_component_row(frappe._dict(last_benefit.struct_row), amount, "earnings")
def add_employee_flexi_benefits(self, struct_row):
if frappe.db.get_value("Salary Component", struct_row.salary_component, "pay_against_benefit_claim") != 1:
benefit_component_amount = get_benefit_component_amount(self.employee, self.start_date, self.end_date, \
struct_row, self._salary_structure_doc, self.total_working_days, self.payroll_frequency)
if benefit_component_amount:
self.update_component_row(struct_row, benefit_component_amount, "earnings")
else:
benefit_claim_amount = get_benefit_claim_amount(self.employee, self.start_date, self.end_date, struct_row.salary_component)
if benefit_claim_amount:
self.update_component_row(struct_row, benefit_claim_amount, "earnings")
def update_component_row(self, struct_row, amount, key, benefit_tax=None, additional_tax=None, overwrite=1):
component_row = None
for d in self.get(key):
if d.salary_component == struct_row.salary_component:
component_row = d
if not component_row:
self.append(key, {
'amount': amount,
'default_amount': amount,
'depends_on_lwp' : struct_row.depends_on_lwp,
'salary_component' : struct_row.salary_component,
'abbr' : struct_row.abbr,
'do_not_include_in_total' : struct_row.do_not_include_in_total,
'is_tax_applicable': struct_row.is_tax_applicable,
'is_flexible_benefit': struct_row.is_flexible_benefit,
'variable_based_on_taxable_salary': struct_row.variable_based_on_taxable_salary,
'is_additional_component': struct_row.is_additional_component,
'tax_on_flexible_benefit': benefit_tax,
'tax_on_additional_salary': additional_tax
})
else:
if overwrite:
component_row.default_amount = amount
component_row.amount = amount
else:
component_row.default_amount += amount
component_row.amount = component_row.default_amount
component_row.tax_on_flexible_benefit = benefit_tax
component_row.tax_on_additional_salary = additional_tax
def eval_condition_and_formula(self, d, data):
try:
condition = d.condition.strip() if d.condition else None
if condition:
if not frappe.safe_eval(condition, self.whitelisted_globals, data):
return None
amount = d.amount
if d.amount_based_on_formula:
formula = d.formula.strip() if d.formula else None
if formula:
amount = frappe.safe_eval(formula, self.whitelisted_globals, data)
if amount:
data[d.abbr] = amount
return amount
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in formula or condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
def get_data_for_eval(self):
'''Returns data for evaluating formula'''
data = frappe._dict()
data.update(frappe.get_doc("Salary Structure Assignment",
{"employee": self.employee, "salary_structure": self.salary_structure}).as_dict())
data.update(frappe.get_doc("Employee", self.employee).as_dict())
data.update(self.as_dict())
# set values for components
salary_components = frappe.get_all("Salary Component", fields=["salary_component_abbr"])
for sc in salary_components:
data.setdefault(sc.salary_component_abbr, 0)
for key in ('earnings', 'deductions'):
for d in self.get(key):
data[d.abbr] = d.amount
return data
def get_emp_and_leave_details(self):
'''First time, load all the components from salary structure'''
if self.employee:
self.set("earnings", [])
self.set("deductions", [])
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.validate_dates()
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
self.get_leave_details(joining_date, relieving_date)
struct = self.check_sal_struct(joining_date, relieving_date)
if struct:
self._salary_structure_doc = frappe.get_doc('Salary Structure', struct)
self.salary_slip_based_on_timesheet = self._salary_structure_doc.salary_slip_based_on_timesheet or 0
self.set_time_sheet()
self.pull_sal_struct()
def set_time_sheet(self):
if self.salary_slip_based_on_timesheet:
self.set("timesheets", [])
timesheets = frappe.db.sql(""" select * from `tabTimesheet` where employee = %(employee)s and start_date BETWEEN %(start_date)s AND %(end_date)s and (status = 'Submitted' or
status = 'Billed')""", {'employee': self.employee, 'start_date': self.start_date, 'end_date': self.end_date}, as_dict=1)
for data in timesheets:
self.append('timesheets', {
'time_sheet': data.name,
'working_hours': data.total_hours
})
def get_date_details(self):
if not self.end_date:
date_details = get_start_end_dates(self.payroll_frequency, self.start_date or self.posting_date)
self.start_date = date_details.start_date
self.end_date = date_details.end_date
def check_sal_struct(self, joining_date, relieving_date):
cond = """and sa.employee=%(employee)s and (sa.from_date <= %(start_date)s or
sa.from_date <= %(end_date)s or sa.from_date <= %(joining_date)s)"""
if self.payroll_frequency:
cond += """and ss.payroll_frequency = '%(payroll_frequency)s'""" % {"payroll_frequency": self.payroll_frequency}
st_name = frappe.db.sql("""
select sa.salary_structure
from `tabSalary Structure Assignment` sa join `tabSalary Structure` ss
where sa.salary_structure=ss.name
and sa.docstatus = 1 and ss.docstatus = 1 and ss.is_active ='Yes' %s
order by sa.from_date desc
limit 1
""" %cond, {'employee': self.employee, 'start_date': self.start_date,
'end_date': self.end_date, 'joining_date': joining_date})
if st_name:
self.salary_structure = st_name[0][0]
return self.salary_structure
else:
self.salary_structure = None
frappe.msgprint(_("No active or default Salary Structure found for employee {0} for the given dates")
.format(self.employee), title=_('Salary Structure Missing'))
def pull_sal_struct(self):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
if self.salary_slip_based_on_timesheet:
self.salary_structure = self._salary_structure_doc.name
self.hour_rate = self._salary_structure_doc.hour_rate
self.total_working_hours = sum([d.working_hours or 0.0 for d in self.timesheets]) or 0.0
wages_amount = self.hour_rate * self.total_working_hours
self.add_earning_for_hourly_wages(self, self._salary_structure_doc.salary_component, wages_amount)
make_salary_slip(self._salary_structure_doc.name, self)
def process_salary_structure(self):
'''Calculate salary after salary structure details have been updated'''
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.pull_emp_details()
self.get_leave_details()
self.calculate_net_pay()
def add_earning_for_hourly_wages(self, doc, salary_component, amount):
row_exists = False
for row in doc.earnings:
if row.salary_component == salary_component:
row.amount = amount
row_exists = True
break
if not row_exists:
wages_row = {
"salary_component": salary_component,
"abbr": frappe.db.get_value("Salary Component", salary_component, "salary_component_abbr"),
"amount": self.hour_rate * self.total_working_hours
}
doc.append('earnings', wages_row)
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee, ["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None):
if not joining_date:
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
holidays = self.get_holidays_for_employee(self.start_date, self.end_date)
working_days = date_diff(self.end_date, self.start_date) + 1
actual_lwp = self.calculate_lwp(holidays, working_days)
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
working_days -= len(holidays)
if working_days < 0:
frappe.throw(_("There are more holidays than working days this month."))
if not lwp:
lwp = actual_lwp
elif lwp != actual_lwp:
frappe.msgprint(_("Leave Without Pay does not match with approved Leave Application records"))
self.total_working_days = working_days
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(joining_date, relieving_date)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, joining_date, relieving_date):
start_date = getdate(self.start_date)
if joining_date:
if getdate(self.start_date) <= joining_date <= getdate(self.end_date):
start_date = joining_date
elif joining_date > getdate(self.end_date):
return
end_date = getdate(self.end_date)
if relieving_date:
if getdate(self.start_date) <= relieving_date <= getdate(self.end_date):
end_date = relieving_date
elif relieving_date < getdate(self.start_date):
frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
.format(relieving_date))
payment_days = date_diff(end_date, start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
holidays = self.get_holidays_for_employee(start_date, end_date)
payment_days -= len(holidays)
return payment_days
def get_holidays_for_employee(self, start_date, end_date):
holiday_list = get_holiday_list_for_employee(self.employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
def calculate_lwp(self, holidays, working_days):
lwp = 0
holidays = "','".join(holidays)
for d in range(working_days):
dt = add_days(cstr(getdate(self.start_date)), d)
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and t2.is_lwp = 1
and t1.docstatus = 1
and t1.employee = %(employee)s
and CASE WHEN t2.include_holiday != 1 THEN %(dt)s not in ('{0}') and %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
WHEN t2.include_holiday THEN %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
END
""".format(holidays), {"employee": self.employee, "dt": dt})
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
def check_existing(self):
if not self.salary_slip_based_on_timesheet:
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where start_date = %s and end_date = %s and docstatus != 2
and employee = %s and name != %s""",
(self.start_date, self.end_date, self.employee, self.name))
if ret_exist:
    employee = self.employee
    self.employee = ''
    frappe.throw(_("Salary Slip of employee {0} already created for this period").format(employee))
else:
for data in self.timesheets:
if frappe.db.get_value('Timesheet', data.time_sheet, 'status') == 'Payrolled':
frappe.throw(_("Salary Slip of employee {0} already created for time sheet {1}").format(self.employee, data.time_sheet))
def sum_components(self, component_type, total_field, precision):
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
if not relieving_date:
relieving_date = getdate(self.end_date)
if not joining_date:
frappe.throw(_("Please set the Date Of Joining for employee {0}").format(frappe.bold(self.employee_name)))
for d in self.get(component_type):
if (self.salary_structure and
cint(d.depends_on_lwp) and
(not
self.salary_slip_based_on_timesheet or
getdate(self.start_date) < joining_date or
getdate(self.end_date) > relieving_date
)):
d.amount = rounded(
(flt(d.default_amount, precision) * flt(self.payment_days)
/ cint(self.total_working_days)), self.precision("amount", component_type)
)
elif not self.payment_days and not self.salary_slip_based_on_timesheet and \
cint(d.depends_on_lwp):
d.amount = 0
elif not d.amount:
d.amount = d.default_amount
if not d.do_not_include_in_total:
self.set(total_field, self.get(total_field) + flt(d.amount, precision))
def calculate_net_pay(self):
if self.salary_structure:
self.calculate_component_amounts()
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
precision = frappe.defaults.get_global_default("currency_precision")
self.total_deduction = 0
self.gross_pay = 0
self.sum_components('earnings', 'gross_pay', precision)
self.sum_components('deductions', 'total_deduction', precision)
self.set_loan_repayment()
self.net_pay = flt(self.gross_pay) - (flt(self.total_deduction) + flt(self.total_loan_repayment))
self.rounded_total = rounded(self.net_pay,
self.precision("net_pay") if disable_rounded_total else 0)
if self.net_pay < 0:
frappe.throw(_("Net Pay cannnot be negative"))
def set_loan_repayment(self):
self.set('loans', [])
self.total_loan_repayment = 0
self.total_interest_amount = 0
self.total_principal_amount = 0
for loan in self.get_loan_details():
self.append('loans', {
'loan': loan.name,
'total_payment': loan.total_payment,
'interest_amount': loan.interest_amount,
'principal_amount': loan.principal_amount,
'loan_account': loan.loan_account,
'interest_income_account': loan.interest_income_account
})
self.total_loan_repayment += loan.total_payment
self.total_interest_amount += loan.interest_amount
self.total_principal_amount += loan.principal_amount
def get_loan_details(self):
return frappe.db.sql("""select rps.principal_amount, rps.interest_amount, l.name,
rps.total_payment, l.loan_account, l.interest_income_account
from
`tabRepayment Schedule` as rps, `tabLoan` as l
where
l.name = rps.parent and rps.payment_date between %s and %s and
l.repay_from_salary = 1 and l.docstatus = 1 and l.applicant = %s""",
(self.start_date, self.end_date, self.employee), as_dict=True) or []
def on_submit(self):
if self.net_pay < 0:
frappe.throw(_("Net Pay cannot be less than 0"))
else:
self.set_status()
self.update_status(self.name)
self.update_salary_slip_in_additional_salary()
if (frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee")) and not frappe.flags.via_payroll_entry:
self.email_salary_slip()
def on_cancel(self):
self.set_status()
self.update_status()
self.update_salary_slip_in_additional_salary()
def on_trash(self):
from frappe.model.naming import revert_series_if_last
revert_series_if_last(self.series, self.name)
def update_salary_slip_in_additional_salary(self):
salary_slip = self.name if self.docstatus==1 else None
frappe.db.sql("""
update `tabAdditional Salary` set salary_slip=%s
where employee=%s and payroll_date between %s and %s and docstatus=1
""", (salary_slip, self.employee, self.start_date, self.end_date))
def email_salary_slip(self):
receiver = frappe.db.get_value("Employee", self.employee, "prefered_email")
if receiver:
email_args = {
"recipients": [receiver],
"message": _("Please see attachment"),
"subject": 'Salary Slip - from {0} to {1}'.format(self.start_date, self.end_date),
"attachments": [frappe.attach_print(self.doctype, self.name, file_name=self.name)],
"reference_doctype": self.doctype,
"reference_name": self.name
}
if not frappe.flags.in_test:
enqueue(method=frappe.sendmail, queue='short', timeout=300, is_async=True, **email_args)
else:
frappe.sendmail(**email_args)
else:
msgprint(_("{0}: Employee email not found, hence email not sent").format(self.employee_name))
def update_status(self, salary_slip=None):
for data in self.timesheets:
if data.time_sheet:
timesheet = frappe.get_doc('Timesheet', data.time_sheet)
timesheet.salary_slip = salary_slip
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def set_status(self, status=None):
'''Get and update status'''
if not status:
status = self.get_status()
self.db_set("status", status)
def get_status(self):
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Submitted"
elif self.docstatus == 2:
status = "Cancelled"
return status
def calculate_variable_based_on_taxable_salary(self, tax_component):
payroll_period = get_payroll_period(self.start_date, self.end_date, self.company)
if not payroll_period:
frappe.msgprint(_("Start and end dates not in a valid Payroll Period, cannot calculate {0}.")
.format(tax_component))
return False
if payroll_period.end_date <= getdate(self.end_date):
if not self.deduct_tax_for_unsubmitted_tax_exemption_proof or not\
self.deduct_tax_for_unclaimed_employee_benefits:
frappe.throw(_("You have to Deduct Tax for Unsubmitted Tax Exemption Proof and Unclaimed \
Employee Benefits in the last Salary Slip of Payroll Period"))
# calc prorata tax to be applied
return self.calculate_variable_tax(tax_component, payroll_period)
def calculate_variable_tax(self, tax_component, payroll_period):
annual_taxable_earning, period_factor = 0, 0
pro_rata_tax_paid, additional_tax_paid, benefit_tax_paid = 0, 0, 0
unclaimed_earning, unclaimed_benefit, additional_income = 0, 0, 0
# get taxable_earning, additional_income in this slip
taxable_earning = self.get_taxable_earnings()
if self.deduct_tax_for_unclaimed_employee_benefits:
# get all untaxed benefits till date, pass amount to be taxed by later methods
unclaimed_benefit = self.calculate_unclaimed_taxable_benefit(payroll_period)
# flexi's excluded from monthly tax, add flexis in this slip to unclaimed_benefit
unclaimed_benefit += self.get_taxable_earnings(only_flexi=True)["taxable_earning"]
if self.deduct_tax_for_unsubmitted_tax_exemption_proof:
# do not consider exemption, calc tax to be paid for the period till date
# considering prorata taxes paid and proofs submitted
unclaimed_earning = self.calculate_unclaimed_taxable_earning(payroll_period, tax_component)
earning_in_period = taxable_earning["taxable_earning"] + unclaimed_earning
period_factor = self.get_period_factor(payroll_period.start_date, payroll_period.end_date,
payroll_period.start_date, self.end_date)
annual_taxable_earning = earning_in_period * period_factor
additional_income += self.get_total_additional_income(payroll_period.start_date)
else:
# consider exemption declaration, find annual_earning by monthly taxable salary
period_factor = self.get_period_factor(payroll_period.start_date, payroll_period.end_date)
annual_earning = taxable_earning["taxable_earning"] * period_factor
exemption_amount = 0
if frappe.db.exists("Employee Tax Exemption Declaration", {"employee": self.employee,
"payroll_period": payroll_period.name, "docstatus": 1}):
exemption_amount = frappe.db.get_value("Employee Tax Exemption Declaration",
{"employee": self.employee, "payroll_period": payroll_period.name, "docstatus": 1},
"total_exemption_amount")
annual_taxable_earning = annual_earning - exemption_amount
if self.deduct_tax_for_unclaimed_employee_benefits or self.deduct_tax_for_unsubmitted_tax_exemption_proof:
tax_detail = self.get_tax_paid_in_period(payroll_period, tax_component)
if tax_detail:
pro_rata_tax_paid = tax_detail["total_tax_paid"] - tax_detail["additional_tax"] - tax_detail["benefit_tax"]
additional_tax_paid = tax_detail["additional_tax"]
benefit_tax_paid = tax_detail["benefit_tax"]
# add any additional income in this slip
additional_income += taxable_earning["additional_income"]
args = {"payroll_period": payroll_period.name, "tax_component": tax_component,
"annual_taxable_earning": annual_taxable_earning, "period_factor": period_factor,
"unclaimed_benefit": unclaimed_benefit, "additional_income": additional_income,
"pro_rata_tax_paid": pro_rata_tax_paid, "benefit_tax_paid": benefit_tax_paid,
"additional_tax_paid": additional_tax_paid}
return self.calculate_tax(args)
def calculate_unclaimed_taxable_benefit(self, payroll_period):
total_benefit, total_benefit_claim = 0, 0
# get total sum of benefits paid
sum_benefit = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail` sd join
`tabSalary Slip` ss on sd.parent=ss.name where sd.parentfield='earnings'
and sd.is_tax_applicable=1 and is_flexible_benefit=1 and ss.docstatus=1
and ss.employee='{0}' and ss.start_date between '{1}' and '{2}' and
ss.end_date between '{1}' and '{2}'""".format(self.employee,
payroll_period.start_date, self.start_date))
if sum_benefit and sum_benefit[0][0]:
total_benefit = sum_benefit[0][0]
# get total benefits claimed
sum_benefit_claim = frappe.db.sql("""select sum(claimed_amount) from
`tabEmployee Benefit Claim` where docstatus=1 and employee='{0}' and claim_date
between '{1}' and '{2}'""".format(self.employee, payroll_period.start_date, self.end_date))
if sum_benefit_claim and sum_benefit_claim[0][0]:
total_benefit_claim = sum_benefit_claim[0][0]
return total_benefit - total_benefit_claim
def calculate_unclaimed_taxable_earning(self, payroll_period, tax_component):
total_taxable_earning, total_exemption_amount = 0, 0
# calc total taxable amount in period
sum_taxable_earning = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail` sd join
`tabSalary Slip` ss on sd.parent=ss.name where sd.parentfield='earnings'
and sd.is_tax_applicable=1 and is_additional_component=0 and is_flexible_benefit=0
and ss.docstatus=1 and ss.employee='{0}' and ss.start_date between '{1}' and '{2}'
and ss.end_date between '{1}' and '{2}'""".format(self.employee,
payroll_period.start_date, self.start_date))
if sum_taxable_earning and sum_taxable_earning[0][0]:
total_taxable_earning = sum_taxable_earning[0][0]
# add up total Proof Submission
sum_exemption = frappe.db.sql("""select sum(exemption_amount) from
`tabEmployee Tax Exemption Proof Submission` where docstatus=1 and employee='{0}' and
payroll_period='{1}' and submission_date between '{2}' and '{3}'""".format(self.employee,
payroll_period.name, payroll_period.start_date, self.end_date))
if sum_exemption and sum_exemption[0][0]:
total_exemption_amount = sum_exemption[0][0]
total_taxable_earning -= total_exemption_amount
return total_taxable_earning
def get_total_additional_income(self, from_date):
total_additional_pay = 0
sum_additional_earning = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail` sd join
`tabSalary Slip` ss on sd.parent=ss.name where sd.parentfield='earnings'
and sd.is_tax_applicable=1 and is_additional_component=1 and is_flexible_benefit=0
and ss.docstatus=1 and ss.employee='{0}' and ss.start_date between '{1}' and '{2}'
and ss.end_date between '{1}' and '{2}'""".format(self.employee,
from_date, self.start_date))
if sum_additional_earning and sum_additional_earning[0][0]:
total_additional_pay = sum_additional_earning[0][0]
return total_additional_pay
def get_tax_paid_in_period(self, payroll_period, tax_component, only_total=False):
# find total_tax_paid, tax paid for benefit, additional_salary
sum_tax_paid = frappe.db.sql("""select sum(sd.amount), sum(tax_on_flexible_benefit),
sum(tax_on_additional_salary) from `tabSalary Detail` sd join `tabSalary Slip`
ss on sd.parent=ss.name where sd.parentfield='deductions' and sd.salary_component='{3}'
and sd.variable_based_on_taxable_salary=1 and ss.docstatus=1 and ss.employee='{0}'
and ss.start_date between '{1}' and '{2}' and ss.end_date between '{1}' and
'{2}'""".format(self.employee, payroll_period.start_date, self.start_date, tax_component))
if sum_tax_paid and sum_tax_paid[0][0]:
return {'total_tax_paid': sum_tax_paid[0][0], 'benefit_tax':sum_tax_paid[0][1], 'additional_tax': sum_tax_paid[0][2]}
def get_taxable_earnings(self, include_flexi=0, only_flexi=0):
taxable_earning = 0
additional_income = 0
for earning in self.earnings:
if earning.is_tax_applicable:
if earning.is_additional_component:
additional_income += earning.amount
continue
if only_flexi:
if earning.is_tax_applicable and earning.is_flexible_benefit:
taxable_earning += earning.amount
continue
if include_flexi:
if earning.is_tax_applicable or (earning.is_tax_applicable and earning.is_flexible_benefit):
taxable_earning += earning.amount
else:
if earning.is_tax_applicable and not earning.is_flexible_benefit:
taxable_earning += earning.amount
return {"taxable_earning": taxable_earning, "additional_income": additional_income}
def calculate_tax(self, args):
tax_amount, benefit_tax, additional_tax = 0, 0, 0
annual_taxable_earning = args.get("annual_taxable_earning")
benefit_to_tax = args.get("unclaimed_benefit")
additional_income = args.get("additional_income")
# Get tax calc by period
annual_tax = self.calculate_tax_by_tax_slab(args.get("payroll_period"), annual_taxable_earning)
# Calc prorata tax
tax_amount = annual_tax / args.get("period_factor")
# Benefit is a part of Salary Structure, add the tax diff, update annual_tax
if benefit_to_tax > 0:
annual_taxable_earning += benefit_to_tax
annual_tax_with_benefit_income = self.calculate_tax_by_tax_slab(
args.get("payroll_period"), annual_taxable_earning)
benefit_tax = annual_tax_with_benefit_income - annual_tax - args.get("benefit_tax_paid")
tax_amount += benefit_tax
annual_tax = annual_tax_with_benefit_income
# find the annual tax diff caused by additional_income, add to tax_amount
if additional_income > 0:
annual_tax_with_additional_income = self.calculate_tax_by_tax_slab(
args.get("payroll_period"), annual_taxable_earning + additional_income)
additional_tax = annual_tax_with_additional_income - annual_tax - args.get("additional_tax_paid")
tax_amount += additional_tax
# less paid taxes
if args.get("pro_rata_tax_paid"):
tax_amount -= args.get("pro_rata_tax_paid")
struct_row = self.get_salary_slip_row(args.get("tax_component"))
return [struct_row, tax_amount, benefit_tax, additional_tax]
def calculate_tax_by_tax_slab(self, payroll_period, annual_earning):
payroll_period_obj = frappe.get_doc("Payroll Period", payroll_period)
data = self.get_data_for_eval()
taxable_amount = 0
for slab in payroll_period_obj.taxable_salary_slabs:
if slab.condition and not self.eval_tax_slab_condition(slab.condition, data):
continue
if not slab.to_amount and annual_earning > slab.from_amount:
taxable_amount += (annual_earning - slab.from_amount) * slab.percent_deduction *.01
continue
if annual_earning > slab.from_amount and annual_earning < slab.to_amount:
taxable_amount += (annual_earning - slab.from_amount) * slab.percent_deduction *.01
elif annual_earning > slab.from_amount and annual_earning > slab.to_amount:
taxable_amount += (slab.to_amount - slab.from_amount) * slab.percent_deduction * .01
return taxable_amount
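 # Worked example for the slab loop above (hypothetical slab values, purely
 # illustrative): with slabs 0-250000 at 0%, 250000-500000 at 5% and 500000
 # upwards at 20% (no to_amount), an annual_earning of 600000 accumulates
 # (500000 - 250000) * 5 * .01 + (600000 - 500000) * 20 * .01 = 12500 + 20000
 # = 32500, assuming no slab conditions apply.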
def eval_tax_slab_condition(self, condition, data):
try:
condition = condition.strip()
if condition:
return frappe.safe_eval(condition, self.whitelisted_globals, data)
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
def get_period_factor(self, period_start, period_end, start_date=None, end_date=None):
# TODO if both deduct checked update the factor to make tax consistent
payroll_days = date_diff(period_end, period_start) + 1
if start_date and end_date:
salary_days = date_diff(end_date, start_date) + 1
return flt(payroll_days)/flt(salary_days)
# if period configured for a year and monthly frequency return 12 to make tax calc consistent
if 360 <= payroll_days <= 370 and self.payroll_frequency == "Monthly":
return 12
salary_days = date_diff(self.end_date, self.start_date) + 1
return flt(payroll_days)/flt(salary_days)
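 # Illustrative values for the factor above: for a 365-day payroll period and a
 # Monthly slip (with no explicit start/end dates passed) the special case
 # returns 12, keeping the annual projection consistent; otherwise the factor is
 # payroll_days / salary_days, e.g. 365 / 31, roughly 11.77, for a 31-day slip.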
def get_salary_slip_row(self, salary_component):
component = frappe.get_doc("Salary Component", salary_component)
# Data for update_component_row
struct_row = {}
struct_row['depends_on_lwp'] = component.depends_on_lwp
struct_row['salary_component'] = component.name
struct_row['abbr'] = component.salary_component_abbr
struct_row['do_not_include_in_total'] = component.do_not_include_in_total
struct_row['is_tax_applicable'] = component.is_tax_applicable
struct_row['is_flexible_benefit'] = component.is_flexible_benefit
struct_row['variable_based_on_taxable_salary'] = component.variable_based_on_taxable_salary
return struct_row
def unlink_ref_doc_from_salary_slip(ref_no):
linked_ss = frappe.db.sql_list("""select name from `tabSalary Slip`
where journal_entry=%s and docstatus < 2""", (ref_no))
if linked_ss:
for ss in linked_ss:
ss_doc = frappe.get_doc("Salary Slip", ss)
frappe.db.set_value("Salary Slip", ss_doc.name, "journal_entry", "")
| gpl-3.0 | 4,243,228,669,414,350,300 | 42.415212 | 176 | 0.711881 | false | 2.979293 | false | false | false |
SISC2014/JobAnalysis | MongoRetrieval/src/EfficiencyHistogram.py | 1 | 6076 | '''
Created on Jun 19, 2014
@author: Erik Halperin
List of Keys
_id
JobStartDate
Requirements
TransferInput
TotalSuspensions
LastJobStatus
BufferBlockSize
OrigMaxHosts
RequestMemory
WantRemoteSyscalls
LastHoldReasonCode
ExitStatus
Args
JobFinishedHookDone
JobCurrentStartDate
CompletionDate
JobLeaseDuration
Err
RemoteWallClockTime
JobUniverse
RequestCpus
RemoveReason
StreamErr
Rank
WantRemoteIO
LocalSysCpu
UsedOCWrapper
CumulativeSlotTime
TransferIn
MachineAttrCpus0
CondorPlatform
CurrentTime
ExitReason
StreamOut
WantCheckpoint
GlobalJobId
TransferInputSizeMB
JobStatus
LastPublicClaimId
MemoryUsage
NumSystemHolds
TransferOutput
PeriodicRemove
NumShadowStarts
LastHoldReasonSubCode
LastSuspensionTime
ShouldTransferFiles
QDate
RemoteSysCpu
ImageSize_RAW
LastRemoteHost
CondorVersion
DiskUsage_RAW
PeriodicRelease
NumCkpts_RAW
JobCurrentStartExecutingDate
ProjectName
CoreSize
RemoteUserCpu
BytesSent
Owner
BytesRecvd
ExitCode
NumJobStarts
ExecutableSize_RAW
Notification
ExecutableSize
Environment
StartdPrincipal
RootDir
MinHosts
CumulativeSuspensionTime
JOBGLIDEIN_ResourceName
ProcId
MATCH_EXP_JOBGLIDEIN_ResourceName
OnExitRemove
User
UserLog
CommittedSuspensionTime
NumRestarts
JobCoreDumped
Cmd
NumJobMatches
DiskUsage
LastRemotePool
CommittedSlotTime
ResidentSetSize
WhenToTransferOutput
ExitBySignal
Out
RequestDisk
ImageSize
NumCkpts
LastJobLeaseRenewal
MachineAttrSlotWeight0
ResidentSetSize_RAW
JobPrio
JobRunCount
PeriodicHold
ClusterId
NiceUser
MyType
LocalUserCpu
BufferSize
LastHoldReason
CurrentHosts
LeaveJobInQueue
OnExitHold
EnteredCurrentStatus
MaxHosts
CommittedTime
LastMatchTime
In
JobNotification
'''
import re
import matplotlib.pyplot as plt
from pymongo import MongoClient
#takes a list of dictionaries and returns a list of floats
def parseList(l):
l = map(str, l)
newlist = []
for k in l:
newlist.append(re.sub('[RemoteWallClockTimeUsrpu_id\"\'{}: ]', '', k))
newlist = map(float, newlist)
return list(newlist)
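#Illustrative example of parseList (hypothetical values): applied to
#[{'RemoteUserCpu': 12.5}, {'RemoteUserCpu': 7.0}] it stringifies each dict,
#strips the key name, quotes, braces, colons and spaces with the regex above,
#and returns [12.5, 7.0].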
#returns a list of dictionaries
#item is from list of keys, username: "[email protected]", cluster: "123456", site: "phys.ucconn.edu",
#coll: MongoDB collection
#username/cluster/site may be None, in which case they will not be used
#item should be _id
def dbFindItemFromUser(item, username, cluster, site, coll):
mylist = []
rgx = "$regex"
if(username != None):
username = '\"' + username + '\"'
dicU = {'User': username }
else:
dicU = {}
if(cluster != None):
dicC = { 'ClusterId': cluster }
else:
dicC = {}
if(site != None):
dicS = { 'LastRemoteHost': { rgx: site } }
else:
dicS = {}
dicU.update(dicC)
dicU.update(dicS)
pr = { item: 1, '_id': 0 }
for condor_history in coll.find(dicU, pr):
mylist.append(condor_history)
return mylist
#returns a list of dictionaries
#username and coll are same as above
def dbFindIdFromUser(username, coll):
mylist = []
username = '\"' + username + '\"'
cr = { 'User': username }
pr = { '_id': 1 }
for condor_history in coll.find(cr, pr):
mylist.append(condor_history)
return mylist
#creates a scatterplot of two items
def plotScatter(item1, item2, username, cluster, coll, xlab, ylab, title):
    lst1 = parseList(dbFindItemFromUser(item1, username, cluster, None, coll))
    lst2 = parseList(dbFindItemFromUser(item2, username, cluster, None, coll))
plt.plot(lst1, lst2, 'bo')
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.show()
#creates a histogram of a list
#l: list to plot, bs: number of bins
def plotHist(l, bs, xlab, ylab, title):
plt.hist(l, bins=bs)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.show()
def getEfficiency(username, cluster, site, coll):
ruc = parseList(dbFindItemFromUser("RemoteUserCpu", username, cluster, site, coll))
rwct = parseList(dbFindItemFromUser("RemoteWallClockTime", username, cluster, site, coll))
efflist = []
totcount = 0
goodcount = 0 #certain efficiency values are >1 due to a condor error. these values are discarded
zerocount = 0 #testing possible condor bug where RemoteUserCpu is 0 but RemoteWallClockTime is quite large
for x,y in zip(ruc, rwct):
if(y == 0):
totcount += 1
elif(x/y > 1):
totcount += 1
else:
if(x == 0):
zerocount +=1
efflist.append(x/y)
totcount += 1
goodcount +=1
return [efflist, goodcount, totcount]
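#Illustrative call (hypothetical arguments): getEfficiency(None, None,
#"uc.mwt2.org", coll) returns [efficiencies, plotted_count, total_count] for
#jobs whose LastRemoteHost matches that site; jobs with zero wall-clock time or
#efficiency > 1 are counted in the total but left out of the histogram list.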
#Given at least one input for username/cluster/site, creates a histogram of the RemoteUserCpu/RemoteWallClockTime for the results
def efficiencyHistogram(username, cluster, site, coll, bins, xlab, ylab, title):
retlist = getEfficiency(username, cluster, site, coll) #0: efflist, 1: goodcount, 2: totcount
print("Jobs Plotted:", retlist[1], "/", retlist[2])
plotHist(retlist[0], bins, xlab, ylab, title)
def fourEffHists(lst1, lst2, lst3, lst4, lab1, lab2, lab3, lab4, bs, xlab, ylab, title):
plt.hist(lst1, bins=bs, histtype='stepfilled', label=lab1)
plt.hist(lst2, bins=bs, histtype='stepfilled', label=lab2)
plt.hist(lst3, bins=bs, histtype='stepfilled', label=lab3)
plt.hist(lst4, bins=bs, histtype='stepfilled', label=lab4)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.legend()
plt.show()
def mainEH(host, port):
client = MongoClient(host, port)
db = client.condor_history
coll = db.history_records
#sites: uc.mwt2.org, phys.uconn.edu, hpc.smu.edu, usatlas.bnl.gov
#names (@login01.osgconnect.net): lfzhao, sthapa, echism, wcatino, bamitchell
str_name = "[email protected]"
efficiencyHistogram(str_name, None, None, coll, 75, "UserCPU/WallClockTime", "Frequency", "Efficiencies for " + str_name)
mainEH('mc.mwt2.org', 27017) | mit | 4,049,639,096,715,589,000 | 22.46332 | 129 | 0.697334 | false | 3.295011 | false | false | false |
openprocurement/openprocurement.edge | openprocurement/edge/views/auctions.py | 1 | 7066 | # -*- coding: utf-8 -*-
from functools import partial
from openprocurement.edge.utils import (
context_unpack,
decrypt,
encrypt,
APIResource,
json_view
)
from openprocurement.edge.utils import eaopresource
from openprocurement.edge.design import (
by_dateModified_view_ViewDefinition,
real_by_dateModified_view_ViewDefinition,
test_by_dateModified_view_ViewDefinition,
by_local_seq_view_ViewDefinition,
real_by_local_seq_view_ViewDefinition,
test_by_local_seq_view_ViewDefinition,
)
from openprocurement.edge.design import AUCTION_FIELDS as FIELDS
VIEW_MAP = {
u'': real_by_dateModified_view_ViewDefinition('auctions'),
u'test': test_by_dateModified_view_ViewDefinition('auctions'),
u'_all_': by_dateModified_view_ViewDefinition('auctions'),
}
CHANGES_VIEW_MAP = {
u'': real_by_local_seq_view_ViewDefinition('auctions'),
u'test': test_by_local_seq_view_ViewDefinition('auctions'),
u'_all_': by_local_seq_view_ViewDefinition('auctions'),
}
FEED = {
u'dateModified': VIEW_MAP,
u'changes': CHANGES_VIEW_MAP,
}
@eaopresource(name='Auctions',
path='/auctions',
description="Open Contracting compatible data exchange format. See http://ocds.open-contracting.org/standard/r/master/#auction for more info")
class AuctionsResource(APIResource):
def __init__(self, request, context):
super(AuctionsResource, self).__init__(request, context)
self.server = request.registry.couchdb_server
self.update_after = request.registry.update_after
@json_view()
def get(self):
"""Auctions List
Get Auctions List
----------------
Example request to get auctions list:
.. sourcecode:: http
GET /auctions HTTP/1.1
Host: example.com
Accept: application/json
This is what one should expect in response:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"data": [
{
"id": "64e93250be76435397e8c992ed4214d1",
"dateModified": "2014-10-27T08:06:58.158Z"
}
]
}
"""
# http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options
params = {}
pparams = {}
fields = self.request.params.get('opt_fields', '')
if fields:
params['opt_fields'] = fields
pparams['opt_fields'] = fields
fields = fields.split(',')
view_fields = fields + ['dateModified', 'id']
limit = self.request.params.get('limit', '')
if limit:
params['limit'] = limit
pparams['limit'] = limit
limit = int(limit) if limit.isdigit() and 1000 >= int(limit) > 0 else 100
descending = bool(self.request.params.get('descending'))
offset = self.request.params.get('offset', '')
if descending:
params['descending'] = 1
else:
pparams['descending'] = 1
feed = self.request.params.get('feed', '')
view_map = FEED.get(feed, VIEW_MAP)
changes = view_map is CHANGES_VIEW_MAP
if feed and feed in FEED:
params['feed'] = feed
pparams['feed'] = feed
mode = self.request.params.get('mode', '')
if mode and mode in view_map:
params['mode'] = mode
pparams['mode'] = mode
view_limit = limit + 1 if offset else limit
if changes:
if offset:
view_offset = decrypt(self.server.uuid, self.db.name, offset)
if view_offset and view_offset.isdigit():
view_offset = int(view_offset)
else:
self.request.errors.add('params', 'offset', 'Offset expired/invalid')
self.request.errors.status = 404
return
if not offset:
view_offset = 'now' if descending else 0
else:
if offset:
view_offset = offset
else:
view_offset = '9' if descending else ''
list_view = view_map.get(mode, view_map[u''])
if self.update_after:
view = partial(list_view, self.db, limit=view_limit, startkey=view_offset, descending=descending, stale='update_after')
else:
view = partial(list_view, self.db, limit=view_limit, startkey=view_offset, descending=descending)
if fields:
if not changes and set(fields).issubset(set(FIELDS)):
results = [
(dict([(i, j) for i, j in x.value.items() + [('id', x.id), ('dateModified', x.key)] if i in view_fields]), x.key)
for x in view()
]
elif changes and set(fields).issubset(set(FIELDS)):
results = [
(dict([(i, j) for i, j in x.value.items() + [('id', x.id)] if i in view_fields]), x.key)
for x in view()
]
elif fields:
self.LOGGER.info('Used custom fields for auctions list: {}'.format(','.join(sorted(fields))),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_list_custom'}))
results = [
(dict([(k, j) for k, j in i[u'doc'].items() if k in view_fields]), i.key)
for i in view(include_docs=True)
]
else:
results = [
({'id': i.id, 'dateModified': i.value['dateModified']} if changes else {'id': i.id, 'dateModified': i.key}, i.key)
for i in view()
]
if results:
params['offset'], pparams['offset'] = results[-1][1], results[0][1]
if offset and view_offset == results[0][1]:
results = results[1:]
elif offset and view_offset != results[0][1]:
results = results[:limit]
params['offset'], pparams['offset'] = results[-1][1], view_offset
results = [i[0] for i in results]
if changes:
params['offset'] = encrypt(self.server.uuid, self.db.name, params['offset'])
pparams['offset'] = encrypt(self.server.uuid, self.db.name, pparams['offset'])
else:
params['offset'] = offset
pparams['offset'] = offset
data = {
'data': results,
'next_page': {
"offset": params['offset'],
"path": self.request.route_path('Auctions', _query=params),
"uri": self.request.route_url('Auctions', _query=params)
}
}
if descending or offset:
data['prev_page'] = {
"offset": pparams['offset'],
"path": self.request.route_path('Auctions', _query=pparams),
"uri": self.request.route_url('Auctions', _query=pparams)
}
return data
| apache-2.0 | -6,018,885,631,503,351,000 | 37.612022 | 154 | 0.538353 | false | 3.994347 | true | false | false |
IhToN/DAW1-PRG | Ejercicios/SeguTrim/Objetos/Punto.py | 1 | 4377 | """
Punto class
    coord x
    coord y
    suma(punto)
    resta(punto)
Traza class
    Punto instances in a list
    add a point
    compare two traces (two traces are equal if their points are equal)
'''
import math
import turtle
class Punto:
def __init__(self, x=0.0, y=0.0):
self.x = float(x)
self.y = float(y)
def __str__(self):
return "Punto({}, {})".format(self.x, self.y)
def __eq__(self, other):
return (self.x, self.y) == (other.x, other.y)
def suma(self, punto):
""" Devuelve la suma vectorial del punto con otro
"""
return Punto(self.x + punto.x, self.y + punto.y)
def resta(self, punto):
""" Devuelve la resta vectorial del punto con otro
"""
return self.suma(-punto)
def distancia(self, punto):
""" Devuelve la distancia que hay entre un punto y otro
"""
return math.hypot(self.x - punto.x, self.y - punto.y)
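# Illustrative example: Punto(3, 0) and Punto(0, 4) form a 3-4-5 right
# triangle, so Punto(3, 0).distancia(Punto(0, 4)) returns 5.0.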
class Traza:
def __init__(self, *args):
self.trazado = []
self.i = -1
for arg in args:
if isinstance(arg, Punto):
self.trazado.append(arg)
else:
raise ValueError(arg, "no es un punto.")
def __str__(self):
out = ""
for punto in self.trazado:
out += str(punto) + " "
return out
def __eq__(self, other):
return self.trazado == other.trazado
def __next__(self):
self.i += 1
if self.i < len(self.trazado):
return self.trazado[self.i]
else:
raise StopIteration
def __iter__(self):
return self
def add_punto(self, punto):
""" Añade un punto nuevo a la Traza
"""
if isinstance(punto, Punto):
self.trazado.append(punto)
else:
raise ValueError("¡Ioputa, que en las trazas sólo puede haber puntos y no cosas raras!")
def longitud_traza(self):
""" Devuelve la suma de la distancia entre todos los puntos de la traza
"""
ret = 0
for p in range(len(self.trazado) - 1):
ret += self.trazado[p].distancia(self.trazado[p + 1])
return ret
def dump_traza(self, fichero='traza.txt'):
""" Guardamos la traza en un fichero de trazas
"""
fichero = open(fichero, 'w', encoding="utf-8")
for punto in self.trazado:
fichero.write("{},{}\n".format(punto.x, punto.y))
fichero.close()
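    # File format written above and read back by load_traza below:
    # one point per line as "x,y", e.g.
    #   3.0,0.0
    #   0.0,4.0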
def load_traza(self, fichero):
try:
fichero = open(fichero, encoding="utf-8")
self.trazado = []
for linea in fichero:
if linea != "":
punto = linea.split(",")
self.add_punto(Punto(punto[0].strip(), punto[1].strip()))
except FileNotFoundError:
print("No existe el fichero.")
def dibuja(self):
tortuga = self.turtle
tortuga.down()
for punto in self.trazado:
tortuga.setpos(punto.x, punto.y)
tortuga.up()
def toggle_capture(self):
"""Activamos o desactivamos el modo captura, según toque"""
self.capture_mode = not self.capture_mode
if not self.capture_mode:
self.turtle.reset()
self.turtle.up()
self.turtle.setpos(self.trazado[0].x, self.trazado[0].y)
self.dibuja()
fichero = self.screen.textinput("Guardar Traza", "Dime el nombre del fichero:")
self.dump_traza(fichero + ".txt")
print(self)
def move_turtle(self, x, y):
"""Si estamos en modo captura, movemos la tortuga y vamos guardando los puntos"""
tortuga = self.turtle
if self.capture_mode:
tortuga.setheading(tortuga.towards(x, y))
tortuga.setpos(x, y)
self.add_punto(Punto(x, y))
def test():
p = Punto(3, 0)
k = Punto(0, 4)
tr = Traza(p, k)
print(tr)
tr.dump_traza("traza.txt")
tr.load_traza("traza.txt")
print(tr)
s = turtle.Screen()
t = turtle.Turtle()
tr.turtle = t
tr.screen = s
tr.capture_mode = False
s.onkey(tr.toggle_capture, 'space')
s.onclick(tr.move_turtle)
s.listen()
tr.dibuja()
turtle.done()
tr.dump_traza("traza.txt")
test()
| apache-2.0 | -7,849,022,810,489,935,000 | 25.815951 | 100 | 0.538778 | false | 3.133333 | false | false | false |
pmediano/ComputationalNeurodynamics | Fall2016/Exercise_1/Solutions/IzNeuronRK4.py | 1 | 1897 | """
Computational Neurodynamics
Exercise 1
Simulates Izhikevich's neuron model using the Runge-Kutta 4 method.
Parameters for regular spiking, fast spiking and bursting
neurons extracted from:
http://www.izhikevich.org/publications/spikes.htm
(C) Murray Shanahan et al, 2016
"""
import numpy as np
import matplotlib.pyplot as plt
# Create time points
Tmin = 0
Tmax = 200 # Simulation time
dt = 0.01 # Step size
T = np.arange(Tmin, Tmax+dt, dt)
# Base current
I = 10
## Parameters of Izhikevich's model (regular spiking)
a = 0.02
b = 0.2
c = -65
d = 8
## Parameters of Izhikevich's model (fast spiking)
# a = 0.02
# b = 0.25
# c = -65
# d = 2
## Parameters of Izhikevich's model (bursting)
# a = 0.02
# b = 0.2
# c = -50
# d = 2
## Make a state vector that has a (v, u) pair for each timestep
s = np.zeros((len(T), 2))
## Initial values
s[0, 0] = -65
s[0, 1] = -1
# Note that s1[0] is v, s1[1] is u. This is the Izhikevich equation in vector form
def s_dt(s1, I):
v_dt = 0.04*(s1[0]**2) + 5*s1[0] + 140 - s1[1] + I
u_dt = a*(b*s1[0] - s1[1])
return np.array([v_dt, u_dt])
## SIMULATE
for t in range(len(T)-1):
# Calculate the four constants of Runge-Kutta method
k_1 = s_dt(s[t], I)
k_2 = s_dt(s[t] + 0.5*dt*k_1, I)
k_3 = s_dt(s[t] + 0.5*dt*k_2, I)
k_4 = s_dt(s[t] + dt*k_3, I)
s[t+1] = s[t] + (1.0/6)*dt*(k_1 + 2*k_2 + 2*k_3 + k_4)
# Reset the neuron if it has spiked
if s[t+1, 0] >= 30:
s[t, 0] = 30 # Add a Dirac pulse for visualisation
s[t+1, 0] = c # Reset to resting potential
s[t+1, 1] += d # Update recovery variable
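
# Reference: the update above is the classical fourth-order Runge-Kutta step for
# ds/dt = f(s) with step size dt:
#   s(t+dt) = s(t) + (dt/6)*(k_1 + 2*k_2 + 2*k_3 + k_4)
# where k_1 = f(s), k_2 = f(s + dt/2*k_1), k_3 = f(s + dt/2*k_2), k_4 = f(s + dt*k_3);
# here f is s_dt and the drive enters only through the constant current I.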
v = s[:, 0]
u = s[:, 1]
## Plot the membrane potential
plt.subplot(211)
plt.plot(T, v)
plt.xlabel('Time (ms)')
plt.ylabel('Membrane potential v (mV)')
plt.title('Izhikevich Neuron')
# Plot the reset variable
plt.subplot(212)
plt.plot(T, u)
plt.xlabel('Time (ms)')
plt.ylabel('Reset variable u')
plt.show()
| gpl-3.0 | 194,822,784,332,023,800 | 19.397849 | 78 | 0.618345 | false | 2.319071 | false | false | false |
RaminderSinghSahni/micro-ram-bot | tasks.py | 1 | 4411 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from microsoftbotframework import Response
import celery
import os
import sys
import json
# import argparse
from google.cloud import language
import google.auth
# import language
try:
import apiai
except ImportError:
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import apiai
# CLIENT_ACCESS_TOKEN = 'd18b44be8d0b41269a42704c00d44d77'
CLIENT_ACCESS_TOKEN = '039129d3176644e9ac91464ee9e7b5df'
def respond_to_conversation_update(message):
if message["type"] == "conversationUpdate":
response = Response(message)
message_response = 'Have fun with the Microsoft Bot Framework'
response.reply_to_activity(message_response, recipient={"id": response["conversation"]["id"]})
def echo_response(message):
if message["type"] == "message":
response = Response(message)
message_response = message["text"]
# response_info = response.reply_to_activity("in this")
# response_info = response.reply_to_activity(message_response)
with open('intervention.json') as data_file:
iData = json.load(data_file)
if iData["intervention"] == "0":
ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
request1 = ai.text_request()
# request1.lang = 'en' # optional, default value equal 'en'
# request.session_id = "<SESSION ID, UBIQUE FOR EACH USER>"
# print("\n\nYour Input : ",end=" ")
# response.reply_to_activity(ai)
with open('session.json') as data_file:
data = json.load(data_file)
if data["session_id"] != "":
request1.session_id = data["session_id"]
request1.query = message_response
# request1.resetContexts = False
# request1.
# response_info = response.reply_to_activity("hello"+request1)
# print("\n\nBot\'s response :",end=" ")
response1 = request1.getresponse()
responsestr = response1.read().decode('utf-8')
response_obj = json.loads(responsestr)
with open('session.json', 'w') as outfile:
json.dump({"session_id": response_obj["sessionId"]}, outfile)
# print(response_obj["result"]["fulfillment"]["speech"])
response_info = response.reply_to_activity(response_obj["result"]["fulfillment"]["speech"])
else:
with open('message.json') as data_file:
data = json.load(data_file)
if data["message"] != "":
new_response = Response(data["message"])
# language_client = language.Client()
language_client = language.Client.from_service_account_json('Micro-av-bot-1.json')
# language_client = language.client
document = language_client.document_from_text(message_response)
# Detects sentiment in the document.
annotations = document.annotate_text(include_sentiment=True,
include_syntax=False,
include_entities=False)
score = annotations.sentiment.score
magnitude = annotations.sentiment.magnitude
# response_info = new_response.reply_to_activity('Overall Sentiment: score')
response_info = new_response.reply_to_activity('Overall Sentiment: score of {} with magnitude of {}'.format(score, magnitude))
# response_info = response.reply_to_activity("Intervention is turned on")
# from time import sleep
# sleep(2)
# response.delete_activity(activity_id=response_info.json()['id'])
# sleep(2)
# response.create_conversation('lets talk about something really interesting')
# This is a asynchronous task
@celery.task()
def echo_response_async(message):
if message["type"] == "message":
response = Response(message)
message_response = message["text"]
response.send_to_conversation(message_response)
| mit | -5,725,038,473,106,126,000 | 37.692982 | 150 | 0.568579 | false | 4.261836 | false | false | false |
QudevETH/PycQED_py3 | pycqed/simulations/chevron_sim.py | 1 | 2373 | """
Based on Olli's mathematica notebook used to simulate chevrons
"""
import numpy as np
from scipy.linalg import expm
ham = lambda e, g: np.array([[0.5*e, g], [g, -0.5*e]])
evol = lambda e, g, dt: expm(dt*1j*ham(e, g))
def rabisim(efun, g, t, dt):
"""
This function returns the evolution of a system described by the hamiltonian:
H = efun sigma_z + g sigma_x
Inputs:
        efun, Function that returns the energy parameter vs time.
g, Coupling parameter
t, Final time of the evolution
dt, Stepsize of the time evolution
Outputs:
f_vec, Evolution for times (1, 1+dt, ..., t)
"""
s0 = np.array([1, 0])
ts = np.arange(1., t+0.5*dt, dt)
f = lambda st, ti: np.dot(evol(efun(ti), g, dt), st)
f_vec = np.zeros((len(ts), 2), dtype=np.complex128)
f_vec[0, :] = s0
for i, t in enumerate(ts[:-1]):
f_vec[i+1, :] = f(f_vec[i], t)
return f_vec
qamp = lambda vec: np.abs(vec[:, 1])**2
def chevron(e0, emin, emax, n, g, t, dt, sf):
"""
Inputs:
e0, set energy scale at the center(detuning).
emin, sets min energy to simulate, in e0 units.
emax, sets max energy to simulate, in e0 units.
n, sets number of points in energy array.
g, Coupling parameter.
t, Final time of the evolution.
dt, Stepsize of the time evolution.
sf, Step function of the distortion kernel.
"""
energy_func = lambda energy, t: e0*(1.-(energy*sf(t))**2)
energy_vec = np.arange(1+emin, 1+emax, (emax-emin)/(n-1))
chevron_vec = []
for ee in energy_vec:
chevron_vec.append(
qamp(rabisim(lambda t: energy_func(ee, t), g, t, dt)))
return np.array(chevron_vec)
def chevron_slice(e0, energy, g, t, dt, sf):
"""
Inputs:
e0, set energy scale at the center(detuning).
energy, energy of the slice to simulate, in e0 units.
g, Coupling parameter.
t, Final time of the evolution.
dt, Stepsize of the time evolution.
sf, Step function of the distortion kernel.
"""
energy_func = lambda energy, t: e0*(1.-(energy*sf(t))**2)
return qamp(rabisim(lambda t: energy_func(energy, t), g, t, dt))
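

# Minimal usage sketch (parameter values below are illustrative assumptions,
# not taken from any experiment): a trivial distortion kernel sf(t) = 1 gives
# the undistorted chevron pattern.
if __name__ == '__main__':
    flat_step = lambda t: 1.0
    pattern = chevron(e0=1.0, emin=-0.5, emax=0.5, n=21,
                      g=0.05, t=50, dt=1.0, sf=flat_step)
    print('chevron pattern shape:', pattern.shape)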
| mit | -6,412,169,818,103,500,000 | 33.897059 | 81 | 0.554994 | false | 3.085826 | false | false | false |
felipead/breakout | source/breakout/game/GameController.py | 1 | 4147 | from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
from pygame.constants import *
from breakout.game.GameEngine import GameEngine
_FRAMES_PER_SECOND = 60
_MOUSE_VISIBLE = True
_CANVAS_WIDTH = 250
_CANVAS_HEIGHT = 300
_DEFAULT_SCREEN_WIDTH = 500
_DEFAULT_SCREEN_HEIGHT = 600
class GameController(object):
def __init__(self):
self.__engine = GameEngine(_CANVAS_WIDTH, _CANVAS_HEIGHT)
self.__screenWidth = _DEFAULT_SCREEN_WIDTH
self.__screenHeight = _DEFAULT_SCREEN_HEIGHT
def run(self):
self.__initialize()
self.__gameLoop()
def __initialize(self):
pygame.init()
pygame.mouse.set_visible(_MOUSE_VISIBLE)
pygame.display.set_mode((self.__screenWidth, self.__screenHeight), OPENGL | DOUBLEBUF)
glClearColor(0.0, 0.0, 0.0, 1.0)
glShadeModel(GL_FLAT)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBlendEquation(GL_FUNC_ADD)
self.__handleScreenResizeEvent(self.__screenWidth, self.__screenHeight)
self.__engine.initialize()
def __gameLoop(self):
clock = pygame.time.Clock()
ticks = 0
while True:
for event in pygame.event.get():
self.__handleInputEvent(event)
milliseconds = clock.tick(_FRAMES_PER_SECOND)
ticks += 1
self.__engine.update(milliseconds, ticks)
self.__engine.display(milliseconds, ticks, self.__screenWidth, self.__screenHeight, clock.get_fps())
pygame.display.flip() # swap buffers
def __handleInputEvent(self, event):
if event.type == QUIT:
exit()
elif event.type == VIDEORESIZE:
self.__handleScreenResizeEvent(event.w, event.h)
elif event.type == MOUSEMOTION:
self.__handleMouseMoveEvent(event.pos, event.rel, event.buttons)
elif event.type == MOUSEBUTTONUP:
self.__handleMouseButtonUpEvent(event.button, event.pos)
elif event.type == MOUSEBUTTONDOWN:
self.__handleMouseButtonDownEvent(event.button, event.pos)
elif event.type == KEYUP:
self.__handleKeyUpEvent(event.key, event.mod)
elif event.type == KEYDOWN:
self.__handleKeyDownEvent(event.key, event.mod, event.unicode)
def __handleScreenResizeEvent(self, width, height):
self.__screenWidth = width
self.__screenHeight = height
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(self.__engine.canvas.left, self.__engine.canvas.right,
self.__engine.canvas.bottom, self.__engine.canvas.top)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def __handleMouseButtonUpEvent(self, button, coordinates):
mappedCoordinates = self.__mapScreenCoordinatesToCanvas(coordinates)
self.__engine.handleMouseButtonUpEvent(button, mappedCoordinates)
def __handleMouseButtonDownEvent(self, button, coordinates):
mappedCoordinates = self.__mapScreenCoordinatesToCanvas(coordinates)
self.__engine.handleMouseButtonDownEvent(button, mappedCoordinates)
def __handleMouseMoveEvent(self, absolute_coordinates, relative_coordinates, buttons):
mapped_absolute_coordinates = self.__mapScreenCoordinatesToCanvas(absolute_coordinates)
self.__engine.handleMouseMoveEvent(mapped_absolute_coordinates, relative_coordinates, buttons)
def __handleKeyUpEvent(self, key, modifiers):
self.__engine.handleKeyUpEvent(key, modifiers)
def __handleKeyDownEvent(self, key, modifiers, char):
self.__engine.handleKeyDownEvent(key, modifiers, char)
def __mapScreenCoordinatesToCanvas(self, coordinates):
horizontalCanvasToScreenRatio = self.__engine.canvas.width / float(self.__screenWidth)
verticalCanvasToScreenRatio = self.__engine.canvas.height / float(self.__screenHeight)
(x, y) = coordinates
x *= horizontalCanvasToScreenRatio
y *= verticalCanvasToScreenRatio
y = self.__engine.canvas.top - y
return x, y
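

# Assumed entry point for running the demo directly; the original project may
# launch the controller from elsewhere.
if __name__ == '__main__':
    GameController().run()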
| gpl-2.0 | 2,273,906,769,018,133,000 | 35.699115 | 112 | 0.661201 | false | 3.938272 | false | false | false |
pampi/pad | backend.py | 1 | 26210 | #Copyright (C) 2014 Adrian "APi" Pielech
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
from PySide.QtNetwork import *
from PySide.QtCore import *
from PySide.QtGui import *
from subprocess import Popen, PIPE
class EPISODE(QListWidgetItem):
def __init__(self, parent=None, title='<Title>', value='<Value>'):
super(EPISODE, self).__init__(parent)
self.title = title
self.value = value
def setText(self, title):
self.title = title
super(EPISODE, self).setText(self.title)
def setValue(self, value):
self.value = value
def getValue(self):
return self.value
class DownloadEpisodeThread(QThread):
saveTo = None
who_am_i = 0
def __init__(self, parent, threadId):
super(DownloadEpisodeThread, self).__init__()
self.parentObject = parent
self.who_am_i = threadId
def run(self):
qNetMgr = QNetworkAccessManager()
downloadLoop = QEventLoop()
loopArg = True
item = None
p = self.parentObject
while(loopArg is True):
p.downloadMutex.tryLock(-1)
if(p.lstwToDownload.count() > 0):
item = p.lstwToDownload.takeItem(0)
p.appendLogs.emit('Zaczynam pobierać: ' + item.text())
else:
loopArg = False
item = None
if p.downloadedEps == p.mustDownloadEps:
p.btnDownload.setEnabled(True)
p.freezeSettings(True)
p.btnDownloadEpisodesList.setEnabled(True)
p.downloadMutex.unlock()
if not(item is None):
qReply = qNetMgr.get(QNetworkRequest(QUrl(item.getValue())))
if item.getValue().count('https://') > 0:
qReply.sslErrors.connect(qReply.ignoreSslErrors)
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302:
redirURL = qReply.attribute(QNetworkRequest.RedirectionTargetAttribute)
qReply = qNetMgr.get(QNetworkRequest(QUrl(redirURL)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
p.lblThreadArray[self.who_am_i].setText(item.text())
p.pbThreadArray[self.who_am_i].setEnabled(True)
self.saveTo = QFile(item.text())
if not self.saveTo.open(QIODevice.WriteOnly):
print('Nie moge otworzyc panie ;_;')
qReply.downloadProgress.connect(self.saveToFile)
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
p.pbThreadArray[self.who_am_i].setEnabled(False)
self.saveTo.write(qReply.readAll())
self.saveTo.close()
p.downloadMutex.tryLock(-1)
p.downloadedEps = p.downloadedEps + 1
p.pbDownloaded.setValue(p.downloadedEps)
p.appendLogs.emit(item.text() + ' pobrano!')
if p.chkbConvert.isChecked() is True:
p.lstwToConvert.addItem(item)
p.sigConvert.emit()
p.downloadMutex.unlock()
else:
p.downloadMutex.tryLock(-1)
p.appendLogs.emit('Nie udało się pobrać ' + item.text() + '! Błąd: ' + str(qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute)) + '.')
p.downloadedEps = p.downloadedEps + 1
p.pbDownloaded.setValue(p.downloadedEps)
p.downloadMutex.unlock()
def saveToFile(self, received, total):
if total != self.parentObject.pbThreadArray[self.who_am_i].maximum():
self.parentObject.pbThreadArray[self.who_am_i].setMaximum(total)
self.parentObject.pbThreadArray[self.who_am_i].setValue(received)
class Backend:
def writeLog(self, log_message):
self.logDest.setPlainText(log_message + "\n" + self.logDest.toPlainText())
def convertEpisode(self):
self.convertMutex.tryLock(-1)
self.downloadMutex.tryLock(-1)
workItem = self.lstwToConvert.takeItem(0)
self.downloadMutex.unlock()
output_file = workItem.text()[:len(workItem.text()) - 3] + self.cbOutputFormat.currentText()
file_info = Popen(['ffmpeg', '-i', workItem.text()], stderr=PIPE)
file_info.wait()
file_info = file_info.stderr.read(-1).decode('utf-8')
file_info = file_info[file_info.find('Duration:') + 10:]
file_info = file_info[:file_info.find(',')]
file_time_info = file_info.split(':')
file_time_info = file_time_info + file_time_info[2].split('.')
length = int(file_time_info[0]) * 3600 + int(file_time_info[1]) * 60 + int(file_time_info[3])
self.pbConverted.setMaximum(length)
self.pbConverted.setValue(0)
self.appendLogs.emit('Zaczynam konwertować: ' + workItem.text())
'''TO DO Start converting'''
self.convertMutex.unlock()
def getEpisodesListFromWeb(self, linkToSeries, lblSeriesName, lstItems, log):
lstItems.clear()
self.logDest = log
if len(linkToSeries) > 15:
if linkToSeries.find('animeon.pl') >= 0:
lblSeriesName.setText(self.getAnimeOnList(linkToSeries, lstItems))
elif linkToSeries.find('anime-shinden.info') >= 0:
lblSeriesName.setText(self.getAShindenList(linkToSeries, lstItems))
else:
self.writeLog("Podano URL do nieobsługiwanego serwisu!")
else:
self.writeLog("Nieprawidłowy URL do serii!")
def getVideoListFromURL(self, get_from):
ret_val = [None]
basic_filename = get_from.text()
episode_page_url = get_from.getValue()
'''print(episode_page_url)'''
if episode_page_url.find('animeon.pl') > 0:
ret_val = self.extractLinksFromAnimeOn(episode_page_url, basic_filename)
elif (episode_page_url.find('anime-shinden.info') > 0) or (episode_page_url.find('shinden-anime.info') > 0):
episode_page_url = episode_page_url.replace('shinden-anime.info', 'anime-shinden.info')
ret_val = self.extractLinksFromAShinden(episode_page_url, basic_filename)
else:
self.writeLog('Coś poszło nie tak... Nie rozpoznano serwisu anime.\nCzy przypadkiem nie bawisz się w inżynierię odwrotną?')
return ret_val
def extractLinksFromAShinden(self, link, basename):
ret_val = [None]
self.writeLog('Pobieranie i parsowanie strony ' + basename + '...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(link)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Serwer zwrócił niepoprawną zawartość...')
self.writeLog(str(qReply.readAll().data()))
else:
done = 0
data = str(qReply.readAll().data())
data = data[data.find('video_tabs'):]
data = data[:data.find('<script')]
fb_count = int(data.count('http://anime-shinden.info/player/hd.php') / 2)
sibnet_count = data.count('video.sibnet.ru')
daily_count = data.count('www.dailymotion.com/embed/video')
if daily_count == 0:
daily_count = int(data.count('www.dailymotion.com/swf/video/') / 2)
data_backup = data
'''#jwplayer - fb'''
if fb_count > 0:
done = 1
fb_table = [None]
for i in range(0, fb_count):
data = data[data.find('http://anime-shinden.info/player/hd.php') + 10:]
data = data[data.find('http://anime-shinden.info/player/hd.php'):]
data = data[data.find('link=') + 5:]
vid = data[:data.find('.mp4')]
vid = 'https://www.facebook.com/video/embed?video_id=' + vid
link_to_face = self.getEmbedFacebookVideoLink(vid)
if len(link_to_face) > 0:
ep = EPISODE()
if fb_count == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_face)
fb_table.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_face)
fb_table.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do fejsa...')
done = 0
if done == 1:
ret_val = fb_table
if (done == 0) and (sibnet_count > 0):
data = data_backup
done = 1
sib_table = [None]
for i in range(0, sibnet_count):
data = data[data.find('http://video.sibnet.ru/'):]
data = data[data.find('=') + 1:]
vid = data[:data.find('''"''')]
link_to_sib = self.getEmbedSibnetRUVideoLink(vid)
if len(link_to_sib) > 0:
ep = EPISODE()
if sibnet_count > 0:
ep.setText(basename + ".mp4")
ep.setValue(link_to_sib)
sib_table.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_sib)
fb_table.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do Sibnetu...')
done = 0
if done == 1:
ret_val = sib_table
print('Sibnet :D')
if (done == 0) and (daily_count > 0):
print('Daily lol')
data = data_backup
data = data.replace('http://www.dailymotion.com/swf/video/', 'http://www.dailymotion.com/embed/video/')
done = 1
daily_table = [None]
for i in range(0, daily_count):
data = data[data.find('http://www.dailymotion.com/embed/video/'):]
daily_temple_link = data[:data.find('''"''')]
data = data[data.find('''"'''):]
link_to_daily = self.getEmbedDailyVideoLink(daily_temple_link)
if len(link_to_daily) > 0:
ep = EPISODE()
if daily_count == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_daily)
daily_table.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_daily)
daily_table.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do DailyMotion...')
done = 0
if done == 1:
ret_val = daily_table
if done == 0:
self.writeLog('Wybacz, nie udało mi się znaleźć linku do żadnego działającego serwisu :(')
return ret_val
def extractLinksFromAnimeOn(self, link, basename):
ret_val = [None]
self.writeLog('Pobieranie i parsowanie strony ' + basename + '...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(link)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Serwer zwrócił niepoprawną zawartość...')
else:
data = str(qReply.readAll().data())
data = data[data.find('float-left player-container'):]
data = data[:data.find('float-left episode-nav')]
if data.count('<iframe src=') > 0:
counter = data.count('<iframe src=')
for i in range(0, data.count('<iframe src=')):
data = data[data.find('<iframe src='):]
data = data[data.find("""'""") + 1:]
the_link = data[:data.find("\\")]
data = data[data.find('</iframe>'):]
if the_link.find('facebook.com') > 0:
link_to_face = self.getEmbedFacebookVideoLink(the_link)
if len(link_to_face) > 0:
'''link_to_face = download'''
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_face)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_face)
ret_val.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do fejsa...')
elif the_link.find('vk.com') > 0:
link_to_vk = self.getEmbedVKVideoLink(the_link)
if len(link_to_vk) > 0:
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_vk)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_vk)
ret_val.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do VK...')
else:
self.writeLog('I dont know this player...')
elif data.count('<embed src=') > 0:
counter = data.count('<embed src=')
for i in range(0, data.count('<embed src=')):
data = data[data.find('<embed src='):]
data = data[data.find("""'""") + 1:]
the_link = data[:data.find("\\")]
data = data[data.find('</embed>'):]
if the_link.find('video.sibnet.ru') > 0:
the_link = the_link[the_link.find('=') + 1:]
link_to_sibnet = self.getEmbedSibnetRUVideoLink(the_link)
if len(link_to_sibnet) > 0:
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_sibnet)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_sibnet)
ret_val.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do Sibnetu...')
else:
self.writeLog('I dont know this player...')
elif data.count('jwplayer(') > 0:
counter = data.count('jwplayer(')
for i in range(0, counter):
data = data[data.find('jwplayer('):]
data = data[data.find('http://'):]
jw_link = data[:data.find("""'""") - 1]
qReply = qNetMgr.get(QNetworkRequest(QUrl(jw_link)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if not ((qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200) or (qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302)):
jw_link = ''
if len(jw_link) > 0:
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(jw_link)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(jw_link)
ret_val.append(ep)
else:
self.writeLog('No player found.')
return ret_val
def getEmbedDailyVideoLink(self, url):
ret_val = ''
if url.count('/swf/') > 0:
url = url.replace('/swf/', '/embed/')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
print((qReply.size()))
data = qReply.readAll().data().decode('UTF-8')
if data.count('''"stream_h264_hd_url"''') > 0:
data = data[data.find('''"stream_h264_hd_url"'''):]
data = data[data.find('http:'):]
data = data[:data.find('''"''')]
data = data.replace("\\", '')
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302:
'''302 Found'''
ret_val = data
return ret_val
def getEmbedSibnetRUVideoLink(self, vid):
ret_val = ''
url = 'http://video.sibnet.ru/shell_config_xml.php?videoid=' + vid + '&type=video.sibnet.ru'
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
data = qReply.readAll().data().decode('UTF-8')
data = data[data.find('<file>') + 6:data.find('</file>')]
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302:
'''302 Found'''
ret_val = data
return ret_val
def getEmbedVKVideoLink(self, url):
ret_val = ''
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
data = qReply.readAll().data().decode('windows-1251')
data = data[data.find('url720=') + 7:]
data = data[:data.find('&')]
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
'''200 OK'''
ret_val = data
return ret_val
def getEmbedFacebookVideoLink(self, url):
ret_val = ''
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
qReply.sslErrors.connect(qReply.ignoreSslErrors)
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
data = qReply.readAll().data().decode('UTF-8')
if data.count('hd_src') > 0:
data = data[data.find('hd_src'):]
data = data[data.find('https'):]
data = data[:data.find('u002522') - 1]
data = data.replace("\\", "/")
data = data.replace("/u00255C", "").replace("/u00252F", "/").replace("/u00253F", "?").replace("/u00253D", "=").replace("/u002526", "&").replace("/u00253A",":")
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.sslErrors.connect(qReply.ignoreSslErrors)
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
'''200 OK'''
ret_val = data
return ret_val
def getAShindenList(self, url, items):
series_name = "-"
self.writeLog('Trwa pobieranie listy odcinków serii(A-Shinden)...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
loop = QEventLoop()
qReply.finished.connect(loop.quit)
loop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Pobieranie danych o liście odcników nie powiodło się!')
else:
self.writeLog('Pobrano dane. Trwa parsowanie danych...')
data = str(qReply.readAll().data())
series_name = data[data.find('base fullstory'):]
series_name = series_name[:series_name.find('</a>')]
series_name = series_name[series_name.find('>', series_name.find('<a href=') + 7) + 1:]
series_name = series_name[:series_name.find('(') - 1]
self.writeLog('Pobierana seria: ' + series_name)
'''Extract episode list'''
'''Shrink data'''
data = data[data.find('daj online'):]
data = data[:data.find('</table>')]
data = data[data.find('<a href='):]
data = data[:data.find('</td>')]
i = data.find('<a href=')
while i >= 0:
ep = EPISODE()
ep.setValue(data[i + 9:data.find("\"", i + 9)])
data = data[data.find('>') + 1:]
ep.setText(data[:data.find('</a>')])
if data.find('<a href') >= 0:
data = data[data.find('<a href'):]
i = data.find('<a href')
if (ep.text().lower().find('odcinek') >= 0) or (ep.text().lower().find('ova') >= 0) or (ep.text().lower().find('odc') >= 0):
items.addItem(ep)
self.writeLog('Lista odcinków pobrana.')
else:
self.writeLog('Błąd połączenia. Pobieranie danych o liście odcników nie powiodło się!')
return series_name
def getAnimeOnList(self, url, items):
series_name = "-"
self.writeLog('Trwa pobieranie listy odcinków serii(AnimeOn)...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
loop = QEventLoop()
qReply.finished.connect(loop.quit)
loop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Pobieranie danych o liście odcników nie powiodło się!')
else:
self.writeLog('Pobrano dane. Trwa parsowanie danych...')
data = str(qReply.readAll().data())
series_name = data[data.find('<title>') + 7: data.find(' Anime Online PL')]
data = data[data.find('episode-table') + 13:]
data = data[:data.find('</table')]
i = data.find('http://')
while i >= 0:
ep = EPISODE()
data = data[data.find('http://'):]
ep.setValue(data[:data.find('\\')])
ep.setText(data[data.find('odcinek'):data.find('</a>')])
items.addItem(ep)
data = data[data.find('</a>'):]
i = data.find('http://')
else:
self.writeLog('Błąd połączenia. Pobieranie danych o liście odcników nie powiodło się!')
return series_name
| gpl-3.0 | -7,560,106,690,500,875,000 | 44.687063 | 175 | 0.488578 | false | 3.980655 | false | false | false |
endlessm/chromium-browser | third_party/chromite/scripts/test_image.py | 1 | 4062 | # -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to mount a built image and run tests on it."""
from __future__ import print_function
import os
import sys
import unittest
from chromite.lib import constants
from chromite.lib import commandline
from chromite.lib import image_lib
from chromite.lib import image_test_lib
from chromite.lib import osutils
from chromite.lib import path_util
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def ParseArgs(args):
"""Return parsed commandline arguments."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--test_results_root', type='path',
help='Directory to store test results')
parser.add_argument('--board', type=str, help='Board (wolf, beaglebone...)')
parser.add_argument('image', type='path',
help='Image directory (or file) with mount_image.sh and '
'umount_image.sh')
parser.add_argument('-l', '--list', default=False, action='store_true',
help='List all the available tests')
parser.add_argument('tests', nargs='*', metavar='test',
help='Specific tests to run (default runs all)')
opts = parser.parse_args(args)
opts.Freeze()
return opts
def FindImage(image_path):
"""Return the path to the image file.
Args:
image_path: A path to the image file, or a directory containing the base
image.
Returns:
ImageFileAndMountScripts containing absolute paths to the image,
the mount and umount invocation commands
"""
if os.path.isdir(image_path):
# Assume base image.
image_file = os.path.join(image_path, constants.BASE_IMAGE_NAME + '.bin')
if not os.path.exists(image_file):
raise ValueError('Cannot find base image %s' % image_file)
elif os.path.isfile(image_path):
image_file = image_path
else:
raise ValueError('%s is neither a directory nor a file' % image_path)
return image_file
def main(args):
opts = ParseArgs(args)
# Build up test suites.
loader = unittest.TestLoader()
loader.suiteClass = image_test_lib.ImageTestSuite
  # We use a different prefix here so that unittest does not pick up the
  # image tests automatically, because they depend on a proper environment.
loader.testMethodPrefix = 'Test'
tests_namespace = 'chromite.cros.test.image_test'
if opts.tests:
tests = ['%s.%s' % (tests_namespace, x) for x in opts.tests]
else:
tests = (tests_namespace,)
all_tests = loader.loadTestsFromNames(tests)
# If they just want to see the lists of tests, show them now.
if opts.list:
def _WalkSuite(suite):
for test in suite:
if isinstance(test, unittest.BaseTestSuite):
for result in _WalkSuite(test):
yield result
else:
yield (test.id()[len(tests_namespace) + 1:],
test.shortDescription() or '')
test_list = list(_WalkSuite(all_tests))
maxlen = max(len(x[0]) for x in test_list)
for name, desc in test_list:
print('%-*s %s' % (maxlen, name, desc))
return
# Run them in the image directory.
runner = image_test_lib.ImageTestRunner()
runner.SetBoard(opts.board)
runner.SetResultDir(opts.test_results_root)
image_file = FindImage(opts.image)
tmp_in_chroot = path_util.FromChrootPath('/tmp')
with osutils.TempDir(base_dir=tmp_in_chroot) as temp_dir:
with image_lib.LoopbackPartitions(image_file, temp_dir) as image:
# Due to the lack of mount context, we mount the partitions
# but do not reference directly. This will be removed with the
# submission of http://crrev/c/1795578
_ = image.Mount((constants.PART_ROOT_A,))[0]
_ = image.Mount((constants.PART_STATE,))[0]
with osutils.ChdirContext(temp_dir):
result = runner.run(all_tests)
if result and not result.wasSuccessful():
return 1
return 0
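

# Example invocations (board name and image path are placeholders, not
# verified commands):
#   test_image --board=<board> --test_results_root=/tmp/results <image_dir>
#   test_image --list <image_dir>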
| bsd-3-clause | -3,099,706,782,971,267,600 | 32.295082 | 79 | 0.670852 | false | 3.754159 | true | false | false |
OCA/contract | contract_variable_quantity/models/contract_line.py | 1 | 2127 | # Copyright 2016 Tecnativa - Pedro M. Baeza
# Copyright 2018 Tecnativa - Carlos Dauden
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
from odoo.tools import float_is_zero
from odoo.tools.safe_eval import safe_eval
class AccountAnalyticInvoiceLine(models.Model):
_inherit = 'contract.line'
@api.multi
def _get_quantity_to_invoice(
self, period_first_date, period_last_date, invoice_date
):
quantity = super(
AccountAnalyticInvoiceLine, self
)._get_quantity_to_invoice(
period_first_date, period_last_date, invoice_date
)
if not period_first_date or not period_last_date or not invoice_date:
return quantity
if self.qty_type == 'variable':
eval_context = {
'env': self.env,
'context': self.env.context,
'user': self.env.user,
'line': self,
'quantity': quantity,
'period_first_date': period_first_date,
'period_last_date': period_last_date,
'invoice_date': invoice_date,
'contract': self.contract_id,
}
safe_eval(
self.qty_formula_id.code.strip(),
eval_context,
mode="exec",
nocopy=True,
) # nocopy for returning result
quantity = eval_context.get('result', 0)
return quantity
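
    # Illustrative formula for a qty_formula_id record (hypothetical example,
    # not shipped with this module). The code string is exec'd with the
    # eval_context above and must leave its output in 'result', e.g.:
    #
    #     result = quantity * 2
    #
    # Any of env, context, user, line, contract, period_first_date,
    # period_last_date and invoice_date may be used in the expression.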
@api.multi
def _prepare_invoice_line(self, invoice_id=False, invoice_values=False):
vals = super(AccountAnalyticInvoiceLine, self)._prepare_invoice_line(
invoice_id=invoice_id, invoice_values=invoice_values,
)
if (
'quantity' in vals
and self.contract_id.skip_zero_qty
and float_is_zero(
vals['quantity'],
self.env['decimal.precision'].precision_get(
'Product Unit of Measure'
),
)
):
vals = {}
return vals
| agpl-3.0 | 1,996,008,599,080,476,700 | 33.306452 | 77 | 0.5496 | false | 4.05916 | false | false | false |
fusionbox/satchless | examples/demo/core/views.py | 1 | 1250 | # -*- coding:utf-8 -*-
from django.contrib import messages
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.translation import ugettext_lazy as _
from satchless.order.app import order_app
def home_page(request):
messages.success(request, _(u'<strong>Welcome!</strong> This is a demo shop built on Satchless. Enjoy!'))
return TemplateResponse(request, 'core/home_page.html')
def thank_you_page(request, order_token):
order = order_app.get_order(request, order_token)
if not order.status in ('payment-failed', 'payment-complete', 'delivery'):
return redirect(order_app.reverse('satchless-order-view',
args=(order.token,)))
if order.status == 'payment-failed':
return redirect('payment-failed', order_token=order.token)
return TemplateResponse(request, 'satchless/checkout/thank_you.html', {
'order': order,
})
def payment_failed(request, order_token):
order = order_app.get_order(request, order_token)
if order.status != 'payment-failed':
return redirect(order)
return TemplateResponse(request, 'satchless/checkout/payment_failed.html', {
'order': order,
})
| bsd-3-clause | -7,441,253,123,787,888,000 | 36.878788 | 109 | 0.6872 | false | 3.869969 | false | false | false |
Robbie1977/AlignmentPipe | align/settings.py | 1 | 10620 | import psycopg2, os
# import subprocess
from socket import gethostname
host = gethostname()
con = psycopg2.connect(host='bocian.inf.ed.ac.uk', database='alignment', user='aligner_admin', password='default99')
cur = con.cursor()
cur.execute("SELECT upload_dir FROM system_server WHERE host_name like '" + host + "'")
record = cur.fetchone()
if record == None:
print 'Missing server settings for ' + str(host)
cur.execute("SELECT upload_dir, host_name FROM system_server")
record = cur.fetchone()
print 'Having to use settings for ' + str(record[1])
host = str(record[1])
uploadfolder = str(record[0])
cur.close()
con.close()
del cur, con, record
# Use to reset file permission only if error occurs
# for file in os.listdir(uploadfolder):
# try:
# # os.chmod(uploadfolder + file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
# subprocess.call(['chmod', '0777', uploadfolder + file])
# print 'OK: ' + file
# except:
# print 'Error: ' + file
#
# Django settings for align project.
# DEBUG = True
# TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Robert Court', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'django_mongodb_engine'.
'NAME': 'alignment', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'aligner_admin',
'PASSWORD': 'default99',
'HOST': 'bocian.inf.ed.ac.uk', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'bocian.inf.ed.ac.uk', 'vfbaligner.inf.ed.ac.uk']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# SITE_ID=u'5395bb746c132991c57933f6'
SITE_ID=1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/disk/data/VFB/aligner/uploads/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/disk/data/VFB/aligner/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'dv16bwh3f1x%p9csb3o7l9k#o8d_oqp-)aa=afq%yj+2$s96_('
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/'
LOGIN_ERROR_URL = '/'
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'
SOCIAL_AUTH_GITHUB_KEY = 'e8bfae9142f86f36b391'
SOCIAL_AUTH_GITHUB_SECRET = 'b7617cf006cace2e60d90f089816924e0eabbd0f'
SOCIAL_AUTH_GITHUB_SCOPE = ['user:email']
# SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '884257168498-3gec80pdfpullsaeavbg2nqra3aflru5.apps.googleusercontent.com'
# SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'pvsqhFUx1kmBiGlVWERy_Q-b'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get(
'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
'884257168498-3gec80pdfpullsaeavbg2nqra3aflru5.apps.googleusercontent.com'
)
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get(
'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET',
'pvsqhFUx1kmBiGlVWERy_Q-b'
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
# MIDDLEWARE_CLASSES = (
# 'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# # 'permission_backend_nonrel.backends.NonrelPermissionBackend'
# 'django.contrib.messages.middleware.MessageMiddleware',
# # Uncomment the next line for simple clickjacking protection:
# # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
# )
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'align.wsgi.application'
TEMPLATE_DIRS = (
"/disk/data/VFB/aligner/AlignmentPipe/align/images/templates/admin_copies" ,
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# AUTHENTICATION_BACKENDS = (
# 'permission_backend_nonrel.backends.NonrelPermissionBackend',
# )
# AUTHENTICATION_BACKENDS = (
# 'social.backends.google.GoogleOAuth2',
# 'social.backends.google.GooglePlusAuth',
# 'django.contrib.auth.backends.ModelBackend',
# )
# AUTHENTICATION_BACKENDS = (
# 'social.backends.open_id.OpenIdAuth',
# 'social.backends.google.GoogleOpenId',
# 'social.backends.google.GoogleOAuth2',
# 'social.backends.google.GoogleOAuth',
# 'social.backends.twitter.TwitterOAuth',
# 'social.backends.yahoo.YahooOpenId',
# 'django.contrib.auth.backends.ModelBackend',
# )
AUTHENTICATION_BACKENDS = (
# 'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.google.GooglePlusAuth',
# 'social.backends.twitter.TwitterOAuth',
'django.contrib.auth.backends.ModelBackend',
'social.backends.github.GithubOAuth2',
# 'social_auth.backends.contrib.linkedin.LinkedinBackend',
)
# SOCIAL_AUTH_USER_MODEL = 'users.User'
SOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS = ['username', 'first_name', 'email']
LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_AUTHENTICATION_BACKENDS = (
'social.backends.open_id.OpenIdAuth',
'social.backends.google.GoogleOpenId',
'social.backends.google.GoogleOAuth2',
'social.backends.google.GoogleOAuth',
'social.backends.google.GooglePlusAuth',
# 'social.backends.twitter.TwitterOAuth',
# 'social.backends.yahoo.YahooOpenId',
'social.backends.github.GithubOAuth2',
# 'social_auth.backends.contrib.linkedin.LinkedinBackend',
)
INSTALLED_APPS = (
'adminactions',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django_mongodb_engine',
'django.db.backends.postgresql_psycopg2',
# 'djangotoolbox',
# 'permission_backend_nonrel',
'system',
'images',
'users',
'bootstrap3',
'images.templatetags.images_extras',
'users.templatetags.backend_utils',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'social.apps.django_app.default',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | -6,374,926,531,072,543,000 | 34.165563 | 146 | 0.706874 | false | 3.366086 | false | false | false |
apel/rest | api/tests/test_cloud_record_summary_get.py | 1 | 9827 | """This module tests GET requests to the Cloud Sumamry Record endpoint."""
import logging
import MySQLdb
from api.utils.TokenChecker import TokenChecker
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from mock import patch
QPATH_TEST = '/tmp/django-test/'
class CloudRecordSummaryGetTest(TestCase):
"""Tests GET requests to the Cloud Sumamry Record endpoint."""
def setUp(self):
"""Prevent logging from appearing in test output."""
logging.disable(logging.CRITICAL)
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_IAM_fail(self, mock_valid_token_to_id):
"""
Test what happens if we fail to contact the IAM.
i.e, _token_to_id returns None
IAM = Identity and Access Management
"""
# Mock the functionality of the IAM
# Used in the underlying GET method
# Simulates a failure to translate a token to an ID
mock_valid_token_to_id.return_value = None
with self.settings(ALLOWED_FOR_GET='TestService'):
# Make (and check) the GET request
self._check_summary_get(401,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="Bearer TestToken")
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_400(self, mock_valid_token_to_id):
"""Test a GET request without the from field."""
# Mock the functionality of the IAM
# Simulates the translation of a token to an ID
# Used in the underlying GET method
mock_valid_token_to_id.return_value = 'TestService'
with self.settings(ALLOWED_FOR_GET='TestService'):
# Make (and check) the GET request
self._check_summary_get(400, options="?group=TestGroup",
authZ_header_cont="Bearer TestToken")
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_403(self, mock_valid_token_to_id):
"""Test an unauthorized service cannot make a GET request."""
# Mock the functionality of the IAM
# Simulates the translation of a token to an unauthorized ID
# Used in the underlying GET method
mock_valid_token_to_id.return_value = 'FakeService'
with self.settings(ALLOWED_FOR_GET='TestService'):
# Make (and check) the GET request
self._check_summary_get(403,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="Bearer TestToken")
def test_cloud_record_summary_get_401(self):
"""Test an unauthenticated GET request."""
# Test without the HTTP_AUTHORIZATION header
# Make (and check) the GET request
self._check_summary_get(401,
options=("?group=TestGroup"
"&from=20000101&to=20191231"))
# Test with a malformed HTTP_AUTHORIZATION header
# Make (and check) the GET request
self._check_summary_get(401,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="TestToken")
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_200(self, mock_valid_token_to_id):
"""Test a successful GET request."""
# Connect to database
database = self._connect_to_database()
# Clean up any lingering example data.
self._clear_database(database)
# Add example data
self._populate_database(database)
# Mock the functionality of the IAM
mock_valid_token_to_id.return_value = 'TestService'
expected_response = ('{'
'"count":2,'
'"next":null,'
'"previous":null,'
'"results":[{'
'"WallDuration":86399,'
'"Year":2016,'
'"Day":30,'
'"Month":7'
'},{'
'"WallDuration":43200,'
'"Year":2016,'
'"Day":31,'
'"Month":7}]}')
with self.settings(ALLOWED_FOR_GET='TestService',
RETURN_HEADERS=["WallDuration",
"Day",
"Month",
"Year"]):
try:
self._check_summary_get(200,
expected_response=expected_response,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="Bearer TestToken")
finally:
# Clean up after test.
self._clear_database(database)
database.close()
def tearDown(self):
"""Delete any messages under QPATH and re-enable logging.INFO."""
logging.disable(logging.NOTSET)
def _check_summary_get(self, expected_status, expected_response=None,
options='', authZ_header_cont=None):
"""Helper method to make a GET request."""
test_client = Client()
# Form the URL to make the GET request to
url = ''.join((reverse('CloudRecordSummaryView'), options))
if authZ_header_cont is not None:
# If content for a HTTP_AUTHORIZATION has been provided,
# make the GET request with the appropriate header
response = test_client.get(url,
HTTP_AUTHORIZATION=authZ_header_cont)
else:
            # Otherwise, make a GET request without an HTTP_AUTHORIZATION header
response = test_client.get(url)
# Check the expected response code has been received.
self.assertEqual(response.status_code, expected_status)
if expected_response is not None:
# Check the response received is as expected.
self.assertEqual(response.content, expected_response)
def _populate_database(self, database):
"""Populate the database with example summaries."""
cursor = database.cursor()
# Insert example usage data
cursor.execute('INSERT INTO CloudRecords '
'(VMUUID, SiteID, GlobalUserNameID, VOID, '
'VOGroupID, VORoleID, Status, StartTime, '
'SuspendDuration, WallDuration, PublisherDNID, '
'CloudType, ImageId, '
'CloudComputeServiceID) '
'VALUES '
'("TEST-VM", 1, 1, 1, 1, 1, "Running", '
'"2016-07-30 00:00:00", 0, 86399, 1, "TEST", "1", '
'1);')
# Insert example usage data
cursor.execute('INSERT INTO CloudRecords '
'(VMUUID, SiteID, GlobalUserNameID, VOID, '
'VOGroupID, VORoleID, Status, StartTime, '
'SuspendDuration, WallDuration, PublisherDNID, '
'CloudType, ImageId, '
'CloudComputeServiceID) '
'VALUES '
'("TEST-VM", 1, 1, 1, 1, 1, "Running", '
'"2016-07-30 00:00:00", 0, 129599, 1, "TEST", "1", '
'1);')
# These INSERT statements are needed
# because we query VCloudSummaries
cursor.execute('INSERT INTO Sites VALUES (1, "TestSite");')
cursor.execute('INSERT INTO VOs VALUES (1, "TestVO");')
cursor.execute('INSERT INTO VOGroups VALUES (1, "TestGroup");')
cursor.execute('INSERT INTO VORoles VALUES (1, "TestRole");')
cursor.execute('INSERT INTO DNs VALUES (1, "TestDN");')
cursor.execute('INSERT INTO CloudComputeServices '
'VALUES (1, "TestService");')
# Summarise example usage data
cursor.execute('CALL SummariseVMs();')
database.commit()
def _clear_database(self, database):
"""Clear the database of example data."""
cursor = database.cursor()
cursor.execute('DELETE FROM CloudRecords '
'WHERE VMUUID="TEST-VM";')
cursor.execute('DELETE FROM CloudSummaries '
'WHERE CloudType="TEST";')
cursor.execute('DELETE FROM Sites '
'WHERE id=1;')
cursor.execute('DELETE FROM VOs '
'WHERE id=1;')
cursor.execute('DELETE FROM VOGroups '
'WHERE id=1;')
cursor.execute('DELETE FROM VORoles '
'WHERE id=1;')
cursor.execute('DELETE FROM DNs '
'WHERE id=1;')
cursor.execute('DELETE FROM CloudComputeServices '
'WHERE id=1;')
database.commit()
def _connect_to_database(self,
host='localhost',
user='root',
password='',
name='apel_rest'):
"""Connect to and return a cursor to the given database."""
database = MySQLdb.connect(host, user, password, name)
return database
| apache-2.0 | -8,940,463,494,486,466,000 | 40.817021 | 79 | 0.51908 | false | 4.681753 | true | false | false |
berkmancenter/mediacloud | apps/webapp-api/src/python/webapp/auth/password.py | 1 | 5455 | import base64
import hashlib
import os
from mediawords.db import DatabaseHandler
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
__HASH_SALT_PREFIX = "{SSHA256}"
__HASH_LENGTH = 64 # SHA-256 hash length
__SALT_LENGTH = 64
__MIN_PASSWORD_LENGTH = 8
__MAX_PASSWORD_LENGTH = 120
log = create_logger(__name__)
class McAuthPasswordException(Exception):
"""Password-related exceptions."""
pass
def password_hash_is_valid(password_hash: str, password: str) -> bool:
"""Validate a password / password token.
Ported from Crypt::SaltedHash: https://metacpan.org/pod/Crypt::SaltedHash
"""
password_hash = decode_object_from_bytes_if_needed(password_hash)
password = decode_object_from_bytes_if_needed(password)
if not password_hash:
raise McAuthPasswordException("Password hash is None or empty.")
if password is None:
raise McAuthPasswordException("Password is None.")
# Password can be an empty string but that would be weird so we only spit out a warning
if not password:
log.warning("Password is empty.")
if not password_hash.startswith(__HASH_SALT_PREFIX):
raise McAuthPasswordException("Password hash does not start with an expected prefix.")
if len(password_hash) != len(__HASH_SALT_PREFIX) + __HASH_LENGTH + __SALT_LENGTH:
raise McAuthPasswordException("Password hash is of the incorrect length.")
try:
password = password.encode('utf-8', errors='replace') # to concatenate with 'bytes' salt later
password_hash = password_hash[len(__HASH_SALT_PREFIX):]
salted_hash_salt = base64.b64decode(password_hash)
salt = salted_hash_salt[-1 * __SALT_LENGTH:]
expected_salted_hash = salted_hash_salt[:len(salted_hash_salt) - __SALT_LENGTH]
actual_password_salt = password + salt
sha256 = hashlib.sha256()
sha256.update(actual_password_salt)
actual_salted_hash = sha256.digest()
if expected_salted_hash == actual_salted_hash:
return True
else:
return False
except Exception as ex:
log.warning("Failed to validate hash: %s" % str(ex))
return False
def generate_secure_hash(password: str) -> str:
"""Hash a secure hash (password / password reset token) with a random salt.
Ported from Crypt::SaltedHash: https://metacpan.org/pod/Crypt::SaltedHash
"""
password = decode_object_from_bytes_if_needed(password)
if password is None:
raise McAuthPasswordException("Password is None.")
# Password can be an empty string but that would be weird so we only spit out a warning
if not password:
log.warning("Password is empty.")
password = password.encode('utf-8', errors='replace') # to concatenate with 'bytes' salt later
# os.urandom() is supposed to be crypto-secure
salt = os.urandom(__SALT_LENGTH)
password_salt = password + salt
sha256 = hashlib.sha256()
sha256.update(password_salt)
salted_hash = sha256.digest()
salted_hash_salt = salted_hash + salt
base64_salted_hash = base64.b64encode(salted_hash_salt).decode('ascii')
return __HASH_SALT_PREFIX + base64_salted_hash
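# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Round trip of the two helpers above; results are indicative only, since the
# salt comes from os.urandom() and the hash therefore differs on every call.
#
#     hashed = generate_secure_hash("s3cure-Passw0rd")
#     hashed.startswith("{SSHA256}")                      # -> True
#     password_hash_is_valid(hashed, "s3cure-Passw0rd")   # -> True
#     password_hash_is_valid(hashed, "wr0ng-Passw0rd")    # -> False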
def password_reset_token_is_valid(db: DatabaseHandler, email: str, password_reset_token: str) -> bool:
"""Validate password reset token (used for both user activation and password reset)."""
email = decode_object_from_bytes_if_needed(email)
password_reset_token = decode_object_from_bytes_if_needed(password_reset_token)
if not (email and password_reset_token):
log.error("Email and / or password reset token is empty.")
return False
# Fetch readonly information about the user
password_reset_token_hash = db.query("""
SELECT auth_users_id,
email,
password_reset_token_hash
FROM auth_users
WHERE email = %(email)s
LIMIT 1
""", {'email': email}).hash()
if password_reset_token_hash is None or 'auth_users_id' not in password_reset_token_hash:
log.error("Unable to find user %s in the database." % email)
return False
password_reset_token_hash = password_reset_token_hash['password_reset_token_hash']
if password_hash_is_valid(password_hash=password_reset_token_hash, password=password_reset_token):
return True
else:
return False
def validate_new_password(email: str, password: str, password_repeat: str) -> str:
"""Check if password complies with strength the requirements.
Returns empty string on valid password, error message on invalid password."""
email = decode_object_from_bytes_if_needed(email)
password = decode_object_from_bytes_if_needed(password)
password_repeat = decode_object_from_bytes_if_needed(password_repeat)
if not email:
return 'Email address is empty.'
if not (password and password_repeat):
return 'To set the password, please repeat the new password twice.'
if password != password_repeat:
return 'Passwords do not match.'
if len(password) < __MIN_PASSWORD_LENGTH or len(password) > __MAX_PASSWORD_LENGTH:
return 'Password must be between %d and %d characters length.' % (__MIN_PASSWORD_LENGTH, __MAX_PASSWORD_LENGTH,)
if password == email:
return "New password is your email address; don't cheat!"
return ''
| agpl-3.0 | 8,678,888,388,578,591,000 | 32.67284 | 120 | 0.67846 | false | 3.841549 | false | false | false |
jakubtyniecki/pact | sort/hybrid.py | 1 | 2447 | """ hybrid sort module """
from sort.framework import validate
THRESHOLD = 10 # threshold when to fallback to insert sort
@validate
def sort(arr):
""" hybrid sort """
hybridsort(arr, 0, len(arr) - 1)
return arr
def hybridsort(arr, first, last):
""" hybrid sort """
stack = []
stack.append((first, last))
while stack:
pos = stack.pop()
left, right = pos[0], pos[1]
if right - left < THRESHOLD:
""" if array is smaller then given threshold
use insert sort as it'll be more efficient """
insertsort(arr, left, right)
else:
piv = partition(arr, left, right)
if piv - 1 > left:
stack.append((left, piv - 1))
if piv + 1 < right:
stack.append((piv + 1, right))
def insertsort(arr, first, last):
""" insert sort """
assert first <= len(arr) and last < len(arr), \
"first: {}, last: {}".format(first, last)
for i in range(first, last + 1):
position, currentvalue = i, arr[i]
while position > 0 and arr[position - 1] > currentvalue:
arr[position] = arr[position - 1]
position -= 1
arr[position] = currentvalue
def partition(arr, first, last):
""" partition """
assert first < len(arr) and last < len(arr) and first < last, \
"first: {}, last: {}".format(first, last)
pivotindex = pivotpoint(arr, first, last)
if pivotindex > first:
arr[first], arr[pivotindex] = arr[pivotindex], arr[first]
pivotvalue = arr[first]
left, right = first + 1, last
while right >= left:
while left <= right and arr[left] <= pivotvalue:
left += 1
while arr[right] >= pivotvalue and right >= left:
right -= 1
assert right >= 0 and left <= len(arr)
if right > left:
arr[left], arr[right] = arr[right], arr[left]
if right > first:
arr[first], arr[right] = arr[right], arr[first]
return right
def pivotpoint(arr, first, last):
""" pivot point strategy
using median of first, mid and last elements
to prevent worst case scenario """
    # Parenthesise the shift: '+' binds tighter than '>>' in Python, so the
    # original expression reduced to 'last >> 1' rather than the midpoint.
    mid = first + ((last - first) >> 1)
if (arr[first] - arr[mid]) * (arr[last] - arr[first]) >= 0:
return first
elif (arr[mid] - arr[first]) * (arr[last] - arr[mid]) >= 0:
return mid
else:
return last
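# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Assumes the surrounding package layout (sort.framework providing @validate)
# is importable; with more than THRESHOLD elements both quicksort partitioning
# and the insertion-sort fallback are exercised.
if __name__ == '__main__':
    sample = [9, 3, 7, 1, 8, 2, 6, 0, 5, 4, 13, 11, 12, 10]
    print(sort(sample))  # expected: [0, 1, 2, ..., 13]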
| mit | 7,002,549,922,402,661,000 | 23.969388 | 67 | 0.548018 | false | 3.74732 | false | false | false |
xuludev/CVLH_tutorial | netease_spider.py | 1 | 3334 | import json
import time
import os
import re
import requests
from bs4 import BeautifulSoup
import chardet
"""
url_list = [
'http://tech.163.com/special/00097UHL/tech_datalist_02.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_index_02.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_index_02.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_index.js?callback=data_callback',
'http://edu.163.com/special/002987KB/newsdata_edu_hot_02.js?callback=data_callback'
]
"""
def crawl(pn):
headers = {
'Accept': '*/*',
'Connection': 'keep-alive',
'Host': 'ent.163.com',
'Referer': 'http://ent.163.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
request_url = 'http://ent.163.com/special/000380VU/newsdata_index_0' + str(pn) + '.js?callback=data_callback'
# print('is crawling page ' + request_url + '......')
response = requests.get(request_url, headers=headers)
if response.status_code == 200:
page_encoding = chardet.detect(response.content)['encoding']
temp_str = response.text.replace('data_callback(', '')
# temp_str = response.content.decode(page_encoding).replace('data_callback(', '')
temp_str = temp_str.replace(temp_str[-1], '')
for each_news in json.loads(temp_str):
print(each_news['docurl'])
download_news_content(each_news['title'], each_news['docurl'])
elif response.status_code == 404:
raise Exception('No Page Found! ' + request_url)
else:
print('ERROR! ' + str(response.status_code))
def download_news_content(title, news_url):
if news_url.startswith('http://v'):
print('This page contains video ...')
else:
# r_content = re.compile('<img \w')
r_title = re.compile('[\?\"\?\:\s\/\·]')
file_dir = 'd:/网易新闻/娱乐'
if not os.path.exists(file_dir) or not os.path.isdir(file_dir):
os.makedirs(file_dir)
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
response = requests.get(news_url, headers=headers)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html5lib')
if response.url.startswith('http://digi.163.com') or response.url.startswith('http://tech.163.com') or response.url.startswith('http://ent.163.com'):
text_soup = soup.select('#endText')[0]
content_text = text_soup.get_text()
elif response.url.startswith('http://dy.163.com'):
text_soup = soup.select('#content')[0]
content_text = text_soup.get_text()
            elif response.url.startswith('http://mobile.163.com'):
                text_soup = soup.select('#epContentLeft')[0]
                content_text = text_soup.get_text()
            else:
                # Unknown article layout: skip it so that content_text is never
                # referenced before assignment below.
                return
with open(file_dir + os.path.sep + re.sub(r_title, '', title, count=0) + '.txt', mode='wt', encoding='utf-8') as f:
f.write(content_text)
f.flush()
f.close()
print(title + '.txt has been written done!')
if __name__ == '__main__':
for i in range(2, 10, 1):
crawl(i)
        time.sleep(5)
| apache-2.0 | -1,926,016,843,630,203,600 | 34.855556 | 152 | 0.653092 | false | 2.785714 | false | false | false
mdavidsaver/spicetools | spicetools/view/mainwin_ui.py | 1 | 8609 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'spicetools/view/mainwin.ui'
#
# Created: Sun Apr 27 13:13:01 2014
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(746, 516)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon.svg")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout.addWidget(self.label_3)
self.sets = QtGui.QComboBox(self.centralwidget)
self.sets.setObjectName(_fromUtf8("sets"))
self.verticalLayout.addWidget(self.sets)
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.xaxis = QtGui.QComboBox(self.centralwidget)
self.xaxis.setObjectName(_fromUtf8("xaxis"))
self.verticalLayout.addWidget(self.xaxis)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_4 = QtGui.QLabel(self.centralwidget)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout.addWidget(self.label_4)
self.ops = QtGui.QComboBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ops.sizePolicy().hasHeightForWidth())
self.ops.setSizePolicy(sizePolicy)
self.ops.setObjectName(_fromUtf8("ops"))
self.horizontalLayout.addWidget(self.ops)
self.verticalLayout.addLayout(self.horizontalLayout)
self.label = QtGui.QLabel(self.centralwidget)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.signals = QtGui.QListWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.signals.sizePolicy().hasHeightForWidth())
self.signals.setSizePolicy(sizePolicy)
self.signals.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
self.signals.setObjectName(_fromUtf8("signals"))
self.verticalLayout.addWidget(self.signals)
self.horizontalLayout_3.addLayout(self.verticalLayout)
self.canvas = PlotArea(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.canvas.sizePolicy().hasHeightForWidth())
self.canvas.setSizePolicy(sizePolicy)
self.canvas.setObjectName(_fromUtf8("canvas"))
self.horizontalLayout_3.addWidget(self.canvas)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 746, 20))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menu_File = QtGui.QMenu(self.menubar)
self.menu_File.setObjectName(_fromUtf8("menu_File"))
self.menuAbout = QtGui.QMenu(self.menubar)
self.menuAbout.setObjectName(_fromUtf8("menuAbout"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtGui.QAction(MainWindow)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
self.actionE_xit = QtGui.QAction(MainWindow)
self.actionE_xit.setMenuRole(QtGui.QAction.QuitRole)
self.actionE_xit.setObjectName(_fromUtf8("actionE_xit"))
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionAboutQt = QtGui.QAction(MainWindow)
self.actionAboutQt.setObjectName(_fromUtf8("actionAboutQt"))
self.actionClose = QtGui.QAction(MainWindow)
self.actionClose.setObjectName(_fromUtf8("actionClose"))
self.actionReload = QtGui.QAction(MainWindow)
self.actionReload.setObjectName(_fromUtf8("actionReload"))
self.actionCloneWindow = QtGui.QAction(MainWindow)
self.actionCloneWindow.setObjectName(_fromUtf8("actionCloneWindow"))
self.menu_File.addAction(self.actionCloneWindow)
self.menu_File.addAction(self.actionOpen)
self.menu_File.addAction(self.actionReload)
self.menu_File.addAction(self.actionClose)
self.menu_File.addSeparator()
self.menu_File.addAction(self.actionE_xit)
self.menuAbout.addAction(self.actionAbout)
self.menuAbout.addAction(self.actionAboutQt)
self.menubar.addAction(self.menu_File.menuAction())
self.menubar.addAction(self.menuAbout.menuAction())
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.actionE_xit, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "SpiceView", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Vector Set", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "X Vector", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "Op:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Y Vector(s)", None, QtGui.QApplication.UnicodeUTF8))
self.signals.setSortingEnabled(True)
self.menu_File.setTitle(QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.menuAbout.setTitle(QtGui.QApplication.translate("MainWindow", "&Help", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen.setText(QtGui.QApplication.translate("MainWindow", "&Open", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+O", None, QtGui.QApplication.UnicodeUTF8))
self.actionE_xit.setText(QtGui.QApplication.translate("MainWindow", "E&xit", None, QtGui.QApplication.UnicodeUTF8))
self.actionE_xit.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
self.actionAbout.setText(QtGui.QApplication.translate("MainWindow", "About", None, QtGui.QApplication.UnicodeUTF8))
self.actionAboutQt.setText(QtGui.QApplication.translate("MainWindow", "About Qt", None, QtGui.QApplication.UnicodeUTF8))
self.actionClose.setText(QtGui.QApplication.translate("MainWindow", "&Close", None, QtGui.QApplication.UnicodeUTF8))
self.actionReload.setText(QtGui.QApplication.translate("MainWindow", "&Reload", None, QtGui.QApplication.UnicodeUTF8))
self.actionReload.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R", None, QtGui.QApplication.UnicodeUTF8))
self.actionCloneWindow.setText(QtGui.QApplication.translate("MainWindow", "Clone Wi&ndow", None, QtGui.QApplication.UnicodeUTF8))
from .plotarea import PlotArea
from . import viewer_rc
| gpl-3.0 | 8,151,757,473,375,844,000 | 58.784722 | 137 | 0.724707 | false | 4.043682 | false | false | false |
Jumpscale/jumpscale_portal8 | apps/portalbase/macros/page/action/3_action.py | 1 | 1083 | def main(j, args, params, tags, tasklet):
page = args.page
data = {'action': args.getTag('id'),
'class': args.getTag('class') or '',
'deleterow': args.getTag('deleterow') or 'false',
'label': args.getTag('label') or '',
}
extradata = {}
tags = j.data.tags.getObject(args.cmdstr, None)
for tagname, tagvalue in tags.getDict().items():
if tagname.startswith('data-'):
extradata[tagname[5:]] = tagvalue
data['data'] = j.data.serializer.json.dumps(extradata)
if data['class']:
data['label'] = "<span class='%(class)s'></span> %(label)s" % data
element = "<a class='js_action'" \
" data-action='%(action)s'" \
" data-extradata='%(data)s'" \
" data-deleterow='%(deleterow)s'" \
"href='javascript:void(0);'>%(label)s</a>" % data
page.addMessage(element)
page.addJS('/system/.files/js/action.js', header=False)
params.result = page
return params
def match(j, args, params, tags, tasklet):
return True
| apache-2.0 | 6,079,685,514,749,661,000 | 32.84375 | 74 | 0.554017 | false | 3.493548 | false | false | false |
volnrok/sortable-challenge | main.py | 1 | 3683 | import json
import random
import re
from check import check_match
from encoder import Encoder
from listing import Listing
from product import Product
# We'll sort products by manufacturer first
man_lookup = {}
# List of common manufacturer aliases
aliases = {
'agfaphoto': 'agfa',
'fuji': 'fujifilm',
'hewlett': 'hp',
'konica': 'konica minolta',
'sigmatek': 'sigma'
}
with open('products.txt', encoding='utf-8') as file:
for j in file:
product = Product(j)
man = product.manufacturer.lower()
# New manufacturer
if man not in man_lookup:
man_lookup[man] = []
# Enter product into data store
man_lookup[man].append(product)
with open('listings.txt', encoding='utf-8') as file:
mcount = 0
lcount = 0
man_cutoff = 3 # Only check the first few words for manufacturer matches
word_pattern = re.compile('\w+')
for j in file:
listing = Listing(j)
man = listing.manufacturer.lower()
if man not in man_lookup:
if man in aliases:
# First look for manufacturer aliases match
man = aliases[man]
else:
i = 0
# Try to find a manufacturer match, look for words in the listing title
for match in word_pattern.finditer(listing.title):
match_str = match.group(0).lower()
if match_str in aliases:
man = aliases[match_str]
break
if match_str in man_lookup:
man = match_str
break
i += 1
# Actual product matches (vs accessories) will have a manufacturer match in the first few words
if i >= man_cutoff:
break
if man in man_lookup:
model_matches = []
family_matches = []
for product in man_lookup[man]:
match = check_match(product, listing)
# Don't count model matches with single-character models
if match['m_match'] and len(product.model) > 1:
model_matches.append((product, match['m_index']))
if match['f_match'] >= 2:
family_matches.append((product, match['m_index']))
matched = False
if len(model_matches) == 1:
matched = model_matches[0]
elif len(family_matches) == 1:
matched = family_matches[0]
if matched:
# If the manufacturer is present in the title multiple times, check that the product model happens before the second
i = 0
second_index = 0
for man_match in re.finditer(man, listing.title, re.IGNORECASE):
i += 1
if i >= 2:
second_index = man_match.start(0)
break
if i >= 2 and second_index < matched[1]:
pass
else:
mcount += 1
matched[0].matches.append(listing)
lcount += 1
if lcount % 1000 == 0:
print('.', end='')
print()
print(lcount, 'listings read,', mcount, 'matches found')
with open('matches.txt', mode='w', encoding='utf-8') as file:
for man in man_lookup:
for product in man_lookup[man]:
if len(product.matches):
file.write(json.dumps(product, cls=Encoder, ensure_ascii=False))
file.write('\n')
print('Results saved to matches.txt')
| mit | -3,413,363,037,030,869,000 | 31.307018 | 132 | 0.519685 | false | 4.38975 | false | false | false |
Jorgesolis1989/SIVORE | corporaciones/views.py | 1 | 11935 | from django.shortcuts import render_to_response
from django.shortcuts import render ,redirect
from django.template.context import RequestContext
from corporaciones.models import Corporacion , Sede
from django.contrib.auth.decorators import permission_required
from corporaciones.forms import FormularioRegistroCorporacion, FormularioEditarCorporacion , FormularioCargar
from votantes.models import Votante
from planchas.models import Plancha
from candidatos.models import Candidato
from usuarios.models import Usuario
from django.db.models import Q
import csv
# -*- coding: utf-8 -*-
from io import StringIO
@permission_required("usuarios.Administrador" , login_url="/")
def registro_corporacion(request):
mensaje = ""
if request.method == 'POST' and "btncreate" in request.POST:
form = FormularioRegistroCorporacion(request.POST)
form2 = FormularioCargar(request.POST , request.FILES)
        # If the form is valid and contains data
if form.is_valid():
            # Capture the corporation id
id_corporation = form.cleaned_data["id_corporation"]
sede = form.cleaned_data["sede"]
            # Look up the corporation in the database.
try:
corporacion = Corporacion.objects.get(id_corporation=id_corporation , sede=sede)
except Corporacion.DoesNotExist:
corporacion = Corporacion()
corporacion_create(corporacion, form)
llamarMensaje = "exito_corporacion"
if form.cleaned_data["facultad"] is None or form.cleaned_data["sede"] is None:
mensaje = "La corporacion "+ str(id_corporation) +" se guardo correctamente"
else:
mensaje = "La corporacion "+ str(id_corporation) +" sede "+str(sede.nombre_sede)+" se guardo correctamente"
else:
if not corporacion.is_active:
corporacion_create(corporacion, form)
llamarMensaje = "exito_corporacion"
mensaje = "La corporación "+ str(id_corporation) +" se guardo correctamente"
else:
llamarMensaje = "fracaso_corporacion"
mensaje = "La corporacion "+ str(corporacion) + " ya esta registrada"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
form = FormularioRegistroCorporacion()
data = {
'form': form,
}
return render_to_response('registro_corporacion.html', data, context_instance=RequestContext(request))
elif request.method == 'POST' and "btnload" in request.POST:
form = FormularioRegistroCorporacion()
form2 = FormularioCargar(request.POST , request.FILES)
        # If the form is valid and contains data
if form2.is_valid():
try:
csvf = StringIO(request.FILES['file'].read().decode('ISO-8859-3'))
except Exception as e:
llamarMensaje = "fracaso_corporacion"
mensaje = "Error en el formato del archivo de entrada"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
reader = csv.reader(csvf, delimiter=';')
line=0
counter= 0
            # Check whether the campuses already exist; create them if not.
diccionarioSedes= {'CALI':'0', 'BUGA':'1', 'CAICEDONIA':'2', 'CARTAGO':'3', 'PACIFICO':'4',
'PALMIRA':'5','TULUA':'6', 'ZARZAL':'7', 'YUMBO':'14',
'SANTANDER DE QUILICHAO':'21'}
#COD. PROGRAMA;PROGRAMA ACADÉMICO;JORNADA;FACULTAD;SEDE
for nombre,codigo in diccionarioSedes.items():
try:
Sede.objects.get(codigo=codigo, is_active=True)
except Sede.DoesNotExist:
sede = Sede()
sede.codigo = codigo
sede.nombre_sede = nombre
try:
sede.save()
except Exception as e:
print("No se pudo guardar la sede" + sede.nombre_sede)
            # Check whether the faculties already exist; create them if not
diccionarioFacultad= { 'CONSEJO SUPERIOR':'1', 'CONSEJO ACADÉMICO':'2',
'INGENIERÍA':'3', 'CIENCIAS DE LA ADMINISTRACIÓN':'4',
'CIENCIAS NATURALES Y EXACTAS':'5', 'HUMANIDADES':'6',
'CIENCIAS SOCIALES Y ECONÓMICAS':'7', 'ARTES INTEGRADAS':'8',
'SALUD':'9', 'INSTITUTO DE EDUCACIÓN Y PEDAGOGÍA':'10' ,
'INSTITUTO DE PSICOLOGÍA': '11'}
            # Create the faculties
for nombre,codigo in diccionarioFacultad.items():
try:
Corporacion.objects.get(id_corporation=codigo, is_active=True)
except Corporacion.DoesNotExist:
corporacion = Corporacion()
corporacion.id_corporation = codigo
                    # Codes of the corporations that do not belong to a faculty
if codigo not in {'1' , '2' , '10', '11'}:
corporacion.name_corporation = "FACULTAD DE " + nombre
else:
corporacion.name_corporation = nombre
try:
corporacion.save()
except Exception as e:
print("No se pudo guardar la corporacion" + corporacion.name_corporation)
#COD. PROGRAMA;PROGRAMA ACADÉMICO;JORNADA;FACULTAD;SEDE
            # Now create the corporations
for row in reader:
if line > 0:
try:
Corporacion.objects.get(id_corporation=row[0] , sede__codigo=diccionarioSedes.get(row[4]) , is_active=True)
except Corporacion.DoesNotExist:
corporacion = Corporacion()
try:
corporacion.id_corporation = row[0]
corporacion.name_corporation = row[1]
corporacion.facultad = Corporacion.objects.get(id_corporation=diccionarioFacultad.get(row[3]))
sede = diccionarioSedes.get(row[4])
corporacion.sede = Sede.objects.get(codigo=sede)
corporacion.save()
counter+= 1
except Exception as e:
print(e)
except Exception as e:
llamarMensaje = "fracaso_corporacion"
mensaje = "Hubo un problema con el archivo de entrada, no coinciden los datos de entrada con la especificaciones dadaas"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
                line += 1
            # Look up the corporation in the database.
llamarMensaje = "exito_corporacion"
mensaje = "Se crearon " + str(counter)+" corporacion(es) sactisfactoriamente"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
form = FormularioRegistroCorporacion()
form2 = FormularioCargar()
return render(request, 'registro_corporacion.html',{'mensaje': mensaje, 'form': form , 'form2': form2})
# View to list corporations
@permission_required("usuarios.Administrador", login_url="/")
def listar_corporacion(request):
llamarMensaje = request.session.pop('llamarMensaje', None)
mensaje = request.session.pop('mensaje', None)
corporaciones = Corporacion.objects.filter(is_active=True)
return render(request, 'listar_corporaciones.html', {'corporaciones': corporaciones , 'llamarMensaje': llamarMensaje,'mensaje':mensaje })
# Editing users
@permission_required("usuarios.Administrador" , login_url="/")
def editar_corporacion(request, id_corporation=None):
corporacion = Corporacion.objects.get(id=id_corporation)
if request.method == 'POST':
form = FormularioEditarCorporacion(request.POST)
        # If the form is valid and contains data
if form.is_valid():
            # Capture the corporation id
corporacion.name_corporation = form.cleaned_data["name_corporation"]
corporacion.facultad = form.cleaned_data["facultad"]
corporacion.sede = form.cleaned_data["sede"]
            # Update the corporation in the DB, printing any exception
try:
corporacion.save()
except Exception as e:
print(e)
            # Look up the corporation in the database.
llamarMensaje = "edito_corporacion"
mensaje = "Se editó la corporacion " +str(corporacion) +" sactisfactoriamente"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
print("por aca")
else:
if id_corporation is None:
return render(request, 'administrador.html')
else:
form = FormularioEditarCorporacion()
form.initial = {'id_corporation': corporacion.id_corporation, 'name_corporation': corporacion.name_corporation, 'facultad': corporacion.facultad,
'sede': corporacion.sede}
if corporacion.facultad is not None:
form.fields["facultad"].empty_label = None
form.fields["sede"].empty_label = None
else:
form.fields['facultad'].widget.attrs['disabled'] = True
form.fields['sede'].widget.attrs['disabled'] = True
return render(request, 'editar_corporacion.html', {'form': form})
# This method does not delete from the database; it deactivates the corporation instead
@permission_required("usuarios.Administrador", login_url="/")
def eliminar_corporacion(request, id_corporation=None):
if request.method == 'POST':
corporacion=Corporacion.objects.get(id=id_corporation)
        # Fetch the voters of the corporation
votantes_corporacion = Votante.objects.filter((Q(plan__facultad__id=corporacion.id) | Q(plan__id=corporacion.id)) & Q(is_active=True))
        # If the corporation has voters
if votantes_corporacion:
llamarMensaje = "fracaso_usuario"
mensaje = "No se eliminó la corporacion " + str(id_corporation) +" porque tiene votantes asociados"
else:
corporacion.is_active = False
llamarMensaje = "exito_usuario"
mensaje = "Se eliminó la corporacion " + str(id_corporation) +" sactisfactoriamente"
try:
corporacion.save()
except Exception as e:
print(e)
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
def corporacion_create(corporacion, form):
corporacion.id_corporation= form.cleaned_data["id_corporation"]
corporacion.name_corporation= form.cleaned_data["name_corporation"]
corporacion.facultad= form.cleaned_data["facultad"]
corporacion.sede= form.cleaned_data["sede"]
corporacion.is_active = True
try:
corporacion.save()
except Exception as e:
print(e)
| apache-2.0 | 9,110,471,766,066,271,000 | 43.155556 | 157 | 0.585472 | false | 3.509567 | false | false | false |
teamCarel/EyeTracker | src/shared_modules/calibration_routines/finish_calibration.py | 1 | 17482 | '''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import os
import numpy as np
from . import calibrate
from math_helper import *
from file_methods import load_object,save_object
from . camera_intrinsics_estimation import load_camera_calibration
from . optimization_calibration import bundle_adjust_calibration
from . calibrate import find_rigid_transform
#logging
import logging
logger = logging.getLogger(__name__)
from . gaze_mappers import *
not_enough_data_error_msg = 'Did not collect enough data during calibration.'
solver_failed_to_converge_error_msg = 'Parameters could not be estimated from data.'
def finish_calibration(g_pool,pupil_list,ref_list):
if pupil_list and ref_list:
pass
else:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
camera_intrinsics = load_camera_calibration(g_pool)
    # match eye data and check if binocular and/or monocular
pupil0 = [p for p in pupil_list if p['id']==0]
pupil1 = [p for p in pupil_list if p['id']==1]
#TODO unify this and don't do both
matched_binocular_data = calibrate.closest_matches_binocular(ref_list,pupil_list)
matched_pupil0_data = calibrate.closest_matches_monocular(ref_list,pupil0)
matched_pupil1_data = calibrate.closest_matches_monocular(ref_list,pupil1)
if len(matched_pupil0_data)>len(matched_pupil1_data):
matched_monocular_data = matched_pupil0_data
else:
matched_monocular_data = matched_pupil1_data
logger.info('Collected {} monocular calibration data.'.format(len(matched_monocular_data)))
logger.info('Collected {} binocular calibration data.'.format(len(matched_binocular_data)))
mode = g_pool.detection_mapping_mode
if mode == '3d' and not camera_intrinsics:
mode = '2d'
logger.warning("Please calibrate your world camera using 'camera intrinsics estimation' for 3d gaze mapping.")
if mode == '3d':
hardcoded_translation0 = np.array([20,15,-20])
hardcoded_translation1 = np.array([-40,15,-20])
if matched_binocular_data:
method = 'binocular 3d model'
            #TODO model the world as cv2 pinhole camera with distortion and focal in ceres.
# right now we solve using a few permutations of K
smallest_residual = 1000
scales = list(np.linspace(0.7,1.4,20))
K = camera_intrinsics["camera_matrix"]
for s in scales:
scale = np.ones(K.shape)
scale[0,0] *= s
scale[1,1] *= s
camera_intrinsics["camera_matrix"] = K*scale
ref_dir, gaze0_dir, gaze1_dir = calibrate.preprocess_3d_data(matched_binocular_data,
camera_intrinsics = camera_intrinsics )
if len(ref_dir) < 1 or len(gaze0_dir) < 1 or len(gaze1_dir) < 1:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
sphere_pos0 = pupil0[-1]['sphere']['center']
sphere_pos1 = pupil1[-1]['sphere']['center']
initial_R0,initial_t0 = find_rigid_transform(np.array(gaze0_dir)*500,np.array(ref_dir)*500)
initial_rotation0 = math_helper.quaternion_from_rotation_matrix(initial_R0)
initial_translation0 = np.array(initial_t0).reshape(3)
initial_R1,initial_t1 = find_rigid_transform(np.array(gaze1_dir)*500,np.array(ref_dir)*500)
initial_rotation1 = math_helper.quaternion_from_rotation_matrix(initial_R1)
initial_translation1 = np.array(initial_t1).reshape(3)
eye0 = { "observations" : gaze0_dir , "translation" : hardcoded_translation0 , "rotation" : initial_rotation0,'fix':['translation'] }
eye1 = { "observations" : gaze1_dir , "translation" : hardcoded_translation1 , "rotation" : initial_rotation1,'fix':['translation'] }
world = { "observations" : ref_dir , "translation" : (0,0,0) , "rotation" : (1,0,0,0),'fix':['translation','rotation'],'fix':['translation','rotation'] }
initial_observers = [eye0,eye1,world]
initial_points = np.array(ref_dir)*500
success,residual, observers, points = bundle_adjust_calibration(initial_observers , initial_points, fix_points=False )
if residual <= smallest_residual:
smallest_residual = residual
scales[-1] = s
if not success:
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
logger.error("Calibration solver faild to converge.")
return
eye0,eye1,world = observers
t_world0 = np.array(eye0['translation'])
R_world0 = math_helper.quaternion_rotation_matrix(np.array(eye0['rotation']))
t_world1 = np.array(eye1['translation'])
R_world1 = math_helper.quaternion_rotation_matrix(np.array(eye1['rotation']))
def toWorld0(p):
return np.dot(R_world0, p)+t_world0
def toWorld1(p):
return np.dot(R_world1, p)+t_world1
points_a = [] #world coords
points_b = [] #eye0 coords
points_c = [] #eye1 coords
for a,b,c,point in zip(world['observations'] , eye0['observations'],eye1['observations'],points):
line_a = np.array([0,0,0]) , np.array(a) #observation as line
line_b = toWorld0(np.array([0,0,0])) , toWorld0(b) #eye0 observation line in world coords
line_c = toWorld1(np.array([0,0,0])) , toWorld1(c) #eye1 observation line in world coords
close_point_a,_ = math_helper.nearest_linepoint_to_point( point , line_a )
close_point_b,_ = math_helper.nearest_linepoint_to_point( point , line_b )
close_point_c,_ = math_helper.nearest_linepoint_to_point( point , line_c )
points_a.append(close_point_a)
points_b.append(close_point_b)
points_c.append(close_point_c)
# we need to take the sphere position into account
# orientation and translation are referring to the sphere center.
# but we want to have it referring to the camera center
# since the actual translation is in world coordinates, the sphere translation needs to be calculated in world coordinates
sphere_translation = np.array( sphere_pos0 )
sphere_translation_world = np.dot( R_world0 , sphere_translation)
camera_translation = t_world0 - sphere_translation_world
eye_camera_to_world_matrix0 = np.eye(4)
eye_camera_to_world_matrix0[:3,:3] = R_world0
eye_camera_to_world_matrix0[:3,3:4] = np.reshape(camera_translation, (3,1) )
sphere_translation = np.array( sphere_pos1 )
sphere_translation_world = np.dot( R_world1 , sphere_translation)
camera_translation = t_world1 - sphere_translation_world
eye_camera_to_world_matrix1 = np.eye(4)
eye_camera_to_world_matrix1[:3,:3] = R_world1
eye_camera_to_world_matrix1[:3,3:4] = np.reshape(camera_translation, (3,1) )
g_pool.plugins.add(Binocular_Vector_Gaze_Mapper,args={
'eye_camera_to_world_matrix0':eye_camera_to_world_matrix0,
'eye_camera_to_world_matrix1':eye_camera_to_world_matrix1 ,
'camera_intrinsics': camera_intrinsics ,
'cal_points_3d': points,
'cal_ref_points_3d': points_a,
'cal_gaze_points0_3d': points_b,
'cal_gaze_points1_3d': points_c})
elif matched_monocular_data:
method = 'monocular 3d model'
            #TODO model the world as cv2 pinhole camera with distortion and focal in ceres.
# right now we solve using a few permutations of K
smallest_residual = 1000
scales = list(np.linspace(0.7,1.4,20))
K = camera_intrinsics["camera_matrix"]
for s in scales:
scale = np.ones(K.shape)
scale[0,0] *= s
scale[1,1] *= s
camera_intrinsics["camera_matrix"] = K*scale
ref_dir , gaze_dir, _ = calibrate.preprocess_3d_data(matched_monocular_data,
camera_intrinsics = camera_intrinsics )
# save_object((ref_dir,gaze_dir),os.path.join(g_pool.user_dir, "testdata"))
if len(ref_dir) < 1 or len(gaze_dir) < 1:
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
logger.error(not_enough_data_error_msg + " Using:" + method)
return
### monocular calibration strategy: mimize the reprojection error by moving the world camera.
# we fix the eye points and work in the eye coord system.
initial_R,initial_t = find_rigid_transform(np.array(ref_dir)*500,np.array(gaze_dir)*500)
initial_rotation = math_helper.quaternion_from_rotation_matrix(initial_R)
initial_translation = np.array(initial_t).reshape(3)
# this problem is scale invariant so we scale to some sensical value.
if matched_monocular_data[0]['pupil']['id'] == 0:
hardcoded_translation = hardcoded_translation0
else:
hardcoded_translation = hardcoded_translation1
eye = { "observations" : gaze_dir , "translation" : (0,0,0) , "rotation" : (1,0,0,0),'fix':['translation','rotation'] }
world = { "observations" : ref_dir , "translation" : np.dot(initial_R,-hardcoded_translation) , "rotation" : initial_rotation,'fix':['translation'] }
initial_observers = [eye,world]
initial_points = np.array(gaze_dir)*500
success,residual, observers, points_in_eye = bundle_adjust_calibration(initial_observers , initial_points, fix_points=True )
if residual <= smallest_residual:
smallest_residual = residual
scales[-1] = s
eye, world = observers
if not success:
logger.error("Calibration solver faild to converge.")
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
#pose of the world in eye coords.
rotation = np.array(world['rotation'])
t_world = np.array(world['translation'])
R_world = math_helper.quaternion_rotation_matrix(rotation)
# inverse is pose of eye in world coords
R_eye = R_world.T
t_eye = np.dot(R_eye,-t_world)
def toWorld(p):
return np.dot(R_eye, p)+np.array(t_eye)
points_in_world = [toWorld(p) for p in points_in_eye]
points_a = [] #world coords
points_b = [] #cam2 coords
for a,b,point in zip(world['observations'] , eye['observations'],points_in_world):
line_a = np.array([0,0,0]) , np.array(a) #observation as line
line_b = toWorld(np.array([0,0,0])) , toWorld(b) #cam2 observation line in cam1 coords
close_point_a,_ = math_helper.nearest_linepoint_to_point( point , line_a )
close_point_b,_ = math_helper.nearest_linepoint_to_point( point , line_b )
# print np.linalg.norm(point-close_point_a),np.linalg.norm(point-close_point_b)
points_a.append(close_point_a)
points_b.append(close_point_b)
# we need to take the sphere position into account
# orientation and translation are referring to the sphere center.
# but we want to have it referring to the camera center
# since the actual translation is in world coordinates, the sphere translation needs to be calculated in world coordinates
sphere_translation = np.array( matched_monocular_data[-1]['pupil']['sphere']['center'] )
sphere_translation_world = np.dot( R_eye , sphere_translation)
camera_translation = t_eye - sphere_translation_world
eye_camera_to_world_matrix = np.eye(4)
eye_camera_to_world_matrix[:3,:3] = R_eye
eye_camera_to_world_matrix[:3,3:4] = np.reshape(camera_translation, (3,1) )
g_pool.plugins.add(Vector_Gaze_Mapper,args=
{'eye_camera_to_world_matrix':eye_camera_to_world_matrix ,
'camera_intrinsics': camera_intrinsics ,
'cal_points_3d': points_in_world,
'cal_ref_points_3d': points_a,
'cal_gaze_points_3d': points_b,
'gaze_distance':500})
else:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
elif mode == '2d':
if matched_binocular_data:
method = 'binocular polynomial regression'
cal_pt_cloud_binocular = calibrate.preprocess_2d_data_binocular(matched_binocular_data)
cal_pt_cloud0 = calibrate.preprocess_2d_data_monocular(matched_pupil0_data)
cal_pt_cloud1 = calibrate.preprocess_2d_data_monocular(matched_pupil1_data)
map_fn,inliers,params = calibrate.calibrate_2d_polynomial(cal_pt_cloud_binocular,g_pool.capture.frame_size,binocular=True)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
map_fn,inliers,params_eye0 = calibrate.calibrate_2d_polynomial(cal_pt_cloud0,g_pool.capture.frame_size,binocular=False)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
map_fn,inliers,params_eye1 = calibrate.calibrate_2d_polynomial(cal_pt_cloud1,g_pool.capture.frame_size,binocular=False)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
g_pool.plugins.add(Binocular_Gaze_Mapper,args={'params':params, 'params_eye0':params_eye0, 'params_eye1':params_eye1})
elif matched_monocular_data:
method = 'monocular polynomial regression'
cal_pt_cloud = calibrate.preprocess_2d_data_monocular(matched_monocular_data)
map_fn,inliers,params = calibrate.calibrate_2d_polynomial(cal_pt_cloud,g_pool.capture.frame_size,binocular=False)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
g_pool.plugins.add(Monocular_Gaze_Mapper,args={'params':params})
else:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
ts = g_pool.get_timestamp()
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.successful','method':method,'timestamp': ts, 'record':True})
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.calibration_data','timestamp': ts, 'pupil_list':pupil_list,'ref_list':ref_list,'calibration_method':method,'record':True})
    # this is only used by show calibration. TODO: rewrite show calibration.
user_calibration_data = {'timestamp': ts,'pupil_list':pupil_list,'ref_list':ref_list,'calibration_method':method}
save_object(user_calibration_data,os.path.join(g_pool.user_dir, "user_calibration_data"))
| lgpl-3.0 | 1,249,891,660,788,555,000 | 50.875371 | 194 | 0.605709 | false | 3.668065 | false | false | false |
nfqsolutions/pylm | tests/test_services/test_subscribed_client.py | 1 | 2736 | import concurrent.futures
import time
from concurrent.futures import ThreadPoolExecutor
import zmq
from pylm.clients import Client
from pylm.parts.core import zmq_context
from pylm.parts.messages_pb2 import PalmMessage
def fake_server(messages=1):
db_socket = zmq_context.socket(zmq.REP)
db_socket.bind('inproc://db')
pull_socket = zmq_context.socket(zmq.PULL)
pull_socket.bind('inproc://pull')
pub_socket = zmq_context.socket(zmq.PUB)
pub_socket.bind('inproc://pub')
# PUB-SUB takes a while
time.sleep(1.0)
for i in range(messages):
message_data = pull_socket.recv()
print(i)
message = PalmMessage()
message.ParseFromString(message_data)
topic = message.client
pub_socket.send_multipart([topic.encode('utf-8'), message_data])
def test_subscribed_client_single():
got = []
client = Client(
server_name='someserver',
db_address='inproc://db',
push_address='inproc://pull',
sub_address='inproc://pub',
this_config=True)
with ThreadPoolExecutor(max_workers=2) as executor:
results = [
executor.submit(fake_server, messages=2),
executor.submit(client.job, 'f', [b'1', b'2'], messages=2)
]
for future in concurrent.futures.as_completed(results):
try:
result = future.result()
if result:
for r in result:
got.append(r)
except Exception as exc:
print(exc)
assert len(got) == 2
def test_subscribed_client_multiple():
got = []
client = Client(
server_name='someserver',
db_address='inproc://db',
sub_address='inproc://pub',
push_address='inproc://pull',
this_config=True)
client1 = Client(
server_name='someserver',
db_address='inproc://db',
sub_address='inproc://pub',
push_address='inproc://pull',
this_config=True)
with ThreadPoolExecutor(max_workers=2) as executor:
results = [
executor.submit(fake_server, messages=4),
executor.submit(client.job, 'f', [b'1', b'2'], messages=2),
executor.submit(client1.job, 'f', [b'a', b'b'], messages=2)
]
for future in concurrent.futures.as_completed(results):
try:
result = future.result()
if result:
for r in result:
got.append(r)
except Exception as exc:
print(exc)
assert len(got) == 4
if __name__ == '__main__':
test_subscribed_client_single()
test_subscribed_client_multiple()
| agpl-3.0 | -7,718,448,006,989,908,000 | 25.823529 | 72 | 0.56981 | false | 3.858956 | false | false | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/sesame.py | 1 | 3792 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.tools.sesame Contains the Sesame class.
# -----------------------------------------------------------------
"""
Created on Mar 13, 2011
Sesame class to access Sesame name resolver service
Based on 2005-06-11 by Shui Hung Kwok
See http://cdsweb.u-strasbg.fr/doc/sesame.htx for description of Sesame
@author: shkwok
"""
from urllib2 import urlopen
#from xparser.XParser import XParser
#from .. import XParser
# -----------------------------------------------------------------
class Sesame (object):
"""
This class ...
"""
CatalogOpt = "SNV" # S simbad, N ned, V vizier, A All
OutputOpt = "oxp" # xp for xml as text/plain rather then text/xml (-ox)
SesameURL = "http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame"
def __init__(self, urn=SesameURL, opt=CatalogOpt, opt1=OutputOpt):
"""
Initializes Sesame URL and options
Default options are SNV for CatalogOpt
and -oxp for OutputOpt.
SNV = Simbad + NED + Vizier and A for All
The order indicates the order to search.
"All" means search all services, otherwise stops when
first entry found.
Output options start with -o
followed by
x : xml output
p : plain text
I : include all identifiers
"""
self.catOpt = opt
self.outOpt = opt1
self.urn = urn
# Sesame
def getCoord(self, node):
"""
Helper method to extract ra and dec from node
"""
res = node.getResource("/Sesame/Target");
resolvers = res.getChildren ("Resolver")
for r in resolvers:
try:
ra = float (r.getResourceContent("/Resolver/jradeg").strip())
dec = float (r.getResourceContent("/Resolver/jdedeg").strip())
return ra, dec
except Exception:
raise Exception, "invalid coordinates"
else:
raise Exception, "no ra/dec values found"
# getCoord
def getAliases(self):
"""
Extracts aliases for the given target.
Returns a list of names.
"""
res = []
for resolver in self.xml.root.Sesame.Resolver:
try:
for a in resolver.alias:
res.append (a.content)
except:
pass
return res
def buildQuery(self, name, all=True):
"""
Builds query URL for use with HTTP GET
If all is true, then all known identifiers shall be returned.
"""
opt = self.catOpt
opt1 = '-' + self.outOpt
if all:
opt += 'A'
opt1 += 'I' # all identifiers
queryURL = "%s/%s/%s?%s" % (self.urn, opt1, opt, name)
return queryURL
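    # --- Illustrative usage (editor's sketch, not part of the original class) ---
    # With the default options, buildQuery("M31") yields a URL of the form
    #   http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/-oxpI/SNVA?M31
    # resolveRaw() fetches that URL directly, while resolve() additionally needs
    # an XParser implementation (its import is commented out at the top of this
    # module in the original):
    #
    #     sesame = Sesame()
    #     ra, dec = sesame.resolve("M31")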
def resolveRaw(self, name, all=True):
"""
Performs a raw query.
Returns what the server returns.
"""
query = self.buildQuery (name, all)
print "query=", query
hcon = urlopen (query)
res = hcon.read ()
hcon.close ()
return res
def resolve(self, name, all=True):
"""
Performs a query.
Returns ra and dec
"""
query = self.buildQuery(name, all)
xp = XParser()
xn = xp.parseFromFile(query)
        return self.getCoord(xn)
| mit | -434,090,002,494,834,500 | 27.727273 | 78 | 0.498813 | false | 4.193584 | false | false | false
containers-ftw/cftw | cftw/utils.py | 1 | 8502 | '''
utils.py: part of cftw package
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import errno
import fnmatch
import os
import json
import re
import shutil
import simplejson
from cftw.logger import bot
import sys
import subprocess
import tempfile
import tarfile
import zipfile
######################################################################################
# Local commands and requests
######################################################################################
def get_installdir():
'''get_installdir returns the installation directory of the application
'''
return os.path.abspath(os.path.dirname(__file__))
def run_command(cmd,error_message=None,sudopw=None,suppress=False):
'''run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
:param execute: if True, will add `` around command (default is False)
:param sudopw: if specified (not None) command will be run asking for sudo
'''
if sudopw == None:
sudopw = os.environ.get('pancakes',None)
if sudopw != None:
cmd = ' '.join(["echo", sudopw,"|","sudo","-S"] + cmd)
if suppress == False:
output = os.popen(cmd).read().strip('\n')
else:
output = cmd
os.system(cmd)
else:
try:
process = subprocess.Popen(cmd,stdout=subprocess.PIPE)
output, err = process.communicate()
except OSError as error:
            bot.error(str(error))
return None
return output
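# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Typical call without sudo (assuming the 'pancakes' environment variable is
# unset); the command is given as a list and the captured stdout is returned
# (None if the subprocess raises OSError):
#
#     out = run_command(["ls", "-1", "/tmp"])
#     if out is not None:
#         print(out)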
############################################################################
## FILE OPERATIONS #########################################################
############################################################################
def write_file(filename,content,mode="w"):
'''write_file will open a file, "filename" and write content, "content"
and properly close the file
'''
with open(filename,mode) as filey:
filey.writelines(content)
return filename
def write_json(json_obj,filename,mode="w",print_pretty=True):
'''write_json will (optionally,pretty print) a json object to file
:param json_obj: the dict to print to json
:param filename: the output file to write to
:param pretty_print: if True, will use nicer formatting
'''
with open(filename,mode) as filey:
if print_pretty == True:
filey.writelines(simplejson.dumps(json_obj, indent=4, separators=(',', ': ')))
else:
filey.writelines(simplejson.dumps(json_obj))
return filename
def read_file(filename,mode="r"):
'''write_file will open a file, "filename" and write content, "content"
and properly close the file
'''
with open(filename,mode) as filey:
content = filey.readlines()
return content
def read_json(filename,mode="r"):
'''read_json will open a file, "filename" and read the string as json
'''
with open(filename,mode) as filey:
content = json.loads(filey.read())
return content
def recursive_find(base,pattern=None):
'''recursive find dicoms will search for dicom files in all directory levels
below a base. It uses get_dcm_files to find the files in the bases.
'''
if pattern is None:
pattern = "*"
files = []
for root, dirnames, filenames in os.walk(base):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
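# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Collect every .dcm file below a base directory; with pattern omitted every
# file is returned:
#
#     dicom_files = recursive_find("/data/study1", pattern="*.dcm")
#     all_files = recursive_find("/data/study1")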
############################################################################
## FOLDER OPERATIONS #########################################################
############################################################################
def mkdir_p(path):
'''mkdir_p attempts to get the same functionality as mkdir -p
:param path: the path to create.
'''
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
bot.error("Error creating path %s, exiting." %path)
sys.exit(1)
def tree(base):
'''print a simple directory tree, primarily for showing
content of templates'''
for root, dirs, files in os.walk(base):
level = root.replace(base, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
############################################################################
## COMPRESSED FILES ########################################################
############################################################################
def detect_compressed(folder,compressed_types=None):
'''detect compressed will return a list of files in
some folder that are compressed, by default this means
.zip or .tar.gz, but the called can specify a custom list
:param folder: the folder base to use.
:param compressed_types: a list of types to include, should
be extensions in format like *.tar.gz, *.zip, etc.
'''
compressed = []
if compressed_types == None:
compressed_types = ["*.tar.gz",'*zip']
bot.debug("Searching for %s" %", ".join(compressed_types))
for filey in os.listdir(folder):
for compressed_type in compressed_types:
if fnmatch.fnmatch(filey, compressed_type):
compressed.append("%s/%s" %(folder,filey))
bot.debug("Found %s compressed files in %s" %len(compressed),folder)
return compressed
def unzip_dir(zip_file,dest_dir=None):
'''unzip_dir will extract a zipfile to a directory. If
an extraction destination is not defined, a temporary
directory will be created and used.
:param zip_file: the .zip file to unzip
:param dest_dir: the destination directory
'''
if dest_dir == None:
dest_dir = tempfile.mkdtemp()
with zipfile.ZipFile(zip_file,"r") as zf:
zf.extractall(dest_dir)
return dest_dir
def zip_dir(zip_dir, zip_name, output_folder=None):
'''zip_dir will zip up and entire zip directory
:param folder_path: the folder to zip up
:param zip_name: the name of the zip to return
:output_folder:
'''
tmpdir = tempfile.mkdtemp()
output_zip = "%s/%s" %(tmpdir,zip_name)
zf = zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED, allowZip64=True)
for root, dirs, files in os.walk(zip_dir):
for file in files:
zf.write(os.path.join(root, file))
zf.close()
if output_folder != None:
shutil.copyfile(output_zip,"%s/%s"%(output_folder,zip_name))
shutil.rmtree(tmpdir)
output_zip = "%s/%s"%(output_folder,zip_name)
return output_zip
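# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Round trip: compress a folder into /tmp, then extract the archive again
# (unzip_dir creates a temporary destination when dest_dir is omitted):
#
#     archive = zip_dir("/path/to/folder", "results.zip", output_folder="/tmp")
#     extracted_dir = unzip_dir(archive)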
def untar_dir(tar_file,dest_dir=None):
'''untar_dir will extract a tarfile to a directory. If
an extraction destination is not defined, a temporary
directory will be created and used.
:param tar_file: the .tar.gz file to untar/decompress
:param dest_dir: the destination directory
'''
if dest_dir == None:
dest_dir = tempfile.mkdtemp()
contents = []
if tarfile.is_tarfile(tar_file):
with tarfile.open(tar_file) as tf:
tf.extractall(dest_dir)
return dest_dir
| mit | 8,000,063,582,081,847,000 | 32.472441 | 90 | 0.597389 | false | 4.217262 | false | false | false |